Tracker more sensitive and cv_renderer animates lines
parent 888ef7ff93
commit cebe102e74
10 changed files with 211 additions and 106 deletions
@@ -2,10 +2,10 @@
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
 tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.05 # threshold for the first association
+track_high_thresh: 0.0001 # threshold for the first association
-track_low_thresh: 0.01 # threshold for the second association
+track_low_thresh: 0.0001 # threshold for the second association
-new_track_thresh: 0.1 # threshold for init new track if the detection does not match any tracks
+new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
-track_buffer: 35 # buffer to calculate the time when to remove tracks
+track_buffer: 50 # buffer to calculate the time when to remove tracks
-match_thresh: 0.9 # threshold for matching tracks
+match_thresh: 0.95 # threshold for matching tracks
 fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
@@ -54,10 +54,10 @@ class AnimationRenderer:
         self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
         self.tracker_sock.connect(config.zmq_trajectory_addr)

-        self.frame_sock = context.socket(zmq.SUB)
-        self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
-        self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
-        self.frame_sock.connect(config.zmq_frame_addr)
+        self.frame_noimg_sock = context.socket(zmq.SUB)
+        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
+        self.frame_noimg_sock.setsockopt(zmq.SUBSCRIBE, b'')
+        self.frame_noimg_sock.connect(config.zmq_frame_noimg_addr)

         self.H = self.config.H

@@ -324,9 +324,10 @@ class AnimationRenderer:
         new_tracks = False
         try:
-            self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
+            self.frame: Frame = self.frame_noimg_sock.recv_pyobj(zmq.NOBLOCK)
             if not self.first_time:
                 self.first_time = self.frame.time
+            if self.frame.img:
                 img = self.frame.img
                 # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
                 img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
@@ -253,6 +253,11 @@ connection_parser.add_argument('--zmq-frame-addr',
                     type=str,
                     default="ipc:///tmp/feeds_frame")

+connection_parser.add_argument('--zmq-frame-noimg-addr',
+                    help='Manually specity communication addr for the frame messages',
+                    type=str,
+                    default="ipc:///tmp/feeds_frame2")
+
 connection_parser.add_argument('--ws-port',
                     help='Port to listen for incomming websocket connections. Also serves the testing html-page.',
@@ -334,7 +334,7 @@ class CvRenderer:



-    def run(self):
+    def run(self, timer_counter):
         frame = None
         prediction_frame = None
         tracker_frame = None
@@ -349,6 +349,8 @@ class CvRenderer:

         while self.is_running.is_set():
             i+=1
+            with timer_counter.get_lock():
+                timer_counter.value+=1


             # zmq_ev = self.frame_sock.poll(timeout=2000)
@@ -406,10 +408,10 @@ class CvRenderer:
             # clear out old tracks & predictions:

             for track_id, track in list(self.tracks.items()):
-                if get_opacity(track, frame) == 0:
+                if get_animation_position(track, frame) == 1:
                     self.tracks.pop(track_id)
             for prediction_id, track in list(self.predictions.items()):
-                if get_opacity(track, frame) == 0:
+                if get_animation_position(track, frame) == 1:
                     self.predictions.pop(prediction_id)

         logger.info('Stopping')
@@ -443,10 +445,10 @@ colorset = [
     # (0,0,0),
 # ]

-def get_opacity(track: Track, current_frame: Frame):
-    fade_duration = current_frame.camera.fps * 1.5
+def get_animation_position(track: Track, current_frame: Frame):
+    fade_duration = current_frame.camera.fps * 3
     diff = current_frame.index - track.history[-1].frame_nr
-    return max(0, 1 - diff / fade_duration)
+    return max(0, min(1, diff / fade_duration))
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)

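Note: `get_animation_position` inverts the old `get_opacity` semantics: rather than fading from 1 to 0, the value ramps from 0 (fresh detection) to 1 (three seconds stale), which is why the eviction checks in the previous hunk now test `== 1` instead of `== 0`. A quick spot-check of the ramp, assuming a 12 fps camera:

    fade_duration = 12 * 3                 # current_frame.camera.fps * 3 = 36 frames
    for diff in (0, 18, 36, 72):           # frames since the track's last detection
        print(diff, max(0, min(1, diff / fade_duration)))
    # 0 -> 0.0, 18 -> 0.5, 36 -> 1.0 (eligible for eviction), 72 -> 1 (clamped)
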
@@ -497,8 +499,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
             inv_H = np.linalg.pinv(prediction_frame.H)
             # draw_track(img, track, int(track_id))
             draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
-            opacity = get_opacity(track, frame)
-            draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, opacity=opacity)
+            anim_position = get_animation_position(track, frame)
+            draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, anim_position=anim_position)
             cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)

     base_color = (255,)*3
@@ -532,6 +534,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     return img


-def run_cv_renderer(config: Namespace, is_running: BaseEvent):
+def run_cv_renderer(config: Namespace, is_running: BaseEvent, timer_counter):
     renderer = CvRenderer(config, is_running)
-    renderer.run()
+    renderer.run(timer_counter)
@@ -420,6 +420,9 @@ class Frame:
             } for t in self.tracks.values()
         }

+    def without_img(self):
+        return Frame(self.index, None, self.time, self.tracks, self.H, self.camera)
+
 def video_src_from_config(config) -> UrlOrPath:
     if config.video_loop:
         video_srcs: Iterable[UrlOrPath] = cycle(config.video_src)
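Note: `without_img()` exists so the new no-image PUB socket (next hunk) can publish track metadata without serializing raw frames; subscribers like the animation renderer then never pay the pickling cost of the image array. A minimal sketch of the payload difference, using a hypothetical stand-in dataclass rather than trap's actual `Frame`:

    import pickle
    from dataclasses import dataclass, replace
    from typing import Optional

    import numpy as np

    @dataclass
    class MiniFrame:                  # hypothetical stand-in, not trap's Frame
        index: int
        img: Optional[np.ndarray]
        time: float

    frame = MiniFrame(0, np.zeros((1080, 1920, 3), dtype=np.uint8), 0.0)
    print(len(pickle.dumps(frame)))                     # ~6 MB, dominated by the image
    print(len(pickle.dumps(replace(frame, img=None))))  # a few hundred bytes
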
@@ -442,12 +445,17 @@ class FrameEmitter:
         self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
         self.frame_sock.bind(config.zmq_frame_addr)

+        self.frame_noimg_sock = context.socket(zmq.PUB)
+        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
+        self.frame_noimg_sock.bind(config.zmq_frame_noimg_addr)
+
         logger.info(f"Connection socket {config.zmq_frame_addr}")
+        logger.info(f"Connection socket {config.zmq_frame_noimg_addr}")

         self.video_srcs = video_src_from_config(self.config)


-    def emit_video(self):
+    def emit_video(self, timer_counter):
         i = 0
         delay_generation = False
         for video_path in self.video_srcs:
@@ -462,7 +470,7 @@ class FrameEmitter:
                 video.set(cv2.CAP_PROP_FPS, 5)
                 fps=5
             elif video_path.url.scheme == 'rtsp':
-                gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
+                gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
                 logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
                 video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
                 fps=12
@@ -496,7 +504,11 @@ class FrameEmitter:

             prev_time = time.time()


             while self.is_running.is_set():
+                with timer_counter.get_lock():
+                    timer_counter.value += 1

                 ret, img = video.read()

                 # seek to 0 if video has finished. Infinite loop
@@ -515,6 +527,7 @@ class FrameEmitter:
                 frame = Frame(index=i, img=img, H=self.config.H, camera=self.config.camera)
                 # TODO: this is very dirty, need to find another way.
                 # perhaps multiprocessing Array?
+                self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
                 self.frame_sock.send(pickle.dumps(frame))

                 # only delay consuming the next frame when using a file.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def run_frame_emitter(config: Namespace, is_running: Event):
|
def run_frame_emitter(config: Namespace, is_running: Event, timer_counter: int):
|
||||||
router = FrameEmitter(config, is_running)
|
router = FrameEmitter(config, is_running)
|
||||||
router.emit_video()
|
router.emit_video(timer_counter)
|
||||||
is_running.clear()
|
is_running.clear()
|
|
@@ -13,6 +13,7 @@ from trap.prediction_server import run_prediction_server
 from trap.preview_renderer import run_preview_renderer
 from trap.animation_renderer import run_animation_renderer
 from trap.socket_forwarder import run_ws_forwarder
+from trap.timer import TimerCollection
 from trap.tracker import run_tracker

 from setproctitle import setproctitle, setthreadtitle
@@ -83,20 +84,23 @@ def start():

     # queue_listener.handlers.append(socket_handler)

+    timers = TimerCollection()
+    timer_fe = timers.new('frame_emitter')
+    timer_tracker = timers.new('tracker')
+
     # instantiating process with arguments
     procs = [
         # ExceptionHandlingProcess(target=run_ws_forwarder, kwargs={'config': args, 'is_running': isRunning}, name='forwarder'),
-        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning}, name='frame_emitter'),
-        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
+        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_fe.iterations}, name='frame_emitter'),
+        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_tracker.iterations}, name='tracker'),
     ]

     # if args.render_file or args.render_url or args.render_window:
     if args.render_window or args.render_file or args.render_url:
+        timer_preview = timers.new('preview')
         procs.append(
             # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
-            ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
+            ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_preview.iterations}, name='preview')
         )
     if args.render_animation:
         procs.append(
@@ -104,8 +108,19 @@ def start():
         )

     if not args.bypass_prediction:
+        timer_predict = timers.new('predict')
         procs.append(
-            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning}, name='inference'),
+            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning, 'timer_counter': timer_predict.iterations}, name='inference'),
+        )
+
+    def timer_process(timers: TimerCollection, is_running: Event):
+        while is_running.is_set():
+            time.sleep(1)
+            timers.snapshot()
+            print(timers.to_string())
+
+    procs.append(
+        ExceptionHandlingProcess(target=timer_process, kwargs={'is_running':isRunning, 'timers': timers}, name='timer'),
     )

     try:
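Note: the plumbing added across these hunks follows one pattern: each worker process receives a shared `multiprocessing.Value` (the timer's `iterations`), increments it under its lock on every loop iteration, and the `timer_process` above samples all counters once per second to derive throughput. A self-contained sketch of that pattern (names here are illustrative, not the project's):

    import time
    from multiprocessing import Process, Value

    def worker(timer_counter):
        for _ in range(200):                  # stand-in for a processing loop
            with timer_counter.get_lock():
                timer_counter.value += 1
            time.sleep(0.01)

    if __name__ == "__main__":
        counter = Value('i', 0)
        p = Process(target=worker, args=(counter,))
        p.start()
        last = 0
        for _ in range(2):                    # monitor: sample once a second
            time.sleep(1.0)
            now = counter.value
            print(f"{now - last} iterations/s")
            last = now
        p.join()
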
@@ -166,11 +166,11 @@ class PredictionServer:
     def send_frame(self, frame: Frame):
         if self.external_predictions:
             # data = json.dumps(frame, cls=DataclassJSONEncoder)
-            self.prediction_socket.send_json(frame, cls=DataclassJSONEncoder)
+            self.prediction_socket.send_json(obj=frame, cls=DataclassJSONEncoder)
         else:
             self.prediction_socket.send_pyobj(frame)

-    def run(self):
+    def run(self, timer_counter):
         print(self.config)
         if self.config.seed is not None:
             random.seed(self.config.seed)
@@ -247,6 +247,8 @@ class PredictionServer:
         prev_run_time = 0
         while self.is_running.is_set():
             timestep += 1
+            with timer_counter.get_lock():
+                timer_counter.value+=1

             # this_run_time = time.time()
             # logger.debug(f'test {prev_run_time - this_run_time}')
@@ -477,7 +479,7 @@ class PredictionServer:



-def run_prediction_server(config: Namespace, is_running: Event):
+def run_prediction_server(config: Namespace, is_running: Event, timer_counter):

     # attempt to trace the warnings coming from pytorch
     # def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
@@ -488,4 +490,4 @@ def run_prediction_server(config: Namespace, is_running: Event):

     # warnings.showwarning = warn_with_traceback
     s = PredictionServer(config, is_running)
-    s.run()
+    s.run(timer_counter)
@@ -1,6 +1,9 @@
+import collections
+from re import A
 import time
 from multiprocessing.sharedctypes import RawValue, Value, Array
 from ctypes import c_double
+from typing import MutableSequence


 class Timer():
|
||||||
Measure 2 independent things: the freuency of tic, and the duration of tic->toc
|
Measure 2 independent things: the freuency of tic, and the duration of tic->toc
|
||||||
Note that indeed these don't need to be equal
|
Note that indeed these don't need to be equal
|
||||||
"""
|
"""
|
||||||
def __init__(self) -> None:
|
def __init__(self, name = 'timer') -> None:
|
||||||
self.last_tic = RawValue(c_double, -1)
|
self.name = name
|
||||||
self.last_toc = RawValue(c_double, -1)
|
self.tocs: MutableSequence[(float, int)] = collections.deque(maxlen=5)
|
||||||
self.fps = RawValue(c_double, -1)
|
self.iterations = Value('i', 0)
|
||||||
self.processing_duration = RawValue(c_double, -1)
|
|
||||||
|
|
||||||
self.smoothing = .1
|
# def tic(self):
|
||||||
|
# now = time.time()
|
||||||
|
# if self.last_tic is None:
|
||||||
|
|
||||||
def tic(self):
|
# self.last_tic = now
|
||||||
now = time.time()
|
# return
|
||||||
if self.last_tic is None:
|
|
||||||
|
|
||||||
self.last_tic = now
|
# duration = now - self.last_tic
|
||||||
return
|
# self.last_tic = now
|
||||||
|
|
||||||
duration = now - self.last_tic
|
# current_fps = 1 / duration
|
||||||
self.last_tic = now
|
# if not self.fps:
|
||||||
|
# self.fps = current_fps
|
||||||
current_fps = 1 / duration
|
# else:
|
||||||
if not self.fps:
|
# self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
|
||||||
self.fps = current_fps
|
|
||||||
else:
|
|
||||||
self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
|
|
||||||
|
|
||||||
def toc(self):
|
def toc(self):
|
||||||
self.last_toc = time.time()
|
self.iterations += 1
|
||||||
duration = self.last_toc - self.last_tic
|
|
||||||
|
|
||||||
self.processing_duration = self.processing_duration * (1-self.smoothing) + duration * self.smoothing
|
|
||||||
|
|
||||||
|
def snapshot(self):
|
||||||
|
self.tocs.append((time.perf_counter(), self.iterations.value))
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def fps(self):
|
def fps(self):
|
||||||
|
fpses = []
|
||||||
|
if len(self.tocs) < 2:
|
||||||
|
return 0
|
||||||
|
dt = self.tocs[-1][0] - self.tocs[0][0]
|
||||||
|
di = self.tocs[-1][1] - self.tocs[0][1]
|
||||||
|
return di/dt
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
class TimerCollection():
|
class TimerCollection():
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
self._timers = set()
|
self._timers = set()
|
||||||
|
|
||||||
def print(self)->str:
|
def snapshot(self):
|
||||||
print('Update', end='\r')
|
for timer in self._timers:
|
||||||
|
timer.snapshot()
|
||||||
|
|
||||||
|
def to_string(self)->str:
|
||||||
|
strs = [f"{t.name} {t.fps:.2f}" for t in self._timers]
|
||||||
|
return " ".join(strs)
|
||||||
|
|
||||||
|
def new(self, name='timer'):
|
||||||
|
t = Timer(name)
|
||||||
|
self._timers.add(t)
|
||||||
|
return t
|
||||||
|
|
||||||
|
|
|
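Note: with this rewrite `fps` is no longer an exponentially smoothed instantaneous rate; it is a windowed average over the last five one-second snapshots, `(iterations_now - iterations_then) / (t_now - t_then)`. A usage sketch, assuming the `Timer`/`TimerCollection` from the hunk above are in scope (the sketch ticks `iterations.value` directly under its lock, as the worker processes in this commit do):

    import time

    timers = TimerCollection()
    timer = timers.new('tracker')

    timers.snapshot()                    # first sample: (t0, 0)
    for _ in range(30):                  # stand-in worker loop, ~0.9 s total
        with timer.iterations.get_lock():
            timer.iterations.value += 1
        time.sleep(0.03)
    timers.snapshot()                    # second sample: (t1, 30)

    print(timers.to_string())            # e.g. "tracker 33.15": 30 / (t1 - t0)
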
@@ -22,7 +22,7 @@ from ultralytics import YOLO
 from ultralytics.engine.results import Results as YOLOResult
 import tqdm

-from trap.utils import lerp
+from trap.utils import inv_lerp, lerp



@@ -185,40 +185,82 @@ def tracker_compare():

         bar.set_description(f"[{frames.video_nr}/{len(frames.video_srcs)}] [{frames.frame_idx}/{frames.frame_count}] {str(frames.video_path)}")

-def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], opacity=1):
+def transition_path_points(path: np.array, t: float):
     """
-    Opacity: 0-1
+    """
+
+    if t >= 1:
+        return path
+    if t <= 0:
+        return np.array([path[0]])
+
+    # new_path = np.array([])
+    lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
+    cum_lenghts = np.cumsum(lengths)
+    # distance = cum_lenghts[-1] * t
+    ts = np.concatenate((np.array([0.]), cum_lenghts / cum_lenghts[-1]))
+    new_path = [path[0]]
+
+    for a, b, t_a, t_b in zip(path[:-1], path[1:], ts[:-1], ts[1:]):
+        if t_b < t:
+            new_path.append(b)
+            continue
+        # interpolate
+        relative_t = inv_lerp(t_a, t_b, t)
+        x = lerp(a[0], b[0], relative_t)
+        y = lerp(a[1], b[1], relative_t)
+        print(relative_t, a , b, x, y)
+        new_path.append([x,y])
+        break
+    return np.array(new_path)
+
+
+
+def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], anim_position=.8):
+    """
+    anim_position: 0-1
     """
     if not track.predictions:
         return

     current_point = track.get_projected_history(camera=camera)[-1]

-    if convert_points:
-        current_point = convert_points([current_point])[0]
+    opacity = 1-min(1, max(0, inv_lerp(0.8, 1, anim_position))) # fade out
+    slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position
+
+    # if convert_points:
+    #     current_point = convert_points([current_point])[0]

     for pred_i, pred in enumerate(track.predictions):
         pred_coords = pred #cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
+        # line_points = np.concatenate(([current_point], pred_coords)) # 'current point' is amoving target
+        line_points = pred_coords
+        # print(pred_coords, current_point, line_points)
+        line_points = transition_path_points(line_points, slide_t)
         if convert_points:
-            pred_coords = convert_points(pred_coords)
+            line_points = convert_points(line_points)
         # color = (128,0,128) if pred_i else (128,128,0)

         color = bgr_colors[color_index % len(bgr_colors)]
         color = tuple([int(c*opacity) for c in color])

-        for ci in range(0, len(pred_coords)):
-            if ci == 0:
-                # TODO)) prev point
-                # continue
-                start = [int(p) for p in current_point]
-                # start = [int(p) for p in coords[-1]]
-                # start = [0,0]?
-                # print(start)
-            else:
-                start = [int(p) for p in pred_coords[ci-1]]
-            end = [int(p) for p in pred_coords[ci]]
-            cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
+        for start, end in zip(line_points[:-1], line_points[1:]):
+            # for ci in range(0, len(pred_coords)):
+            #     if ci == 0:
+            #         # TODO)) prev point
+            #         # continue
+            #         start = [int(p) for p in current_point]
+            #         # start = [int(p) for p in coords[-1]]
+            #         # start = [0,0]?
+            #         # print(start)
+            #     else:
+            #         start = [int(p) for p in pred_coords[ci-1]]
+            #     end = [int(p) for p in pred_coords[ci]]
+            # print(np.rint(start),np.rint(end).tolist())
+            cv2.line(img, np.rint(start).astype(int), np.rint(end).astype(int), color, 1, lineType=cv2.LINE_AA)
             # cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)

 def draw_trackjectron_history(img: cv2.Mat, track: Track, color_index: int, convert_points: Optional[Callable]):
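Note: `transition_path_points` is what animates the prediction lines: it returns the prefix of a polyline covering fraction `t` of its total arc length, linearly interpolating the final point, and `slide_t` sweeps it from 0 to 1 over the first 80% of `anim_position` before the fade-out kicks in. A worked example, assuming the function above and `trap.utils`'s `lerp`/`inv_lerp` (taken here to be the usual `a + t*(b-a)` and its inverse) are in scope:

    import numpy as np

    path = np.array([[0., 0.], [1., 0.], [1., 1.]])   # two unit-length segments
    # normalized cumulative arc lengths: ts = [0.0, 0.5, 1.0]
    # t=0.75 lands halfway along the second segment:
    #   relative_t = inv_lerp(0.5, 1.0, 0.75) = 0.5  ->  endpoint (1.0, 0.5)
    print(transition_path_points(path, 0.75))
    # [[0.   0. ]
    #  [1.   0. ]
    #  [1.   0.5]]
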
@@ -9,7 +9,7 @@ from multiprocessing import Event
 from pathlib import Path
 import pickle
 import time
-from typing import Optional, List
+from typing import Dict, Optional, List
 import jsonlines
 import numpy as np
 import torch
@@ -61,7 +61,7 @@ RCNN_SCALE = .4 # seems to have no impact on detections in the corners

 def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:

-    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, **kwargs))
+    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))
     if results[0].boxes is None or results[0].boxes.id is None:
         # work around https://github.com/ultralytics/ultralytics/issues/5968
         return []
@@ -99,6 +99,10 @@ class TrackFilter:
     def apply(self, tracks: List[Track], camera: Camera):
         return [t for t in tracks if self.filter(t, camera)]

+    def apply_to_dict(self, tracks: Dict[str, Track], camera: Camera):
+        tracks = self.apply(tracks.values(), camera)
+        return {t.track_id: t for t in tracks}
+
 class FinalDisplacementFilter(TrackFilter):
     def __init__(self, min_displacement):
         self.min_displacement = min_displacement
@@ -444,7 +448,7 @@ class Tracker:

     def track_frame(self, frame: Frame):
         if self.config.detector == DETECTOR_YOLOv8:
-            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0], imgsz=[1152, 640])
+            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
         else :
             detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)

@@ -458,7 +462,7 @@ class Tracker:
         return detections


-    def track(self, is_running: Event):
+    def track(self, is_running: Event, timer_counter: int = 0):
         """
         Live tracking of frames coming in over zmq
         """
@@ -500,7 +504,11 @@ class Tracker:
         end_time = None
         tracker_dt = None
         w_time = None
+        displacement_filter = FinalDisplacementFilter(.2)
         while self.is_running.is_set():

+            with timer_counter.get_lock():
+                timer_counter.value += 1
             # this waiting for target_dt causes frame loss. E.g. with target_dt at .1, it
             # skips exactly 1 frame on a 10 fps video (which, it obviously should not do)
             # so for now, timing should move to emitter
@@ -566,6 +574,9 @@ class Tracker:
             # }
             active_track_ids = [d.track_id for d in detections]
             active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
+            active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera)# a filter to remove just detecting static objects
+
+
             # active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
             # active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
             # logger.info(f"{trajectories}")
|
@ -579,7 +590,7 @@ class Tracker:
|
||||||
frame = self.smoother.smooth_frame_tracks(frame)
|
frame = self.smoother.smooth_frame_tracks(frame)
|
||||||
|
|
||||||
# print(f"send to {self.trajectory_socket}, {self.config.zmq_trajectory_addr}")
|
# print(f"send to {self.trajectory_socket}, {self.config.zmq_trajectory_addr}")
|
||||||
self.trajectory_socket.send_pyobj(frame)
|
self.trajectory_socket.send_pyobj(frame.without_img()) # ditch image for faster passthrough
|
||||||
|
|
||||||
end_time = time.time()
|
end_time = time.time()
|
||||||
tracker_dt = end_time - start_time
|
tracker_dt = end_time - start_time
|
||||||
|
@@ -665,9 +676,9 @@ class Tracker:
         return [([d[0], d[1], d[2]-d[0], d[3]-d[1]], d[4], d[5]) for d in detections]


-def run_tracker(config: Namespace, is_running: Event):
+def run_tracker(config: Namespace, is_running: Event, timer_counter):
     router = Tracker(config)
-    router.track(is_running)
+    router.track(is_running, timer_counter)
