Tracker more sensitive and cv_renderer animates lines
parent 888ef7ff93
commit cebe102e74
10 changed files with 211 additions and 106 deletions
@@ -2,10 +2,10 @@
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
 
 tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.05 # threshold for the first association
-track_low_thresh: 0.01 # threshold for the second association
-new_track_thresh: 0.1 # threshold for init new track if the detection does not match any tracks
-track_buffer: 35 # buffer to calculate the time when to remove tracks
-match_thresh: 0.9 # threshold for matching tracks
+track_high_thresh: 0.0001 # threshold for the first association
+track_low_thresh: 0.0001 # threshold for the second association
+new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
+track_buffer: 50 # buffer to calculate the time when to remove tracks
+match_thresh: 0.95 # threshold for matching tracks
 fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
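Note: with track_high_thresh, track_low_thresh and new_track_thresh all dropped to 0.0001, ByteTrack will start and associate tracks from almost any detection, which only makes sense because the detector is also asked for very low-confidence boxes (see the conf=0.00001 change in tracker.py further down). A minimal sketch of how this config file gets picked up, based on the _yolov8_track call later in this commit; the weights file and the dummy image are assumptions:

from ultralytics import YOLO
import numpy as np

model = YOLO("yolov8n.pt")  # hypothetical weights; the commit does not show which model is loaded
frame_img = np.zeros((640, 1152, 3), dtype=np.uint8)  # stand-in for frame.img

# persist=True keeps ByteTrack state across calls; the tracker argument points at the
# settings above (the yaml must be on disk), so the lowered thresholds apply to every association
results = model.track(frame_img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001)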
@@ -54,10 +54,10 @@ class AnimationRenderer:
         self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
         self.tracker_sock.connect(config.zmq_trajectory_addr)
 
-        self.frame_sock = context.socket(zmq.SUB)
-        self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
-        self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
-        self.frame_sock.connect(config.zmq_frame_addr)
+        self.frame_noimg_sock = context.socket(zmq.SUB)
+        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
+        self.frame_noimg_sock.setsockopt(zmq.SUBSCRIBE, b'')
+        self.frame_noimg_sock.connect(config.zmq_frame_noimg_addr)
 
         self.H = self.config.H
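The CONFLATE note in these sockets is easy to trip over: as the inline comment says, the option is ignored if it is set after connect. A minimal pyzmq subscriber sketch of the pattern used here, pointed at the default address of the new no-image feed from the argument parser below:

import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.setsockopt(zmq.CONFLATE, 1)     # keep only the newest message; must be set before connect
sock.setsockopt(zmq.SUBSCRIBE, b'')  # no topic filtering
sock.connect("ipc:///tmp/feeds_frame2")

frame = sock.recv_pyobj()            # the emitter publishes pickled Frame objects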
@@ -324,26 +324,27 @@ class AnimationRenderer:
         new_tracks = False
         try:
-            self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
+            self.frame: Frame = self.frame_noimg_sock.recv_pyobj(zmq.NOBLOCK)
             if not self.first_time:
                 self.first_time = self.frame.time
-            img = self.frame.img
-            # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
-            img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
-            img = cv2.warpPerspective(img, convert_world_space_to_img_space(self.config.camera.H), (self.config.camera.w, self.config.camera.h))
-            # img = cv2.GaussianBlur(img, (15, 15), 0)
-            img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
-            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
-            # don't draw in batch, so that it is the background
-            if hasattr(self, 'video_sprite') and self.video_sprite:
-                self.video_sprite.delete()
-            self.frame.img = None
-
-            self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
-            # transform to flipped coordinate system for pyglet
-            self.video_sprite.y = self.window.height - self.video_sprite.height
-            # self.frame.img = np.array([]) # clearing memory?
-            # self.video_sprite.opacity = 70
+            if self.frame.img:
+                img = self.frame.img
+                # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
+                img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
+                img = cv2.warpPerspective(img, convert_world_space_to_img_space(self.config.camera.H), (self.config.camera.w, self.config.camera.h))
+                # img = cv2.GaussianBlur(img, (15, 15), 0)
+                img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
+                img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
+                # don't draw in batch, so that it is the background
+                if hasattr(self, 'video_sprite') and self.video_sprite:
+                    self.video_sprite.delete()
+                self.frame.img = None
+
+                self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
+                # transform to flipped coordinate system for pyglet
+                self.video_sprite.y = self.window.height - self.video_sprite.height
+                # self.frame.img = np.array([]) # clearing memory?
+                # self.video_sprite.opacity = 70
         except zmq.ZMQError as e:
             # idx = frame.index if frame else "NONE"
             # logger.debug(f"reuse video frame {idx}")
@@ -251,7 +251,12 @@ connection_parser.add_argument('--zmq-prediction-addr',
 connection_parser.add_argument('--zmq-frame-addr',
                         help='Manually specity communication addr for the frame messages',
                         type=str,
                         default="ipc:///tmp/feeds_frame")
+
+connection_parser.add_argument('--zmq-frame-noimg-addr',
+                        help='Manually specity communication addr for the frame messages',
+                        type=str,
+                        default="ipc:///tmp/feeds_frame2")
 
 connection_parser.add_argument('--ws-port',
@@ -334,7 +334,7 @@ class CvRenderer:
 
 
-    def run(self):
+    def run(self, timer_counter):
         frame = None
         prediction_frame = None
         tracker_frame = None
@@ -349,6 +349,8 @@ class CvRenderer:
 
         while self.is_running.is_set():
             i+=1
+            with timer_counter.get_lock():
+                timer_counter.value+=1
 
             # zmq_ev = self.frame_sock.poll(timeout=2000)
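The timer_counter handed to run() is shared with the supervisor: each render loop iteration bumps it under its lock and a separate timer process later turns the counts into a rate. A stripped-down sketch of the same handshake (names and timings are illustrative):

import time
from multiprocessing import Process, Value

def worker(counter):
    for _ in range(100):
        with counter.get_lock():
            counter.value += 1
        time.sleep(0.01)

if __name__ == "__main__":
    counter = Value('i', 0)        # same shared int as Timer.iterations below
    p = Process(target=worker, args=(counter,))
    p.start()
    time.sleep(0.5)
    print(counter.value)           # roughly how many iterations the worker ran so far
    p.join()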
@@ -406,10 +408,10 @@ class CvRenderer:
             # clear out old tracks & predictions:
             for track_id, track in list(self.tracks.items()):
-                if get_opacity(track, frame) == 0:
+                if get_animation_position(track, frame) == 1:
                     self.tracks.pop(track_id)
             for prediction_id, track in list(self.predictions.items()):
-                if get_opacity(track, frame) == 0:
+                if get_animation_position(track, frame) == 1:
                     self.predictions.pop(prediction_id)
 
         logger.info('Stopping')
@@ -443,10 +445,10 @@ colorset = [
 # (0,0,0),
 # ]
 
-def get_opacity(track: Track, current_frame: Frame):
-    fade_duration = current_frame.camera.fps * 1.5
+def get_animation_position(track: Track, current_frame: Frame):
+    fade_duration = current_frame.camera.fps * 3
     diff = current_frame.index - track.history[-1].frame_nr
-    return max(0, 1 - diff / fade_duration)
+    return max(0, min(1, diff / fade_duration))
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
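Where get_opacity counted down from 1 to 0, get_animation_position counts up from 0 to 1 over three seconds of camera frames, so callers decide what happens at either end (the cleanup loop above drops a track once it reaches 1). A quick worked example with an assumed 12 fps camera:

fps = 12                     # illustrative; the real value comes from current_frame.camera.fps
fade_duration = fps * 3      # 36 frames
for frames_since_last_seen in (0, 9, 18, 36, 48):
    position = max(0, min(1, frames_since_last_seen / fade_duration))
    print(frames_since_last_seen, position)   # 0.0, 0.25, 0.5, 1.0, 1.0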
@@ -497,8 +499,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
         inv_H = np.linalg.pinv(prediction_frame.H)
         # draw_track(img, track, int(track_id))
         draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
-        opacity = get_opacity(track, frame)
-        draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, opacity=opacity)
+        anim_position = get_animation_position(track, frame)
+        draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, anim_position=anim_position)
         cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)
 
     base_color = (255,)*3
@@ -532,6 +534,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     return img
 
 
-def run_cv_renderer(config: Namespace, is_running: BaseEvent):
+def run_cv_renderer(config: Namespace, is_running: BaseEvent, timer_counter):
     renderer = CvRenderer(config, is_running)
-    renderer.run()
+    renderer.run(timer_counter)
@@ -420,6 +420,9 @@ class Frame:
             } for t in self.tracks.values()
         }
 
+    def without_img(self):
+        return Frame(self.index, None, self.time, self.tracks, self.H, self.camera)
+
 def video_src_from_config(config) -> UrlOrPath:
     if config.video_loop:
         video_srcs: Iterable[UrlOrPath] = cycle(config.video_src)
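without_img exists so the new feeds_frame2 publisher can ship track metadata without pickling the raw image on every send; subscribers that only need trajectories (like the renderers above) receive much smaller messages. A rough illustration, assuming the Frame dataclass accepts these keyword fields (the values are placeholders):

import pickle
import numpy as np

img = np.zeros((1080, 1920, 3), dtype=np.uint8)           # a full-HD frame is ~6 MB of raw pixels
frame = Frame(index=0, img=img, time=0.0, tracks={}, H=None, camera=None)  # hypothetical values
print(len(pickle.dumps(frame)))                  # dominated by the image buffer
print(len(pickle.dumps(frame.without_img())))    # only the metadata survives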
@@ -441,13 +444,18 @@ class FrameEmitter:
         self.frame_sock = context.socket(zmq.PUB)
         self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
         self.frame_sock.bind(config.zmq_frame_addr)
+
+        self.frame_noimg_sock = context.socket(zmq.PUB)
+        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
+        self.frame_noimg_sock.bind(config.zmq_frame_noimg_addr)
 
         logger.info(f"Connection socket {config.zmq_frame_addr}")
+        logger.info(f"Connection socket {config.zmq_frame_noimg_addr}")
 
         self.video_srcs = video_src_from_config(self.config)
 
-    def emit_video(self):
+    def emit_video(self, timer_counter):
         i = 0
         delay_generation = False
         for video_path in self.video_srcs:
@@ -462,7 +470,7 @@ class FrameEmitter:
                 video.set(cv2.CAP_PROP_FPS, 5)
                 fps=5
             elif video_path.url.scheme == 'rtsp':
-                gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
+                gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
                 logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
                 video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
                 fps=12
@@ -496,7 +504,11 @@ class FrameEmitter:
 
             prev_time = time.time()
 
             while self.is_running.is_set():
+                with timer_counter.get_lock():
+                    timer_counter.value += 1
+
                 ret, img = video.read()
 
                 # seek to 0 if video has finished. Infinite loop
@@ -515,6 +527,7 @@ class FrameEmitter:
                 frame = Frame(index=i, img=img, H=self.config.H, camera=self.config.camera)
                 # TODO: this is very dirty, need to find another way.
                 # perhaps multiprocessing Array?
+                self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
                 self.frame_sock.send(pickle.dumps(frame))
 
                 # only delay consuming the next frame when using a file.
@@ -540,7 +553,7 @@ class FrameEmitter:
 
 
-def run_frame_emitter(config: Namespace, is_running: Event):
+def run_frame_emitter(config: Namespace, is_running: Event, timer_counter: int):
     router = FrameEmitter(config, is_running)
-    router.emit_video()
+    router.emit_video(timer_counter)
     is_running.clear()
@@ -13,6 +13,7 @@ from trap.prediction_server import run_prediction_server
 from trap.preview_renderer import run_preview_renderer
 from trap.animation_renderer import run_animation_renderer
 from trap.socket_forwarder import run_ws_forwarder
+from trap.timer import TimerCollection
 from trap.tracker import run_tracker
 
 from setproctitle import setproctitle, setthreadtitle
@@ -83,20 +84,23 @@ def start():
     # queue_listener.handlers.append(socket_handler)
 
+    timers = TimerCollection()
+    timer_fe = timers.new('frame_emitter')
+    timer_tracker = timers.new('tracker')
+
     # instantiating process with arguments
     procs = [
         # ExceptionHandlingProcess(target=run_ws_forwarder, kwargs={'config': args, 'is_running': isRunning}, name='forwarder'),
-        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning}, name='frame_emitter'),
-        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
+        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_fe.iterations}, name='frame_emitter'),
+        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_tracker.iterations}, name='tracker'),
     ]
 
     # if args.render_file or args.render_url or args.render_window:
     if args.render_window or args.render_file or args.render_url:
+        timer_preview = timers.new('preview')
         procs.append(
             # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
-            ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
+            ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_preview.iterations}, name='preview')
         )
     if args.render_animation:
         procs.append(
@@ -104,10 +108,21 @@ def start():
         )
 
     if not args.bypass_prediction:
+        timer_predict = timers.new('predict')
        procs.append(
-            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning}, name='inference'),
+            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning, 'timer_counter': timer_predict.iterations}, name='inference'),
        )
 
+    def timer_process(timers: TimerCollection, is_running: Event):
+        while is_running.is_set():
+            time.sleep(1)
+            timers.snapshot()
+            print(timers.to_string())
+
+    procs.append(
+        ExceptionHandlingProcess(target=timer_process, kwargs={'is_running':isRunning, 'timers': timers}, name='timer'),
+    )
+
     try:
         logger.info("start")
         for proc in procs:
@@ -166,11 +166,11 @@ class PredictionServer:
     def send_frame(self, frame: Frame):
         if self.external_predictions:
             # data = json.dumps(frame, cls=DataclassJSONEncoder)
-            self.prediction_socket.send_json(frame, cls=DataclassJSONEncoder)
+            self.prediction_socket.send_json(obj=frame, cls=DataclassJSONEncoder)
         else:
             self.prediction_socket.send_pyobj(frame)
 
-    def run(self):
+    def run(self, timer_counter):
         print(self.config)
         if self.config.seed is not None:
             random.seed(self.config.seed)
@@ -247,6 +247,8 @@ class PredictionServer:
         prev_run_time = 0
         while self.is_running.is_set():
             timestep += 1
+            with timer_counter.get_lock():
+                timer_counter.value+=1
 
             # this_run_time = time.time()
             # logger.debug(f'test {prev_run_time - this_run_time}')
@@ -477,7 +479,7 @@ class PredictionServer:
 
 
-def run_prediction_server(config: Namespace, is_running: Event):
+def run_prediction_server(config: Namespace, is_running: Event, timer_counter):
 
     # attempt to trace the warnings coming from pytorch
     # def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
@@ -488,4 +490,4 @@ def run_prediction_server(config: Namespace, is_running: Event):
 
     # warnings.showwarning = warn_with_traceback
     s = PredictionServer(config, is_running)
-    s.run()
+    s.run(timer_counter)
@@ -1,6 +1,9 @@
+import collections
 from re import A
 import time
 from multiprocessing.sharedctypes import RawValue, Value, Array
 from ctypes import c_double
+from typing import MutableSequence
 
 
 class Timer():
@@ -8,47 +11,58 @@ class Timer():
     Measure 2 independent things: the freuency of tic, and the duration of tic->toc
     Note that indeed these don't need to be equal
     """
-    def __init__(self) -> None:
-        self.last_tic = RawValue(c_double, -1)
-        self.last_toc = RawValue(c_double, -1)
-        self.fps = RawValue(c_double, -1)
-        self.processing_duration = RawValue(c_double, -1)
-
-        self.smoothing = .1
-
-    def tic(self):
-        now = time.time()
-        if self.last_tic is None:
-            self.last_tic = now
-            return
-
-        duration = now - self.last_tic
-        self.last_tic = now
-
-        current_fps = 1 / duration
-        if not self.fps:
-            self.fps = current_fps
-        else:
-            self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
+    def __init__(self, name = 'timer') -> None:
+        self.name = name
+        self.tocs: MutableSequence[(float, int)] = collections.deque(maxlen=5)
+        self.iterations = Value('i', 0)
+
+    # def tic(self):
+    #     now = time.time()
+    #     if self.last_tic is None:
+    #         self.last_tic = now
+    #         return
+    #
+    #     duration = now - self.last_tic
+    #     self.last_tic = now
+    #
+    #     current_fps = 1 / duration
+    #     if not self.fps:
+    #         self.fps = current_fps
+    #     else:
+    #         self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
 
     def toc(self):
-        self.last_toc = time.time()
-        duration = self.last_toc - self.last_tic
-
-        self.processing_duration = self.processing_duration * (1-self.smoothing) + duration * self.smoothing
+        self.iterations += 1
+
+    def snapshot(self):
+        self.tocs.append((time.perf_counter(), self.iterations.value))
+
+    @property
+    def fps(self):
+        pass
+        fpses = []
+        if len(self.tocs) < 2:
+            return 0
+        dt = self.tocs[-1][0] - self.tocs[0][0]
+        di = self.tocs[-1][1] - self.tocs[0][1]
+        return di/dt
 
 
 class TimerCollection():
     def __init__(self) -> None:
         self._timers = set()
 
-    def print(self)->str:
-        print('Update', end='\r')
+    def snapshot(self):
+        for timer in self._timers:
+            timer.snapshot()
+
+    def to_string(self)->str:
+        strs = [f"{t.name} {t.fps:.2f}" for t in self._timers]
+        return " ".join(strs)
+
+    def new(self, name='timer'):
+        t = Timer(name)
+        self._timers.add(t)
+        return t
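The rewritten Timer no longer smooths per-tick deltas; fps is now the slope between the oldest and newest snapshot in a five-entry window of (perf_counter, iterations) pairs. A small worked example with made-up numbers:

import collections

tocs = collections.deque(maxlen=5)
tocs.append((100.0, 0))     # (seconds, iterations counted so far)
tocs.append((101.0, 12))
tocs.append((102.0, 25))

dt = tocs[-1][0] - tocs[0][0]   # 2.0 s between oldest and newest snapshot
di = tocs[-1][1] - tocs[0][1]   # 25 iterations in that window
print(di / dt)                  # 12.5 iterations per second, what TimerCollection reports as fps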
@@ -22,7 +22,7 @@ from ultralytics import YOLO
 from ultralytics.engine.results import Results as YOLOResult
 import tqdm
 
-from trap.utils import lerp
+from trap.utils import inv_lerp, lerp
@@ -185,40 +185,82 @@ def tracker_compare():
         bar.set_description(f"[{frames.video_nr}/{len(frames.video_srcs)}] [{frames.frame_idx}/{frames.frame_count}] {str(frames.video_path)}")
 
-def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], opacity=1):
+def transition_path_points(path: np.array, t: float):
     """
     Opacity: 0-1
     """
+
+    if t >= 1:
+        return path
+    if t <= 0:
+        return np.array([path[0]])
+
+    # new_path = np.array([])
+    lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
+    cum_lenghts = np.cumsum(lengths)
+    # distance = cum_lenghts[-1] * t
+    ts = np.concatenate((np.array([0.]), cum_lenghts / cum_lenghts[-1]))
+    new_path = [path[0]]
+
+    for a, b, t_a, t_b in zip(path[:-1], path[1:], ts[:-1], ts[1:]):
+        if t_b < t:
+            new_path.append(b)
+            continue
+        # interpolate
+        relative_t = inv_lerp(t_a, t_b, t)
+        x = lerp(a[0], b[0], relative_t)
+        y = lerp(a[1], b[1], relative_t)
+        print(relative_t, a , b, x, y)
+        new_path.append([x,y])
+        break
+    return np.array(new_path)
+
+
+def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], anim_position=.8):
+    """
+    anim_position: 0-1
+    """
     if not track.predictions:
         return
 
     current_point = track.get_projected_history(camera=camera)[-1]
 
-    if convert_points:
-        current_point = convert_points([current_point])[0]
+    opacity = 1-min(1, max(0, inv_lerp(0.8, 1, anim_position))) # fade out
+    slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position
+
+    # if convert_points:
+    #     current_point = convert_points([current_point])[0]
 
     for pred_i, pred in enumerate(track.predictions):
         pred_coords = pred #cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
+        # line_points = np.concatenate(([current_point], pred_coords)) # 'current point' is amoving target
+        line_points = pred_coords
+        # print(pred_coords, current_point, line_points)
+        line_points = transition_path_points(line_points, slide_t)
         if convert_points:
             pred_coords = convert_points(pred_coords)
+            line_points = convert_points(line_points)
         # color = (128,0,128) if pred_i else (128,128,0)
 
         color = bgr_colors[color_index % len(bgr_colors)]
         color = tuple([int(c*opacity) for c in color])
 
-        for ci in range(0, len(pred_coords)):
-            if ci == 0:
-                # TODO)) prev point
-                # continue
-                start = [int(p) for p in current_point]
-                # start = [int(p) for p in coords[-1]]
-                # start = [0,0]?
-                # print(start)
-            else:
-                start = [int(p) for p in pred_coords[ci-1]]
-            end = [int(p) for p in pred_coords[ci]]
-            cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
+        for start, end in zip(line_points[:-1], line_points[1:]):
+        # for ci in range(0, len(pred_coords)):
+        #     if ci == 0:
+        #         # TODO)) prev point
+        #         # continue
+        #         start = [int(p) for p in current_point]
+        #         # start = [int(p) for p in coords[-1]]
+        #         # start = [0,0]?
+        #         # print(start)
+        #     else:
+        #         start = [int(p) for p in pred_coords[ci-1]]
+        #     end = [int(p) for p in pred_coords[ci]]
+            # print(np.rint(start),np.rint(end).tolist())
+            cv2.line(img, np.rint(start).astype(int), np.rint(end).astype(int), color, 1, lineType=cv2.LINE_AA)
             # cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)
 
 def draw_trackjectron_history(img: cv2.Mat, track: Track, color_index: int, convert_points: Optional[Callable]):
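transition_path_points is what makes the prediction lines animate: it keeps the first fraction t of a polyline measured along its arc length, interpolating the segment it lands in, and draw_track_predictions splits anim_position into a slide phase (0 to 0.8) and a fade phase (0.8 to 1). A standalone sketch of the same truncation idea, without the project's lerp/inv_lerp helpers:

import numpy as np

def truncate_path(path: np.ndarray, t: float) -> np.ndarray:
    # keep the first fraction t of the path, measured along its arc length
    if t >= 1:
        return path
    if t <= 0:
        return path[:1]
    seg_lengths = np.sqrt(np.sum(np.diff(path, axis=0) ** 2, axis=1))
    cum = np.concatenate(([0.0], np.cumsum(seg_lengths))) / np.sum(seg_lengths)
    out = [path[0]]
    for a, b, ta, tb in zip(path[:-1], path[1:], cum[:-1], cum[1:]):
        if tb < t:
            out.append(b)
            continue
        f = (t - ta) / (tb - ta)          # position within the segment that crosses t
        out.append(a + f * (b - a))       # linear interpolation, as lerp/inv_lerp do above
        break
    return np.array(out)

path = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])  # total length 2
print(truncate_path(path, 0.75))  # [[0. 0.] [1. 0.] [1. 0.5]], halfway along the second segment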
@@ -9,7 +9,7 @@ from multiprocessing import Event
 from pathlib import Path
 import pickle
 import time
-from typing import Optional, List
+from typing import Dict, Optional, List
 import jsonlines
 import numpy as np
 import torch
@@ -61,7 +61,7 @@ RCNN_SCALE = .4 # seems to have no impact on detections in the corners
 
 def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:
 
-    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, **kwargs))
+    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))
     if results[0].boxes is None or results[0].boxes.id is None:
         # work around https://github.com/ultralytics/ultralytics/issues/5968
         return []
@@ -98,6 +98,10 @@ class TrackFilter:
 
     def apply(self, tracks: List[Track], camera: Camera):
         return [t for t in tracks if self.filter(t, camera)]
 
+    def apply_to_dict(self, tracks: Dict[str, Track], camera: Camera):
+        tracks = self.apply(tracks.values(), camera)
+        return {t.track_id: t for t in tracks}
+
 class FinalDisplacementFilter(TrackFilter):
     def __init__(self, min_displacement):
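The body of FinalDisplacementFilter is not part of this hunk; judging only from its name and the way it is constructed below (FinalDisplacementFilter(.2)), it presumably keeps tracks whose overall displacement exceeds the threshold, which is what compensates for the near-zero detection thresholds above. A speculative sketch of such a filter method, purely for illustration (the real implementation may differ):

import numpy as np

class FinalDisplacementFilter(TrackFilter):
    def __init__(self, min_displacement):
        self.min_displacement = min_displacement

    def filter(self, track, camera):
        # distance between the first and last projected position of the track;
        # get_projected_history is assumed here because draw_track_predictions uses it
        history = track.get_projected_history(camera=camera)
        displacement = np.linalg.norm(np.array(history[-1]) - np.array(history[0]))
        return displacement > self.min_displacement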
@@ -444,7 +448,7 @@ class Tracker:
 
     def track_frame(self, frame: Frame):
         if self.config.detector == DETECTOR_YOLOv8:
-            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0], imgsz=[1152, 640])
+            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
         else :
             detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)
@@ -458,7 +462,7 @@ class Tracker:
         return detections
 
 
-    def track(self, is_running: Event):
+    def track(self, is_running: Event, timer_counter: int = 0):
         """
        Live tracking of frames coming in over zmq
        """
@@ -500,7 +504,11 @@ class Tracker:
         end_time = None
         tracker_dt = None
         w_time = None
+        displacement_filter = FinalDisplacementFilter(.2)
         while self.is_running.is_set():
+
+            with timer_counter.get_lock():
+                timer_counter.value += 1
             # this waiting for target_dt causes frame loss. E.g. with target_dt at .1, it
             # skips exactly 1 frame on a 10 fps video (which, it obviously should not do)
             # so for now, timing should move to emitter
@@ -566,6 +574,9 @@ class Tracker:
             # }
             active_track_ids = [d.track_id for d in detections]
             active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
+            active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera) # a filter to remove just detecting static objects
+
+
             # active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
             # logger.info(f"{trajectories}")
@@ -579,7 +590,7 @@ class Tracker:
                 frame = self.smoother.smooth_frame_tracks(frame)
 
             # print(f"send to {self.trajectory_socket}, {self.config.zmq_trajectory_addr}")
-            self.trajectory_socket.send_pyobj(frame)
+            self.trajectory_socket.send_pyobj(frame.without_img()) # ditch image for faster passthrough
 
             end_time = time.time()
             tracker_dt = end_time - start_time
@@ -665,9 +676,9 @@ class Tracker:
         return [([d[0], d[1], d[2]-d[0], d[3]-d[1]], d[4], d[5]) for d in detections]
 
 
-def run_tracker(config: Namespace, is_running: Event):
+def run_tracker(config: Namespace, is_running: Event, timer_counter):
     router = Tracker(config)
-    router.track(is_running)
+    router.track(is_running, timer_counter)