slowly fade predicted tracks

Ruben van de Ven 2024-12-17 11:10:31 +01:00
parent 6b12ddf08a
commit 5ceeda05d7
2 changed files with 44 additions and 9 deletions

Changed file 1 of 2:

@@ -18,7 +18,7 @@ import tempfile
 from pathlib import Path
 import shutil
 import math
-from typing import Iterable, Optional
+from typing import Dict, Iterable, Optional
 from pyglet import shapes
@@ -73,6 +73,9 @@ class CvRenderer:
         self.tracker_frame: Frame|None = None
         self.prediction_frame: Frame|None = None
+        self.tracks: Dict[str, Track] = {}
+        self.predictions: Dict[str, Track] = {}

         # self.init_shapes()
@ -367,18 +370,24 @@ class CvRenderer:
try: try:
prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK) prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
for track_id, track in prediction_frame.tracks.items():
prediction_id = f"{track_id}-{track.history[-1].frame_nr}"
self.predictions[prediction_id] = track
except zmq.ZMQError as e: except zmq.ZMQError as e:
logger.debug(f'reuse prediction') logger.debug(f'reuse prediction')
try: try:
tracker_frame: Frame = self.tracker_sock.recv_pyobj(zmq.NOBLOCK) tracker_frame: Frame = self.tracker_sock.recv_pyobj(zmq.NOBLOCK)
for track_id, track in tracker_frame.tracks.items():
self.tracks[track_id] = track
except zmq.ZMQError as e: except zmq.ZMQError as e:
logger.debug(f'reuse tracks') logger.debug(f'reuse tracks')
if first_time is None: if first_time is None:
first_time = frame.time first_time = frame.time
img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config) img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions)
img_path = (self.config.output_dir / f"{i:05d}.png").resolve() img_path = (self.config.output_dir / f"{i:05d}.png").resolve()
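Note: predictions are stored under a composite key, f"{track_id}-{track.history[-1].frame_nr}", so each new prediction for the same pedestrian gets its own entry and older ones linger and keep fading instead of being overwritten; plain tracks keep their original key and are simply replaced. A minimal standalone sketch of that keying, with hypothetical stand-in values instead of Track objects:

    # Each (track, last-seen frame) pair gets its own slot.
    predictions = {}
    for track_id, frame_nr in [("7", 100), ("7", 112)]:
        predictions[f"{track_id}-{frame_nr}"] = f"prediction@{frame_nr}"
    print(predictions)
    # {'7-100': 'prediction@100', '7-112': 'prediction@112'}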
@ -390,6 +399,16 @@ class CvRenderer:
if self.config.render_window: if self.config.render_window:
cv2.imshow('frame',cv2.resize(img, (1920, 1080))) cv2.imshow('frame',cv2.resize(img, (1920, 1080)))
cv2.waitKey(1) cv2.waitKey(1)
# clear out old tracks & predictions:
for track_id, track in list(self.tracks.items()):
if get_opacity(track, frame) == 0:
self.tracks.pop(track_id)
for prediction_id, track in list(self.predictions.items()):
if get_opacity(track, frame) == 0:
self.predictions.pop(prediction_id)
logger.info('Stopping') logger.info('Stopping')
# if i>2: # if i>2:
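Note: the list(...) snapshot in the pruning loops matters: popping from a dict while iterating over it directly raises a RuntimeError. A minimal sketch of the pattern, with floats standing in for Track objects and their computed opacity:

    tracks = {"a": 0.0, "b": 0.4}   # id -> opacity stand-in
    for track_id, opacity in list(tracks.items()):
        if opacity == 0:
            tracks.pop(track_id)
    print(tracks)  # {'b': 0.4}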
@@ -421,6 +440,13 @@ colorset = [
     # (0,0,0),
 # ]

+def get_opacity(track: Track, current_frame: Frame):
+    fade_duration = current_frame.camera.fps * 3
+    diff = current_frame.index - track.history[-1].frame_nr
+    return max(0, 1 - diff / fade_duration)
+    # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
+    # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
+
 def convert_world_space_to_img_space(H: cv2.Mat):
     """Transform the given matrix so that it immediately converts
     the points to img space"""
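Note: get_opacity ramps linearly from 1 down to 0 over three seconds' worth of frames; once it reaches 0, the pruning loop above discards the entry. A worked example, assuming a 30 fps camera (the real value comes from current_frame.camera.fps):

    fade_duration = 30 * 3                     # 90 frames ~ 3 seconds
    diff = 45                                  # last seen 45 frames ago
    print(max(0, 1 - diff / fade_duration))    # 0.5 -> half faded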
@@ -435,8 +461,11 @@ def convert_world_points_to_img_points(points: Iterable):
         return np.array(points) * 100
     return [[p[0]*100, p[1]*100] for p in points]

 # Deprecated
-def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace) -> np.array:
+def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track]) -> np.array:
     # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
     # or https://github.com/pygobject/pycairo?tab=readme-ov-file
     # or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
@ -450,7 +479,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
# Fill image with red color(set each pixel to red) # Fill image with red color(set each pixel to red)
overlay[:] = (0, 0, 0) overlay[:] = (0, 0, 0)
img = cv2.addWeighted(dst_img, .7, overlay, .3, 0) img = cv2.addWeighted(dst_img, .1, overlay, .3, 0)
# img = frame.img.copy() # img = frame.img.copy()
# all not working: # all not working:
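Note: cv2.addWeighted blends per pixel as dst_img*alpha + overlay*beta + gamma; dropping the frame weight from .7 to .1 darkens the camera feed so tracks that fade toward black disappear into the background. A small check of the arithmetic:

    import cv2
    import numpy as np

    dst_img = np.full((2, 2, 3), 200, dtype=np.uint8)  # bright frame
    overlay = np.zeros_like(dst_img)                   # black overlay
    img = cv2.addWeighted(dst_img, .1, overlay, .3, 0)
    print(img[0, 0])  # [20 20 20] -> much darker than with weight .7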
@ -466,7 +495,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
if not tracker_frame: if not tracker_frame:
cv2.putText(img, f"and track", (650,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1) cv2.putText(img, f"and track", (650,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
else: else:
for track_id, track in tracker_frame.tracks.items(): for track_id, track in tracks.items():
inv_H = np.linalg.pinv(tracker_frame.H) inv_H = np.linalg.pinv(tracker_frame.H)
draw_track_projected(img, track, int(track_id), config.camera, convert_world_points_to_img_points) draw_track_projected(img, track, int(track_id), config.camera, convert_world_points_to_img_points)
@ -474,11 +503,12 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
cv2.putText(img, f"Waiting for prediction...", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1) cv2.putText(img, f"Waiting for prediction...", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
# continue # continue
else: else:
for track_id, track in prediction_frame.tracks.items(): for track_id, track in predictions.items():
inv_H = np.linalg.pinv(prediction_frame.H) inv_H = np.linalg.pinv(prediction_frame.H)
# draw_track(img, track, int(track_id)) # draw_track(img, track, int(track_id))
draw_trackjectron_history(img, track, int(track_id), convert_world_points_to_img_points) draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
draw_track_predictions(img, track, int(track_id)+1, config.camera, convert_world_points_to_img_points) opacity = get_opacity(track, frame)
draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, opacity=opacity)
cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1) cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)
base_color = (255,)*3 base_color = (255,)*3

Changed file 2 of 2:

@@ -185,7 +185,10 @@ def tracker_compare():
             bar.set_description(f"[{frames.video_nr}/{len(frames.video_srcs)}] [{frames.frame_idx}/{frames.frame_count}] {str(frames.video_path)}")

-def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable]):
+def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], opacity=1):
+    """
+    Opacity: 0-1
+    """
     if not track.predictions:
         return
@@ -201,6 +204,8 @@ def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:
         # color = (128,0,128) if pred_i else (128,128,0)
         color = bgr_colors[color_index % len(bgr_colors)]
+        color = tuple([int(c*opacity) for c in color])

         for ci in range(0, len(pred_coords)):
             if ci == 0:
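Note: OpenCV's drawing primitives take no alpha parameter on a plain BGR image (hence the QPainter/pycairo/pyglet TODO in the first file), so the fade is approximated by scaling the colour toward the black background. A quick demonstration of that scaling:

    bgr = (255, 0, 0)  # blue, in BGR order
    for opacity in (1.0, 0.5, 0.0):
        print(tuple(int(c * opacity) for c in bgr))
    # (255, 0, 0) -> (127, 0, 0) -> (0, 0, 0)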