Refactoring for NN, scenarios, laser tests

Ruben van de Ven 2025-04-03 21:03:04 +02:00
parent eee73d675a
commit fe9efc163f
10 changed files with 285 additions and 41 deletions

View file

@@ -29,6 +29,8 @@ dependencies = [
"pyusb>=1.3.1,<2",
"ipywidgets>=8.1.5,<9",
"foucault",
"python-statemachine>=2.5.0",
"facenet-pytorch>=2.5.3",
]
[project.scripts]

View file

@@ -203,6 +203,12 @@ class Track:
predictions: Optional[list] = None
fps: int = 12 # TODO)) convert this to camera? That way, incorporates H and dist, alternatively, each track is as a whole attached to a space
source: Optional[int] = None # to keep track of processed tracks
lost: bool = False
created_at: Optional[float] = None
def __post_init__(self):
if not self.created_at:
self.created_at = time.perf_counter()
def get_projected_history(self, H: Optional[cv2.Mat] = None, camera: Optional[Camera]= None) -> np.array:
foot_coordinates = [d.get_foot_coords() for d in self.history]
@@ -260,6 +266,7 @@ class Track:
def get_sampled(self, step_size = 1, offset=0):
"""Get copy of track, with every n-th frame"""
if not self.is_complete():
t = self.get_with_interpolated_history()
else:
@@ -272,6 +279,37 @@ class Track:
t.predictions,
t.fps/step_size)
def get_simplified_history(self, distance: float, camera: Camera) -> list[tuple[float, float]]:
# TODO)) Simplify to get a point every n-th meter
# useful for both predicting and rendering with laser
if len(self.history) < 1:
return []
path = self.get_projected_history(H=None, camera=camera)
new_path: list[tuple[float, float]] = [tuple(path[0])]
lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
# cumulative distance travelled at each point, starting at 0 for the first point
cum_lengths = np.cumsum(np.concatenate(([0], lengths)))
pos = distance
for a, b, l_a, l_b in zip(path[:-1], path[1:], cum_lengths[:-1], cum_lengths[1:]):
# check if this segment contains our next sample point (pos):
# because we walk the path sequentially, skip the segment while
# point b is still before our target position
if l_b <= pos:
continue
relative_t = inv_lerp(l_a, l_b, pos)
x = lerp(a[0], b[0], relative_t)
y = lerp(a[1], b[1], relative_t)
new_path.append((x, y))
pos += distance
return new_path
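The method above resamples the projected path so that there is roughly one point every `distance` metres, by interpolating between the two path points that straddle each target arc length. A standalone sketch of the same lerp/inv_lerp technique (illustrative only; `lerp` and `inv_lerp` are redefined locally here, in the project they come from trap.utils):

    import numpy as np

    def lerp(a, b, t): return a + (b - a) * t
    def inv_lerp(a, b, v): return (v - a) / (b - a)

    def resample_by_distance(path: np.ndarray, distance: float) -> list[list[float]]:
        # per-segment lengths and the cumulative distance travelled at every point
        lengths = np.sqrt(np.sum(np.diff(path, axis=0) ** 2, axis=1))
        cum_lengths = np.cumsum(np.concatenate(([0], lengths)))
        new_path = [path[0].tolist()]
        pos = distance
        for a, b, l_a, l_b in zip(path[:-1], path[1:], cum_lengths[:-1], cum_lengths[1:]):
            while pos <= l_b:  # a long segment can contain several sample points
                t = inv_lerp(l_a, l_b, pos)
                new_path.append([lerp(a[0], b[0], t), lerp(a[1], b[1], t)])
                pos += distance
        return new_path

    # a 6 m L-shaped path sampled every metre
    print(resample_by_distance(np.array([[0.0, 0.0], [3.0, 0.0], [3.0, 3.0]]), 1.0))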
def get_binned(self, bin_size, camera: Camera, bin_start=True):
"""
For an experiment: what if we predict using only concrete positions, by mapping
@@ -334,17 +372,21 @@ class Track:
# todo Offsets to points:[ history for in points]
return new_history
def to_dataframe(self, camera: Camera) -> pd.DataFrame:
positions = self.get_projected_history(None, camera)
velocity = np.gradient(positions, 1/self.fps, axis=0)
acceleration = np.gradient(velocity, 1/self.fps, axis=0)
# # we can calculate heading based on the velocity components
# heading = (np.arctan2(velocity[:,1], velocity[:,0]) * 180 / np.pi) % 360
# # and derive it to get the rate of change of the heading
# d_heading = np.gradient(heading, 1/self.fps, axis=0)
data_columns = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration'], ['x', 'y']])
# data_columns = data_columns.append(pd.MultiIndex.from_tuples([('heading', '°'), ('heading', 'd°')]))
# vx = derivative_of(x, scene.dt)
# vy = derivative_of(y, scene.dt)
# ax = derivative_of(vx, scene.dt)
@@ -356,10 +398,16 @@ class Track:
('velocity', 'x'): velocity[:,0],
('velocity', 'y'): velocity[:,1],
('acceleration', 'x'): acceleration[:,0],
('acceleration', 'y'): acceleration[:,1],
# ('heading', '°'): heading,
# ('heading', 'd°'): d_heading,
}
return pd.DataFrame(data_dict, columns=data_columns)
def to_trajectron_node(self, camera: Camera, env: Environment) -> Node:
node_data = self.to_dataframe(camera)
new_first_idx = self.history[0].frame_nr
return Node(node_type=env.NodeType.PEDESTRIAN, node_id=self.track_id, data=node_data, first_timestep=new_first_idx)
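For context, to_dataframe() derives velocity and acceleration from the projected positions by finite differences and packs them into a column MultiIndex, the layout the Trajectron Node is then built from. A small self-contained sketch of that technique (values and names are made up for illustration):

    import numpy as np
    import pandas as pd

    fps = 12
    positions = np.array([[0.0, 0.0], [0.1, 0.0], [0.3, 0.1], [0.6, 0.3]])  # hypothetical x,y positions in metres

    velocity = np.gradient(positions, 1 / fps, axis=0)       # first derivative per axis
    acceleration = np.gradient(velocity, 1 / fps, axis=0)    # second derivative per axis

    data_columns = pd.MultiIndex.from_product([['position', 'velocity', 'acceleration'], ['x', 'y']])
    df = pd.DataFrame({
        ('position', 'x'): positions[:, 0], ('position', 'y'): positions[:, 1],
        ('velocity', 'x'): velocity[:, 0], ('velocity', 'y'): velocity[:, 1],
        ('acceleration', 'x'): acceleration[:, 0], ('acceleration', 'y'): acceleration[:, 1],
    }, columns=data_columns)
    print(df)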

View file

@@ -199,6 +199,10 @@ connection_parser.add_argument('--zmq-trajectory-addr',
help='Manually specify communication addr for the trajectory messages',
type=str,
default="ipc:///tmp/feeds_traj")
connection_parser.add_argument('--zmq-face-addr',
help='Manually specify communication addr for the face detector messages',
type=str,
default="ipc:///tmp/feeds_faces")
connection_parser.add_argument('--zmq-camera-stream-addr',
help='Manually specify communication addr for the camera stream messages',

View file

@@ -24,6 +24,7 @@ from typing import Dict, Iterable, Optional
from pyglet import shapes
from PIL import Image
from trap.counter import CounterListerner
from trap.frame_emitter import DetectionState, Frame, Track, Camera
from trap.preview_renderer import FrameWriter
from trap.tools import draw_track, draw_track_predictions, draw_track_projected, draw_trackjectron_history, to_point
@@ -38,6 +39,8 @@ class CvRenderer:
self.config = config
self.is_running = is_running
self.counter_listener = CounterListerner()
context = zmq.Context()
self.prediction_sock = context.socket(zmq.SUB)
self.prediction_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
@@ -392,7 +395,8 @@ class CvRenderer:
if first_time is None:
first_time = frame.time
# img = frame.img
img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.config.render_clusters, self.counter_listener)
logger.debug(f"write frame {frame.time - first_time:.3f}s")
if self.out_writer:
@@ -456,7 +460,7 @@ def get_animation_position(track: Track, current_frame: Frame):
# Deprecated
def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], as_clusters = True, counter_listener: CounterListerner|None = None) -> np.array:
# TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
# or https://github.com/pygobject/pycairo?tab=readme-ov-file
# or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
@@ -550,6 +554,10 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
cv2.putText(img, options.pop(-1), (20,img.shape[0]-30), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
cv2.putText(img, " | ".join(options), (20,img.shape[0]-10), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
# for i, (k, v) in enumerate(counter_listener.get_latest().items()):
# cv2.putText(img, f"{k} {v.value()}", (20,img.shape[0]-(40*i)-40), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
return img

View file

@@ -24,10 +24,12 @@ from typing import Dict, Iterable, Optional
from pyglet import shapes
from PIL import Image
from trap.scenarios import TrackScenario
from trap.counter import CounterSender
from trap.frame_emitter import DetectionState, Frame, Track, Camera
# from trap.helios import HeliosDAC, HeliosPoint
from trap.preview_renderer import PROJECTION_MAP, DrawnTrack, FrameWriter
from trap.tools import draw_track, draw_track_predictions, draw_track_projected, draw_trackjectron_history, drawntrack_predictions_to_lines, to_point, track_predictions_to_lines
from trap.utils import convert_world_points_to_img_points, convert_world_space_to_img_space, lerp
@@ -228,7 +230,9 @@ class LaserRenderer:
self.prediction_frame: Frame|None = None
self.tracks: Dict[str, Track] = {}
self.scenarios: Dict[str, TrackScenario] = {}
self.predictions: Dict[str, Track] = {}
self.drawn_tracks: Dict[str, DrawnTrack] = {}
self.helios = ctypes.cdll.LoadLibrary("./trap/helios_dac/libHeliosDacAPI.so")
numDevices = self.helios.OpenDevices()
@@ -310,9 +314,8 @@ class LaserRenderer:
# for j, point in enumerate(pointlist_test):
# frame[j] = CHeliosPoint(point.x, point.y, 0,40,0,0 if point.blank else 255)
counter = CounterSender()
print(f"RENDER DAC\n\n\n")
last_laser_point = None
@@ -334,6 +337,18 @@ class LaserRenderer:
for track_id, track in prediction_frame.tracks.items():
prediction_id = f"{track_id}-{track.history[-1].frame_nr}"
self.predictions[prediction_id] = track
# TODO)) also for tracks:
if track_id not in self.drawn_tracks:
self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, prediction_frame.camera.H, PROJECTION_MAP, prediction_frame.camera)
elif self.drawn_tracks[track_id].update_predictions_at < (time.time() - .5): # TODO)) only update predictions every n frames. configure
# self.drawn_tracks[track_id].pred_track
self.drawn_tracks[track_id].set_predictions(track)
if track_id in self.scenarios:
self.scenarios[track_id].set_prediction(track)
# self.drawn_predictions[track_id] = track
except zmq.ZMQError as e:
logger.debug(f'reuse prediction')
@@ -342,6 +357,11 @@ class LaserRenderer:
for track_id, track in tracker_frame.tracks.items():
self.tracks[track_id] = track
if not track_id in self.scenarios:
self.scenarios[track_id] = TrackScenario(track)
else:
self.scenarios[track_id].set_track(track)
# self.scenarios[track_id].receive_track(track)
except zmq.ZMQError as e:
logger.debug(f'reuse tracks')
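TrackScenario comes from the new trap.scenarios module, which is not part of this diff; the python-statemachine dependency added to pyproject.toml suggests it is a state machine fed by set_track()/set_prediction(). A minimal, hypothetical sketch of what such a machine could look like (state names, thresholds and transitions are assumptions, not the project's actual implementation):

    # Hypothetical sketch of a python-statemachine based scenario; the real
    # trap.scenarios.TrackScenario is not shown in this commit.
    from statemachine import StateMachine, State

    class TrackScenario(StateMachine):
        # assumed states: freshly detected, enough history for predictions, and lost
        detected = State(initial=True)
        substantial = State()
        lost = State(final=True)

        mature = detected.to(substantial)
        lose = detected.to(lost) | substantial.to(lost)

        def __init__(self, track):
            self.track = track
            self.prediction = None
            super().__init__()

        def set_track(self, track):
            self.track = track
            if self.current_state.id == "detected" and len(track.history) > 10:
                self.mature()  # hypothetical threshold

        def set_prediction(self, prediction):
            self.prediction = prediction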
@@ -355,13 +375,20 @@
# print('-------')
paths = render_frame_to_pathlist( tracker_frame, prediction_frame, self.drawn_tracks, first_time, self.config, self.tracks, self.predictions, self.config.render_clusters)
counter.set('paths', len(paths))
counter.set('points', sum([len(p.points) for p in paths]))
if self.prediction_frame:
counter.set('pred_render_latency', time.time() - self.prediction_frame.time)
if self.tracker_frame:
counter.set('track_render_latency', time.time() - self.tracker_frame.time)
# print(f"Paths: {len(paths)} ... points {sum([len(p.points) for p in paths])}")
laserframe = LaserFrame(paths)
laserframe_cropped = laserframe.as_cropped_to_projector()
counter.set('laser.removed', laserframe.point_count() - laserframe_cropped.point_count())
if laserframe.point_count() > laserframe_cropped.point_count():
# logger.warning("Removed laser points out of frame!")
# print(laserframe.point_count(), laserframe_cropped.point_count())
laserframe = laserframe_cropped
# pointlist=pointlist_test
# print([(p.x, p.y) for p in pointlist])
@@ -408,6 +435,12 @@ class LaserRenderer:
if get_animation_position(track, tracker_frame) == 1:
self.predictions.pop(prediction_id)
for track_id in list(self.drawn_tracks.keys()):
# TODO make delay configurable
if self.drawn_tracks[track_id].update_at < time.time() - 5:
# TODO fade out
del self.drawn_tracks[track_id]
logger.info('Stopping')
self.helios.CloseDevices()
@@ -442,7 +475,7 @@ def get_animation_position(track: Track, current_frame: Frame) -> float:
# track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
def circle_points(cx, cy, r, c: Color):
# r = r
steps = 30
pointlist: list[LaserPoint] = []
for i in range(steps):
@@ -464,7 +497,7 @@ def world_points_to_laser_points(points):
return cv2.perspectiveTransform(np.array([points]), laser_H)
# Deprecated
def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: Optional[Frame], drawn_tracks: Optional[Dict[str, DrawnTrack]], first_time: Optional[float], config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], as_clusters = True):
# TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
# or https://github.com/pygobject/pycairo?tab=readme-ov-file
# or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
@@ -486,10 +519,10 @@ def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: O
# cv2.imwrite(str(self.config.output_dir / "orig.png"), warpedFrame)
# cv2.rectangle(img, (0,0), (img.shape[1],25), (0,0,0), -1)
intensity = 39 # range 0-255
test_r = 100
base_c = (0,0, intensity)
# base_c = (0,intensity, intensity)
track_c = (intensity,0,0)
pred_c = (0,intensity,0)
@@ -520,7 +553,6 @@ def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: O
# print(not tracker_frame, not prediction_frame)
if not tracker_frame:
paths.append(
LaserPath(circle_points(0xFFF/2+2*test_r, 0xFFF/2, test_r, track_c))
)
@@ -533,36 +565,45 @@ def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: O
for track_id, track in tracks.items():
inv_H = np.linalg.pinv(tracker_frame.H)
# track = track.get_sampled(4)
projected_history = track.get_projected_history(camera=config.camera)
history_for_laser = world_points_to_laser_points(projected_history)[0]
# point_color = bgr_colors[color_index % len(bgr_colors)]
points = np.rint(history_for_laser.reshape((-1,1,2))).astype(np.int32)
# print('point len',len(points))
laserpoints = []
for i, point in enumerate(points):
laserpoints.append(LaserPoint(point[0][0], point[0][1], track_c, blank=False))
path = LaserPath(laserpoints)
paths.append(path)
paths.append(
LaserPath(circle_points(history_for_laser[-1][0], history_for_laser[-1][1], 20, track_c))
)
# draw_track_projected(img, track, int(track_id), config.camera, convert_world_points_to_img_points)
if not prediction_frame:
paths.append(
LaserPath(circle_points(0xFFF/2+4*test_r, 0xFFF/2, test_r, pred_c))
)
# cv2.putText(img, f"Waiting for prediction...", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
# continue
# elif True:
# pass
elif drawn_tracks:
inv_H = np.linalg.pinv(prediction_frame.H)
for track_id, drawn_track in drawn_tracks.items():
drawn_track.update_drawn_positions(dt=None, no_shapes=True)
# For debugging:
# draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
anim_position = 1 # TODO)) calculate without video frame: get_animation_position(track, tracker_frame)
lines = drawntrack_predictions_to_lines(drawn_track, config.camera, anim_position)
# if lines:
# lines.extend(get_prediction_text(drawn_track))
if not lines:
continue
@@ -575,7 +616,7 @@ def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: O
# print('prediction line')
line = world_points_to_laser_points(line)[0]
# line = convert_world_points_to_img_points(line)
line = np.rint(line).astype(np.int32)
laserpoints = []
for i, point in enumerate(line):
laserpoints.append(LaserPoint(point[0], point[1], pred_c, blank=False))
@@ -588,7 +629,65 @@ def render_frame_to_pathlist(tracker_frame: Optional[Frame], prediction_frame: O
# print(len(paths))
return paths
def get_prediction_text(drawn_track: DrawnTrack)-> list[list[float, float]]:
position_index = 20
if not drawn_track.drawn_predictions:
return []
if len(drawn_track.drawn_predictions[0]) < position_index:
logger.warning("prediction too short!")
return []
# draw only for first prediction
draw_pos = drawn_track.drawn_predictions[0][position_index-1]
current_pos = drawn_track.drawn_positions[-1]
angle = np.arctan2(draw_pos[0]-current_pos[0], draw_pos[1]-current_pos[1]) + np.pi
# print('angle', angle)
text_paths = []
with open("your_future_points_test.json", 'r') as fp:
lines = json.load(fp)
for i, line in enumerate(lines):
if i != 0:
continue
points = np.array(line)
avg_x = np.average(points[:,0])
avg_y = np.average(points[:,1])
minx, maxx = np.min(points[:,0]), np.max(points[:,0])
miny, maxy = np.min(points[:,1]), np.max(points[:,1])
sx = maxx-minx
sy = maxy-miny
points[:,0] -= avg_x
points[:,1] -= avg_y - i/2
points /= sx # scale to 1
points @= rotateMatrix(angle)
points += draw_pos
text_paths.append(points)
return text_paths
def rotateMatrix(a):
return np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
def run_laser_renderer(config: Namespace, is_running: BaseEvent, timer_counter):
renderer = LaserRenderer(config, is_running)
renderer.run(timer_counter)
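The rotateMatrix() helper and get_prediction_text() above place pre-drawn text glyphs along the predicted direction by centring, scaling, rotating and translating their point sets. A tiny illustration of that pattern (points and values are made up, not taken from the repository):

    import numpy as np

    def rotateMatrix(a):
        return np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])

    points = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0]])  # hypothetical glyph outline
    angle = np.pi / 2
    draw_pos = np.array([10.0, 5.0])

    points = points - points.mean(axis=0)     # centre the glyph around the origin
    points = points / np.ptp(points[:, 0])    # scale by its width
    points = points @ rotateMatrix(angle)     # rotate towards the predicted heading
    points = points + draw_pos                # move it to the drawing position
    print(points)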

View file

@@ -7,7 +7,9 @@ import signal
import sys
import time
from trap.config import parser
from trap.counter import CounterListerner
from trap.cv_renderer import run_cv_renderer
from trap.face_detector import run_detector
from trap.frame_emitter import run_frame_emitter
from trap.laser_renderer import run_laser_renderer
from trap.prediction_server import run_prediction_server
@@ -88,12 +90,14 @@ def start():
timers = TimerCollection()
timer_fe = timers.new('frame_emitter')
timer_tracker = timers.new('tracker')
timer_faces = timers.new('faces')
# instantiating process with arguments
procs = [
# ExceptionHandlingProcess(target=run_ws_forwarder, kwargs={'config': args, 'is_running': isRunning}, name='forwarder'),
ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_fe.iterations}, name='frame_emitter'),
ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_tracker.iterations}, name='tracker'),
ExceptionHandlingProcess(target=run_detector, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_faces.iterations}, name='detector'),
]
# if args.render_file or args.render_url or args.render_window:
@@ -119,10 +123,14 @@ def start():
)
def timer_process(timers: TimerCollection, is_running: Event):
counter_listener = CounterListerner()
while is_running.is_set():
time.sleep(1)
timers.snapshot()
counter_listener.snapshot()
print(timers.to_string(), counter_listener.to_string())
procs.append(
ExceptionHandlingProcess(target=timer_process, kwargs={'is_running':isRunning, 'timers': timers}, name='timer'),

View file

@@ -67,7 +67,8 @@ class DrawnTrack:
def __init__(self, track_id, track: Track, renderer: PreviewRenderer, H, draw_projection = PROJECTION_IMG, camera: Optional[Camera] = None):
# self.created_at = time.time()
self.draw_projection = draw_projection
self.update_at = self.created_at = self.update_predictions_at = time.time()
self.last_update_t = time.perf_counter()
self.track_id = track_id
self.renderer = renderer
self.camera = camera
@@ -93,6 +94,7 @@ class DrawnTrack:
self.inv_H = np.linalg.pinv(self.H)
def set_predictions(self, track: Track, H = None):
self.update_predictions_at = time.time()
pred_coords = []
pred_history_coords = []
@@ -112,7 +114,7 @@ class DrawnTrack:
# color = (128,0,128) if pred_i else (128,
def update_drawn_positions(self, dt: float|None, no_shapes=False) -> List:
'''
use dt to lerp the drawn positions in the direction of current prediction
'''
@@ -122,6 +124,11 @@ class DrawnTrack:
"""quick wrapper to toggle int'ing"""
return v
# return int(v)
if dt is None:
t = time.perf_counter()
dt = t - self.last_update_t
self.last_update_t = t
# 1. track history
for i, pos in enumerate(self.drawn_positions):
@@ -170,7 +177,8 @@ class DrawnTrack:
# finally: update shapes from coordinates
if not no_shapes: # to be used when not rendering to pyglet (e.g. laser renderer)
self.update_shapes(dt)
return self.drawn_positions
def update_shapes(self, dt):

View file

@@ -12,6 +12,7 @@ import numpy as np
import pandas as pd
import shapely
from shapely.ops import split
from trap.preview_renderer import DrawnTrack
import trap.tracker
from trap.config import parser
from trap.frame_emitter import Camera, Detection, DetectionState, video_src_from_config, Frame
@@ -343,6 +344,27 @@ def track_predictions_to_lines(track: Track, camera:Camera, anim_position=.8):
# break # TODO: only one
return lines
def drawntrack_predictions_to_lines(drawn_track: DrawnTrack, camera:Camera, anim_position=.8):
if not drawn_track.drawn_predictions:
return
# current_point = drawn_track.pred_track.get_projected_history(camera=camera)[-1] # not guaranteed to be up to date
current_point = drawn_track.drawn_predictions[0][0]
# print(current_point)
slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position
lines = []
for pred_i, pred in enumerate(drawn_track.drawn_predictions):
pred_coords = pred #cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
# line_points = pred_coords
line_points = np.concatenate(([current_point], pred_coords)) # 'current point' is a moving target
# print(pred_coords, current_point, line_points)
line_points = transition_path_points(line_points, slide_t)
lines.append(line_points)
# print("prediction line", len(line_points))
# break # TODO: only one
return lines
def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], anim_position=.8, as_clusters=False):
"""
anim_position: 0-1

View file

@@ -10,11 +10,12 @@ import multiprocessing
from pathlib import Path
import pickle
import time
from typing import DefaultDict, Dict, Optional, List
import jsonlines
import numpy as np
import torch
import torchvision
import ultralytics
import zmq
import cv2
@@ -64,6 +65,7 @@ RCNN_SCALE = .4 # seems to have no impact on detections in the corners
def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:
results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))
if results[0].boxes is None or results[0].boxes.id is None:
# work around https://github.com/ultralytics/ultralytics/issues/5968
return []
@@ -178,6 +180,9 @@ class TrackReader:
for track_id in self._tracks:
yield self.get(track_id)
def track_ids(self):
return list(self._tracks.keys())
def read_tracks_json(path: Path, fps):
"""
Reader for tracks.json produced by TrainingDataWriter
@@ -374,8 +379,10 @@ class ByteTrackWrapper(TrackerWrapper):
detections = np.ndarray((0,0)) # needs to be 2-D
_ = self.tracker.update(detections)
removed_tracks = self.tracker.removed_stracks
active_tracks = [track for track in self.tracker.tracked_stracks if track.is_activated]
# TODO)) why was this in here:
# active_tracks = [track for track in active_tracks if track.start_frame < (self.tracker.frame_id - 5)]
return [Detection.from_bytetrack(track, frame_idx) for track in active_tracks]
@@ -389,7 +396,7 @@ class Tracker:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# TODO: support removal
self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())
logger.debug(f"Load tracker: {self.config.detector}")
@@ -579,7 +586,16 @@ class Tracker:
active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera)# a filter to remove just detecting static objects
removable_tracks = []
for track_id, track in self.tracks.items():
if not len(track.history):
continue
detection: Detection = track.history[-1]
if detection.frame_nr < (frame.index - frame.camera.fps * 5):
removable_tracks.append(track_id)
for track_id in removable_tracks:
del self.tracks[track_id]
# active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
# active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
# logger.info(f"{trajectories}")
@@ -764,7 +780,7 @@ class Smoother:
frame.tracks = {t.track_id: t for t in new_tracks}
return frame
def smooth_frame_predictions(self, frame: Frame) -> Frame:
for track in frame.tracks.values():
new_predictions = []

uv.lock
View file

@@ -463,6 +463,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 },
]
[[package]]
name = "facenet-pytorch"
version = "2.5.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "pillow" },
{ name = "requests" },
{ name = "torchvision", version = "0.13.1", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
{ name = "torchvision", version = "0.13.1+cu113", source = { url = "https://download.pytorch.org/whl/cu113/torchvision-0.13.1%2Bcu113-cp310-cp310-linux_x86_64.whl" }, marker = "sys_platform == 'linux'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/80/cd/29d55c5d8675c7f8892c9b79288b633cd97b276d8fb24952f1e74fb7bfcc/facenet-pytorch-2.5.3.tar.gz", hash = "sha256:98cc5b42a48f837c023eb92f2a571cd4ac6a46687c5e71b9e99b491087273e2b", size = 1885286 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/40/c1/0a15058f478c8f0f29cacb6e35530b97ead780be99737e62810e71dc477e/facenet_pytorch-2.5.3-py3-none-any.whl", hash = "sha256:b8002c3ee7a3af471c433cbda1fdcab9ae2d46a4aac43435ba08259c45ffc884", size = 1882784 },
]
[[package]]
name = "fastapi"
version = "0.115.12"
@@ -1851,6 +1867,15 @@ asyncio-client = [
{ name = "aiohttp" },
]
[[package]]
name = "python-statemachine"
version = "2.5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/45/91/4f05f3931d1e9b1df71b17dc08c43feddf2bed7dbf13f95323df2cc8e340/python_statemachine-2.5.0.tar.gz", hash = "sha256:ae88cd22e47930b92b983a2176e61d811e571b69897be2568ec812c2885fb93a", size = 403718 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bf/2d/1c95ebe84df60d630f8e855d1df2c66368805444ac167e9b50f29eabe917/python_statemachine-2.5.0-py3-none-any.whl", hash = "sha256:0ed53846802c17037fcb2a92323f4bc0c833290fa9d17a3587c50886c1541e62", size = 50415 },
]
[[package]]
name = "pytz"
version = "2025.2"
@@ -2517,6 +2542,7 @@ dependencies = [
{ name = "baumer-neoapi" },
{ name = "bytetracker" },
{ name = "deep-sort-realtime" },
{ name = "facenet-pytorch" },
{ name = "ffmpeg-python" },
{ name = "foucault" },
{ name = "gdown" },
@@ -2526,6 +2552,7 @@ dependencies = [
{ name = "pandas-helper-calc" },
{ name = "pyglet" },
{ name = "pyglet-cornerpin" },
{ name = "python-statemachine" },
{ name = "pyusb" },
{ name = "qrcode" },
{ name = "setproctitle" },
@@ -2546,6 +2573,7 @@ requires-dist = [
{ name = "baumer-neoapi", path = "../../Downloads/Baumer_neoAPI_1.4.1_lin_x86_64_python/wheel/baumer_neoapi-1.4.1-cp34.cp35.cp36.cp37.cp38.cp39.cp310.cp311.cp312-none-linux_x86_64.whl" },
{ name = "bytetracker", git = "https://github.com/rubenvandeven/bytetrack-pip" },
{ name = "deep-sort-realtime", specifier = ">=1.3.2,<2" },
{ name = "facenet-pytorch", specifier = ">=2.5.3" },
{ name = "ffmpeg-python", specifier = ">=0.2.0,<0.3" },
{ name = "foucault", git = "https://git.rubenvandeven.com/r/conductofconduct" },
{ name = "gdown", specifier = ">=4.7.1,<5" },
@@ -2555,6 +2583,7 @@ requires-dist = [
{ name = "pandas-helper-calc", git = "https://github.com/scls19fr/pandas-helper-calc" },
{ name = "pyglet", specifier = ">=2.0.15,<3" },
{ name = "pyglet-cornerpin", specifier = ">=0.3.0,<0.4" },
{ name = "python-statemachine", specifier = ">=2.5.0" },
{ name = "pyusb", specifier = ">=1.3.1,<2" },
{ name = "qrcode", specifier = "~=8.0" },
{ name = "setproctitle", specifier = ">=1.3.3,<2" },