Compare commits

..

4 commits

Author SHA1 Message Date
Ruben van de Ven e837617e39 WIP refactor to keep predictions longer 2024-10-02 12:05:53 +02:00
Ruben van de Ven c3263e7448 Split tracker in renderer 2024-09-30 15:42:06 +02:00
Ruben van de Ven 6e98138cc2 formatting of latency 2024-09-30 15:13:55 +02:00
Ruben van de Ven 6cb55bba47 full screen option 2024-09-30 14:59:03 +02:00
2 changed files with 158 additions and 48 deletions

View file

@@ -255,6 +255,9 @@ render_parser.add_argument("--render-file",
 render_parser.add_argument("--render-window",
                     help="Render a preview to a window",
                     action='store_true')
+render_parser.add_argument("--full-screen",
+                    help="Set window to full screen",
+                    action='store_true')
 render_parser.add_argument("--render-url",
                     help="""Stream renderer on given URL. Two easy approaches:

View file

@@ -18,6 +18,7 @@ import tempfile
 from pathlib import Path
 import shutil
 import math
+from collections import deque
 from pyglet import shapes
 from PIL import Image
@@ -58,15 +59,21 @@ def relativePolarToPoint(origin, r, angle) -> tuple[float, float]:
 class DrawnTrack:
     def __init__(self, track_id, track: Track, renderer: Renderer, H):
-        # self.created_at = time.time()
+        self.update_at = self.created_at = time.time()
         self.track_id = track_id
         self.renderer = renderer
-        self.set_track(track, H)
         self.drawn_positions = []
         self.drawn_predictions = []
+        self.predictions: deque[DrawnPrediction] = deque(maxlen=20) # TODO: make configurable
         self.shapes: list[pyglet.shapes.Line] = []
-        self.pred_shapes: list[list[pyglet.shapes.Line]] = []
+        self.set_track(track, H)
+        self.set_prediction(track)

     def set_track(self, track: Track, H):
+        self.update_at = time.time()
         self.track = track
         self.H = H
         self.coords = [d.get_foot_coords() for d in track.history]
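The new predictions attribute is a bounded deque: once 20 DrawnPrediction objects are stored, appending another silently drops the oldest, so a long-lived track cannot accumulate predictions without limit. A small stand-alone illustration of that maxlen behaviour (ints stand in for DrawnPrediction instances):

# deque(maxlen=...) as used for DrawnTrack.predictions; ints stand in for DrawnPrediction objects.
from collections import deque

predictions = deque(maxlen=3)
for i in range(5):
    predictions.append(i)

print(list(predictions))  # [2, 3, 4] -- the two oldest entries were discarded automatically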
@@ -74,11 +81,17 @@ class DrawnTrack:
         # perhaps only do in constructor:
         self.inv_H = np.linalg.pinv(self.H)

+    def set_prediction(self, track: Track):
+        # TODO: turn into add_prediction
         pred_coords = []
+        if not track.predictions:
+            return
+
         for pred_i, pred in enumerate(track.predictions):
             pred_coords.append(cv2.perspectiveTransform(np.array([pred]), self.inv_H)[0].tolist())

-        self.pred_coords = pred_coords
+        # self.pred_coords = pred_coords
+        self.predictions.append(DrawnPrediction(self, pred_coords))

         # color = (128,0,128) if pred_i else (128,
@@ -94,23 +107,26 @@ class DrawnTrack:
         if len(self.coords) > len(self.drawn_positions):
             self.drawn_positions.extend(self.coords[len(self.drawn_positions):])

-        for a, drawn_prediction in enumerate(self.drawn_predictions):
-            for i, pos in enumerate(drawn_prediction):
-                # TODO: this should be done in polar space starting from origin (i.e. self.drawn_position[-1])
-                decay = max(3, (18/i) if i else 10) # points further away move with more delay
-                decay = 6
-                origin = self.drawn_positions[-1]
-                drawn_r, drawn_angle = relativePointToPolar(origin, drawn_prediction[i])
-                pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
-                r = exponentialDecay(drawn_r, pred_r, decay, dt)
-                angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
-                x, y = relativePolarToPoint(origin, r, angle)
-                self.drawn_predictions[a][i] = int(x), int(y)
-                # self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
-                # self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
-        if len(self.pred_coords) > len(self.drawn_predictions):
-            self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
+        # Superseded by individual DrawnPrediction elements
+        # for a, drawn_prediction in enumerate(self.drawn_predictions):
+        #     for i, pos in enumerate(drawn_prediction):
+        #         # TODO: this should be done in polar space starting from origin (i.e. self.drawn_position[-1])
+        #         decay = max(3, (18/i) if i else 10) # points further away move with more delay
+        #         decay = 6
+        #         origin = self.drawn_positions[-1]
+        #         drawn_r, drawn_angle = relativePointToPolar(origin, drawn_prediction[i])
+        #         pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
+        #         r = exponentialDecay(drawn_r, pred_r, decay, dt)
+        #         angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
+        #         x, y = relativePolarToPoint(origin, r, angle)
+        #         self.drawn_predictions[a][i] = int(x), int(y)
+        #         # self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
+        #         # self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
+        # if len(self.pred_coords) > len(self.drawn_predictions):
+        #     self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])

         # for a, drawn_prediction in self.drawn_predictions:
         #     if len(self.pred_coords) > len(self.drawn_predictions):
         #         self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
@@ -187,6 +203,55 @@ class DrawnTrack:
             else:
                 target_opacity = (1 - ((ci - half) / half)) * 180
             line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
+
+
+class DrawnPrediction:
+    def __init__(self, drawn_track: DrawnTrack, coords: list[list] = []):
+        self.created_at = time.time()
+        # self.renderer = renderer
+        self.drawn_track = drawn_track
+        self.coords = coords
+        self.color = colorset[self.drawn_track.track_id % len(colorset)]
+        self.pred_shapes: list[list[pyglet.shapes.Line]] = []
+
+        # coords is a list of predictions
+        for a, coords in enumerate(self.coords):
+            prediction_shapes = []
+            for ci in range(0, len(coords)):
+                if ci == 0:
+                    x, y = [int(p) for p in self.drawn_track.coords[-1]]
+                else:
+                    x, y = [int(p) for p in coords[ci-1]]
+                x2, y2 = [int(p) for p in coords[ci]]
+                # flip in window:
+                y, y2 = self.drawn_track.renderer.window.height - y, self.drawn_track.renderer.window.height - y2
+                line = self.drawn_track.renderer.gradientLine(x, y, x2, y2, 3, self.color, self.color, batch=self.drawn_track.renderer.batch_anim)
+                line.opacity = 5
+                prediction_shapes.append(line)
+            self.pred_shapes.append(prediction_shapes)
+
+    def update_opacities(self, dt: float):
+        """
+        Update the opacities of the drawn lines, using only the dt provided by the renderer.
+        Done using exponential decay, with a different decay value per item.
+        """
+        for a, coords in enumerate(self.coords):
+            for ci in range(0, len(coords)):
+                line = self.pred_shapes[a][ci-1]
+
+                # Positions of prediction no longer update
+                # line.x, line.y = x, y
+                # line.x2, line.y2 = x2, y2
+                # line.color = color
+                decay = (16/ci) if ci else 16
+                half = len(coords) / 2
+                if ci < half:
+                    target_opacity = 180
+                else:
+                    target_opacity = (1 - ((ci - half) / half)) * 180
+                line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
+
+
 class FrameWriter:
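Both DrawnTrack and the new DrawnPrediction animate opacity through an exponentialDecay helper that is not part of this diff. A plausible implementation, inferred from how it is called here (current value, target value, decay rate, frame delta) and therefore an assumption rather than the project's actual code:

# Assumed helper, not shown in this diff: eases `current` toward `target`
# at a rate set by `decay`, independently of the frame time `dt`.
import math

def exponentialDecay(current: float, target: float, decay: float, dt: float) -> float:
    return target + (current - target) * math.exp(-decay * dt)

# e.g. a prediction line fading in from opacity 5 toward 180 over a few 60 fps frames:
opacity = 5.0
for _ in range(3):
    opacity = exponentialDecay(opacity, 180.0, 16, dt=1/60)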
@@ -239,6 +304,11 @@ class Renderer:
         self.prediction_sock.setsockopt(zmq.SUBSCRIBE, b'')
         self.prediction_sock.connect(config.zmq_prediction_addr if not self.config.bypass_prediction else config.zmq_trajectory_addr)

+        self.tracker_sock = context.socket(zmq.SUB)
+        self.tracker_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
+        self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
+        self.tracker_sock.connect(config.zmq_trajectory_addr)
+
         self.frame_sock = context.socket(zmq.SUB)
         self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
         self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
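The inline comment is the important detail of this hunk: ZMQ_CONFLATE keeps only the most recent message in the subscriber's queue, and (as the comment warns) it has no effect if set after connect(). A minimal stand-alone subscriber mirroring the tracker_sock setup (the address is a placeholder, not taken from this project):

# Minimal conflating SUB socket; tcp://localhost:5556 is a placeholder address.
import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.setsockopt(zmq.CONFLATE, 1)     # keep only the latest message; must be set BEFORE connect()
sock.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to everything
sock.connect("tcp://localhost:5556")

try:
    frame = sock.recv_pyobj(zmq.NOBLOCK)  # raises zmq.ZMQError (EAGAIN) when nothing is queued yet
except zmq.ZMQError:
    frame = None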
@@ -256,7 +326,7 @@ class Renderer:
         self.out_writer = self.start_writer() if self.config.render_file else None
         self.streaming_process = self.start_streaming() if self.config.render_url else None

         if self.config.render_window:
             pass
             # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
             # cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
@@ -265,7 +335,7 @@ class Renderer:
         config = pyglet.gl.Config(sample_buffers=1, samples=4)
         # , fullscreen=self.config.render_window
-        self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config)
+        self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=self.config.full_screen)
         self.window.set_handler('on_draw', self.on_draw)
         self.window.set_handler('on_refresh', self.on_refresh)
         self.window.set_handler('on_close', self.on_close)
@@ -282,6 +352,7 @@ class Renderer:
         self.first_time: float|None = None
         self.frame: Frame|None = None
+        self.tracker_frame: Frame|None = None
         self.prediction_frame: Frame|None = None
@@ -341,7 +412,9 @@ class Renderer:
     def init_labels(self):
         base_color = (255,)*4
-        info_color = (255,255,0, 255)
+        color_predictor = (255,255,0, 255)
+        color_info = (255,0, 255, 255)
+        color_tracker = (0,255, 255, 255)

         options = []
         for option in ['prediction_horizon','num_samples','full_dist','gmm_mode','z_mode', 'model_dir']:
@@ -350,13 +423,15 @@ class Renderer:
         self.labels = {
             'waiting': pyglet.text.Label("Waiting for prediction"),
             'frame_idx': pyglet.text.Label("", x=20, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
-            'frame_time': pyglet.text.Label("t", x=120, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
-            'pred_idx': pyglet.text.Label("", x=90, y=self.window.height - 17, color=info_color, batch=self.batch_overlay),
-            'pred_time': pyglet.text.Label("", x=200, y=self.window.height - 17, color=info_color, batch=self.batch_overlay),
-            'track_len': pyglet.text.Label("", x=500, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
+            'tracker_idx': pyglet.text.Label("", x=90, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
+            'pred_idx': pyglet.text.Label("", x=110, y=self.window.height - 17, color=color_predictor, batch=self.batch_overlay),
+            'frame_time': pyglet.text.Label("t", x=140, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
+            'frame_latency': pyglet.text.Label("", x=235, y=self.window.height - 17, color=color_info, batch=self.batch_overlay),
+            'tracker_time': pyglet.text.Label("", x=300, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
+            'pred_time': pyglet.text.Label("", x=360, y=self.window.height - 17, color=color_predictor, batch=self.batch_overlay),
+            'track_len': pyglet.text.Label("", x=800, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
             'options1': pyglet.text.Label(options.pop(-1), x=20, y=30, color=base_color, batch=self.batch_overlay),
             'options2': pyglet.text.Label(" | ".join(options), x=20, y=10, color=base_color, batch=self.batch_overlay),
         }

     def refresh_labels(self, dt: float):
@@ -364,12 +439,18 @@ class Renderer:
         if self.frame:
             self.labels['frame_idx'].text = f"{self.frame.index:06d}"
-            self.labels['frame_time'].text = f"{self.frame.time - self.first_time:.3f}s"
+            self.labels['frame_time'].text = f"{self.frame.time - self.first_time: >10.2f}s"
+            self.labels['frame_latency'].text = f"{self.frame.time - time.time():.2f}s"
+
+        if self.tracker_frame:
+            self.labels['tracker_idx'].text = f"{self.tracker_frame.index - self.frame.index}"
+            self.labels['tracker_time'].text = f"{self.tracker_frame.time - time.time():.3f}s"
+            self.labels['track_len'].text = f"{len(self.tracker_frame.tracks)} tracks"

         if self.prediction_frame:
             self.labels['pred_idx'].text = f"{self.prediction_frame.index - self.frame.index}"
             self.labels['pred_time'].text = f"{self.prediction_frame.time - time.time():.3f}s"
-            self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"
+            # self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"

         # cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
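The frame_time change is purely a format-spec tweak: ': >10.2f' right-aligns the value in a 10-character field with two decimals, so the timestamp label keeps a stable width instead of jittering as the number grows. A tiny illustration with a made-up value:

# Format-spec comparison for the frame_time label (value is made up).
elapsed = 3.14159
print(f"{elapsed: >10.2f}s")  # '      3.14s' -- padded to width 10, two decimals
print(f"{elapsed:.3f}s")      # '3.142s'      -- the previous, unpadded formatting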
@@ -400,8 +481,9 @@ class Renderer:
             if not self.first_time:
                 self.first_time = self.frame.time
             img = cv2.GaussianBlur(self.frame.img, (15, 15), 0)
-            img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
-            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
+            img = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2RGB)
+            channels = 3 # unfortunately, pyglet seems to draw single channel as Red only
+            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes(), pitch=self.frame_size[0] * -1 * channels)
             # don't draw in batch, so that it is the background
             self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
             self.video_sprite.opacity = 100
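Two things change in the background-image path: the frame is converted to grayscale but kept as three-channel RGB (per the comment, pyglet renders single-channel data as red only), and the vertical flip that cv2.flip used to do is now handled by passing a negative pitch to ImageData, which tells pyglet the rows are ordered top to bottom. A small sketch of that pitch behaviour with a synthetic image (not project code):

# Negative pitch marks the buffer as top-down, replacing an explicit cv2.flip.
# Synthetic 2x4 image: top row red, bottom row black.
import numpy as np
import pyglet

h, w, channels = 2, 4, 3
img = np.zeros((h, w, channels), dtype=np.uint8)
img[0, :] = (255, 0, 0)

image_data = pyglet.image.ImageData(w, h, 'RGB', img.tobytes(), pitch=-w * channels)
# pyglet's native order is bottom-up; with the negative pitch the red row
# still ends up at the top when the image is drawn.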
@@ -411,27 +493,50 @@ class Renderer:
             pass

         try:
             self.prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
+            self.update_predictions()
+        except zmq.ZMQError as e:
+            pass
+
+        try:
+            self.tracker_frame: Frame = self.tracker_sock.recv_pyobj(zmq.NOBLOCK)
             self.update_tracks()
         except zmq.ZMQError as e:
             pass
     def update_tracks(self):
         """Updates the track objects and shapes. Called after setting `prediction_frame`
         """
+        # clean up
+        # for track_id in list(self.drawn_tracks.keys()):
+        #     if track_id not in self.prediction_frame.tracks.keys():
+        #         # TODO fade out
+        #         del self.drawn_tracks[track_id]
+
+        if self.tracker_frame:
+            for track_id, track in self.tracker_frame.tracks.items():
+                if track_id not in self.drawn_tracks:
+                    self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.tracker_frame.H)
+                else:
+                    self.drawn_tracks[track_id].set_track(track, self.tracker_frame.H)
+
         # clean up
         for track_id in list(self.drawn_tracks.keys()):
-            if track_id not in self.prediction_frame.tracks.keys():
+            # TODO make delay configurable
+            if self.drawn_tracks[track_id].update_at < time.time() - 5:
                 # TODO fade out
                 del self.drawn_tracks[track_id]
+    def update_predictions(self):
+        if self.prediction_frame:
+            for track_id, track in self.prediction_frame.tracks.items():
+                if track_id not in self.drawn_tracks:
+                    self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H)
+                    logger.warning("Prediction for uninitialised frame. This should not happen? (maybe huge delay in prediction?)")
+                else:
+                    self.drawn_tracks[track_id].set_prediction(track)
+
         for track_id, track in self.prediction_frame.tracks.items():
             if track_id not in self.drawn_tracks:
                 self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H)
             else:
                 self.drawn_tracks[track_id].set_track(track, self.prediction_frame.H)
     def on_key_press(self, symbol, modifiers):
         print('A key was pressed, use f to hide')
@@ -470,9 +575,10 @@ class Renderer:
             for shape in track.shapes:
                 shape.draw() # for some reason the batches don't work
         for track in self.drawn_tracks.values():
-            for shapes in track.pred_shapes:
-                for shape in shapes:
-                    shape.draw()
+            for prediction in track.predictions:
+                for shapes in prediction.pred_shapes:
+                    for shape in shapes:
+                        shape.draw()
         # self.batch_anim.draw()
         self.batch_overlay.draw()
@@ -548,6 +654,7 @@ class Renderer:
     def run(self):
         frame = None
         prediction_frame = None
+        tracker_frame = None

         i=0
         first_time = None
@@ -624,7 +731,7 @@ colorset = [(0, 0, 0),
             (255, 255, 0)
 ]

+# Deprecated
 def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace) -> np.array:
     # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
     # or https://github.com/pygobject/pycairo?tab=readme-ov-file
@@ -633,12 +740,12 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace) -> np.array:
     # or https://api.arcade.academy/en/stable/index.html (supports gradient color in line -- "Arcade is built on top of Pyglet and OpenGL.")
     frame.img

-    overlay = np.zeros(frame.img.shape, np.uint8)
-    # Fill image with red color(set each pixel to red)
-    overlay[:] = (130, 0, 75)
-    img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
-    # img = frame.img.copy()
+    # # Fill image with red color(set each pixel to red)
+    # overlay = np.zeros(frame.img.shape, np.uint8)
+    # overlay[:] = (130, 0, 75)
+    # img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
+    img = frame.img.copy()

     # all not working:
     # if i == 1: