From 373afb1d28e7eb235c8ea243ebff38a86fcd65dd Mon Sep 17 00:00:00 2001
From: Ruben van de Ven
Date: Mon, 24 Jun 2024 20:36:14 +0200
Subject: [PATCH] Pyglet for rendering smooth lines and transitions

---
 poetry.lock           |  13 +-
 pyproject.toml        |   1 +
 trap/frame_emitter.py |   4 +-
 trap/renderer.py      | 449 +++++++++++++++++++++++++++++++++++++-----
 4 files changed, 419 insertions(+), 48 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 40bdcd6..2ef4d9d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2288,6 +2288,17 @@ files = [
     {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
 ]
 
+[[package]]
+name = "pyglet"
+version = "2.0.15"
+description = "pyglet is a cross-platform games and multimedia package."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pyglet-2.0.15-py3-none-any.whl", hash = "sha256:9e4cc16efc308106fd3a9ff8f04e7a6f4f6a807c6ac8a331375efbbac8be85af"},
+    {file = "pyglet-2.0.15.tar.gz", hash = "sha256:42085567cece0c7f1c14e36eef799938cbf528cfbb0150c484b984f3ff1aa771"},
+]
+
 [[package]]
 name = "pygments"
 version = "2.17.2"
@@ -3517,4 +3528,4 @@ watchdog = ["watchdog (>=2.3)"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10,<3.12,"
-content-hash = "66f062f9db921cfa83e576288d09fd9b959780eb189d95765934ae9a6769f200"
+content-hash = "5154a99d490755a68e51595424649b5269fcd17ef14094c6285f5de7f972f110"

diff --git a/pyproject.toml b/pyproject.toml
index e8e283b..e808a9b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,6 +31,7 @@ torchreid = "^0.2.5"
 gdown = "^4.7.1"
 pandas-helper-calc = {git = "https://github.com/scls19fr/pandas-helper-calc"}
 tsmoothie = "^1.0.5"
+pyglet = "^2.0.15"
 
 [build-system]
 requires = ["poetry-core"]

diff --git a/trap/frame_emitter.py b/trap/frame_emitter.py
index 1523084..f07a147 100644
--- a/trap/frame_emitter.py
+++ b/trap/frame_emitter.py
@@ -44,7 +44,7 @@ class Detection:
     state: DetectionState
     frame_nr: int
 
-    def get_foot_coords(self):
+    def get_foot_coords(self) -> list[tuple[float, float]]:
         return [self.l + 0.5 * self.w, self.t+self.h]
 
     @classmethod
@@ -95,6 +95,8 @@ class Track:
         coords = self.get_projected_history(H)
         return [{"x":c[0], "y":c[1]} for c in coords]
 
+
+
 
 
 @dataclass

diff --git a/trap/renderer.py b/trap/renderer.py
index cbfb710..57ee5fa 100644
--- a/trap/renderer.py
+++ b/trap/renderer.py
@@ -1,3 +1,6 @@
+# used for "Forward Referencing of type annotations"
+from __future__ import annotations
+
 import time
 import ffmpeg
 from argparse import Namespace
@@ -8,16 +11,184 @@ from multiprocessing.synchronize import Event as BaseEvent
 
 import cv2
 import numpy as np
+import pyglet
+import pyglet.event
 import zmq
 import tempfile
 from pathlib import Path
 import shutil
+import math
+
+from pyglet import shapes
+from PIL import Image
+
+from trap.frame_emitter import DetectionState, Frame, Track
 
-from trap.frame_emitter import DetectionState, Frame
 
 logger = logging.getLogger("trap.renderer")
 
+
+class FrameAnimation:
+    def __init__(self, frame: Frame):
+        self.start_time = time.time()
+        self.frame = frame
+
+    @property
+    def t(self):
+        duration = .2
+        return (time.time() - self.start_time) / duration
+
+    @property
+    def done(self):
+        return (time.time() - self.start_time) > 5
+
+
+def exponentialDecay(a, b, decay, dt):
+    """Exponential decay as alternative to Lerp
+    Introduced by Freya Holmér: https://www.youtube.com/watch?v=LSNQuFEDOyQ
+    """
+    return b + (a-b) * math.exp(-decay * dt)
+
+
+def relativePointToPolar(origin, point) -> tuple[float, float]:
+    x, y = point[0] - origin[0], point[1] - origin[1]
+    return np.sqrt(x**2 + y**2), np.arctan2(y, x)
+
+
+def relativePolarToPoint(origin, r, angle) -> tuple[float, float]:
+    return r * np.cos(angle) + origin[0], r * np.sin(angle) + origin[1]
+
+
+class DrawnTrack:
+    def __init__(self, track_id, track: Track, renderer: Renderer, H):
+        self.track_id = track_id
+        self.renderer = renderer
+        self.set_track(track, H)
+        self.drawn_positions = []
+        self.drawn_predictions = []
+        self.shapes: list[pyglet.shapes.Line] = []
+        self.pred_shapes: list[list[pyglet.shapes.Line]] = []
+
+    def set_track(self, track: Track, H):
+        self.track = track
+        self.H = H
+        self.coords = [d.get_foot_coords() for d in track.history]
+
+        # perhaps only do in constructor:
+        self.inv_H = np.linalg.pinv(self.H)
+
+        pred_coords = []
+        for pred_i, pred in enumerate(track.predictions):
+            pred_coords.append(cv2.perspectiveTransform(np.array([pred]), self.inv_H)[0].tolist())
+
+        self.pred_coords = pred_coords
+        # color = (128,0,128) if pred_i else (128,
+
+
+    def update_drawn_positions(self, dt) -> []:
+        '''
+        use dt to lerp the drawn positions in the direction of current prediction
+        '''
+        # TODO: make lerp, currently quick way to get results
+        for i, pos in enumerate(self.drawn_positions):
+            self.drawn_positions[i][0] = int(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
+            self.drawn_positions[i][1] = int(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
+
+        if len(self.coords) > len(self.drawn_positions):
+            self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
+
+        for a, drawn_prediction in enumerate(self.drawn_predictions):
+            for i, pos in enumerate(drawn_prediction):
+                # TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
+                decay = max(3, (18/i) if i else 10) # points further away move with more delay
+                decay = 6
+                origin = self.drawn_positions[-1]
+                drawn_r, drawn_angle = relativePointToPolar( origin, drawn_prediction[i])
+                pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
+                r = exponentialDecay(drawn_r, pred_r, decay, dt)
+                angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
+                x, y = relativePolarToPoint(origin, r, angle)
+                self.drawn_predictions[a][i] = int(x), int(y)
+                # self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
+                # self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
+
+        if len(self.pred_coords) > len(self.drawn_predictions):
+            self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
+        # for a, drawn_prediction in self.drawn_predictions:
+        #     if len(self.pred_coords) > len(self.drawn_predictions):
+        #         self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
+
+        # self.drawn_positions = self.coords
+        self.update_shapes(dt)
+        return self.drawn_positions
+
+    def update_shapes(self, dt):
+        if len(self.shapes) > len(self.drawn_positions):
+            self.shapes = self.shapes[:len(self.drawn_positions)]
+
+        # for i, pos in self.drawn_positions.enumerate():
+        for ci in range(1, len(self.drawn_positions)):
+            x, y = [int(p) for p in self.drawn_positions[ci-1]]
+            x2, y2 = [int(p) for p in self.drawn_positions[ci]]
+
+            y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
+            color = [100+155*ci // len(self.drawn_positions)]*3
+            # print(x,y,x2,y2,color)
+
+            if ci >= len(self.shapes):
+                # TODO: add color2
+                line = self.renderer.gradientLine(x, y, x2, y2, 2, color, color, batch=self.renderer.batch_anim)
+                line.opacity = 5
+                self.shapes.append(line)
+
+            else:
+                line = self.shapes[ci-1]
+                line.x, line.y = x, y
+                line.x2, line.y2 = x2, y2
+                line.color = color
+                line.opacity = int(exponentialDecay(line.opacity, 255, 3, dt))
+
+        # TODO: basically a duplication of the above, do this smarter?
+        # TODO: add intermediate segment
+        color = colorset[self.track_id % len(colorset)]
+
+        for a, drawn_predictions in enumerate(self.drawn_predictions):
+            if len(self.pred_shapes) <= a:
+                self.pred_shapes.append([])
+
+            if len(self.pred_shapes[a]) > (len(drawn_predictions) +1):
+                self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_predictions)]
+
+            # for i, pos in drawn_predictions.enumerate():
+            for ci in range(0, len(drawn_predictions)):
+                if ci == 0:
+                    x, y = [int(p) for p in self.drawn_positions[-1]]
+                else:
+                    x, y = [int(p) for p in drawn_predictions[ci-1]]
+
+                x2, y2 = [int(p) for p in drawn_predictions[ci]]
+
+                y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
+                # color = [255,0,0]
+                # print(x,y,x2,y2,color)
+
+                if ci >= len(self.pred_shapes[a]):
+                    # TODO: add color2
+                    line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
+                    line.opacity = 5
+                    self.pred_shapes[a].append(line)
+
+                else:
+                    line = self.pred_shapes[a][ci-1]
+                    line.x, line.y = x, y
+                    line.x2, line.y2 = x2, y2
+                    line.color = color
+                    decay = (16/ci) if ci else 16
+                    half = len(drawn_predictions) / 2
+                    if ci < half:
+                        target_opacity = 255
+                    else:
+                        target_opacity = (1 - ((ci - half) / half)) * 255
+                    line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
+
+
 class FrameWriter:
     """
     Drop-in compatible interface with cv2.VideoWriter, but support variable
@@ -78,12 +249,185 @@ class Renderer:
         # self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
         self.fps = 10
         self.frame_size = (1280,720)
+        self.hide_stats = False
         self.out_writer = self.start_writer() if self.config.render_file else None
         self.streaming_process = self.start_streaming() if self.config.render_url else None
 
         if self.config.render_window:
             cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
             cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
+        else:
+            pyglet.options["headless"] = True
+
+        config = pyglet.gl.Config(sample_buffers=1, samples=4)
+        self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config)
+        self.window.set_handler('on_draw', self.on_draw)
+        self.window.set_handler('on_refresh', self.on_refresh)
+        self.window.set_handler('on_close', self.on_close)
+
+        pyglet.gl.glClearColor(81./255, 20/255, 46./255, 0)
+        self.fps_display = pyglet.window.FPSDisplay(window=self.window)
+
+        self.drawn_tracks: dict[str, DrawnTrack] = {}
+
+
+        self.frame: Frame|None = None
+        self.prediction_frame: Frame|None = None
+
+
+        self.batch_bg = pyglet.graphics.Batch()
+        self.batch_overlay = pyglet.graphics.Batch()
+        self.batch_anim = pyglet.graphics.Batch()
+
+        self.init_shapes()
+
+
+    def init_shapes(self):
+        '''
+        Due to error when running headless, we need to configure options before extending the shapes class
+        '''
+        class GradientLine(shapes.Line):
+            def __init__(self, x, y, x2, y2, width=1, color1=[255,255,255], color2=[255,255,255], batch=None, group=None):
+                # print('colors!', colors)
+                # assert len(colors) == 6
+
+                r, g, b, *a = color1
+                self._rgba1 = (r, g, b, a[0] if a else 255)
+                r, g, b, *a = color2
+                self._rgba2 = (r, g, b, a[0] if a else 255)
+
+                # print('rgba', self._rgba)
+
+                super().__init__(x, y, x2, y2, width, color1, batch=None, group=None)

-            # if i>2:
-            if self.streaming_process:
-                self.streaming_process.stdin.close()
-            if self.out_writer:
-                self.out_writer.release()
-            if self.streaming_process:
-                # oddly wrapped, because both close and release() take time.
-                self.streaming_process.wait()
+        # if i>2:
+        if self.streaming_process:
+            self.streaming_process.stdin.close()
+        if self.out_writer:
+            self.out_writer.release()
+        if self.streaming_process:
+            # oddly wrapped, because both close and release() take time.
+            self.streaming_process.wait()
 
 
 # colorset = itertools.product([0,255], repeat=3)  # but remove white
 colorset = [(0, 0, 0),
@@ -197,6 +549,11 @@
 def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace) -> np.array:
+    # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
+    # or https://github.com/pygobject/pycairo?tab=readme-ov-file
+    # or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
+    # and use http://code.astraw.com/projects/motmot/pygarrayimage.html or https://gist.github.com/nkymut/1cb40ea6ae4de0cf9ded7332f1ca0d55
+    # or https://api.arcade.academy/en/stable/index.html (supports gradient color in line -- "Arcade is built on top of Pyglet and OpenGL.")
     frame.img
     overlay = np.zeros(frame.img.shape, np.uint8)
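
The smooth transitions named in the commit subject come from the exponentialDecay() helper added above: because easing is driven by exp(-decay * dt), a drawn position converges on its target at the same speed no matter how often pyglet refreshes. Below is a minimal standalone sketch of that behaviour, assuming only the formula from this patch; the simulate() driver, its frame rates, and the snake_case name are illustrative and not part of the commit.

import math

def exponential_decay(a: float, b: float, decay: float, dt: float) -> float:
    # Same formula as exponentialDecay() in trap/renderer.py: ease `a`
    # towards `b`, with `dt` the time elapsed since the previous update.
    return b + (a - b) * math.exp(-decay * dt)

def simulate(fps: float, seconds: float = 1.0, decay: float = 16) -> float:
    # Hypothetical driver: run the easing at a fixed frame rate and return
    # where the value ends up after `seconds` of animation.
    value, target, dt = 0.0, 100.0, 1.0 / fps
    for _ in range(int(seconds * fps)):
        value = exponential_decay(value, target, decay, dt)
    return value

if __name__ == "__main__":
    # Both frame rates land on (nearly) the same point after one second,
    # which is why DrawnTrack can ease positions on every refresh tick
    # without the animation speed depending on the render frame rate.
    print(f"10 fps: {simulate(10):.2f}")   # ~100.00
    print(f"60 fps: {simulate(60):.2f}")   # ~100.00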