Preliminary rendering of second window with only animation

Ruben van de Ven 2024-11-05 19:16:57 +01:00
parent 2e2bd76b05
commit a0c63c4929
7 changed files with 107 additions and 24 deletions

poetry.lock (generated)

@@ -2290,15 +2290,29 @@ files = [
[[package]]
name = "pyglet"
-version = "2.0.15"
+version = "2.0.18"
description = "pyglet is a cross-platform games and multimedia package."
optional = false
python-versions = ">=3.8"
files = [
-    {file = "pyglet-2.0.15-py3-none-any.whl", hash = "sha256:9e4cc16efc308106fd3a9ff8f04e7a6f4f6a807c6ac8a331375efbbac8be85af"},
-    {file = "pyglet-2.0.15.tar.gz", hash = "sha256:42085567cece0c7f1c14e36eef799938cbf528cfbb0150c484b984f3ff1aa771"},
+    {file = "pyglet-2.0.18-py3-none-any.whl", hash = "sha256:e592952ae0297e456c587b6486ed8c3e5f9d0c3519d517bb92dde5fdf4c26b41"},
+    {file = "pyglet-2.0.18.tar.gz", hash = "sha256:7cf9238d70082a2da282759679f8a011cc979753a32224a8ead8ed80e48f99dc"},
]
+
+[[package]]
+name = "pyglet-cornerpin"
+version = "0.2.0"
+description = "Add a corner pin transform to a pyglet window"
+optional = false
+python-versions = "<4.0,>=3.10"
+files = [
+    {file = "pyglet_cornerpin-0.2.0-py3-none-any.whl", hash = "sha256:1e1cf4f2e86929fb74e89939be8f7ebdb110f65bf0923e51466e8fbd44773dc5"},
+    {file = "pyglet_cornerpin-0.2.0.tar.gz", hash = "sha256:8fe8a7618c11f93ac3b3c8b89b71e4398bf1223eea9ac3ea744e9d36031a44f9"},
+]
+
+[package.dependencies]
+pyglet = ">=2.0.18,<3.0.0"

[[package]]
name = "pygments"
version = "2.17.2"

@@ -3528,4 +3542,4 @@ watchdog = ["watchdog (>=2.3)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10,<3.12,"
-content-hash = "5154a99d490755a68e51595424649b5269fcd17ef14094c6285f5de7f972f110"
+content-hash = "bffa0878a620996b47aa5623b951f09ab010c267880c6dcd5a53741f244e675a"


@@ -32,6 +32,7 @@ gdown = "^4.7.1"
pandas-helper-calc = {git = "https://github.com/scls19fr/pandas-helper-calc"}
tsmoothie = "^1.0.5"
pyglet = "^2.0.15"
+pyglet-cornerpin = "^0.2.0"

[build-system]
requires = ["poetry-core"]
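
The new pyglet-cornerpin dependency provides draggable corner handles for a corner-pin (keystone) transform on a pyglet window. A minimal sketch of how it is wired in, mirroring the calls added later in this commit (window size and event-loop scaffolding are placeholders):

import pyglet
from pyglet_cornerpin import PygletCornerPin

window = pyglet.window.Window(width=1280, height=720)
pins = PygletCornerPin(window)   # draggable corner handles for the corner-pin transform
window.push_handlers(pins)       # let the handles receive mouse events

@window.event
def on_draw():
    window.clear()
    # ... draw the scene here ...
    pins.draw()                  # draw the pin handles on top of everything else

pyglet.app.run()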


@@ -20,6 +20,7 @@ import shutil
import math
from pyglet import shapes
from PIL import Image

from trap.frame_emitter import DetectionState, Frame, Track
@@ -70,11 +71,22 @@ class AnimationRenderer:
config = pyglet.gl.Config(sample_buffers=1, samples=4)
# , fullscreen=self.config.render_window
-self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=self.config.full_screen)
+display = pyglet.canvas.get_display()
+screen = display.get_screens()[1]
+# self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screens[1])
+self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=config, fullscreen=True, screen=screen)

self.window.set_handler('on_draw', self.on_draw)
self.window.set_handler('on_refresh', self.on_refresh)
self.window.set_handler('on_close', self.on_close)

+# don't know why, but importing this before window leads to "x connection to :1 broken (explicit kill or server shutdown)"
+from pyglet_cornerpin import PygletCornerPin
+self.pins = PygletCornerPin(self.window)
+self.window.push_handlers(self.pins)

pyglet.gl.glClearColor(0,0,0, 0)
self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=(255,255,255,255))
self.fps_display.label.x = self.window.width - 50
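
The block above is what moves the animation output to a second, fullscreen window. The pattern in isolation, as a sketch (it assumes a second monitor is attached and falls back to the primary one otherwise):

import pyglet

display = pyglet.canvas.get_display()
screens = display.get_screens()
screen = screens[1] if len(screens) > 1 else screens[0]   # second monitor if available
config = pyglet.gl.Config(sample_buffers=1, samples=4)    # 4x MSAA, as in the renderer above
window = pyglet.window.Window(width=screen.width, height=screen.height,
                              config=config, fullscreen=True, screen=screen)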
@@ -94,6 +106,13 @@ class AnimationRenderer:
self.batch_bg = pyglet.graphics.Batch()
self.batch_overlay = pyglet.graphics.Batch()
self.batch_anim = pyglet.graphics.Batch()

+self.debug_lines = [
+    pyglet.shapes.Line(1380, self.config.camera.h, 1380, 690, 2, (255,255,255,255), batch=self.batch_overlay),
+    pyglet.shapes.Line(0, 660, 1380, 675, 2, (255,255,255,255), batch=self.batch_overlay),
+]

self.init_shapes()
@@ -217,13 +236,15 @@ class AnimationRenderer:
if not self.first_time:
self.first_time = self.frame.time
img = self.frame.img
+# newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
+img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
img = cv2.warpPerspective(img, self.H, (self.frame.img.shape[1], self.frame.img.shape[0]))
img = cv2.GaussianBlur(img, (15, 15), 0)
img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
# don't draw in batch, so that it is the background
self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
-self.video_sprite.opacity = 100
+self.video_sprite.opacity = 30
except zmq.ZMQError as e:
# idx = frame.index if frame else "NONE"
# logger.debug(f"reuse video frame {idx}")
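
The frame is now undistorted with the calibration's precomputed newcameramtx before the homography warp, so lens distortion does not get baked into the map projection. A condensed sketch of that pipeline (function name and parameters are illustrative):

import cv2
import numpy as np

def undistort_and_warp(img: np.ndarray, mtx: np.ndarray, dist: np.ndarray, H: np.ndarray) -> np.ndarray:
    """Remove lens distortion first, then apply the image-to-map homography."""
    h, w = img.shape[:2]
    newcameramtx, _roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    img = cv2.undistort(img, mtx, dist, None, newcameramtx)
    return cv2.warpPerspective(img, H, (w, h))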
@@ -255,9 +276,9 @@ class AnimationRenderer:
if self.prediction_frame:
for track_id, track in self.prediction_frame.tracks.items():
if track_id not in self.drawn_tracks:
-self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H, PROJECTION_MAP)
+self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H, PROJECTION_MAP, self.config.camera)
else:
-self.drawn_tracks[track_id].set_track(track, self.prediction_frame.H)
+self.drawn_tracks[track_id].set_track(track)

# clean up
for track_id in list(self.drawn_tracks.keys()):
@@ -282,6 +303,7 @@ class AnimationRenderer:
def on_close(self):
self.is_running.clear()

def on_refresh(self, dt: float):
# update shapes
# self.bg =
@@ -309,12 +331,10 @@ class AnimationRenderer:
shape.draw()
# self.batch_anim.draw()
self.batch_overlay.draw()
+self.pins.draw()
# pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))
if not self.hide_stats:
self.fps_display.draw()
@@ -400,6 +420,8 @@ class AnimationRenderer:
# cv2.imshow('frame',img)
# cv2.waitKey(1)
logger.info('Stopping')
+logger.info(f'used corner pins {self.pins.corners}')
# if i>2:
if self.streaming_process:


@@ -5,6 +5,7 @@ import numpy as np
import json
from trap.tracker import DETECTORS
+from trap.frame_emitter import Camera
from pyparsing import Optional
@@ -62,10 +63,32 @@ class HomographyAction(argparse.Action):
H = np.array(json.load(fp))
else:
H = np.loadtxt(values, delimiter=',')
-print('%r %r %r' % (namespace, values, option_string))
setattr(namespace, self.dest, values)
setattr(namespace, 'H', H)
+
+class CameraAction(argparse.Action):
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        if nargs is not None:
+            raise ValueError("nargs not allowed")
+        super().__init__(option_strings, dest, **kwargs)
+    def __call__(self, parser, namespace, values, option_string=None):
+        if values is None:
+            setattr(namespace, self.dest, None)
+        else:
+            values = Path(values)
+            with values.open('r') as fp:
+                data = json.load(fp)
+                # print(data)
+                # print(data['camera_matrix'])
+                # camera = {
+                #     'camera_matrix': np.array(data['camera_matrix']),
+                #     'dist_coeff': np.array(data['dist_coeff']),
+                # }
+                camera = Camera(np.array(data['camera_matrix']), np.array(data['dist_coeff']), namespace.frame_width, namespace.frame_height)
+                setattr(namespace, 'camera', camera)

inference_parser.add_argument("--model_dir",
help="directory with the model to use for inference",
type=str, # TODO: make into Path
@@ -253,6 +276,11 @@ tracker_parser.add_argument("--homography",
type=Path,
default='../DATASETS/VIRAT_subset_0102x/VIRAT_0102_homography_img2world.txt',
action=HomographyAction)
+tracker_parser.add_argument("--calibration",
+help="File with camera intrinsics and lens distortion params (calibration.json)",
+# type=Path,
+default=None,
+action=CameraAction)
tracker_parser.add_argument("--save-for-training",
help="Specify the path in which to save",
type=Path,
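
CameraAction reads a JSON file with the intrinsic matrix and distortion coefficients and builds a Camera from it, taking frame_width/frame_height from the other CLI arguments. A sketch of producing such a file (the numbers are placeholders, not real calibration values):

import json

calibration = {
    "camera_matrix": [[1000.0, 0.0, 960.0],
                      [0.0, 1000.0, 540.0],
                      [0.0, 0.0, 1.0]],
    "dist_coeff": [[-0.30, 0.10, 0.0, 0.0, 0.0]],
}
with open("calibration.json", "w") as fp:
    json.dump(calibration, fp, indent=2)

# passed to the tracker as e.g.:  --calibration calibration.json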


@@ -32,6 +32,14 @@ class DetectionState(IntFlag):
return cls.Confirmed
raise RuntimeError("Should not run into Deleted entries here")
+
+class Camera:
+    def __init__(self, mtx, dist, w, h):
+        self.mtx = mtx
+        self.dist = dist
+        self.w = w
+        self.h = h
+        self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))

@dataclass
class Detection:
@@ -83,19 +91,25 @@ class Track:
predictor_history: Optional[list] = None # in image space
predictions: Optional[list] = None

-def get_projected_history(self, H) -> np.array:
+def get_projected_history(self, H, camera: Optional[Camera]= None) -> np.array:
foot_coordinates = [d.get_foot_coords() for d in self.history]
# TODO)) Undistort points before perspective transform
if len(foot_coordinates):
-coords = cv2.perspectiveTransform(np.array([foot_coordinates]),H)
+if camera:
+    coords = cv2.undistortPoints(np.array([foot_coordinates]).astype('float32'), camera.mtx, camera.dist, None, camera.newcameramtx)
+    coords = cv2.perspectiveTransform(np.array(coords),H)
+    return coords.reshape((coords.shape[0],2))
+else:
+    coords = cv2.perspectiveTransform(np.array([foot_coordinates]),H)
return coords[0]
return np.array([])

-def get_projected_history_as_dict(self, H) -> dict:
+def get_projected_history_as_dict(self, H, camera: Optional[Camera]= None) -> dict:
-coords = self.get_projected_history(H)
+coords = self.get_projected_history(H, camera)
return [{"x":c[0], "y":c[1]} for c in coords]
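
get_projected_history now undistorts the foot coordinates with the camera intrinsics before applying the homography, whenever a Camera is given. The same two-step mapping as a standalone sketch (the helper name is illustrative; shapes follow OpenCV's point conventions):

import cv2
import numpy as np

def project_points(points_px: np.ndarray, camera, H: np.ndarray) -> np.ndarray:
    """points_px: (N, 2) pixel coordinates -> (N, 2) points in the homography's target plane."""
    pts = np.array([points_px], dtype=np.float32)    # 1xN 2-channel, as cv2 expects
    pts = cv2.undistortPoints(pts, camera.mtx, camera.dist, None, camera.newcameramtx)
    mapped = cv2.perspectiveTransform(pts, H)
    return mapped.reshape(-1, 2)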


@@ -269,7 +269,7 @@ class PredictionServer:
# TODO: modify this into a mapping function between JS data an the expected Node format
# node = FakeNode(online_env.NodeType.PEDESTRIAN)
-history = [[h['x'], h['y']] for h in track.get_projected_history_as_dict(frame.H)]
+history = [[h['x'], h['y']] for h in track.get_projected_history_as_dict(frame.H, self.config.camera)]
history = np.array(history)
x = history[:, 0]
y = history[:, 1]


@@ -18,11 +18,13 @@ import tempfile
from pathlib import Path
import shutil
import math
+from typing import Optional
from pyglet import shapes
from PIL import Image

-from trap.frame_emitter import DetectionState, Frame, Track
+from trap.frame_emitter import DetectionState, Frame, Track, Camera
@@ -61,24 +63,26 @@ PROJECTION_MAP = 2
PROJECTION_PROJECTOR = 4

class DrawnTrack:
-def __init__(self, track_id, track: Track, renderer: PreviewRenderer, H, draw_projection = PROJECTION_IMG):
+def __init__(self, track_id, track: Track, renderer: PreviewRenderer, H, draw_projection = PROJECTION_IMG, camera: Optional[Camera] = None):
# self.created_at = time.time()
self.draw_projection = draw_projection
self.update_at = self.created_at = time.time()
self.track_id = track_id
self.renderer = renderer
+self.camera = camera
+self.H = H # TODO)) Move H to Camera object
self.set_track(track, H)
self.drawn_positions = []
self.drawn_predictions = []
self.shapes: list[pyglet.shapes.Line] = []
self.pred_shapes: list[list[pyglet.shapes.Line]] = []

-def set_track(self, track: Track, H):
+def set_track(self, track: Track, H = None):
self.update_at = time.time()
self.track = track
-self.H = H
+# self.H = H
-self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(self.H)
+self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(self.H, self.camera)

# perhaps only do in constructor:
self.inv_H = np.linalg.pinv(self.H)
@@ -268,7 +272,7 @@ class PreviewRenderer:
# self.H = np.array(json.load(fp))
# else:
# self.H = np.loadtxt(self.config.homography, delimiter=',')
-print('h', self.config.H)
+# print('h', self.config.H)
self.H = self.config.H