# trap/trap/animation_renderer.py
# used for "Forward Referencing of type annotations"
from __future__ import annotations
import time
import ffmpeg
from argparse import Namespace
import datetime
import logging
from multiprocessing import Event
from multiprocessing.synchronize import Event as BaseEvent
import cv2
import numpy as np
import pyglet
import pyglet.event
import zmq
import tempfile
from pathlib import Path
import shutil
import math
from pyglet import shapes
from PIL import Image
import json
from trap.frame_emitter import DetectionState, Frame, Track
from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
logger = logging.getLogger("trap.renderer")
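
# The three SUB sockets created in AnimationRenderer.__init__ below all follow
# the same pattern: CONFLATE so that only the newest message is kept, subscribe
# to everything, then connect. A minimal sketch of that pattern as a helper;
# illustrative only, the class below sets its sockets up inline.
def make_conflated_sub(context: zmq.Context, addr: str) -> zmq.Socket:
    sock = context.socket(zmq.SUB)
    # NB: CONFLATE must be set BEFORE connect(), otherwise it is ignored
    sock.setsockopt(zmq.CONFLATE, 1)
    sock.setsockopt(zmq.SUBSCRIBE, b'')
    sock.connect(addr)
    return sock
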
class AnimationRenderer:
def __init__(self, config: Namespace, is_running: BaseEvent):
self.config = config
self.is_running = is_running
context = zmq.Context()
self.prediction_sock = context.socket(zmq.SUB)
self.prediction_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
self.prediction_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.prediction_sock.connect(config.zmq_prediction_addr)
self.tracker_sock = context.socket(zmq.SUB)
self.tracker_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.tracker_sock.connect(config.zmq_trajectory_addr)
self.frame_sock = context.socket(zmq.SUB)
self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.frame_sock.connect(config.zmq_frame_addr)
self.H = self.config.H
self.inv_H = np.linalg.pinv(self.H)
# TODO: get FPS from frame_emitter
# self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
self.fps = 60
self.frame_size = (self.config.camera.w,self.config.camera.h)
self.hide_stats = False
self.out_writer = None # self.start_writer() if self.config.render_file else None
self.streaming_process = None # self.start_streaming() if self.config.render_url else None
if self.config.render_window:
pass
# cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
else:
pyglet.options["headless"] = True
        gl_config = pyglet.gl.Config(sample_buffers=1, samples=4)  # renamed from `config` to avoid shadowing the Namespace argument
        # , fullscreen=self.config.render_window
        display = pyglet.canvas.get_display()
        screen = display.get_screens()[0]
        # self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screens[1])
        self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=gl_config, fullscreen=True, screen=screen)
        self.window.set_handler('on_draw', self.on_draw)
        self.window.set_handler('on_refresh', self.on_refresh)
        self.window.set_handler('on_close', self.on_close)
        # register the key handler defined below; without this pyglet never dispatches it
        self.window.set_handler('on_key_press', self.on_key_press)
        # don't know why, but importing this before the window is created leads to "x connection to :1 broken (explicit kill or server shutdown)"
from pyglet_cornerpin import PygletCornerPin
# self.pins = PygletCornerPin(self.window, corners=[[-144,-2], [2880,0], [-168,958], [3011,1553]])
# x1 540 y1 760-360
# x2 1380 y2 670-360
self.pins = PygletCornerPin(
self.window,
source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]])
self.window.push_handlers(self.pins)
        pyglet.gl.glClearColor(1.0, 1.0, 1.0, 1.0)  # glClearColor expects floats in [0, 1]; the old (255,...) values were clamped to white anyway
self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=(255,255,255,255))
self.fps_display.label.x = self.window.width - 50
self.fps_display.label.y = self.window.height - 17
self.fps_display.label.bold = False
self.fps_display.label.font_size = 10
self.drawn_tracks: dict[str, DrawnTrack] = {}
self.first_time: float|None = None
self.frame: Frame|None= None
self.tracker_frame: Frame|None = None
self.prediction_frame: Frame|None = None
self.batch_bg = pyglet.graphics.Batch()
self.batch_overlay = pyglet.graphics.Batch()
self.batch_anim = pyglet.graphics.Batch()
if self.config.render_debug_shapes:
self.debug_lines = [
pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, (255,255,255,255), batch=self.batch_overlay),#v
pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, (255,255,255,255), batch=self.batch_overlay), #h
pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, (255,255,255,255), batch=self.batch_overlay), #h
pyglet.shapes.Line(540, 760-360,540, 675-360, 2, (255,255,255,255), batch=self.batch_overlay), #v
pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, (255,255,255,255), batch=self.batch_overlay), #h
]
self.debug_points = []
# print(self.config.debug_points_file)
if self.config.debug_points_file:
with self.config.debug_points_file.open('r') as fp:
                img_points = np.array(json.load(fp))
                # to place the points accurately a 2160p image was used, but calibration and
                # prediction use(d) a 1440p image, so the points get converted to a different space:
# first undistort the points so that lines are actually straight
undistorted_img_points = cv2.undistortPoints(np.array([img_points]).astype('float32'), self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
dst_img_points = cv2.perspectiveTransform(np.array(undistorted_img_points), self.config.camera.H)
if dst_img_points.shape[1:] == (1,2):
dst_img_points = np.reshape(dst_img_points, (dst_img_points.shape[0], 2))
self.debug_points = [
pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_overlay) for p in dst_img_points
]
self.init_shapes()
self.init_labels()
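
    # A minimal sketch (illustrative, not called anywhere) of the image-to-map
    # projection used for the debug points above and for the video frame in
    # check_frames(): first undistort with the camera intrinsics, then apply
    # the homography. Assumes a `camera` object exposing the same
    # mtx/dist/newcameramtx/H attributes that self.config.camera has.
    @staticmethod
    def project_img_points(img_points, camera) -> np.ndarray:
        points = np.array([img_points]).astype('float32')
        # undistort first, so that straight lines in the scene stay straight
        undistorted = cv2.undistortPoints(points, camera.mtx, camera.dist, None, camera.newcameramtx)
        # then project into the mapped space
        dst = cv2.perspectiveTransform(np.array(undistorted), camera.H)
        if dst.shape[1:] == (1, 2):
            dst = np.reshape(dst, (dst.shape[0], 2))
        return dst
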
def init_shapes(self):
        '''
        Due to an error when running headless, we need to configure options
        before extending the shapes class.
        '''
class GradientLine(shapes.Line):
def __init__(self, x, y, x2, y2, width=1, color1=[255,255,255], color2=[255,255,255], batch=None, group=None):
# print('colors!', colors)
# assert len(colors) == 6
r, g, b, *a = color1
self._rgba1 = (r, g, b, a[0] if a else 255)
r, g, b, *a = color2
self._rgba2 = (r, g, b, a[0] if a else 255)
# print('rgba', self._rgba)
                super().__init__(x, y, x2, y2, width, color1, batch=batch, group=group)  # forward batch/group (was hardcoded to None, silently dropping them)
# <pyglet.graphics.vertexdomain.VertexList
# pyglet.graphics.vertexdomain
# print(self._vertex_list)
def _create_vertex_list(self):
'''
copy of super()._create_vertex_list but with additional colors'''
self._vertex_list = self._group.program.vertex_list(
6, self._draw_mode, self._batch, self._group,
position=('f', self._get_vertices()),
                    colors=('Bn', self._rgba1 + self._rgba2 + self._rgba2 + self._rgba1 + self._rgba2 + self._rgba1),
translation=('f', (self._x, self._y) * self._num_verts))
def _update_colors(self):
                self._vertex_list.colors[:] = self._rgba1 + self._rgba2 + self._rgba2 + self._rgba1 + self._rgba2 + self._rgba1
def color1(self, color):
r, g, b, *a = color
self._rgba1 = (r, g, b, a[0] if a else 255)
self._update_colors()
def color2(self, color):
r, g, b, *a = color
self._rgba2 = (r, g, b, a[0] if a else 255)
self._update_colors()
self.gradientLine = GradientLine
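
        # Hypothetical usage of the class above (illustrative only, nothing is
        # drawn here): a line fading from opaque to transparent white, updated
        # per-frame via the color1()/color2() methods:
        #
        #     fade = self.gradientLine(0, 0, 100, 100, width=3,
        #                              color1=[255, 255, 255, 255],
        #                              color2=[255, 255, 255, 0])
        #     fade.color2([255, 255, 255, 128])  # adjust the far end's alpha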
def init_labels(self):
base_color = (255,)*4
color_predictor = (255,255,0, 255)
color_info = (255,0, 255, 255)
color_tracker = (0,255, 255, 255)
options = []
for option in ['prediction_horizon','num_samples','full_dist','gmm_mode','z_mode', 'model_dir']:
options.append(f"{option}: {self.config.__dict__[option]}")
self.labels = {
'waiting': pyglet.text.Label("Waiting for prediction"),
'frame_idx': pyglet.text.Label("", x=20, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
'tracker_idx': pyglet.text.Label("", x=90, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
'pred_idx': pyglet.text.Label("", x=110, y=self.window.height - 17, color=color_predictor, batch=self.batch_overlay),
'frame_time': pyglet.text.Label("t", x=140, y=self.window.height - 17, color=base_color, batch=self.batch_overlay),
'frame_latency': pyglet.text.Label("", x=235, y=self.window.height - 17, color=color_info, batch=self.batch_overlay),
'tracker_time': pyglet.text.Label("", x=300, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
'pred_time': pyglet.text.Label("", x=360, y=self.window.height - 17, color=color_predictor, batch=self.batch_overlay),
'track_len': pyglet.text.Label("", x=800, y=self.window.height - 17, color=color_tracker, batch=self.batch_overlay),
'options1': pyglet.text.Label(options.pop(-1), x=20, y=30, color=base_color, batch=self.batch_overlay),
'options2': pyglet.text.Label(" | ".join(options), x=20, y=10, color=base_color, batch=self.batch_overlay),
}
def refresh_labels(self, dt: float):
"""Every frame"""
if self.frame:
self.labels['frame_idx'].text = f"{self.frame.index:06d}"
self.labels['frame_time'].text = f"{self.frame.time - self.first_time: >10.2f}s"
self.labels['frame_latency'].text = f"{self.frame.time - time.time():.2f}s"
if self.tracker_frame:
self.labels['tracker_idx'].text = f"{self.tracker_frame.index - self.frame.index}"
self.labels['tracker_time'].text = f"{self.tracker_frame.time - time.time():.3f}s"
self.labels['track_len'].text = f"{len(self.tracker_frame.tracks)} tracks"
if self.prediction_frame:
self.labels['pred_idx'].text = f"{self.prediction_frame.index - self.frame.index}"
self.labels['pred_time'].text = f"{self.prediction_frame.time - time.time():.3f}s"
# self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"
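        # Note on the labels above: tracker_idx/pred_idx show how many frames
        # the tracker/predictor lag behind the current video frame, and the
        # *_time labels show each frame's timestamp offset from the wall clock
        # (negative, since frames are always slightly in the past).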
# cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
# cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
# if prediction_frame:
# # render Δt and Δ frames
# cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
# cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
# cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
# cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (580,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
# cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (660,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
# cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()]):.2f}", (740,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
# options = []
# for option in ['prediction_horizon','num_samples','full_dist','gmm_mode','z_mode', 'model_dir']:
# options.append(f"{option}: {config.__dict__[option]}")
# cv2.putText(img, options.pop(-1), (20,img.shape[0]-30), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
# cv2.putText(img, " | ".join(options), (20,img.shape[0]-10), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
def check_frames(self, dt):
new_tracks = False
try:
self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
if not self.first_time:
self.first_time = self.frame.time
img = self.frame.img
# newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
img = cv2.warpPerspective(img, self.config.camera.H, (self.config.camera.w, self.config.camera.h))
# img = cv2.GaussianBlur(img, (15, 15), 0)
img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
# don't draw in batch, so that it is the background
self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
# transform to flipped coordinate system for pyglet
self.video_sprite.y = self.window.height - self.video_sprite.height
self.video_sprite.opacity = 10
except zmq.ZMQError as e:
# idx = frame.index if frame else "NONE"
# logger.debug(f"reuse video frame {idx}")
pass
try:
self.prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
new_tracks = True
except zmq.ZMQError as e:
pass
try:
self.tracker_frame: Frame = self.tracker_sock.recv_pyobj(zmq.NOBLOCK)
new_tracks = True
except zmq.ZMQError as e:
pass
if new_tracks:
self.update_tracks()
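
    # The three receives above share the same non-blocking pattern. A minimal
    # sketch of it as a helper (hypothetical; check_frames() keeps the pattern
    # inline so it can post-process the video frame and track what changed):
    @staticmethod
    def try_recv(sock: zmq.Socket) -> Frame | None:
        try:
            return sock.recv_pyobj(zmq.NOBLOCK)
        except zmq.ZMQError:
            # no new message yet; the caller keeps using its previous frame
            return None
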
    def update_tracks(self):
        """Update the track objects and their shapes. Called from check_frames()
        when a new tracker and/or prediction frame has been received.
        """
# clean up
# for track_id in list(self.drawn_tracks.keys()):
# if track_id not in self.prediction_frame.tracks.keys():
# # TODO fade out
# del self.drawn_tracks[track_id]
if self.tracker_frame:
for track_id, track in self.tracker_frame.tracks.items():
if track_id not in self.drawn_tracks:
self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.tracker_frame.H, PROJECTION_MAP, self.config.camera)
else:
self.drawn_tracks[track_id].set_track(track)
if self.prediction_frame:
for track_id, track in self.prediction_frame.tracks.items():
if track_id not in self.drawn_tracks:
self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H, PROJECTION_MAP, self.config.camera)
else:
self.drawn_tracks[track_id].set_predictions(track)
# clean up
for track_id in list(self.drawn_tracks.keys()):
# TODO make delay configurable
if self.drawn_tracks[track_id].update_at < time.time() - 5:
# TODO fade out
del self.drawn_tracks[track_id]
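
    # Sketch for the TODOs above (hypothetical config attribute name): the
    # removal delay could be read from config instead of the hardcoded 5
    # seconds, with a fade-out lowering the shapes' opacity before deletion:
    #
    #     max_age = getattr(self.config, 'track_fadeout_delay', 5)
    #     if self.drawn_tracks[track_id].update_at < time.time() - max_age:
    #         del self.drawn_tracks[track_id]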
def on_key_press(self, symbol, modifiers):
        print('A key was pressed: f toggles fullscreen, h toggles the stats overlay')
if symbol == ord('f'):
self.window.set_fullscreen(not self.window.fullscreen)
if symbol == ord('h'):
self.hide_stats = not self.hide_stats
def check_running(self, dt):
if not self.is_running.is_set():
self.window.close()
self.event_loop.exit()
def on_close(self):
self.is_running.clear()
def on_refresh(self, dt: float):
# update shapes
# self.bg =
for track_id, track in self.drawn_tracks.items():
track.update_drawn_positions(dt)
self.refresh_labels(dt)
        # self.shape1 = shapes.Circle(700, 150, 100, color=(50, 0, 30), batch=self.batch_anim)
        # self.shape3 = shapes.Circle(800, 150, 100, color=(100, 225, 30), batch=self.batch_anim)
def on_draw(self):
self.window.clear()
self.batch_bg.draw()
for track in self.drawn_tracks.values():
for shape in track.shapes:
shape.draw() # for some reason the batches don't work
        for track in self.drawn_tracks.values():
            # loop variable renamed so it doesn't shadow the imported pyglet `shapes` module
            for shape_group in track.pred_shapes:
                for shape in shape_group:
                    shape.draw()
# self.batch_anim.draw()
self.batch_overlay.draw()
if self.config.render_debug_shapes:
self.pins.draw()
# pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))
if not self.hide_stats:
self.fps_display.draw()
# if streaming, capture buffer and send
try:
if self.streaming_process or self.out_writer:
buf = pyglet.image.get_buffer_manager().get_color_buffer()
img_data = buf.get_image_data()
data = img_data.get_data() # alternative: .get_data("RGBA", image_data.pitch)
img = np.asanyarray(data).reshape((img_data.height, img_data.width, 4))
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
img = np.flip(img, 0)
                # img = cv2.flip(img, 0)
# cv2.imshow('frame', img)
# cv2.waitKey(1)
if self.streaming_process:
self.streaming_process.stdin.write(img.tobytes())
if self.out_writer:
self.out_writer.write(img)
except Exception as e:
logger.exception(e)
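
    # The disabled start_writer()/start_streaming() hooks in __init__ would
    # feed the frames captured in on_draw() above to a cv2.VideoWriter or an
    # ffmpeg subprocess. A rough sketch of the streaming variant, assuming the
    # ffmpeg-python package (imported at the top, but otherwise unused here);
    # the pixel format must match the bytes written in on_draw(), and the
    # output options depend on the target URL:
    #
    #     def start_streaming(self):
    #         return (
    #             ffmpeg
    #             .input('pipe:', format='rawvideo', pix_fmt='rgb24',
    #                    s=f'{self.window.width}x{self.window.height}', r=self.fps)
    #             .output(self.config.render_url, vcodec='libx264', f='flv')
    #             .run_async(pipe_stdin=True)
    #         )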
def run(self):
        # leftover state for the old cv2-based render loop, kept in the commented-out block below
        frame = None
        prediction_frame = None
        tracker_frame = None
        i = 0
        first_time = None
self.event_loop = pyglet.app.EventLoop()
pyglet.clock.schedule_interval(self.check_running, 0.1)
pyglet.clock.schedule(self.check_frames)
self.event_loop.run()
# while self.is_running.is_set():
# i+=1
# # zmq_ev = self.frame_sock.poll(timeout=2000)
# # if not zmq_ev:
# # # when no data comes in, loop so that is_running is checked
# # continue
# try:
# frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
# except zmq.ZMQError as e:
# # idx = frame.index if frame else "NONE"
# # logger.debug(f"reuse video frame {idx}")
# pass
# # else:
# # logger.debug(f'new video frame {frame.index}')
# if frame is None:
# # might need to wait a few iterations before first frame comes available
# time.sleep(.1)
# continue
# try:
# prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
# except zmq.ZMQError as e:
# logger.debug(f'reuse prediction')
# if first_time is None:
# first_time = frame.time
# img = decorate_frame(frame, prediction_frame, first_time, self.config)
# img_path = (self.config.output_dir / f"{i:05d}.png").resolve()
# logger.debug(f"write frame {frame.time - first_time:.3f}s")
# if self.out_writer:
# self.out_writer.write(img)
# if self.streaming_process:
# self.streaming_process.stdin.write(img.tobytes())
# if self.config.render_window:
# cv2.imshow('frame',img)
# cv2.waitKey(1)
logger.info('Stopping')
logger.info(f'used corner pins {self.pins.pin_positions}')
# if i>2:
if self.streaming_process:
self.streaming_process.stdin.close()
if self.out_writer:
self.out_writer.release()
        if self.streaming_process:
            # waited on in a separate if-block from the stdin.close() above, because close() and release() both take time
            self.streaming_process.wait()
def run_animation_renderer(config: Namespace, is_running: BaseEvent):
renderer = AnimationRenderer(config, is_running)
renderer.run()
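
# Hypothetical launch sketch (not part of this file): the renderer is meant to
# run in its own process, signalled via a shared Event. The config fields
# AnimationRenderer needs (zmq_*_addr, camera, H, render_* flags, ...) come
# from the project's CLI parser, which is not shown here.
#
#     from multiprocessing import Process
#     is_running = Event()
#     is_running.set()
#     proc = Process(target=run_animation_renderer, args=(config, is_running))
#     proc.start()
#     ...
#     is_running.clear()  # check_running() then closes the window and exits the loop
#     proc.join()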