Pyglet for rendering smooth lines and transitions
This commit is contained in:
parent 53c18d9a7b
commit 373afb1d28
4 changed files with 419 additions and 48 deletions
13  poetry.lock (generated)
@@ -2288,6 +2288,17 @@ files = [
    {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]

[[package]]
name = "pyglet"
version = "2.0.15"
description = "pyglet is a cross-platform games and multimedia package."
optional = false
python-versions = ">=3.8"
files = [
    {file = "pyglet-2.0.15-py3-none-any.whl", hash = "sha256:9e4cc16efc308106fd3a9ff8f04e7a6f4f6a807c6ac8a331375efbbac8be85af"},
    {file = "pyglet-2.0.15.tar.gz", hash = "sha256:42085567cece0c7f1c14e36eef799938cbf528cfbb0150c484b984f3ff1aa771"},
]

[[package]]
name = "pygments"
version = "2.17.2"

@@ -3517,4 +3528,4 @@ watchdog = ["watchdog (>=2.3)"]

[metadata]
lock-version = "2.0"
python-versions = "^3.10,<3.12,"
content-hash = "66f062f9db921cfa83e576288d09fd9b959780eb189d95765934ae9a6769f200"
content-hash = "5154a99d490755a68e51595424649b5269fcd17ef14094c6285f5de7f972f110"
pyproject.toml

@@ -31,6 +31,7 @@ torchreid = "^0.2.5"
gdown = "^4.7.1"
pandas-helper-calc = {git = "https://github.com/scls19fr/pandas-helper-calc"}
tsmoothie = "^1.0.5"
pyglet = "^2.0.15"

[build-system]
requires = ["poetry-core"]
trap/frame_emitter.py

@@ -44,7 +44,7 @@ class Detection:
    state: DetectionState
    frame_nr: int

    def get_foot_coords(self):
    def get_foot_coords(self) -> list[tuple[float, float]]:
        return [self.l + 0.5 * self.w, self.t+self.h]

    @classmethod

@@ -95,6 +95,8 @@ class Track:
        coords = self.get_projected_history(H)
        return [{"x":c[0], "y":c[1]} for c in coords]



@dataclass
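get_foot_coords returns the bottom-center of the detection's bounding box as a single [x, y] point (the new list[tuple[float, float]] annotation reads as a list of points, while the body actually returns one pair). A minimal sketch, assuming l, t, w, h are the usual left/top/width/height fields and the values are made up:

l, t, w, h = 100.0, 50.0, 40.0, 120.0
foot = [l + 0.5 * w, t + h]   # [120.0, 170.0]: horizontally centered, at the bottom edge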
449  trap/renderer.py
@@ -1,3 +1,6 @@
# used for "Forward Referencing of type annotations"
from __future__ import annotations

import time
import ffmpeg
from argparse import Namespace
@@ -8,16 +11,184 @@ from multiprocessing.synchronize import Event as BaseEvent
import cv2
import numpy as np

import pyglet
import pyglet.event
import zmq
import tempfile
from pathlib import Path
import shutil
import math

from pyglet import shapes
from PIL import Image

from trap.frame_emitter import DetectionState, Frame, Track
from trap.frame_emitter import DetectionState, Frame


logger = logging.getLogger("trap.renderer")
class FrameAnimation:
    def __init__(self, frame: Frame):
        self.start_time = time.time()
        self.frame = frame

    @property
    def t(self):
        duration = .2
        return (time.time() - self.start_time) / duration

    @property
    def done(self):
        return (time.time() - self.start_time) > 5
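FrameAnimation normalizes elapsed wall-clock time against a 0.2 s transition duration, so t crosses 1.0 once the transition should be complete, and done flags the object for cleanup after 5 s. A quick worked example (timings assumed, not from the commit):

anim = FrameAnimation(frame)
# 0.1 s after construction: anim.t == 0.5, halfway through the transition
# 0.2 s after construction: anim.t == 1.0 (and it keeps growing past 1)
# 5.0+ s after construction: anim.done == True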
def exponentialDecay(a, b, decay, dt):
    """Exponential decay as alternative to Lerp
    Introduced by Freya Holmér: https://www.youtube.com/watch?v=LSNQuFEDOyQ
    """
    return b + (a-b) * math.exp(-decay * dt)
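Unlike a lerp with a fixed factor, this easing is frame-rate independent: each step multiplies the remaining gap to the target by exp(-decay * dt), so after a total elapsed time T the gap has shrunk by exp(-decay * T) no matter how many frames that took. A standalone sketch:

import math

def exponentialDecay(a, b, decay, dt):
    return b + (a - b) * math.exp(-decay * dt)

x = 0.0
for _ in range(60):                           # one second at 60 fps
    x = exponentialDecay(x, 100.0, 16, 1/60)
# x ~= 100 * (1 - math.exp(-16)) ~= 99.99999;
# 30 steps with dt=1/30 land on exactly the same value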
def relativePointToPolar(origin, point) -> tuple[float, float]:
    x, y = point[0] - origin[0], point[1] - origin[1]
    return np.sqrt(x**2 + y**2), np.arctan2(y, x)

def relativePolarToPoint(origin, r, angle) -> tuple[float, float]:
    return r * np.cos(angle) + origin[0], r * np.sin(angle) + origin[1]
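These helpers express a point as (distance, angle) relative to an origin, so predicted points can be eased in polar space around the track's current position instead of cutting straight across. A round-trip check with made-up values:

origin = (100.0, 100.0)
r, angle = relativePointToPolar(origin, (130.0, 140.0))   # r = 50.0, angle = atan2(40, 30) ~= 0.927
x, y = relativePolarToPoint(origin, r, angle)             # recovers (130.0, 140.0)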
class DrawnTrack:
    def __init__(self, track_id, track: Track, renderer: Renderer, H):
        self.track_id = track_id
        self.renderer = renderer
        self.set_track(track, H)
        self.drawn_positions = []
        self.drawn_predictions = []
        self.shapes: list[pyglet.shapes.Line] = []
        self.pred_shapes: list[list[pyglet.shapes.Line]] = []
    def set_track(self, track: Track, H):
        self.track = track
        self.H = H
        self.coords = [d.get_foot_coords() for d in track.history]

        # perhaps only do in constructor:
        self.inv_H = np.linalg.pinv(self.H)

        pred_coords = []
        for pred_i, pred in enumerate(track.predictions):
            pred_coords.append(cv2.perspectiveTransform(np.array([pred]), self.inv_H)[0].tolist())

        self.pred_coords = pred_coords
        # color = (128,0,128) if pred_i else (128,
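set_track maps every predicted trajectory back into image space with the pseudo-inverse of the homography H. cv2.perspectiveTransform expects a float array shaped (1, N, 2); a minimal sketch with a made-up homography (a pure translation, so the inverse is obvious):

H = np.array([[1., 0., 10.],
              [0., 1., 20.],
              [0., 0., 1.]])
inv_H = np.linalg.pinv(H)                       # translation by (-10, -20)
pred = [[50.0, 60.0], [55.0, 66.0]]             # one predicted trajectory
cv2.perspectiveTransform(np.array([pred]), inv_H)[0]
# -> [[40., 40.], [45., 46.]]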
    def update_drawn_positions(self, dt) -> []:
        '''
        use dt to lerp the drawn positions in the direction of current prediction
        '''
        # TODO: make lerp, currently quick way to get results
        for i, pos in enumerate(self.drawn_positions):
            self.drawn_positions[i][0] = int(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
            self.drawn_positions[i][1] = int(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))

        if len(self.coords) > len(self.drawn_positions):
            self.drawn_positions.extend(self.coords[len(self.drawn_positions):])

        for a, drawn_prediction in enumerate(self.drawn_predictions):
            for i, pos in enumerate(drawn_prediction):
                # TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
                decay = max(3, (18/i) if i else 10) # points further away move with more delay
                decay = 6
                origin = self.drawn_positions[-1]
                drawn_r, drawn_angle = relativePointToPolar( origin, drawn_prediction[i])
                pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
                r = exponentialDecay(drawn_r, pred_r, decay, dt)
                angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
                x, y = relativePolarToPoint(origin, r, angle)
                self.drawn_predictions[a][i] = int(x), int(y)
                # self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
                # self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))

        if len(self.pred_coords) > len(self.drawn_predictions):
            self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
        # for a, drawn_prediction in self.drawn_predictions:
        #     if len(self.pred_coords) > len(self.drawn_predictions):
        #         self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])

        # self.drawn_positions = self.coords
        self.update_shapes(dt)
        return self.drawn_positions
    def update_shapes(self, dt):
        if len(self.shapes) > len(self.drawn_positions):
            self.shapes = self.shapes[:len(self.drawn_positions)]

        # for i, pos in self.drawn_positions.enumerate():
        for ci in range(1, len(self.drawn_positions)):
            x, y = [int(p) for p in self.drawn_positions[ci-1]]
            x2, y2 = [int(p) for p in self.drawn_positions[ci]]

            y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
            color = [100+155*ci // len(self.drawn_positions)]*3
            # print(x,y,x2,y2,color)

            if ci >= len(self.shapes):
                # TODO: add color2
                line = self.renderer.gradientLine(x, y, x2, y2, 2, color, color, batch=self.renderer.batch_anim)
                line.opacity = 5
                self.shapes.append(line)

            else:
                line = self.shapes[ci-1]
                line.x, line.y = x, y
                line.x2, line.y2 = x2, y2
                line.color = color
                line.opacity = int(exponentialDecay(line.opacity, 255, 3, dt))

        # TODO: basically a duplication of the above, do this smarter?
        # TODO: add intermediate segment
        color = colorset[self.track_id % len(colorset)]

        for a, drawn_predictions in enumerate(self.drawn_predictions):
            if len(self.pred_shapes) <= a:
                self.pred_shapes.append([])

            if len(self.pred_shapes[a]) > (len(drawn_predictions) +1):
                self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_predictions)]

            # for i, pos in drawn_predictions.enumerate():
            for ci in range(0, len(drawn_predictions)):
                if ci == 0:
                    x, y = [int(p) for p in self.drawn_positions[-1]]
                else:
                    x, y = [int(p) for p in drawn_predictions[ci-1]]

                x2, y2 = [int(p) for p in drawn_predictions[ci]]

                y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
                # color = [255,0,0]
                # print(x,y,x2,y2,color)

                if ci >= len(self.pred_shapes[a]):
                    # TODO: add color2
                    line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
                    line.opacity = 5
                    self.pred_shapes[a].append(line)

                else:
                    line = self.pred_shapes[a][ci-1]
                    line.x, line.y = x, y
                    line.x2, line.y2 = x2, y2
                    line.color = color
                    decay = (16/ci) if ci else 16
                    half = len(drawn_predictions) / 2
                    if ci < half:
                        target_opacity = 255
                    else:
                        target_opacity = (1 - ((ci - half) / half)) * 255
                    line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
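The second half of each predicted line fades out towards its tip, and nearer segments also settle faster, since decay = 16/ci shrinks as ci grows. A standalone check of the opacity targets, assuming a prediction of 10 segments:

half = 10 / 2
[(255 if ci < half else (1 - ((ci - half) / half)) * 255) for ci in range(10)]
# -> [255, 255, 255, 255, 255, 255.0, 204.0, 153.0, 102.0, 51.0]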
class FrameWriter:
    """
    Drop-in compatible interface with cv2.VideoWriter, but support variable
@@ -78,12 +249,185 @@ class Renderer:
        # self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
        self.fps = 10
        self.frame_size = (1280,720)
        self.hide_stats = False
        self.out_writer = self.start_writer() if self.config.render_file else None
        self.streaming_process = self.start_streaming() if self.config.render_url else None

        if self.config.render_window:
            cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
        else:
            pyglet.options["headless"] = True

        config = pyglet.gl.Config(sample_buffers=1, samples=4)
        self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config)
        self.window.set_handler('on_draw', self.on_draw)
        self.window.set_handler('on_refresh', self.on_refresh)
        self.window.set_handler('on_close', self.on_close)

        pyglet.gl.glClearColor(81./255, 20/255, 46./255, 0)
        self.fps_display = pyglet.window.FPSDisplay(window=self.window)

        self.drawn_tracks: dict[str, DrawnTrack] = {}


        self.frame: Frame|None = None
        self.prediction_frame: Frame|None = None


        self.batch_bg = pyglet.graphics.Batch()
        self.batch_overlay = pyglet.graphics.Batch()
        self.batch_anim = pyglet.graphics.Batch()

        self.init_shapes()
    def init_shapes(self):
        '''
        Due to error when running headless, we need to configure options before extending the shapes class
        '''
        class GradientLine(shapes.Line):
            def __init__(self, x, y, x2, y2, width=1, color1=[255,255,255], color2=[255,255,255], batch=None, group=None):
                # print('colors!', colors)
                # assert len(colors) == 6

                r, g, b, *a = color1
                self._rgba1 = (r, g, b, a[0] if a else 255)
                r, g, b, *a = color2
                self._rgba2 = (r, g, b, a[0] if a else 255)

                # print('rgba', self._rgba)

                super().__init__(x, y, x2, y2, width, color1, batch=None, group=None)
                # <pyglet.graphics.vertexdomain.VertexList
                # pyglet.graphics.vertexdomain
                # print(self._vertex_list)

            def _create_vertex_list(self):
                '''
                copy of super()._create_vertex_list but with additional colors'''
                self._vertex_list = self._group.program.vertex_list(
                    6, self._draw_mode, self._batch, self._group,
                    position=('f', self._get_vertices()),
                    colors=('Bn', self._rgba1+ self._rgba2 + self._rgba2 + self._rgba1 + self._rgba2 +self._rgba1 ),
                    translation=('f', (self._x, self._y) * self._num_verts))

            def _update_colors(self):
                self._vertex_list.colors[:] = self._rgba1+ self._rgba2 + self._rgba2 + self._rgba1 + self._rgba2 +self._rgba1

            def color1(self, color):
                r, g, b, *a = color
                self._rgba1 = (r, g, b, a[0] if a else 255)
                self._update_colors()

            def color2(self, color):
                r, g, b, *a = color
                self._rgba2 = (r, g, b, a[0] if a else 255)
                self._update_colors()

        self.gradientLine = GradientLine
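The subclass rebuilds shapes.Line's vertex list so each of the six vertices of the line's two triangles gets its own RGBA, which is what makes per-segment gradients possible. Note that super().__init__ is called with batch=None rather than forwarding the caller's batch, which would explain the later "for some reason the batches don't work" comment and the per-shape draw() calls in on_draw. Usage, as the rest of the commit does it:

line = self.renderer.gradientLine(x, y, x2, y2, 2, color, color, batch=self.renderer.batch_anim)
line.opacity = 5   # then eased towards 255 via exponentialDecay each refresh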
    def check_frames(self, dt):
        try:
            self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
            img = cv2.GaussianBlur(self.frame.img, (15, 15), 0)
            img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
            # don't draw in batch, so that it is the background
            self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
            self.video_sprite.opacity = 100
        except zmq.ZMQError as e:
            # idx = frame.index if frame else "NONE"
            # logger.debug(f"reuse video frame {idx}")
            pass
        try:
            self.prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
            self.update_tracks()
        except zmq.ZMQError as e:
            pass
    def update_tracks(self):
        """Updates the track objects and shapes. Called after setting `prediction_frame`
        """

        # clean up
        for track_id in list(self.drawn_tracks.keys()):
            if track_id not in self.prediction_frame.tracks.keys():
                # TODO fade out
                del self.drawn_tracks[track_id]


        for track_id, track in self.prediction_frame.tracks.items():
            if track_id not in self.drawn_tracks:
                self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H)
            else:
                self.drawn_tracks[track_id].set_track(track, self.prediction_frame.H)
    def on_key_press(self, symbol, modifiers):
        print('A key was pressed, use f to hide')
        if symbol == ord('f'):
            self.window.set_fullscreen(not self.window.fullscreen)
        if symbol == ord('h'):
            self.hide_stats = not self.hide_stats

    def check_running(self, dt):
        if not self.is_running.is_set():
            self.window.close()

    def on_close(self):
        self.is_running.clear()

    def on_refresh(self, dt: float):
        # update shapes
        # self.bg =
        for track_id, track in self.drawn_tracks.items():
            track.update_drawn_positions(dt)

        self.shape1 = shapes.Circle(700, 150, 100, color=(50, 0, 30), batch=self.batch_anim)
        self.shape3 = shapes.Circle(800, 150, 100, color=(100, 225, 30), batch=self.batch_anim)
        pass
    def on_draw(self):
        self.window.clear()

        self.batch_bg.draw()

        for track in self.drawn_tracks.values():
            for shape in track.shapes:
                shape.draw() # for some reason the batches don't work
        for track in self.drawn_tracks.values():
            for shapes in track.pred_shapes:
                for shape in shapes:
                    shape.draw()
        # self.batch_anim.draw()
        self.batch_overlay.draw()


        # pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))


        if not self.hide_stats:
            self.fps_display.draw()

        # if streaming, capture buffer and send
        if self.streaming_process or self.out_writer:
            buf = pyglet.image.get_buffer_manager().get_color_buffer()
            img_data = buf.get_image_data()
            data = img_data.get_data()
            img = np.asanyarray(data).reshape((img_data.height, img_data.width, 4))
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
            img = np.flip(img, 0)
            # img = cv2.flip(img, cv2.0)

            # cv2.imshow('frame', img)
            # cv2.waitKey(1)
            if self.streaming_process:
                self.streaming_process.stdin.write(img.tobytes())
            if self.out_writer:
                self.out_writer.write(img)
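get_image_data().get_data() hands back raw bytes; turning those into an (h, w, 4) array is usually done with np.frombuffer, since np.asanyarray on a bytes object yields a 0-d array that cannot be reshaped. A sketch of the conversion under that assumption (flipped at the end because pyglet's buffer origin is bottom-left):

buf = pyglet.image.get_buffer_manager().get_color_buffer()
img_data = buf.get_image_data()
img = np.frombuffer(img_data.get_data(), dtype=np.uint8)
img = img.reshape((img_data.height, img_data.width, 4))
img = np.flip(cv2.cvtColor(img, cv2.COLOR_BGRA2RGB), 0)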
    def start_writer(self):
        if not self.config.output_dir.exists():
@@ -111,7 +455,8 @@ class Renderer:
                pix_fmt="yuv420p",
                preset="ultrafast",
                tune="zerolatency",
                g=f"{self.fps*2}",
                # g=f"{self.fps*2}",
                g=f"{60*2}",
                analyzeduration="2000000",
                probesize="1000000",
                f='mpegts'
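These are ffmpeg output options for the low-latency stream: ultrafast/zerolatency trade compression for speed, and g pins the keyframe (GOP) interval. The commit switches it from fps*2 (20 frames at the configured 10 fps) to a hard-coded 120, still a two-second GOP but assuming 60 fps output. A minimal sketch of how such a pipe is typically assembled with ffmpeg-python (the URL and input geometry are placeholders, not from the commit):

process = (
    ffmpeg
    .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='1280x720')
    .output('rtmp://example/stream', pix_fmt='yuv420p', preset='ultrafast',
            tune='zerolatency', g=120, f='mpegts')
    .run_async(pipe_stdin=True)
)
process.stdin.write(frame_rgb.tobytes())   # one raw frame per write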
@@ -130,60 +475,67 @@ class Renderer:

        i=0
        first_time = None
        while self.is_running.is_set():
            i+=1

            # zmq_ev = self.frame_sock.poll(timeout=2000)
            # if not zmq_ev:
            #     # when no data comes in, loop so that is_running is checked
            #     continue
        event_loop = pyglet.app.EventLoop()
        pyglet.clock.schedule_interval(self.check_running, 0.1)
        pyglet.clock.schedule(self.check_frames)
        event_loop.run()
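The blocking while-loop of the OpenCV version is replaced by pyglet's event loop: check_running polls the multiprocessing Event ten times a second so the window closes when the rest of the pipeline stops, and check_frames runs every tick to drain the ZMQ sockets. The pattern in isolation (standalone sketch):

import pyglet

def tick(dt):
    ...                                    # runs every tick, dt = seconds since last call

def poll(dt):
    ...                                    # runs every 0.1 s

pyglet.clock.schedule(tick)
pyglet.clock.schedule_interval(poll, 0.1)
pyglet.app.EventLoop().run()               # blocks until the loop exits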
            try:
                frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
            except zmq.ZMQError as e:
                idx = frame.index if frame else "NONE"
                logger.debug(f"reuse video frame {idx}")
            else:
                logger.debug(f'new video frame {frame.index}')

        # while self.is_running.is_set():
        #     i+=1


        #     # zmq_ev = self.frame_sock.poll(timeout=2000)
        #     # if not zmq_ev:
        #     #     # when no data comes in, loop so that is_running is checked
        #     #     continue

        #     try:
        #         frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
        #     except zmq.ZMQError as e:
        #         # idx = frame.index if frame else "NONE"
        #         # logger.debug(f"reuse video frame {idx}")
        #         pass
        #     # else:
        #     #     logger.debug(f'new video frame {frame.index}')


            if frame is None:
                # might need to wait a few iterations before first frame comes available
                time.sleep(.1)
                continue
        #     if frame is None:
        #         # might need to wait a few iterations before first frame comes available
        #         time.sleep(.1)
        #         continue

            try:
                prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
            except zmq.ZMQError as e:
                logger.debug(f'reuse prediction')
        #     try:
        #         prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
        #     except zmq.ZMQError as e:
        #         logger.debug(f'reuse prediction')

            if first_time is None:
                first_time = frame.time
        #     if first_time is None:
        #         first_time = frame.time

            img = decorate_frame(frame, prediction_frame, first_time, self.config)
        #     img = decorate_frame(frame, prediction_frame, first_time, self.config)

            img_path = (self.config.output_dir / f"{i:05d}.png").resolve()
        #     img_path = (self.config.output_dir / f"{i:05d}.png").resolve()

        # cv2.imwrite(str(img_path), img)

            logger.debug(f"write frame {frame.time - first_time:.3f}s")
            if self.out_writer:
                self.out_writer.write(img)
            if self.streaming_process:
                self.streaming_process.stdin.write(img.tobytes())
            if self.config.render_window:
                cv2.imshow('frame',img)
                cv2.waitKey(1)
        #     logger.debug(f"write frame {frame.time - first_time:.3f}s")
        #     if self.out_writer:
        #         self.out_writer.write(img)
        #     if self.streaming_process:
        #         self.streaming_process.stdin.write(img.tobytes())
        #     if self.config.render_window:
        #         cv2.imshow('frame',img)
        #         cv2.waitKey(1)
        logger.info('Stopping')

        if i>2:
            if self.streaming_process:
                self.streaming_process.stdin.close()
            if self.out_writer:
                self.out_writer.release()
            if self.streaming_process:
                # oddly wrapped, because both close and release() take time.
                self.streaming_process.wait()
        # if i>2:
        if self.streaming_process:
            self.streaming_process.stdin.close()
        if self.out_writer:
            self.out_writer.release()
        if self.streaming_process:
            # oddly wrapped, because both close and release() take time.
            self.streaming_process.wait()

# colorset = itertools.product([0,255], repeat=3) # but remove white
colorset = [(0, 0, 0),
@@ -197,6 +549,11 @@ colorset = [(0, 0, 0),


def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace) -> np.array:
    # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
    # or https://github.com/pygobject/pycairo?tab=readme-ov-file
    # or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
    # and use http://code.astraw.com/projects/motmot/pygarrayimage.html or https://gist.github.com/nkymut/1cb40ea6ae4de0cf9ded7332f1ca0d55
    # or https://api.arcade.academy/en/stable/index.html (supports gradient color in line -- "Arcade is built on top of Pyglet and OpenGL.")
    frame.img

    overlay = np.zeros(frame.img.shape, np.uint8)