CV renderer and binning options

Ruben van de Ven 2024-12-17 10:37:30 +01:00
parent d8004e9125
commit 6b12ddf08a
7 changed files with 120 additions and 62 deletions


@@ -63,13 +63,14 @@ class AnimationRenderer:
         self.frame_size = (self.config.camera.w,self.config.camera.h)
         self.hide_stats = False
         self.out_writer = None # self.start_writer() if self.config.render_file else None
-        self.streaming_process = None # self.start_streaming() if self.config.render_url else None
-        if self.config.render_window:
-            pass
-            # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
-            # cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
-        else:
+        self.streaming_process = self.start_streaming() if self.config.render_url else None
+        # if self.config.render_window:
+        #     pass
+        #     # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
+        #     # cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
+        # else:
+        if self.streaming_process is not None:
             pyglet.options["headless"] = True
 
         config = pyglet.gl.Config(sample_buffers=1, samples=4)
@@ -78,7 +79,9 @@ class AnimationRenderer:
         display = pyglet.canvas.get_display()
         screen = display.get_screens()[0]
-        # self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screens[1])
-        self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=config, fullscreen=True, screen=screen)
+        if self.streaming_process is not None:
+            self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screen)
+        else:
+            self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=config, fullscreen=True, screen=screen)
         self.window.set_handler('on_draw', self.on_draw)
         self.window.set_handler('on_refresh', self.on_refresh)
@@ -93,8 +96,9 @@ class AnimationRenderer:
         self.pins = PygletCornerPin(
             self.window,
-            source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
-            corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]])
+            # source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
+            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
+        )
         self.window.push_handlers(self.pins)
 
         pyglet.gl.glClearColor(255,255,255,255)
@@ -153,6 +157,32 @@ class AnimationRenderer:
 
         self.init_labels()
 
+    def start_streaming(self):
+        """TODO)) This should be inherited from a generic renderer"""
+        return (
+            ffmpeg
+            .input('pipe:', format='rawvideo', codec="rawvideo", pix_fmt='bgr24', s='{}x{}'.format(*self.frame_size))
+            .output(
+                self.config.render_url,
+                #codec = "copy", # use same codecs of the original video
+                codec='libx264',
+                listen=1, # enables HTTP server
+                pix_fmt="yuv420p",
+                preset="ultrafast",
+                tune="zerolatency",
+                # g=f"{self.fps*2}",
+                g=f"{60*2}",
+                analyzeduration="2000000",
+                probesize="1000000",
+                f='mpegts'
+            )
+            .overwrite_output()
+            .run_async(pipe_stdin=True)
+        )
+        # return process
+
     def init_shapes(self):
         '''
         Due to error when running headless, we need to configure options before extending the shapes class
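
As a quick sanity check of the pipeline above, ffmpeg-python can print the command line it would spawn instead of running it. A minimal sketch, assuming a hypothetical render_url and frame size (the real renderer takes both from self.config):

import ffmpeg

frame_size = (1920, 1080)          # assumption; comes from config.camera in the renderer
render_url = 'tcp://0.0.0.0:8888'  # hypothetical; self.config.render_url in the real code

args = (
    ffmpeg
    .input('pipe:', format='rawvideo', codec='rawvideo', pix_fmt='bgr24', s='{}x{}'.format(*frame_size))
    .output(render_url, codec='libx264', listen=1, pix_fmt='yuv420p',
            preset='ultrafast', tune='zerolatency', f='mpegts')
    .overwrite_output()
    .compile()  # returns the argv list rather than starting the process
)
print(' '.join(args))

With run_async(pipe_stdin=True) as in the commit, the renderer pushes raw BGR frames into the subprocess (roughly streaming_process.stdin.write(img.tobytes())), and listen=1 makes ffmpeg act as the serving side so a client can connect to the MPEG-TS stream.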


@@ -483,18 +483,22 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     base_color = (255,)*3
     info_color = (255,255,0)
+    predictor_color = (255,0,255)
+    tracker_color = (0,255,255)
 
     cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+    cv2.putText(img, f"{frame.time - first_time: >10.2f}s", (150,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
 
     if prediction_frame:
         # render Δt and Δ frames
-        cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (580,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (660,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()]):.2f}", (740,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+        cv2.putText(img, f"{tracker_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.index - frame.index}", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (230,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (290,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{len(tracker_frame.tracks)} tracks", (620,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (700,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (780,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()]):.2f}", (860,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
 
     options = []
     for option in ['prediction_horizon','num_samples','full_dist','gmm_mode','z_mode', 'model_dir']:


@@ -25,7 +25,8 @@ from bytetracker.basetrack import TrackState as ByteTrackTrackState
 from trajectron.environment import Environment, Node, Scene
 from urllib.parse import urlparse
-from trap.utils import lerp
+from trap.utils import get_bins
+from trap.utils import inv_lerp, lerp
 
 logger = logging.getLogger('trap.frame_emitter')
@@ -219,7 +220,7 @@ class Track:
     history: List[Detection] = field(default_factory=list)
     predictor_history: Optional[list] = None # in image space
     predictions: Optional[list] = None
-    fps: int = 12
+    fps: int = 12 # TODO)) convert this to camera? That way, incorporates H and dist, alternatively, each track is as a whole attached to a space
     source: Optional[int] = None # to keep track of processed tracks
 
     def get_projected_history(self, H: Optional[cv2.Mat] = None, camera: Optional[Camera]= None) -> np.array:
@@ -290,49 +291,67 @@ class Track:
             t.predictions,
             t.fps/step_size)
 
-    # def get_binned(self, bin_size=.5, remove_overlap=True):
-    #     """
-    #     For an experiment: what if we predict using only concrete positions, by mapping
-    #     dx,dy to a grid. Thus prediction can be for 8 moves, or rather headings
-    #     see ~/notes/attachments example svg
-    #     """
-    #     new_history: List[Detection] = []
-    #     for i, (det0, det1) in enumerate(zip(self.history[:-1], self.history[1:]):
-    #         if i == 0:
-    #             new_history.append(det0)
-    #             continue
-    #         if abs(det1.x - new_history[-1].x) < bin_size or abs(det1.y - new_history[-1].y) < bin_size:
-    #             continue
-    #         # det1 falls outside of the box [-bin_size:+bin_size] around last detection
-    #         # 1. Interpolate exact point between det0 and det1 that this happens
-    #         if abs(det1.x - new_history[-1].x) >= bin_size:
-    #             if det1.x - new_history[-1].x >= bin_size:
-    #                 # det1 left of last
-    #                 x = new_history[-1].x + bin_size
-    #                 f = inv_lerp(det0.x, det1.x, x)
-    #             elif new_history[-1].x - det1.x >= bin_size:
-    #                 # det1 left of last
-    #                 x = new_history[-1].x - bin_size
-    #                 f = inv_lerp(det0.x, det1.x, x)
-    #             y = lerp(det0.y, det1.y, f)
-    #         if abs(det1.y - new_history[-1].y) >= bin_size:
-    #             if det1.y - new_history[-1].y >= bin_size:
-    #                 # det1 left of last
-    #                 y = new_history[-1].y + bin_size
-    #                 f = inv_lerp(det0.y, det1.y, x)
-    #             elif new_history[-1].y - det1.y >= bin_size:
-    #                 # det1 left of last
-    #                 y = new_history[-1].y - bin_size
-    #                 f = inv_lerp(det0.y, det1.y, x)
-    #             x = lerp(det0.x, det1.x, f)
-    #         # 2. Find closest point on rectangle (rectangle's four corners, or 4 midpoints)
-    #         points = [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
-    #         # todo Offsets to points:[ history for in points]
+    def get_binned(self, bin_size, camera: Camera, bin_start=True):
+        """
+        For an experiment: what if we predict using only concrete positions, by mapping
+        dx,dy to a grid. Thus prediction can be for 8 moves, or rather headings
+        see ~/notes/attachments example svg
+        """
+        history = self.get_projected_history_as_dict(H=None, camera=camera)
+
+        def round_to_grid_precision(x):
+            factor = 1/bin_size
+            return round(x * factor) / factor
+
+        new_history: List[dict] = []
+        for i, (det0, det1) in enumerate(zip(history[:-1], history[1:])):
+            if i == 0:
+                new_history.append({
+                    'x': round_to_grid_precision(det0['x']),
+                    'y': round_to_grid_precision(det0['y'])
+                } if bin_start else det0)
+                continue
+            if abs(det1['x'] - new_history[-1]['x']) < bin_size and abs(det1['y'] - new_history[-1]['y']) < bin_size:
+                continue
+
+            # det1 falls outside of the box [-bin_size:+bin_size] around last detection
+
+            # 1. Interpolate exact point between det0 and det1 that this happens
+            if abs(det1['x'] - new_history[-1]['x']) >= bin_size:
+                if det1['x'] - new_history[-1]['x'] >= bin_size:
+                    # det1 left of last
+                    x = new_history[-1]['x'] + bin_size
+                    f = inv_lerp(det0['x'], det1['x'], x)
+                elif new_history[-1]['x'] - det1['x'] >= bin_size:
+                    # det1 left of last
+                    x = new_history[-1]['x'] - bin_size
+                    f = inv_lerp(det0['x'], det1['x'], x)
+                y = lerp(det0['y'], det1['y'], f)
+            if abs(det1['y'] - new_history[-1]['y']) >= bin_size:
+                if det1['y'] - new_history[-1]['y'] >= bin_size:
+                    # det1 left of last
+                    y = new_history[-1]['y'] + bin_size
+                    f = inv_lerp(det0['y'], det1['y'], y)
+                elif new_history[-1]['y'] - det1['y'] >= bin_size:
+                    # det1 left of last
+                    y = new_history[-1]['y'] - bin_size
+                    f = inv_lerp(det0['y'], det1['y'], y)
+                x = lerp(det0['x'], det1['x'], f)
+
+            # 2. Find closest point on rectangle (rectangle's four corners, or 4 midpoints)
+            points = get_bins(bin_size)
+            points = [[new_history[-1]['x']+p[0], new_history[-1]['y']+p[1]] for p in points]
+
+            distances = [np.linalg.norm([p[0] - x, p[1] - y]) for p in points]
+            closest = np.argmin(distances)
+            point = points[closest]
+
+            new_history.append({'x': point[0], 'y': point[1]})
+            # todo Offsets to points:[ history for in points]
+
+        return new_history
 
     def to_trajectron_node(self, camera: Camera, env: Environment) -> Node:
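
A hedged usage sketch of the new method (track, camera, and the 0.5 value are illustrative; 0.5 matches the bin_size=.5 default of the old commented-out version):

binned = track.get_binned(0.5, camera=camera)
# e.g. [{'x': 1.0, 'y': 2.0}, {'x': 1.5, 'y': 2.5}, ...]: after the (optionally
# grid-rounded) start point, each step lands on one of the 8 offsets from
# get_bins(0.5), i.e. the edge midpoints and corners of the bin-sized box
# around the previous point.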


@@ -93,7 +93,7 @@ def start():
         ]
     if args.render_file or args.render_url or args.render_window:
-        if not args.render_no_preview or args.render_file or args.render_url:
+        if not args.render_no_preview: #or args.render_file or args.render_url:
             procs.append(
                 # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
                 ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')


@@ -361,7 +361,8 @@ class PredictionServer:
                     # data = json.dumps({})
                     # TODO)) signal doing nothing
-                    # self.send_frame(frame)
+                    # TODO)) And update the network
+                    self.send_frame(frame)
                     continue


@@ -17,3 +17,7 @@ def inv_lerp(a: float, b: float, v: float) -> float:
     0.8 == inv_lerp(1, 5, 4.2)
     """
     return (v - a) / (b - a)
+
+
+def get_bins(bin_size: float):
+    return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
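
For illustration, the eight offsets for a 0.5-unit grid, in order (right, up-right, up, up-left, left, down-left, down, down-right):

>>> get_bins(0.5)
[[0.5, 0], [0.5, 0.5], [0, 0.5], [-0.5, 0.5], [-0.5, 0], [-0.5, -0.5], [0, -0.5], [0.5, -0.5]]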