fixes for projection trial

Ruben van de Ven 2024-12-18 12:04:29 +01:00
parent 5ceeda05d7
commit 8bcca04ecc
5 changed files with 89 additions and 49 deletions

View file

@@ -30,7 +30,8 @@ from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
 logger = logging.getLogger("trap.renderer")
 
-COLOR_PRIMARY = (0,0,0,255)
+# COLOR_PRIMARY = (0,0,0,255)
+COLOR_PRIMARY = (255,255,255, 255)
 
 class AnimationRenderer:
     def __init__(self, config: Namespace, is_running: BaseEvent):
@@ -96,12 +97,13 @@
         self.pins = PygletCornerPin(
             self.window,
-            # source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
+            source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
+            corners=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
             # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
         )
         self.window.push_handlers(self.pins)
-        pyglet.gl.glClearColor(255,255,255,255)
+        # pyglet.gl.glClearColor(255,255,255,255)
 
         self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=COLOR_PRIMARY)
         self.fps_display.label.x = self.window.width - 50
         self.fps_display.label.y = self.window.height - 17
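For context on the colour flip above, a minimal sketch using only standard pyglet (not part of the commit): COLOR_PRIMARY becomes white and the explicit white glClearColor call is dropped, so the renderer falls back to OpenGL's default black clear colour, giving white lines on a black background for the projection. Note that pyglet.gl.glClearColor expects floats in the 0..1 range, so the removed call with 255s was simply clamped to white; whether the FPSDisplay color keyword is available depends on the pyglet version the repo pins.

import pyglet

window = pyglet.window.Window(1280, 720)
pyglet.gl.glClearColor(0.0, 0.0, 0.0, 1.0)    # explicit black background (also the default)
COLOR_PRIMARY = (255, 255, 255, 255)          # draw overlays and labels in white
fps_display = pyglet.window.FPSDisplay(window=window, color=COLOR_PRIMARY)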

View file

@@ -27,6 +27,7 @@ from PIL import Image
 from trap.frame_emitter import DetectionState, Frame, Track, Camera
 from trap.preview_renderer import FrameWriter
 from trap.tools import draw_track, draw_track_predictions, draw_track_projected, draw_trackjectron_history, to_point
+from trap.utils import convert_world_points_to_img_points, convert_world_space_to_img_space
@@ -447,19 +448,6 @@ def get_opacity(track: Track, current_frame: Frame):
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
 
-def convert_world_space_to_img_space(H: cv2.Mat):
-    """Transform the given matrix so that it immediately converts
-    the points to img space"""
-    new_H = H.copy()
-    new_H[:2] = H[:2] * 100
-    return new_H
-
-def convert_world_points_to_img_points(points: Iterable):
-    """Transform the given matrix so that it immediately converts
-    the points to img space"""
-    if isinstance(points, np.ndarray):
-        return np.array(points) * 100
-    return [[p[0]*100, p[1]*100] for p in points]
@@ -479,7 +467,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     # Fill image with red color(set each pixel to red)
     overlay[:] = (0, 0, 0)
 
-    img = cv2.addWeighted(dst_img, .1, overlay, .3, 0)
+    img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)
     # img = frame.img.copy()
 
     # all not working:
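Side note on the weight change above (not in the commit): cv2.addWeighted computes dst_img * alpha + overlay * beta + gamma per pixel, so raising the first weight from .1 to .2 lets roughly twice as much of the warped camera frame show through the dark overlay. A small worked example:

import numpy as np
import cv2

dst_img = np.full((4, 4, 3), 200, dtype=np.uint8)   # stand-in for the warped camera frame
overlay = np.zeros_like(dst_img)                    # the black overlay from decorate_frame
img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)  # 200*0.2 + 0*0.3 = 40 grey (was 20 with .1)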

View file

@@ -35,10 +35,23 @@ class DataclassJSONEncoder(json.JSONEncoder):
         if isinstance(o, np.ndarray):
             return o.tolist()
         if dataclasses.is_dataclass(o):
-            d = dataclasses.asdict(o)
             if isinstance(o, Frame):
-                # Don't send images over JSON
-                del d['img']
+                tracks = {}
+                for track_id, track in o.tracks.items():
+                    track_obj = dataclasses.asdict(track)
+                    track_obj['history'] = track.get_projected_history(None, o.camera)
+                    tracks[track_id] = track_obj
+                d = {
+                    'index': o.index,
+                    'time': o.time,
+                    'tracks': tracks,
+                    'camera': dataclasses.asdict(o.camera),
+                }
+            else:
+                d = dataclasses.asdict(o)
+            # if isinstance(o, Frame):
+            #     # Don't send images over JSON
+            #     del d['img']
             return d
         return super().default(o)
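Hedged usage sketch (not part of the commit): the encoder is passed as cls to json.dumps, so a Frame is now serialised without its image and every track carries a world-space 'history' produced by get_projected_history().

import json

# `frame` stands in for a trap.frame_emitter.Frame coming out of the pipeline
payload = json.dumps(frame, cls=DataclassJSONEncoder)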

View file

@@ -18,12 +18,13 @@ import tempfile
 from pathlib import Path
 import shutil
 import math
-from typing import Optional
+from typing import List, Optional
 from pyglet import shapes
 from PIL import Image
+from trap.utils import convert_world_points_to_img_points
 from trap.frame_emitter import DetectionState, Frame, Track, Camera
@@ -111,16 +112,21 @@ class DrawnTrack:
             # color = (128,0,128) if pred_i else (128,
 
-    def update_drawn_positions(self, dt) -> []:
+    def update_drawn_positions(self, dt) -> List:
         '''
        use dt to lerp the drawn positions in the direction of current prediction
        '''
        # TODO: make lerp, currently quick way to get results
+        def int_or_not(v):
+            """quick wrapper to toggle int'ing"""
+            return v
+            # return int(v)
 
        # 1. track history
        for i, pos in enumerate(self.drawn_positions):
-            self.drawn_positions[i][0] = int(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
-            self.drawn_positions[i][1] = int(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
+            self.drawn_positions[i][0] = int_or_not(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
+            self.drawn_positions[i][1] = int_or_not(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
 
        if len(self.coords) > len(self.drawn_positions):
            self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
@@ -128,8 +134,8 @@ class DrawnTrack:
        # 2. history as seen by predictor (Trajectron)
        for i, pos in enumerate(self.drawn_pred_history):
            if len(self.pred_history_coords) > i:
-                self.drawn_pred_history[i][0] = int(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
-                self.drawn_pred_history[i][1] = int(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
+                self.drawn_pred_history[i][0] = int_or_not(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
+                self.drawn_pred_history[i][1] = int_or_not(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
 
        if len(self.pred_history_coords) > len(self.drawn_pred_history):
            self.drawn_pred_history.extend(self.coords[len(self.drawn_pred_history):])
@@ -147,7 +153,7 @@ class DrawnTrack:
                    r = exponentialDecay(drawn_r, pred_r, decay, dt)
                    angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
                    x, y = relativePolarToPoint(origin, r, angle)
-                    self.drawn_predictions[a][i] = int(x), int(y)
+                    self.drawn_predictions[a][i] = int_or_not(x), int_or_not(y)
                    # self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
                    # self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
@@ -159,21 +165,32 @@ class DrawnTrack:
            # self.drawn_positions = self.coords
 
        # finally: update shapes from coordinates
        self.update_shapes(dt)
        return self.drawn_positions
 
    def update_shapes(self, dt):
-        if len(self.shapes) > len(self.drawn_positions):
-            self.shapes = self.shapes[:len(self.drawn_positions)]
-
-        # for i, pos in self.drawn_positions.enumerate():
-        for ci in range(1, len(self.drawn_positions)):
-            x, y = [int(p) for p in self.drawn_positions[ci-1]]
-            x2, y2 = [int(p) for p in self.drawn_positions[ci]]
+        drawn_positions = convert_world_points_to_img_points(self.drawn_positions)
+        drawn_pred_history = convert_world_points_to_img_points(self.drawn_pred_history)
+        drawn_predictions = [convert_world_points_to_img_points(p) for p in self.drawn_predictions]
+        # positions = convert_world_points_to_img_points(self.drawn_predictions)
+        # print("drawn",
+        #       drawn_positions,'self', self.drawn_positions
+        # )
+
+        if len(self.shapes) > len(drawn_positions):
+            self.shapes = self.shapes[:len(drawn_positions)]
+
+        # for i, pos in drawn_positions.enumerate():
+        for ci in range(1, len(drawn_positions)):
+            x, y = [int(p) for p in drawn_positions[ci-1]]
+            x2, y2 = [int(p) for p in drawn_positions[ci]]
 
            y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
-            color = [100+155*ci // len(self.drawn_positions)]*3
+            color = [100+155*ci // len(drawn_positions)]*3
            # print(x,y,x2,y2,color)
 
            if ci >= len(self.shapes):
@@ -195,13 +212,13 @@ class DrawnTrack:
                # TODO: add intermediate segment
                color = colorset[self.track_id % len(colorset)]
 
-        if len(self.pred_history_shapes) > len(self.drawn_pred_history):
-            self.pred_history_shapes = self.pred_history_shapes[:len(self.drawn_pred_history)]
-
-        # for i, pos in self.drawn_pred_history.enumerate():
-        for ci in range(1, len(self.drawn_pred_history)):
-            x, y = [int(p) for p in self.drawn_pred_history[ci-1]]
-            x2, y2 = [int(p) for p in self.drawn_pred_history[ci]]
+        if len(self.pred_history_shapes) > len(drawn_pred_history):
+            self.pred_history_shapes = self.pred_history_shapes[:len(drawn_pred_history)]
+
+        # for i, pos in drawn_pred_history.enumerate():
+        for ci in range(1, len(drawn_pred_history)):
+            x, y = [int(p) for p in drawn_pred_history[ci-1]]
+            x2, y2 = [int(p) for p in drawn_pred_history[ci]]
 
            y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
@@ -221,22 +238,22 @@ class DrawnTrack:
                line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
 
-        for a, drawn_predictions in enumerate(self.drawn_predictions):
+        for a, drawn_prediction in enumerate(drawn_predictions):
            if len(self.pred_shapes) <= a:
                self.pred_shapes.append([])
 
-            if len(self.pred_shapes[a]) > (len(drawn_predictions) +1):
-                self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_predictions)]
+            if len(self.pred_shapes[a]) > (len(drawn_prediction) +1):
+                self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_prediction)]
 
            # for i, pos in drawn_predictions.enumerate():
-            for ci in range(0, len(drawn_predictions)):
+            for ci in range(0, len(drawn_prediction)):
                if ci == 0:
                    continue
-                    # x, y = [int(p) for p in self.drawn_positions[-1]]
+                    # x, y = [int(p) for p in drawn_positions[-1]]
                else:
-                    x, y = [int(p) for p in drawn_predictions[ci-1]]
-                    x2, y2 = [int(p) for p in drawn_predictions[ci]]
+                    x, y = [int(p) for p in drawn_prediction[ci-1]]
+                    x2, y2 = [int(p) for p in drawn_prediction[ci]]
 
                y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
 
                # color = [255,0,0]
@@ -256,7 +273,7 @@ class DrawnTrack:
                    line.x2, line.y2 = x2, y2
                    line.color = color
                    decay = (16/ci) if ci else 16
-                    half = len(drawn_predictions) / 2
+                    half = len(drawn_prediction) / 2
                    if ci < half:
                        target_opacity = 60
                    else:
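A note on the int_or_not helper and the update_shapes changes above (illustration only, not from the commit): the drawn_* lists now stay in world coordinates (presumably metres) while they are being interpolated, and are only scaled to pixels by convert_world_points_to_img_points inside update_shapes. Rounding with int() before that scale would snap positions to whole world units, which is why the rounding is toggled off during the lerp:

pos_world = [1.234, 5.678]                        # assumed world units (metres)
snapped_world = [int(p) for p in pos_world]       # [1, 5]         -> whole-unit jumps while lerping
pos_img = [p * 100 for p in pos_world]            # [123.4, 567.8]  pixels after the 100x scale
snapped_img = [int(p) for p in pos_img]           # [123, 567]     -> only single-pixel jumps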

View file

@@ -1,4 +1,9 @@
 # lerp & inverse lerp from https://gist.github.com/laundmo/b224b1f4c8ef6ca5fe47e132c8deab56
+from typing import Iterable
+
+import cv2
+import numpy as np
+
 def lerp(a: float, b: float, t: float) -> float:
     """Linear interpolate on the scale given by a to b, using t as the point on that scale.
     Examples
@@ -21,3 +26,18 @@ def inv_lerp(a: float, b: float, v: float) -> float:
 def get_bins(bin_size: float):
     return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
+
+def convert_world_space_to_img_space(H: cv2.Mat):
+    """Transform the given matrix so that it immediately converts
+    the points to img space"""
+    new_H = H.copy()
+    new_H[:2] = H[:2] * 100
+    return new_H
+
+def convert_world_points_to_img_points(points: Iterable):
+    """Transform the given matrix so that it immediately converts
+    the points to img space"""
+    if isinstance(points, np.ndarray):
+        return np.array(points) * 100
+    return [[p[0]*100, p[1]*100] for p in points]
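Hedged usage sketch of the two helpers moved into trap/utils.py (not part of the commit; it assumes world coordinates in metres and relies on the fixed 100 px-per-unit scale baked into the functions):

import numpy as np
import cv2

from trap.utils import convert_world_space_to_img_space, convert_world_points_to_img_points

H = np.eye(3)                                    # stand-in for the camera's world-space homography
H_img = convert_world_space_to_img_space(H)      # same mapping, but the output lands in pixels

world_pts = np.array([[[1.5, 2.0]]], dtype=np.float32)       # OpenCV expects shape (N, 1, 2)
img_pts = cv2.perspectiveTransform(world_pts, H_img)         # [[[150., 200.]]]

# or scale points that are already in world space:
img_pts2 = convert_world_points_to_img_points([[1.5, 2.0]])  # [[150.0, 200.0]]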