CV renderer and binning options
parent d8004e9125 · commit 6b12ddf08a
7 changed files with 120 additions and 62 deletions
@@ -63,13 +63,14 @@ class AnimationRenderer:
         self.frame_size = (self.config.camera.w,self.config.camera.h)
         self.hide_stats = False
         self.out_writer = None # self.start_writer() if self.config.render_file else None
-        self.streaming_process = None # self.start_streaming() if self.config.render_url else None
+        self.streaming_process = self.start_streaming() if self.config.render_url else None

-        if self.config.render_window:
-            pass
-            # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
-            # cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
-        else:
+        # if self.config.render_window:
+        #     pass
+        #     # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
+        #     # cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
+        # else:
+        if self.streaming_process is not None:
             pyglet.options["headless"] = True

         config = pyglet.gl.Config(sample_buffers=1, samples=4)
@@ -78,8 +79,10 @@ class AnimationRenderer:
         display = pyglet.canvas.get_display()
         screen = display.get_screens()[0]

-        # self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screens[1])
-        self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=config, fullscreen=True, screen=screen)
+        if self.streaming_process is not None:
+            self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screen)
+        else:
+            self.window = pyglet.window.Window(width=screen.width, height=screen.height, config=config, fullscreen=True, screen=screen)
         self.window.set_handler('on_draw', self.on_draw)
         self.window.set_handler('on_refresh', self.on_refresh)
         self.window.set_handler('on_close', self.on_close)
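The two hunks above tie the pyglet display mode to the streaming state: when config.render_url is set, start_streaming() is launched in __init__, pyglet is switched to headless mode (off-screen EGL context, no display server needed), and the window is created at the fixed frame size instead of fullscreen. A minimal standalone sketch of that pattern — STREAMING and the 1280x720 size are stand-ins, not values from this repo:

    import pyglet

    STREAMING = True  # stand-in for: self.streaming_process is not None

    if STREAMING:
        # must be set before any display or window is created,
        # which is why the renderer toggles it early in __init__
        pyglet.options["headless"] = True

    config = pyglet.gl.Config(sample_buffers=1, samples=4)  # 4x MSAA, as in the diff
    display = pyglet.canvas.get_display()
    screen = display.get_screens()[0]

    if STREAMING:
        window = pyglet.window.Window(width=1280, height=720, config=config,
                                      fullscreen=False, screen=screen)
    else:
        window = pyglet.window.Window(width=screen.width, height=screen.height,
                                      config=config, fullscreen=True, screen=screen)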
@@ -93,8 +96,9 @@ class AnimationRenderer:

         self.pins = PygletCornerPin(
             self.window,
-            source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
-            corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]])
+            # source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
+            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
+            )
         self.window.push_handlers(self.pins)

         pyglet.gl.glClearColor(255,255,255,255)
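PygletCornerPin warps the rendered scene so that four source points land on four measured display corners — the usual corner-pin alignment for projection setups. For reference, the mapping those four point pairs define is a homography; an illustrative OpenCV equivalent using the (now commented-out) hard-coded pairs from the hunk, not code from this repo:

    import cv2
    import numpy as np

    # four rendered-space points and where they should appear on the display
    source_points = np.float32([[540, 310], [1380, 310], [540, 400], [1380, 400]])  # 670-360=310, 760-360=400
    corners = np.float32([[471, 304], [1797, 376], [467, 387], [1792, 484]])

    H = cv2.getPerspectiveTransform(source_points, corners)
    # warping a rendered frame with H gives the corner-pinned output:
    # warped = cv2.warpPerspective(frame, H, (display_w, display_h))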
@@ -153,6 +157,32 @@ class AnimationRenderer:
         self.init_labels()


+    def start_streaming(self):
+        """TODO)) This should be inherited from a generic renderer"""
+        return (
+            ffmpeg
+            .input('pipe:', format='rawvideo',codec="rawvideo", pix_fmt='bgr24', s='{}x{}'.format(*self.frame_size))
+            .output(
+                self.config.render_url,
+                #codec = "copy", # use same codecs of the original video
+                codec='libx264',
+                listen=1, # enables HTTP server
+                pix_fmt="yuv420p",
+                preset="ultrafast",
+                tune="zerolatency",
+                # g=f"{self.fps*2}",
+                g=f"{60*2}",
+                analyzeduration="2000000",
+                probesize="1000000",
+                f='mpegts'
+            )
+            .overwrite_output()
+            .run_async(pipe_stdin=True)
+        )
+        # return process
+
+
+
     def init_shapes(self):
         '''
         Due to error when running headless, we need to configure options before extending the shapes class
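start_streaming() builds an ffmpeg-python pipeline that reads raw BGR frames from stdin and serves low-latency H.264 in an MPEG-TS container over HTTP (listen=1 makes ffmpeg itself the server at config.render_url, per the diff's own comment). A hedged sketch of the calling side, assuming 1280x720 frames; run_async(pipe_stdin=True) returns a subprocess.Popen:

    import numpy as np

    process = renderer.start_streaming()  # renderer: an AnimationRenderer with render_url set

    frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # one BGR frame, matching s='1280x720'
    process.stdin.write(frame.tobytes())              # rawvideo: just pixel bytes, no header
    # ... one write per rendered frame ...

    process.stdin.close()
    process.wait()

A client can then play the stream with e.g. ffplay pointed at the configured render_url.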
@@ -483,18 +483,22 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,

     base_color = (255,)*3
     info_color = (255,255,0)
+    predictor_color = (255,0,255)
+    tracker_color = (0,255,255)

     cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+    cv2.putText(img, f"{frame.time - first_time: >10.2f}s", (150,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)

     if prediction_frame:
         # render Δt and Δ frames
-        cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (580,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (660,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
-        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()]):.2f}", (740,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+        cv2.putText(img, f"{tracker_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.index - frame.index}", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (230,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (290,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{len(tracker_frame.tracks)} tracks", (620,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (700,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (780,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()]):.2f}", (860,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)

     options = []
     for option in ['prediction_horizon','num_samples','full_dist','gmm_mode','z_mode', 'model_dir']:
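The overlay now colour-codes its sources: cyan (tracker_color) for tracker-frame stats, magenta (predictor_color) for prediction-frame stats, each showing Δframes against the camera frame and Δt against the wall clock (frame.time - time.time(), i.e. seconds behind real time). The h/ph/p figures are mean list lengths across tracks; a sketch of that idiom using the same field names as the diff:

    import numpy as np

    tracks = prediction_frame.tracks.values()  # assumed: mapping of track id -> Track

    avg_h  = np.average([len(t.history or []) for t in tracks])            # h:  from t.history
    avg_ph = np.average([len(t.predictor_history or []) for t in tracks])  # ph: from t.predictor_history
    avg_p  = np.average([len(t.predictions or []) for t in tracks])        # p:  from t.predictions

The `or []` guards matter because predictor_history and predictions default to None on Track (see the dataclass hunk below).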
@@ -25,7 +25,8 @@ from bytetracker.basetrack import TrackState as ByteTrackTrackState
 from trajectron.environment import Environment, Node, Scene
 from urllib.parse import urlparse

-from trap.utils import lerp
+from trap.utils import get_bins
+from trap.utils import inv_lerp, lerp

 logger = logging.getLogger('trap.frame_emitter')
@@ -219,7 +220,7 @@ class Track:
     history: List[Detection] = field(default_factory=list)
     predictor_history: Optional[list] = None # in image space
     predictions: Optional[list] = None
-    fps: int = 12
+    fps: int = 12 # TODO)) convert this to camera? That way, incorporates H and dist, alternatively, each track is as a whole attached to a space
     source: Optional[int] = None # to keep track of processed tracks

     def get_projected_history(self, H: Optional[cv2.Mat] = None, camera: Optional[Camera]= None) -> np.array:
@@ -290,49 +291,67 @@ class Track:
             t.predictions,
             t.fps/step_size)

-    # def get_binned(self, bin_size=.5, remove_overlap=True):
-    #     """
-    #     For an experiment: what if we predict using only concrete positions, by mapping
-    #     dx,dy to a grid. Thus prediction can be for 8 moves, or rather headings
-    #     see ~/notes/attachments example svg
-    #     """
+    def get_binned(self, bin_size, camera: Camera, bin_start=True):
+        """
+        For an experiment: what if we predict using only concrete positions, by mapping
+        dx,dy to a grid. Thus prediction can be for 8 moves, or rather headings
+        see ~/notes/attachments example svg
+        """

-    #     new_history: List[Detection] = []
-    #     for i, (det0, det1) in enumerate(zip(self.history[:-1], self.history[1:]):
-    #         if i == 0:
-    #             new_history.append(det0)
-    #             continue
-    #         if abs(det1.x - new_history[-1].x) < bin_size or abs(det1.y - new_history[-1].y) < bin_size:
-    #             continue
+        history = self.get_projected_history_as_dict(H=None, camera=camera)

-    #         # det1 falls outside of the box [-bin_size:+bin_size] around last detection
+        def round_to_grid_precision(x):
+            factor = 1/bin_size
+            return round(x * factor) / factor

-    #         # 1. Interpolate exact point between det0 and det1 that this happens
-    #         if abs(det1.x - new_history[-1].x) >= bin_size:
-    #             if det1.x - new_history[-1].x >= bin_size:
-    #                 # det1 left of last
-    #                 x = new_history[-1].x + bin_size
-    #                 f = inv_lerp(det0.x, det1.x, x)
-    #             elif new_history[-1].x - det1.x >= bin_size:
-    #                 # det1 left of last
-    #                 x = new_history[-1].x - bin_size
-    #                 f = inv_lerp(det0.x, det1.x, x)
-    #             y = lerp(det0.y, det1.y, f)
-    #         if abs(det1.y - new_history[-1].y) >= bin_size:
-    #             if det1.y - new_history[-1].y >= bin_size:
-    #                 # det1 left of last
-    #                 y = new_history[-1].y + bin_size
-    #                 f = inv_lerp(det0.y, det1.y, x)
-    #             elif new_history[-1].y - det1.y >= bin_size:
-    #                 # det1 left of last
-    #                 y = new_history[-1].y - bin_size
-    #                 f = inv_lerp(det0.y, det1.y, x)
-    #             x = lerp(det0.x, det1.x, f)
+        new_history: List[dict] = []
+        for i, (det0, det1) in enumerate(zip(history[:-1], history[1:])):
+            if i == 0:
+                new_history.append({
+                    'x': round_to_grid_precision(det0['x']),
+                    'y': round_to_grid_precision(det0['y'])
+                    } if bin_start else det0)
+                continue
+            if abs(det1['x'] - new_history[-1]['x']) < bin_size and abs(det1['y'] - new_history[-1]['y']) < bin_size:
+                continue
+
+            # det1 falls outside of the box [-bin_size:+bin_size] around last detection
+
+            # 1. Interpolate exact point between det0 and det1 that this happens
+            if abs(det1['x'] - new_history[-1]['x']) >= bin_size:
+                if det1['x'] - new_history[-1]['x'] >= bin_size:
+                    # det1 left of last
+                    x = new_history[-1]['x'] + bin_size
+                    f = inv_lerp(det0['x'], det1['x'], x)
+                elif new_history[-1]['x'] - det1['x'] >= bin_size:
+                    # det1 left of last
+                    x = new_history[-1]['x'] - bin_size
+                    f = inv_lerp(det0['x'], det1['x'], x)
+                y = lerp(det0['y'], det1['y'], f)
+            if abs(det1['y'] - new_history[-1]['y']) >= bin_size:
+                if det1['y'] - new_history[-1]['y'] >= bin_size:
+                    # det1 left of last
+                    y = new_history[-1]['y'] + bin_size
+                    f = inv_lerp(det0['y'], det1['y'], y)
+                elif new_history[-1]['y'] - det1['y'] >= bin_size:
+                    # det1 left of last
+                    y = new_history[-1]['y'] - bin_size
+                    f = inv_lerp(det0['y'], det1['y'], y)
+                x = lerp(det0['x'], det1['x'], f)

-    #         # # 2. Find closest point on rectangle (rectangle's four corners, or 4 midpoints)
-    #         # points = [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
-    #         # # todo Offsets to points:[ history for in points]
+            # 2. Find closest point on rectangle (rectangle's four corners, or 4 midpoints)
+            points = get_bins(bin_size)
+            points = [[new_history[-1]['x']+p[0], new_history[-1]['y'] + p[1]] for p in points]
+
+            distances = [np.linalg.norm([p[0] - x, p[1]-y]) for p in points]
+            closest = np.argmin(distances)
+
+            point = points[closest]
+
+            new_history.append({'x': point[0], 'y':point[1]})
+            # todo Offsets to points:[ history for in points]
+        return new_history


     def to_trajectron_node(self, camera: Camera, env: Environment) -> Node:
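In outline, get_binned() snaps a projected track onto a square grid: it keeps the last binned point, skips detections that stay within ±bin_size of it, interpolates (lerp/inv_lerp) where the track crosses that box, and snaps the crossing to the nearest of the 8 surrounding grid offsets — so every step becomes one of 8 headings, as the docstring describes. A condensed, self-contained sketch of the same idea (helpers inlined; a single crossing rule instead of the committed two-pass x/y handling, and no bin_start rounding of the seed point):

    from typing import List
    import numpy as np

    def lerp(a: float, b: float, f: float) -> float:
        return a + f * (b - a)

    def inv_lerp(a: float, b: float, v: float) -> float:
        return (v - a) / (b - a)

    def get_bins(bin_size: float):
        # the 8 one-cell offsets around a point: axis moves and diagonals
        return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size],
                [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]

    def bin_history(history: List[dict], bin_size: float) -> List[dict]:
        new_history = [dict(history[0])]
        for det0, det1 in zip(history[:-1], history[1:]):
            last = new_history[-1]
            if abs(det1['x'] - last['x']) < bin_size and abs(det1['y'] - last['y']) < bin_size:
                continue  # still inside the +/-bin_size box around the last binned point
            # 1. interpolate where the segment det0->det1 leaves the box
            x, y = det1['x'], det1['y']
            if abs(det1['x'] - last['x']) >= bin_size:
                x = last['x'] + bin_size if det1['x'] > last['x'] else last['x'] - bin_size
                f = inv_lerp(det0['x'], det1['x'], x)  # assumes det0['x'] != det1['x']
                y = lerp(det0['y'], det1['y'], f)
            elif abs(det1['y'] - last['y']) >= bin_size:
                y = last['y'] + bin_size if det1['y'] > last['y'] else last['y'] - bin_size
                f = inv_lerp(det0['y'], det1['y'], y)
                x = lerp(det0['x'], det1['x'], f)
            # 2. snap the crossing to the nearest of the 8 grid offsets
            points = [[last['x'] + dx, last['y'] + dy] for dx, dy in get_bins(bin_size)]
            point = points[int(np.argmin([np.hypot(p[0] - x, p[1] - y) for p in points]))]
            new_history.append({'x': point[0], 'y': point[1]})
        return new_history

    track = [{'x': 0.0, 'y': 0.0}, {'x': 0.3, 'y': 0.1}, {'x': 0.9, 'y': 0.2}, {'x': 1.4, 'y': 1.1}]
    print(bin_history(track, bin_size=0.5))
    # [{'x': 0.0, 'y': 0.0}, {'x': 0.5, 'y': 0.0}, {'x': 1.0, 'y': 0.5}]

Like the committed loop, this advances at most one grid step per input segment; a detection that jumps several cells still contributes a single heading.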
@@ -93,7 +93,7 @@ def start():
     ]

     if args.render_file or args.render_url or args.render_window:
-        if not args.render_no_preview or args.render_file or args.render_url:
+        if not args.render_no_preview: #or args.render_file or args.render_url:
            procs.append(
                # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
                ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
@@ -361,7 +361,8 @@ class PredictionServer:

                 # data = json.dumps({})
                 # TODO)) signal doing nothing
-                # self.send_frame(frame)
+                # TODO)) And update the network
+                self.send_frame(frame)

                 continue

@@ -17,3 +17,7 @@ def inv_lerp(a: float, b: float, v: float) -> float:
     0.8 == inv_lerp(1, 5, 4.2)
     """
     return (v - a) / (b - a)
+
+
+def get_bins(bin_size: float):
+    return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
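get_bins() enumerates the eight one-cell offsets around a point (the four axis moves and four diagonals), which is what turns each binned step into one of 8 headings. Note the diagonals sit at distance bin_size·√2 rather than bin_size, so the headings are equal in grid steps but not in metric length. Quick illustrative checks of both helpers:

    assert inv_lerp(1, 5, 4.2) == 0.8     # fraction of the way from 1 to 5, matching the docstring
    assert len(get_bins(0.5)) == 8        # eight headings
    assert [0.5, 0] in get_bins(0.5)      # one cell along +x
    assert [0.5, 0.5] in get_bins(0.5)    # one diagonal cell, metric length 0.5 * 2**0.5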