Compare commits
No commits in common. "cebe102e743563d221cd932dc73e7c6594b06ae8" and "5ceeda05d745c0c1fd056592a97eea0856559461" have entirely different histories.
cebe102e74 ... 5ceeda05d7
12 changed files with 251 additions and 491 deletions
@@ -2,10 +2,10 @@
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack

 tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.0001 # threshold for the first association
-track_low_thresh: 0.0001 # threshold for the second association
-new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
-track_buffer: 50 # buffer to calculate the time when to remove tracks
-match_thresh: 0.95 # threshold for matching tracks
+track_high_thresh: 0.05 # threshold for the first association
+track_low_thresh: 0.01 # threshold for the second association
+new_track_thresh: 0.1 # threshold for init new track if the detection does not match any tracks
+track_buffer: 35 # buffer to calculate the time when to remove tracks
+match_thresh: 0.9 # threshold for matching tracks
 fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
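The five changed thresholds feed ByteTrack's two-stage association (high-confidence detections are matched first, low-confidence ones in a second pass). As a hedged illustration — assuming this YAML follows the Ultralytics tracker-config format its header comment points at — such a file is typically handed to a YOLO tracking call; the file and model names below are placeholders:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder detection model
results = model.track(
    source="video.mp4",               # placeholder input
    tracker="custom_bytetrack.yaml",  # the tuned ByteTrack config above
    persist=True,                     # keep track IDs across successive calls
)
```

Raising `track_high_thresh` from 0.0001 to 0.05 means near-zero-confidence detections no longer enter the first association pass, trading a little recall for far fewer spurious tracks.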
@@ -2,7 +2,6 @@
 from __future__ import annotations
 import time
-import tracemalloc
 import ffmpeg
 from argparse import Namespace
 import datetime
@@ -27,19 +26,14 @@ import json

 from trap.frame_emitter import DetectionState, Frame, Track
 from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
-from trap.utils import convert_world_space_to_img_space, display_top

 logger = logging.getLogger("trap.renderer")

-# COLOR_PRIMARY = (0,0,0,255)
-COLOR_PRIMARY = (255,255,255, 255)
+COLOR_PRIMARY = (0,0,0,255)

 class AnimationRenderer:
     def __init__(self, config: Namespace, is_running: BaseEvent):

-        tracemalloc.start()
         self.config = config
         self.is_running = is_running
@@ -54,10 +48,10 @@ class AnimationRenderer:
         self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
         self.tracker_sock.connect(config.zmq_trajectory_addr)

-        self.frame_noimg_sock = context.socket(zmq.SUB)
-        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
-        self.frame_noimg_sock.setsockopt(zmq.SUBSCRIBE, b'')
-        self.frame_noimg_sock.connect(config.zmq_frame_noimg_addr)
+        self.frame_sock = context.socket(zmq.SUB)
+        self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
+        self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
+        self.frame_sock.connect(config.zmq_frame_addr)

         self.H = self.config.H
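The inline comment is the key detail of this hunk: `ZMQ_CONFLATE` only takes effect when set before `connect()`. A minimal pyzmq sketch of the subscriber side (the address is illustrative):

```python
import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
# Must come BEFORE connect(); set afterwards it is silently ignored and
# the subscriber queues old frames instead of keeping only the latest.
sub.setsockopt(zmq.CONFLATE, 1)
sub.setsockopt(zmq.SUBSCRIBE, b'')
sub.connect("ipc:///tmp/feeds_frame")  # illustrative address
frame = sub.recv_pyobj()  # always the most recently published frame
```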
@@ -67,9 +61,7 @@
         # self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
         self.fps = 60
         self.frame_size = (self.config.camera.w,self.config.camera.h)
-        self.hide_stats = self.config.render_hide_stats
-        self.hide_bg = True
-        self.pause = False
+        self.hide_stats = False
         self.out_writer = None # self.start_writer() if self.config.render_file else None
         self.streaming_process = self.start_streaming() if self.config.render_url else None
@@ -85,9 +77,7 @@ class AnimationRenderer:
         # , fullscreen=self.config.render_window

         display = pyglet.canvas.get_display()
-        idx = -1 if self.config.render_window else 0
-        screen = display.get_screens()[idx]
-        print(display.get_screens())
+        screen = display.get_screens()[0]

         if self.streaming_process is not None:
             self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screen)
@@ -96,7 +86,6 @@
         self.window.set_handler('on_draw', self.on_draw)
         self.window.set_handler('on_refresh', self.on_refresh)
         self.window.set_handler('on_close', self.on_close)
-        self.window.set_handler('on_key_press', self.on_key_press)

         # don't know why, but importing this before window leads to "x connection to :1 broken (explicit kill or server shutdown)"
         from pyglet_cornerpin import PygletCornerPin
@@ -107,16 +96,12 @@

         self.pins = PygletCornerPin(
             self.window,
-            source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
-            # corners=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]], # original test: short throw?
-            # corners=[[396, 442], [1644, 734], [350, 516], [1572, 796]], # beamer downstairs
-            # corners=[[270, 452], [1698, 784], [314, 568], [1572, 860]], # ??
-            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]] # ??
-            # corners=[[576, 706], [1790, 696], [588, 794], [1728, 796]], # beamer boven
+            # source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
+            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
         )
         self.window.push_handlers(self.pins)

-        # pyglet.gl.glClearColor(255,255,255,255)
+        pyglet.gl.glClearColor(255,255,255,255)
         self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=COLOR_PRIMARY)
         self.fps_display.label.x = self.window.width - 50
         self.fps_display.label.y = self.window.height - 17
@@ -135,20 +120,16 @@
         self.batch_bg = pyglet.graphics.Batch()
         self.batch_overlay = pyglet.graphics.Batch()
         self.batch_anim = pyglet.graphics.Batch()
-        self.batch_debug = pyglet.graphics.Batch()

-        # if self.config.render_debug_shapes:
-        self.render_debug_shapes = self.config.render_debug_shapes
-        self.render_lines = True
-
-        self.debug_lines = [
-            pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug),#v
-            pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
-            pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
-            pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #v
-            pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
-        ]
+        if self.config.render_debug_shapes:
+            self.debug_lines = [
+                pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay),#v
+                pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
+                pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
+                pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #v
+                pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
+            ]

         self.debug_points = []
         # print(self.config.debug_points_file)
@@ -165,7 +146,7 @@
         dst_img_points = np.reshape(dst_img_points, (dst_img_points.shape[0], 2))

         self.debug_points = [
-            pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_debug) for p in dst_img_points
+            pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_overlay) for p in dst_img_points
         ]
@@ -278,19 +259,13 @@
             self.labels['frame_idx'].text = f"{self.frame.index:06d}"
             self.labels['frame_time'].text = f"{self.frame.time - self.first_time: >10.2f}s"
             self.labels['frame_latency'].text = f"{self.frame.time - time.time():.2f}s"

-            if self.frame.time - self.first_time > 30 and (not hasattr(self, 'has_snap') or self.has_snap == False):
-                snapshot = tracemalloc.take_snapshot()
-                display_top(snapshot, 'traceback', 15)
-                tracemalloc.stop()
-                self.has_snap = True
-
-        if self.tracker_frame and self.frame:
+        if self.tracker_frame:
             self.labels['tracker_idx'].text = f"{self.tracker_frame.index - self.frame.index}"
             self.labels['tracker_time'].text = f"{self.tracker_frame.time - time.time():.3f}s"
             self.labels['track_len'].text = f"{len(self.tracker_frame.tracks)} tracks"

-        if self.prediction_frame and self.frame:
+        if self.prediction_frame:
             self.labels['pred_idx'].text = f"{self.prediction_frame.index - self.frame.index}"
             self.labels['pred_time'].text = f"{self.prediction_frame.time - time.time():.3f}s"
             # self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"
@@ -319,32 +294,23 @@

     def check_frames(self, dt):
-        if self.pause:
-            return
-
         new_tracks = False
         try:
-            self.frame: Frame = self.frame_noimg_sock.recv_pyobj(zmq.NOBLOCK)
+            self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
             if not self.first_time:
                 self.first_time = self.frame.time
-            if self.frame.img:
-                img = self.frame.img
-                # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
-                img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
-                img = cv2.warpPerspective(img, convert_world_space_to_img_space(self.config.camera.H), (self.config.camera.w, self.config.camera.h))
-                # img = cv2.GaussianBlur(img, (15, 15), 0)
-                img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
-                img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
-                # don't draw in batch, so that it is the background
-                if hasattr(self, 'video_sprite') and self.video_sprite:
-                    self.video_sprite.delete()
-                self.frame.img = None
-
-                self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
-                # transform to flipped coordinate system for pyglet
-                self.video_sprite.y = self.window.height - self.video_sprite.height
-                # self.frame.img = np.array([]) # clearing memory?
-                # self.video_sprite.opacity = 70
+            img = self.frame.img
+            # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
+            img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
+            img = cv2.warpPerspective(img, self.config.camera.H, (self.config.camera.w, self.config.camera.h))
+            # img = cv2.GaussianBlur(img, (15, 15), 0)
+            img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
+            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
+            # don't draw in batch, so that it is the background
+            self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
+            # transform to flipped coordinate system for pyglet
+            self.video_sprite.y = self.window.height - self.video_sprite.height
+            self.video_sprite.opacity = 70
         except zmq.ZMQError as e:
             # idx = frame.index if frame else "NONE"
             # logger.debug(f"reuse video frame {idx}")
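The order of operations in `check_frames` matters: undistort in camera space first, then warp onto the projected plane, then convert for pyglet (RGB, y-axis flipped). The same pipeline in isolation, with placeholder camera parameters standing in for `self.config.camera.*`:

```python
import cv2
import numpy as np

mtx = np.eye(3)           # placeholder intrinsics
dist = np.zeros(5)        # placeholder distortion coefficients
newcameramtx = np.eye(3)  # from cv2.getOptimalNewCameraMatrix(...)
H = np.eye(3)             # placeholder homography onto the ground plane
w, h = 1280, 720

img = cv2.imread("frame.png")
img = cv2.undistort(img, mtx, dist, None, newcameramtx)  # 1. lens correction
img = cv2.warpPerspective(img, H, (w, h))                # 2. project to floor plan
img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)  # 3. RGB + flipped y for pyglet
```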
@@ -401,20 +367,11 @@
             self.window.set_fullscreen(not self.window.fullscreen)
         if symbol == ord('h'):
             self.hide_stats = not self.hide_stats
-        if symbol == ord('d'):
-            self.render_debug_shapes = not self.render_debug_shapes
-        if symbol == ord('p'):
-            self.pause = not self.pause
-        if symbol == ord('b'):
-            self.hide_bg = not self.hide_bg
-        if symbol == ord('l'):
-            self.render_lines = not self.render_lines

     def check_running(self, dt):
         if not self.is_running.is_set():
             self.window.close()
             self.event_loop.exit()
-            print('quit animation renderer')

     def on_close(self):
         self.is_running.clear()
@@ -429,8 +386,6 @@

         self.refresh_labels(dt)
-
-
         # self.shape1 = shapes.Circle(700, 150, 100, color=(50, 0, 30), batch=self.batch_anim)
         # self.shape3 = shapes.Circle(800, 150, 100, color=(100, 225, 30), batch=self.batch_anim)
         pass
@@ -438,29 +393,24 @@
     def on_draw(self):
         self.window.clear()

-        if not self.hide_bg:
-            self.batch_bg.draw()
-
-        if self.render_debug_shapes:
-            self.batch_debug.draw()
-        self.pins.draw()
-
-        if self.render_lines:
-            for track in self.drawn_tracks.values():
-                for shape in track.shapes:
-                    shape.draw() # for some reason the batches don't work
-            for track in self.drawn_tracks.values():
-                for shapes in track.pred_shapes:
-                    for shape in shapes:
-                        shape.draw()
-            # self.batch_anim.draw()
+        self.batch_bg.draw()
+
+        for track in self.drawn_tracks.values():
+            for shape in track.shapes:
+                shape.draw() # for some reason the batches don't work
+        for track in self.drawn_tracks.values():
+            for shapes in track.pred_shapes:
+                for shape in shapes:
+                    shape.draw()
+        # self.batch_anim.draw()
+        self.batch_overlay.draw()
+
+        if self.config.render_debug_shapes:
+            self.pins.draw()

         # pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))

         if not self.hide_stats:
-            self.batch_overlay.draw()
             self.fps_display.draw()

         # if streaming, capture buffer and send
@@ -546,7 +496,6 @@
             # cv2.waitKey(1)
         logger.info('Stopping')
         logger.info(f'used corner pins {self.pins.pin_positions}')
-        print(self.pins.pin_positions)

         # if i>2:
@@ -251,12 +251,7 @@ connection_parser.add_argument('--zmq-prediction-addr',
 connection_parser.add_argument('--zmq-frame-addr',
                 help='Manually specity communication addr for the frame messages',
                 type=str,
                 default="ipc:///tmp/feeds_frame")

-connection_parser.add_argument('--zmq-frame-noimg-addr',
-                help='Manually specity communication addr for the frame messages',
-                type=str,
-                default="ipc:///tmp/feeds_frame2")
-
 connection_parser.add_argument('--ws-port',
@@ -341,15 +336,12 @@ render_parser.add_argument("--render-file",
 render_parser.add_argument("--render-window",
                     help="Render a previewing to a window",
                     action='store_true')
-render_parser.add_argument("--render-animation",
-                    help="Render animation (pyglet)",
+render_parser.add_argument("--render-no-preview",
+                    help="No preview, but only animation",
                     action='store_true')
 render_parser.add_argument("--render-debug-shapes",
                     help="Lines and points for debugging/mapping",
                     action='store_true')
-render_parser.add_argument("--render-hide-stats",
-                    help="Default toggle to hide (switch with 'h')",
-                    action='store_true')
 render_parser.add_argument("--full-screen",
                     help="Set Window full screen",
                     action='store_true')
@@ -27,7 +27,6 @@ from PIL import Image
 from trap.frame_emitter import DetectionState, Frame, Track, Camera
 from trap.preview_renderer import FrameWriter
 from trap.tools import draw_track, draw_track_predictions, draw_track_projected, draw_trackjectron_history, to_point
-from trap.utils import convert_world_points_to_img_points, convert_world_space_to_img_space
@@ -334,7 +333,7 @@ class CvRenderer:

-    def run(self, timer_counter):
+    def run(self):
         frame = None
         prediction_frame = None
         tracker_frame = None
@@ -342,15 +341,11 @@
         i=0
         first_time = None

-        cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
-        # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
-        cv2.moveWindow("frame", 1920, -1)
+        cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
         cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)

         while self.is_running.is_set():
             i+=1
-            with timer_counter.get_lock():
-                timer_counter.value+=1
-
             # zmq_ev = self.frame_sock.poll(timeout=2000)
@@ -408,10 +403,10 @@
             # clear out old tracks & predictions:
             for track_id, track in list(self.tracks.items()):
-                if get_animation_position(track, frame) == 1:
+                if get_opacity(track, frame) == 0:
                     self.tracks.pop(track_id)
             for prediction_id, track in list(self.predictions.items()):
-                if get_animation_position(track, frame) == 1:
+                if get_opacity(track, frame) == 0:
                     self.predictions.pop(prediction_id)

         logger.info('Stopping')
@@ -445,13 +440,26 @@ colorset = [
 # (0,0,0),
 # ]

-def get_animation_position(track: Track, current_frame: Frame):
+def get_opacity(track: Track, current_frame: Frame):
     fade_duration = current_frame.camera.fps * 3
     diff = current_frame.index - track.history[-1].frame_nr
-    return max(0, min(1, diff / fade_duration))
+    return max(0, 1 - diff / fade_duration)
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
     # track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)

+def convert_world_space_to_img_space(H: cv2.Mat):
+    """Transform the given matrix so that it immediately converts
+    the points to img space"""
+    new_H = H.copy()
+    new_H[:2] = H[:2] * 100
+    return new_H
+
+def convert_world_points_to_img_points(points: Iterable):
+    """Transform the given matrix so that it immediately converts
+    the points to img space"""
+    if isinstance(points, np.ndarray):
+        return np.array(points) * 100
+    return [[p[0]*100, p[1]*100] for p in points]
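A worked example of the renamed fade function: with a camera at 12 fps (an assumed value), `fade_duration` is 36 frames; a track last updated 18 frames ago gets opacity `max(0, 1 - 18/36) = 0.5`, and anything 36 or more frames stale returns 0 — exactly the condition the pruning loop in `run()` now tests. The two inlined helpers encode a fixed scale of 1 world unit = 100 px:

```python
# Assumed camera fps of 12, for illustration only
fade_duration = 12 * 3                    # 36 frames of fade-out
opacity = max(0, 1 - 18 / fade_duration)  # -> 0.5; reaches 0 at diff >= 36

points = [[1.2, 0.5]]                                     # world-space coords
img_points = [[p[0] * 100, p[1] * 100] for p in points]   # -> [[120.0, 50.0]]
```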
@@ -471,8 +479,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     # Fill image with red color(set each pixel to red)
     overlay[:] = (0, 0, 0)

-    # img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)
-    img = dst_img.copy()
+    img = cv2.addWeighted(dst_img, .1, overlay, .3, 0)
+    # img = frame.img.copy()

     # all not working:
     # if i == 1:
@@ -499,8 +507,8 @@
             inv_H = np.linalg.pinv(prediction_frame.H)
             # draw_track(img, track, int(track_id))
             draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
-            anim_position = get_animation_position(track, frame)
-            draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, anim_position=anim_position)
+            opacity = get_opacity(track, frame)
+            draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, opacity=opacity)
             cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)

     base_color = (255,)*3
@@ -510,14 +518,13 @@

     cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
     cv2.putText(img, f"{frame.time - first_time: >10.2f}s", (150,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-    cv2.putText(img, f"{frame.time - time.time():.2f}s", (250,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)

     if prediction_frame:
         # render Δt and Δ frames
         cv2.putText(img, f"{tracker_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
         cv2.putText(img, f"{prediction_frame.index - frame.index}", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
-        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (310,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
-        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (380,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (230,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (290,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
         cv2.putText(img, f"{len(tracker_frame.tracks)} tracks", (620,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
         cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (700,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
         cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (780,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
@@ -534,6 +541,6 @@
     return img

-def run_cv_renderer(config: Namespace, is_running: BaseEvent, timer_counter):
+def run_cv_renderer(config: Namespace, is_running: BaseEvent):
     renderer = CvRenderer(config, is_running)
-    renderer.run(timer_counter)
+    renderer.run()
@@ -35,23 +35,10 @@ class DataclassJSONEncoder(json.JSONEncoder):
         if isinstance(o, np.ndarray):
             return o.tolist()
         if dataclasses.is_dataclass(o):
+            d = dataclasses.asdict(o)
             if isinstance(o, Frame):
-                tracks = {}
-                for track_id, track in o.tracks.items():
-                    track_obj = dataclasses.asdict(track)
-                    track_obj['history'] = track.get_projected_history(None, o.camera)
-                    tracks[track_id] = track_obj
-                d = {
-                    'index': o.index,
-                    'time': o.time,
-                    'tracks': tracks,
-                    'camera': dataclasses.asdict(o.camera),
-                }
-            else:
-                d = dataclasses.asdict(o)
-            # if isinstance(o, Frame):
-            #     # Don't send images over JSON
-            #     del d['img']
+                # Don't send images over JSON
+                del d['img']
             return d
         return super().default(o)
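With the simplified encoder, any dataclass is serialized via `dataclasses.asdict()` and a `Frame` merely loses its `img` payload before going over the wire, e.g.:

```python
import json

# `frame` is assumed to be a Frame instance from the frame emitter;
# the encoder drops the heavy `img` ndarray and turns remaining
# numpy arrays into plain lists.
payload = json.dumps(frame, cls=DataclassJSONEncoder)
# equivalently, as PredictionServer.send_frame does:
# prediction_socket.send_json(frame, cls=DataclassJSONEncoder)
```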
@@ -420,9 +407,6 @@ class Frame:
             } for t in self.tracks.values()
         }

-    def without_img(self):
-        return Frame(self.index, None, self.time, self.tracks, self.H, self.camera)
-
 def video_src_from_config(config) -> UrlOrPath:
     if config.video_loop:
         video_srcs: Iterable[UrlOrPath] = cycle(config.video_src)
@@ -444,18 +428,13 @@ class FrameEmitter:
         self.frame_sock = context.socket(zmq.PUB)
         self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
         self.frame_sock.bind(config.zmq_frame_addr)

-        self.frame_noimg_sock = context.socket(zmq.PUB)
-        self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
-        self.frame_noimg_sock.bind(config.zmq_frame_noimg_addr)
-
         logger.info(f"Connection socket {config.zmq_frame_addr}")
-        logger.info(f"Connection socket {config.zmq_frame_noimg_addr}")

         self.video_srcs = video_src_from_config(self.config)

-    def emit_video(self, timer_counter):
+    def emit_video(self):
         i = 0
         delay_generation = False
         for video_path in self.video_srcs:
|
||||||
video.set(cv2.CAP_PROP_FPS, 5)
|
video.set(cv2.CAP_PROP_FPS, 5)
|
||||||
fps=5
|
fps=5
|
||||||
elif video_path.url.scheme == 'rtsp':
|
elif video_path.url.scheme == 'rtsp':
|
||||||
gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
|
gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
|
||||||
logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
|
logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
|
||||||
video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
|
video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
|
||||||
fps=12
|
fps=12
|
||||||
|
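`appsink max-buffers=1 drop=true` keeps only the newest decoded frame, so a slow consumer stays close to real time instead of accumulating latency (with `max-buffers=0` the queue is unbounded). A sketch of opening such a pipeline — this assumes an OpenCV build with GStreamer support, and the URL is a placeholder:

```python
import cv2

location = "rtsp://camera.local/stream"  # placeholder URL
gst = (
    f"rtspsrc location={location} latency=0 buffer-mode=auto "
    "! decodebin ! videoconvert "
    "! appsink max-buffers=1 drop=true"  # hold one frame, drop the rest
)
video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
ret, img = video.read()  # the most recent frame, not a backlog
```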
@@ -504,11 +483,7 @@

             prev_time = time.time()

             while self.is_running.is_set():
-                with timer_counter.get_lock():
-                    timer_counter.value += 1
-
                 ret, img = video.read()

                 # seek to 0 if video has finished. Infinite loop
@@ -527,7 +502,6 @@
                 frame = Frame(index=i, img=img, H=self.config.H, camera=self.config.camera)
                 # TODO: this is very dirty, need to find another way.
                 # perhaps multiprocessing Array?
-                self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
                 self.frame_sock.send(pickle.dumps(frame))

                 # only delay consuming the next frame when using a file.
@@ -553,7 +527,7 @@

-def run_frame_emitter(config: Namespace, is_running: Event, timer_counter: int):
+def run_frame_emitter(config: Namespace, is_running: Event):
     router = FrameEmitter(config, is_running)
-    router.emit_video(timer_counter)
+    router.emit_video()
     is_running.clear()
@@ -13,7 +13,6 @@ from trap.prediction_server import run_prediction_server
 from trap.preview_renderer import run_preview_renderer
 from trap.animation_renderer import run_animation_renderer
 from trap.socket_forwarder import run_ws_forwarder
-from trap.timer import TimerCollection
 from trap.tracker import run_tracker

 from setproctitle import setproctitle, setthreadtitle
@@ -84,45 +83,31 @@ def start():

     # queue_listener.handlers.append(socket_handler)

-    timers = TimerCollection()
-    timer_fe = timers.new('frame_emitter')
-    timer_tracker = timers.new('tracker')
-
     # instantiating process with arguments
     procs = [
         # ExceptionHandlingProcess(target=run_ws_forwarder, kwargs={'config': args, 'is_running': isRunning}, name='forwarder'),
-        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_fe.iterations}, name='frame_emitter'),
-        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_tracker.iterations}, name='tracker'),
+        ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning}, name='frame_emitter'),
+        ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
     ]

-    # if args.render_file or args.render_url or args.render_window:
-    if args.render_window or args.render_file or args.render_url:
-        timer_preview = timers.new('preview')
-        procs.append(
-            # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
-            ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_preview.iterations}, name='preview')
-        )
-    if args.render_animation:
-        procs.append(
-            ExceptionHandlingProcess(target=run_animation_renderer, kwargs={'config': args, 'is_running': isRunning}, name='renderer')
-        )
+    if args.render_file or args.render_url or args.render_window:
+        if not args.render_no_preview: #or args.render_file or args.render_url:
+            procs.append(
+                # ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
+                ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
+            )
+        if args.render_no_preview:
+            procs.append(
+                ExceptionHandlingProcess(target=run_animation_renderer, kwargs={'config': args, 'is_running': isRunning}, name='renderer')
+            )

     if not args.bypass_prediction:
-        timer_predict = timers.new('predict')
         procs.append(
-            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning, 'timer_counter': timer_predict.iterations}, name='inference'),
+            ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning}, name='inference'),
         )

-    def timer_process(timers: TimerCollection, is_running: Event):
-        while is_running.is_set():
-            time.sleep(1)
-            timers.snapshot()
-            print(timers.to_string())
-
-    procs.append(
-        ExceptionHandlingProcess(target=timer_process, kwargs={'is_running':isRunning, 'timers': timers}, name='timer'),
-    )
-
     try:
         logger.info("start")
         for proc in procs:
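The plumbing removed here follows a common pattern: each worker increments a shared `multiprocessing.Value` under its lock, and a monitor process samples the counters once a second. A self-contained sketch of that pattern (simplified; names are illustrative, not the repo's):

```python
import time
from multiprocessing import Process, Value, Event

def worker(counter, running):
    while running.is_set():
        with counter.get_lock():   # Value('i', 0) needs its lock for +=
            counter.value += 1
        time.sleep(0.01)           # stand-in for real per-frame work

if __name__ == "__main__":
    counter = Value('i', 0)
    running = Event()
    running.set()
    p = Process(target=worker, args=(counter, running))
    p.start()
    time.sleep(1)
    running.clear()
    p.join()
    print(f"~{counter.value} iterations/s")  # crude throughput estimate
```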
@@ -166,11 +166,11 @@ class PredictionServer:
     def send_frame(self, frame: Frame):
         if self.external_predictions:
             # data = json.dumps(frame, cls=DataclassJSONEncoder)
-            self.prediction_socket.send_json(obj=frame, cls=DataclassJSONEncoder)
+            self.prediction_socket.send_json(frame, cls=DataclassJSONEncoder)
         else:
             self.prediction_socket.send_pyobj(frame)

-    def run(self, timer_counter):
+    def run(self):
         print(self.config)
         if self.config.seed is not None:
             random.seed(self.config.seed)
@@ -247,8 +247,6 @@
         prev_run_time = 0
         while self.is_running.is_set():
             timestep += 1
-            with timer_counter.get_lock():
-                timer_counter.value+=1

             # this_run_time = time.time()
             # logger.debug(f'test {prev_run_time - this_run_time}')
@@ -479,7 +477,7 @@

-def run_prediction_server(config: Namespace, is_running: Event, timer_counter):
+def run_prediction_server(config: Namespace, is_running: Event):

     # attempt to trace the warnings coming from pytorch
     # def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
@@ -490,4 +488,4 @@

     # warnings.showwarning = warn_with_traceback
     s = PredictionServer(config, is_running)
-    s.run(timer_counter)
+    s.run()
@@ -18,13 +18,12 @@ import tempfile
 from pathlib import Path
 import shutil
 import math
-from typing import List, Optional
+from typing import Optional

 from pyglet import shapes
 from PIL import Image

-from trap.utils import convert_world_points_to_img_points
 from trap.frame_emitter import DetectionState, Frame, Track, Camera
@@ -87,7 +86,7 @@ class DrawnTrack:

         self.track = track
         # self.H = H
-        self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(None, self.camera)
+        self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(self.H, self.camera)

         # perhaps only do in constructor:
         self.inv_H = np.linalg.pinv(self.H)
|
||||||
# color = (128,0,128) if pred_i else (128,
|
# color = (128,0,128) if pred_i else (128,
|
||||||
|
|
||||||
|
|
||||||
def update_drawn_positions(self, dt) -> List:
|
def update_drawn_positions(self, dt) -> []:
|
||||||
'''
|
'''
|
||||||
use dt to lerp the drawn positions in the direction of current prediction
|
use dt to lerp the drawn positions in the direction of current prediction
|
||||||
'''
|
'''
|
||||||
# TODO: make lerp, currently quick way to get results
|
# TODO: make lerp, currently quick way to get results
|
||||||
|
|
||||||
def int_or_not(v):
|
|
||||||
"""quick wrapper to toggle int'ing"""
|
|
||||||
return v
|
|
||||||
# return int(v)
|
|
||||||
|
|
||||||
# 1. track history
|
# 1. track history
|
||||||
for i, pos in enumerate(self.drawn_positions):
|
for i, pos in enumerate(self.drawn_positions):
|
||||||
self.drawn_positions[i][0] = self.coords[i][0]
|
self.drawn_positions[i][0] = int(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
|
||||||
self.drawn_positions[i][1] = self.coords[i][1]
|
self.drawn_positions[i][1] = int(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
|
||||||
# self.drawn_positions[i][0] = int_or_not(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
|
|
||||||
# self.drawn_positions[i][1] = int_or_not(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
|
|
||||||
# print(self.drawn_positions)
|
|
||||||
|
|
||||||
if len(self.coords) > len(self.drawn_positions):
|
if len(self.coords) > len(self.drawn_positions):
|
||||||
self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
|
self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
|
||||||
|
@ -137,8 +128,8 @@ class DrawnTrack:
|
||||||
# 2. history as seen by predictor (Trajectron)
|
# 2. history as seen by predictor (Trajectron)
|
||||||
for i, pos in enumerate(self.drawn_pred_history):
|
for i, pos in enumerate(self.drawn_pred_history):
|
||||||
if len(self.pred_history_coords) > i:
|
if len(self.pred_history_coords) > i:
|
||||||
self.drawn_pred_history[i][0] = int_or_not(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
|
self.drawn_pred_history[i][0] = int(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
|
||||||
self.drawn_pred_history[i][1] = int_or_not(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
|
self.drawn_pred_history[i][1] = int(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
|
||||||
|
|
||||||
if len(self.pred_history_coords) > len(self.drawn_pred_history):
|
if len(self.pred_history_coords) > len(self.drawn_pred_history):
|
||||||
self.drawn_pred_history.extend(self.coords[len(self.drawn_pred_history):])
|
self.drawn_pred_history.extend(self.coords[len(self.drawn_pred_history):])
|
||||||
|
@ -156,7 +147,7 @@ class DrawnTrack:
|
||||||
r = exponentialDecay(drawn_r, pred_r, decay, dt)
|
r = exponentialDecay(drawn_r, pred_r, decay, dt)
|
||||||
angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
|
angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
|
||||||
x, y = relativePolarToPoint(origin, r, angle)
|
x, y = relativePolarToPoint(origin, r, angle)
|
||||||
self.drawn_predictions[a][i] = int_or_not(x), int_or_not(y)
|
self.drawn_predictions[a][i] = int(x), int(y)
|
||||||
# self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
|
# self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
|
||||||
# self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
|
# self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
|
||||||
|
|
||||||
|
@ -168,130 +159,109 @@ class DrawnTrack:
|
||||||
|
|
||||||
# self.drawn_positions = self.coords
|
# self.drawn_positions = self.coords
|
||||||
|
|
||||||
|
|
||||||
# finally: update shapes from coordinates
|
# finally: update shapes from coordinates
|
||||||
self.update_shapes(dt)
|
self.update_shapes(dt)
|
||||||
return self.drawn_positions
|
return self.drawn_positions
|
||||||
|
|
||||||
def update_shapes(self, dt):
|
def update_shapes(self, dt):
|
||||||
|
if len(self.shapes) > len(self.drawn_positions):
|
||||||
|
self.shapes = self.shapes[:len(self.drawn_positions)]
|
||||||
|
|
||||||
drawn_positions = convert_world_points_to_img_points(self.coords[:500]) # TODO)) Glitch in self.drawn_positions, now also capped
|
# for i, pos in self.drawn_positions.enumerate():
|
||||||
drawn_pred_history = convert_world_points_to_img_points(self.drawn_pred_history)
|
for ci in range(1, len(self.drawn_positions)):
|
||||||
drawn_predictions = [convert_world_points_to_img_points(p) for p in self.drawn_predictions]
|
x, y = [int(p) for p in self.drawn_positions[ci-1]]
|
||||||
# positions = convert_world_points_to_img_points(self.drawn_predictions)
|
x2, y2 = [int(p) for p in self.drawn_positions[ci]]
|
||||||
|
|
||||||
# print("drawn",
|
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
|
||||||
# drawn_positions,'self', self.drawn_positions
|
color = [100+155*ci // len(self.drawn_positions)]*3
|
||||||
# )
|
# print(x,y,x2,y2,color)
|
||||||
|
|
||||||
if len(self.shapes) > len(drawn_positions):
|
if ci >= len(self.shapes):
|
||||||
self.shapes = self.shapes[:len(drawn_positions)]
|
# TODO: add color2
|
||||||
|
line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
|
||||||
# for i, pos in drawn_positions.enumerate():
|
line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
|
||||||
draw_dot = False # if False, draw line
|
line.opacity = 20
|
||||||
for_laser = True
|
self.shapes.append(line)
|
||||||
|
|
||||||
if True:
|
else:
|
||||||
for ci in range(1, len(drawn_positions)):
|
line = self.shapes[ci-1]
|
||||||
x, y = [int(p) for p in drawn_positions[ci-1]]
|
line.x, line.y = x, y
|
||||||
x2, y2 = [int(p) for p in drawn_positions[ci]]
|
line.x2, line.y2 = x2, y2
|
||||||
|
line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
|
||||||
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
|
line.color = color
|
||||||
color = [100+155*ci // len(drawn_positions)]*3
|
line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
|
||||||
# print(x,y,x2,y2,color)
|
|
||||||
|
|
||||||
if ci >= len(self.shapes):
|
|
||||||
# TODO: add color2
|
|
||||||
if draw_dot:
|
|
||||||
line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
|
|
||||||
else:
|
|
||||||
line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
|
|
||||||
line.opacity = 20 if not for_laser else 255
|
|
||||||
self.shapes.append(line)
|
|
||||||
|
|
||||||
else:
|
|
||||||
line = self.shapes[ci-1]
|
|
||||||
line.x, line.y = x, y
|
|
||||||
if draw_dot:
|
|
||||||
line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
|
|
||||||
else:
|
|
||||||
line.x2, line.y2 = x2, y2
|
|
||||||
line.color = color
|
|
||||||
if not for_laser:
|
|
||||||
line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
|
|
||||||
|
|
||||||
# TODO: basically a duplication of the above, do this smarter?
|
# TODO: basically a duplication of the above, do this smarter?
|
||||||
# TODO: add intermediate segment
|
# TODO: add intermediate segment
|
||||||
color = colorset[self.track_id % len(colorset)]
|
color = colorset[self.track_id % len(colorset)]
|
||||||
|
|
||||||
if False:
|
if len(self.pred_history_shapes) > len(self.drawn_pred_history):
|
||||||
if len(self.pred_history_shapes) > len(drawn_pred_history):
|
self.pred_history_shapes = self.pred_history_shapes[:len(self.drawn_pred_history)]
|
||||||
self.pred_history_shapes = self.pred_history_shapes[:len(drawn_pred_history)]
|
|
||||||
|
|
||||||
# for i, pos in drawn_pred_history.enumerate():
|
# for i, pos in self.drawn_pred_history.enumerate():
|
||||||
for ci in range(1, len(drawn_pred_history)):
|
for ci in range(1, len(self.drawn_pred_history)):
|
||||||
x, y = [int(p) for p in drawn_pred_history[ci-1]]
|
x, y = [int(p) for p in self.drawn_pred_history[ci-1]]
|
||||||
x2, y2 = [int(p) for p in drawn_pred_history[ci]]
|
x2, y2 = [int(p) for p in self.drawn_pred_history[ci]]
|
||||||
|
|
||||||
|
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
|
||||||
|
|
||||||
|
if ci >= len(self.pred_history_shapes):
|
||||||
|
# line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
|
||||||
|
line = pyglet.shapes.Line(x,y ,x2, y2, 2.5, color, batch=self.renderer.batch_anim)
|
||||||
|
# line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
|
||||||
|
line.opacity = 120
|
||||||
|
self.pred_history_shapes.append(line)
|
||||||
|
|
||||||
|
else:
|
||||||
|
line = self.pred_history_shapes[ci-1]
|
||||||
|
line.x, line.y = x, y
|
||||||
|
line.x2, line.y2 = x2, y2
|
||||||
|
# line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
|
||||||
|
line.color = color
|
||||||
|
line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
|
||||||
|
|
||||||
|
|
||||||
|
for a, drawn_predictions in enumerate(self.drawn_predictions):
|
||||||
|
if len(self.pred_shapes) <= a:
|
||||||
|
+                    self.pred_shapes.append([])
+
+                if len(self.pred_shapes[a]) > (len(drawn_predictions) +1):
+                    self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_predictions)]
+
+                # for i, pos in drawn_predictions.enumerate():
+                for ci in range(0, len(drawn_predictions)):
+                    if ci == 0:
+                        continue
+                        # x, y = [int(p) for p in self.drawn_positions[-1]]
+                    else:
+                        x, y = [int(p) for p in drawn_predictions[ci-1]]
+
+                    x2, y2 = [int(p) for p in drawn_predictions[ci]]
+
                     y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
+                    # color = [255,0,0]
+                    # print(x,y,x2,y2,color)
 
-                    if ci >= len(self.pred_history_shapes):
+                    if ci >= len(self.pred_shapes[a]):
+                        # TODO: add color2
                         # line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
-                        line = pyglet.shapes.Line(x,y ,x2, y2, 2.5, color, batch=self.renderer.batch_anim)
-                        # line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
-                        line.opacity = 120
-                        self.pred_history_shapes.append(line)
+                        line = pyglet.shapes.Line(x,y ,x2, y2, 1.5, color, batch=self.renderer.batch_anim)
+                        # line = pyglet.shapes.Arc(x,y ,1.5, thickness=1.5, color=color, batch=self.renderer.batch_anim)
+                        line.opacity = 5
+                        self.pred_shapes[a].append(line)
 
                     else:
-                        line = self.pred_history_shapes[ci-1]
+                        line = self.pred_shapes[a][ci-1]
                         line.x, line.y = x, y
                         line.x2, line.y2 = x2, y2
-                        # line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
                         line.color = color
-                        line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
+                        decay = (16/ci) if ci else 16
+                        half = len(drawn_predictions) / 2
+                        if ci < half:
+                            target_opacity = 60
+                        else:
+                            target_opacity = (1 - ((ci - half) / half)) * 60
+                        line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
 
-        if True:
-            for a, drawn_prediction in enumerate(drawn_predictions):
-                if len(self.pred_shapes) <= a:
-                    self.pred_shapes.append([])
-
-                if len(self.pred_shapes[a]) > (len(drawn_prediction) +1):
-                    self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_prediction)]
-
-                # for i, pos in drawn_predictions.enumerate():
-                for ci in range(0, len(drawn_prediction)):
-                    if ci == 0:
-                        continue
-                        # x, y = [int(p) for p in drawn_positions[-1]]
-                    else:
-                        x, y = [int(p) for p in drawn_prediction[ci-1]]
-
-                    x2, y2 = [int(p) for p in drawn_prediction[ci]]
-
-                    y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
-                    # color = [255,0,0]
-                    # print(x,y,x2,y2,color)
-
-                    if ci >= len(self.pred_shapes[a]):
-                        # TODO: add color2
-                        # line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
-                        line = pyglet.shapes.Line(x,y ,x2, y2, 1.5, color, batch=self.renderer.batch_anim)
-                        # line = pyglet.shapes.Arc(x,y ,1.5, thickness=1.5, color=color, batch=self.renderer.batch_anim)
-                        line.opacity = 5
-                        self.pred_shapes[a].append(line)
-
-                    else:
-                        line = self.pred_shapes[a][ci-1]
-                        line.x, line.y = x, y
-                        line.x2, line.y2 = x2, y2
-                        line.color = color
-                        decay = (16/ci) if ci else 16
-                        half = len(drawn_prediction) / 2
-                        if ci < half:
-                            target_opacity = 60
-                        else:
-                            target_opacity = (1 - ((ci - half) / half)) * 60
-                        line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
 
 
 class FrameWriter:
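Note: both sides of this hunk ease line opacity through exponentialDecay(current, target, decay, dt), a helper defined elsewhere in the repo and not shown in this diff. A minimal sketch of the usual framerate-independent form such a helper takes (an assumption, not the repo's actual implementation):

import math

def exponentialDecay(a: float, b: float, decay: float, dt: float) -> float:
    # ease a towards b; a higher decay reacts faster, and dt (seconds since
    # the previous frame) makes the easing speed independent of framerate
    return b + (a - b) * math.exp(-decay * dt)

# per-frame usage as in the hunk above:
# line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))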
@@ -1,9 +1,6 @@
-import collections
-from re import A
 import time
 from multiprocessing.sharedctypes import RawValue, Value, Array
 from ctypes import c_double
-from typing import MutableSequence
 
 
 class Timer():
@@ -11,58 +8,47 @@ class Timer():
     Measure 2 independent things: the frequency of tic, and the duration of tic->toc
     Note that indeed these don't need to be equal
     """
-    def __init__(self, name = 'timer') -> None:
-        self.name = name
-        self.tocs: MutableSequence[(float, int)] = collections.deque(maxlen=5)
-        self.iterations = Value('i', 0)
-
-    # def tic(self):
-    #     now = time.time()
-    #     if self.last_tic is None:
-    #         self.last_tic = now
-    #         return
-
-    #     duration = now - self.last_tic
-    #     self.last_tic = now
-
-    #     current_fps = 1 / duration
-    #     if not self.fps:
-    #         self.fps = current_fps
-    #     else:
-    #         self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
+    def __init__(self) -> None:
+        self.last_tic = RawValue(c_double, -1)
+        self.last_toc = RawValue(c_double, -1)
+        self.fps = RawValue(c_double, -1)
+        self.processing_duration = RawValue(c_double, -1)
+
+        self.smoothing = .1
+
+    def tic(self):
+        now = time.time()
+        if self.last_tic is None:
+            self.last_tic = now
+            return
+
+        duration = now - self.last_tic
+        self.last_tic = now
+
+        current_fps = 1 / duration
+        if not self.fps:
+            self.fps = current_fps
+        else:
+            self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
 
     def toc(self):
-        self.iterations += 1
-
-    def snapshot(self):
-        self.tocs.append((time.perf_counter(), self.iterations.value))
+        self.last_toc = time.time()
+        duration = self.last_toc - self.last_tic
+        self.processing_duration = self.processing_duration * (1-self.smoothing) + duration * self.smoothing
 
     @property
     def fps(self):
-        fpses = []
-        if len(self.tocs) < 2:
-            return 0
-        dt = self.tocs[-1][0] - self.tocs[0][0]
-        di = self.tocs[-1][1] - self.tocs[0][1]
-        return di/dt
+        pass
 
 
 class TimerCollection():
     def __init__(self) -> None:
         self._timers = set()
 
-    def snapshot(self):
-        for timer in self._timers:
-            timer.snapshot()
-
-    def to_string(self)->str:
-        strs = [f"{t.name} {t.fps:.2f}" for t in self._timers]
-        return " ".join(strs)
-
-    def new(self, name='timer'):
-        t = Timer(name)
-        self._timers.add(t)
-        return t
+    def print(self)->str:
+        print('Update', end='\r')
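Note on the two Timer designs: the left-hand version derives its rate from a short deque of (timestamp, iteration) snapshots (di/dt over the window), while the right-hand one smooths instantaneous measurements with an exponential moving average. A standalone worked example of that EMA update with the same smoothing factor of .1 (illustration only, not code from the repo):

smoothing = .1
fps = 0.0
for current_fps in [10.0, 10.0, 30.0, 30.0]:
    # each sample nudges the running value 10% of the way towards itself
    fps = fps * (1 - smoothing) + current_fps * smoothing if fps else current_fps
print(f"{fps:.2f}")  # 13.80 -- a jump from 10 to 30 fps is followed only gradually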
@@ -22,7 +22,7 @@ from ultralytics import YOLO
 from ultralytics.engine.results import Results as YOLOResult
 import tqdm
 
-from trap.utils import inv_lerp, lerp
+from trap.utils import lerp
@@ -185,82 +185,40 @@ def tracker_compare():
 
         bar.set_description(f"[{frames.video_nr}/{len(frames.video_srcs)}] [{frames.frame_idx}/{frames.frame_count}] {str(frames.video_path)}")
 
-def transition_path_points(path: np.array, t: float):
-    """
-    """
-    if t >= 1:
-        return path
-    if t <= 0:
-        return np.array([path[0]])
-
-    # new_path = np.array([])
-    lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
-    cum_lenghts = np.cumsum(lengths)
-    # distance = cum_lenghts[-1] * t
-    ts = np.concatenate((np.array([0.]), cum_lenghts / cum_lenghts[-1]))
-    new_path = [path[0]]
-
-    for a, b, t_a, t_b in zip(path[:-1], path[1:], ts[:-1], ts[1:]):
-        if t_b < t:
-            new_path.append(b)
-            continue
-        # interpolate
-        relative_t = inv_lerp(t_a, t_b, t)
-        x = lerp(a[0], b[0], relative_t)
-        y = lerp(a[1], b[1], relative_t)
-        print(relative_t, a , b, x, y)
-        new_path.append([x,y])
-        break
-    return np.array(new_path)
-
-def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], anim_position=.8):
-    """
-    anim_position: 0-1
+def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], opacity=1):
+    """
+    Opacity: 0-1
     """
     if not track.predictions:
         return
 
     current_point = track.get_projected_history(camera=camera)[-1]
 
-    opacity = 1-min(1, max(0, inv_lerp(0.8, 1, anim_position))) # fade out
-    slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position
-
-    # if convert_points:
-    #     current_point = convert_points([current_point])[0]
+    if convert_points:
+        current_point = convert_points([current_point])[0]
 
     for pred_i, pred in enumerate(track.predictions):
         pred_coords = pred #cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
-        # line_points = np.concatenate(([current_point], pred_coords)) # 'current point' is a moving target
-        line_points = pred_coords
-        # print(pred_coords, current_point, line_points)
-        line_points = transition_path_points(line_points, slide_t)
         if convert_points:
-            line_points = convert_points(line_points)
+            pred_coords = convert_points(pred_coords)
         # color = (128,0,128) if pred_i else (128,128,0)
 
         color = bgr_colors[color_index % len(bgr_colors)]
         color = tuple([int(c*opacity) for c in color])
 
-        for start, end in zip(line_points[:-1], line_points[1:]):
-            # for ci in range(0, len(pred_coords)):
-            #     if ci == 0:
-            #         # TODO)) prev point
-            #         # continue
-            #         start = [int(p) for p in current_point]
-            #         # start = [int(p) for p in coords[-1]]
-            #         # start = [0,0]?
-            #         # print(start)
-            #     else:
-            #         start = [int(p) for p in pred_coords[ci-1]]
-            #     end = [int(p) for p in pred_coords[ci]]
-            # print(np.rint(start),np.rint(end).tolist())
-            cv2.line(img, np.rint(start).astype(int), np.rint(end).astype(int), color, 1, lineType=cv2.LINE_AA)
+        for ci in range(0, len(pred_coords)):
+            if ci == 0:
+                # TODO)) prev point
+                # continue
+                start = [int(p) for p in current_point]
+                # start = [int(p) for p in coords[-1]]
+                # start = [0,0]?
+                # print(start)
+            else:
+                start = [int(p) for p in pred_coords[ci-1]]
+            end = [int(p) for p in pred_coords[ci]]
+            cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
             # cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)
 
 def draw_trackjectron_history(img: cv2.Mat, track: Track, color_index: int, convert_points: Optional[Callable]):
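Note: the deleted transition_path_points is the piece that made predictions "slide out" over time. It cuts a polyline at fraction t of its total arc length, interpolating the final point. A standalone worked example of that cut, with lerp/inv_lerp reimplemented here from the gist referenced in trap/utils:

import numpy as np

def lerp(a: float, b: float, t: float) -> float:
    return (1 - t) * a + t * b

def inv_lerp(a: float, b: float, v: float) -> float:
    return (v - a) / (b - a)

path = np.array([[0., 0.], [1., 0.], [2., 0.], [4., 0.]])  # segment lengths 1, 1, 2
lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
ts = np.concatenate((np.array([0.]), np.cumsum(lengths) / np.sum(lengths)))
print(ts)  # [0.   0.25 0.5  1.  ]
# For t = 0.75 the cut lands halfway into the last segment:
t_a, t_b = ts[2], ts[3]
relative_t = inv_lerp(t_a, t_b, 0.75)            # 0.5
print(lerp(path[2][0], path[3][0], relative_t))  # 3.0 -> path is cut at [3, 0]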
@@ -9,7 +9,7 @@ from multiprocessing import Event
 from pathlib import Path
 import pickle
 import time
-from typing import Dict, Optional, List
+from typing import Optional, List
 import jsonlines
 import numpy as np
 import torch
@@ -61,7 +61,7 @@ RCNN_SCALE = .4 # seems to have no impact on detections in the corners
 
 def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:
 
-    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))
+    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, **kwargs))
     if results[0].boxes is None or results[0].boxes.id is None:
         # work around https://github.com/ultralytics/ultralytics/issues/5968
         return []
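For context on this call: Ultralytics' model.track() keeps ByteTrack state across calls when persist=True, which is what lets per-frame invocation (as here) behave like tracking one continuous stream. A minimal standalone sketch of that API (weights file and video path are placeholders):

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder weights
# stream=True yields results frame by frame; classes=[0] keeps only persons.
# Passing a near-zero conf (as on the left side) hands nearly all raw
# detections to the tracker and lets its own thresholds do the filtering.
for result in model.track("video.mp4", classes=[0], conf=0.00001, stream=True):
    if result.boxes is None or result.boxes.id is None:
        continue  # same guard as the workaround above
    print(result.boxes.id.int().tolist())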
@@ -98,10 +98,6 @@ class TrackFilter:
 
     def apply(self, tracks: List[Track], camera: Camera):
         return [t for t in tracks if self.filter(t, camera)]
 
-    def apply_to_dict(self, tracks: Dict[str, Track], camera: Camera):
-        tracks = self.apply(tracks.values(), camera)
-        return {t.track_id: t for t in tracks}
-
 class FinalDisplacementFilter(TrackFilter):
     def __init__(self, min_displacement):
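The removed apply_to_dict is a thin adapter so the list-based filters also work on the {track_id: Track} mappings the tracker keeps internally. The pattern, reduced to a standalone sketch with stand-in types and a stand-in predicate:

from dataclasses import dataclass
from typing import Dict, List

@dataclass
class Track:  # stand-in for trap's Track
    track_id: str
    displacement: float

def apply(tracks: List[Track]) -> List[Track]:
    return [t for t in tracks if t.displacement >= .2]  # stand-in filter

def apply_to_dict(tracks: Dict[str, Track]) -> Dict[str, Track]:
    # filter the values, then rebuild the mapping from the survivors
    return {t.track_id: t for t in apply(list(tracks.values()))}

print(apply_to_dict({"1": Track("1", 3.0), "2": Track("2", 0.05)}))  # keeps "1"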
@@ -448,7 +444,7 @@ class Tracker:
 
     def track_frame(self, frame: Frame):
         if self.config.detector == DETECTOR_YOLOv8:
-            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
+            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0], imgsz=[1152, 640])
         else :
             detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)
 
@@ -462,7 +458,7 @@ class Tracker:
         return detections
 
 
-    def track(self, is_running: Event, timer_counter: int = 0):
+    def track(self, is_running: Event):
         """
         Live tracking of frames coming in over zmq
         """
@@ -504,11 +500,7 @@ class Tracker:
         end_time = None
         tracker_dt = None
         w_time = None
-        displacement_filter = FinalDisplacementFilter(.2)
         while self.is_running.is_set():
 
-            with timer_counter.get_lock():
-                timer_counter.value += 1
-
             # this waiting for target_dt causes frame loss. E.g. with target_dt at .1, it
             # skips exactly 1 frame on a 10 fps video (which, it obviously should not do)
             # so for now, timing should move to emitter
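The timer_counter being stripped out here follows the standard multiprocessing.Value pattern: a shared integer the worker increments under its lock so a supervising process can derive an iteration rate. A standalone sketch of that pattern (illustration, not the repo's actual wiring):

from multiprocessing import Process, Value

def worker(counter):
    for _ in range(1000):
        with counter.get_lock():  # the lock prevents lost increments
            counter.value += 1

if __name__ == "__main__":
    counter = Value('i', 0)
    procs = [Process(target=worker, args=(counter,)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # 4000: every increment from every process counted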
@@ -574,9 +566,6 @@ class Tracker:
             # }
             active_track_ids = [d.track_id for d in detections]
             active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
-            active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera) # a filter to remove detections that are merely static objects
 
             # active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
             # active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
             # logger.info(f"{trajectories}")
@@ -590,7 +579,7 @@ class Tracker:
             frame = self.smoother.smooth_frame_tracks(frame)
 
             # print(f"send to {self.trajectory_socket}, {self.config.zmq_trajectory_addr}")
-            self.trajectory_socket.send_pyobj(frame.without_img()) # ditch image for faster passthrough
+            self.trajectory_socket.send_pyobj(frame)
 
             end_time = time.time()
             tracker_dt = end_time - start_time
@@ -676,9 +665,9 @@ class Tracker:
         return [([d[0], d[1], d[2]-d[0], d[3]-d[1]], d[4], d[5]) for d in detections]
 
 
-def run_tracker(config: Namespace, is_running: Event, timer_counter):
+def run_tracker(config: Namespace, is_running: Event):
     router = Tracker(config)
-    router.track(is_running, timer_counter)
+    router.track(is_running)
@@ -1,12 +1,4 @@
 # lerp & inverse lerp from https://gist.github.com/laundmo/b224b1f4c8ef6ca5fe47e132c8deab56
-import linecache
-import os
-import tracemalloc
-from typing import Iterable
-import cv2
-import numpy as np
 
 
 def lerp(a: float, b: float, t: float) -> float:
     """Linear interpolate on the scale given by a to b, using t as the point on that scale.
     Examples
@@ -28,44 +20,4 @@ def inv_lerp(a: float, b: float, v: float) -> float:
 
 
 def get_bins(bin_size: float):
     return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
 
 
-def convert_world_space_to_img_space(H: cv2.Mat):
-    """Transform the given matrix so that it immediately converts
-    the points to img space"""
-    new_H = H.copy()
-    new_H[:2] = H[:2] * 100
-    return new_H
-
-def convert_world_points_to_img_points(points: Iterable):
-    """Transform the given points so that they are immediately converted
-    to img space"""
-    if isinstance(points, np.ndarray):
-        return np.array(points) * 100
-    return [[p[0]*100, p[1]*100] for p in points]
-
-def display_top(snapshot: tracemalloc.Snapshot, key_type='lineno', limit=5):
-    snapshot = snapshot.filter_traces((
-        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
-        tracemalloc.Filter(False, "<unknown>"),
-    ))
-    top_stats = snapshot.statistics(key_type)
-
-    print("Top %s lines" % limit)
-    for index, stat in enumerate(top_stats[:limit], 1):
-        frame = stat.traceback[0]
-        # replace "/path/to/module/file.py" with "module/file.py"
-        filename = os.sep.join(frame.filename.split(os.sep)[-2:])
-        print("#%s: %s:%s: %.1f KiB"
-              % (index, filename, frame.lineno, stat.size / 1024))
-        line = linecache.getline(frame.filename, frame.lineno).strip()
-        if line:
-            print('    %s' % line)
-
-    other = top_stats[limit:]
-    if other:
-        size = sum(stat.size for stat in other)
-        print("%s other: %.1f KiB" % (len(other), size / 1024))
-    total = sum(stat.size for stat in top_stats)
-    print("Total allocated size: %.1f KiB" % (total / 1024))
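The removed convert_world_space_to_img_space bakes a fixed scale into a homography: multiplying the first two rows by 100 makes projected world coordinates (metres) come out directly in map pixels, assuming the map is rendered at 100 px per metre. A small standalone check of that trick:

import numpy as np

H = np.eye(3)              # identity homography, for the example only
H_img = H.copy()
H_img[:2] = H[:2] * 100    # same row scaling as the removed helper
p = np.array([1.5, 2.0, 1.0])   # homogeneous world point in metres
print((H_img @ p)[:2])          # [150. 200.] -> pixel coordinates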