fixes during playtest
parent dc5e2ff28c
commit 888ef7ff93
5 changed files with 180 additions and 88 deletions
@@ -2,6 +2,7 @@
from __future__ import annotations

import time
+import tracemalloc
import ffmpeg
from argparse import Namespace
import datetime
@@ -26,6 +27,7 @@ import json

from trap.frame_emitter import DetectionState, Frame, Track
from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
+from trap.utils import convert_world_space_to_img_space, display_top


logger = logging.getLogger("trap.renderer")
@@ -35,6 +37,9 @@ COLOR_PRIMARY = (255,255,255, 255)

class AnimationRenderer:
    def __init__(self, config: Namespace, is_running: BaseEvent):
+
+
+        tracemalloc.start()
        self.config = config
        self.is_running = is_running

@@ -62,7 +67,9 @@ class AnimationRenderer:
        # self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
        self.fps = 60
        self.frame_size = (self.config.camera.w,self.config.camera.h)
-        self.hide_stats = False
+        self.hide_stats = self.config.render_hide_stats
+        self.hide_bg = True
+        self.pause = False
        self.out_writer = None # self.start_writer() if self.config.render_file else None
        self.streaming_process = self.start_streaming() if self.config.render_url else None

@@ -78,7 +85,8 @@ class AnimationRenderer:
        # , fullscreen=self.config.render_window

        display = pyglet.canvas.get_display()
-        screen = display.get_screens()[0]
+        idx = -1 if self.config.render_window else 0
+        screen = display.get_screens()[idx]
        print(display.get_screens())

        if self.streaming_process is not None:
@@ -88,6 +96,7 @@ class AnimationRenderer:
        self.window.set_handler('on_draw', self.on_draw)
        self.window.set_handler('on_refresh', self.on_refresh)
        self.window.set_handler('on_close', self.on_close)
+        self.window.set_handler('on_key_press', self.on_key_press)

        # don't know why, but importing this before window leads to "x connection to :1 broken (explicit kill or server shutdown)"
        from pyglet_cornerpin import PygletCornerPin
@@ -99,8 +108,11 @@ class AnimationRenderer:
        self.pins = PygletCornerPin(
            self.window,
            source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
-            corners=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
-            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
+            # corners=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]], # original test: short throw?
+            # corners=[[396, 442], [1644, 734], [350, 516], [1572, 796]], # beamer downstairs
+            # corners=[[270, 452], [1698, 784], [314, 568], [1572, 860]], # ??
+            # corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]] # ??
+            # corners=[[576, 706], [1790, 696], [588, 794], [1728, 796]], # beamer boven
            )
        self.window.push_handlers(self.pins)

@@ -123,16 +135,20 @@ class AnimationRenderer:
        self.batch_bg = pyglet.graphics.Batch()
        self.batch_overlay = pyglet.graphics.Batch()
        self.batch_anim = pyglet.graphics.Batch()
+        self.batch_debug = pyglet.graphics.Batch()

-        if self.config.render_debug_shapes:
-            self.debug_lines = [
-                pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay),#v
-                pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
-                pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
-                pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #v
-                pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
+        # if self.config.render_debug_shapes:
+        self.render_debug_shapes = self.config.render_debug_shapes
+        self.render_lines = True
+
+        self.debug_lines = [
+            pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug),#v
+            pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
+            pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
+            pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #v
+            pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h

        ]

        self.debug_points = []
        # print(self.config.debug_points_file)
@@ -149,7 +165,7 @@ class AnimationRenderer:
            dst_img_points = np.reshape(dst_img_points, (dst_img_points.shape[0], 2))

            self.debug_points = [
-                pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_overlay) for p in dst_img_points
+                pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_debug) for p in dst_img_points
            ]

@@ -262,13 +278,19 @@ class AnimationRenderer:
            self.labels['frame_idx'].text = f"{self.frame.index:06d}"
            self.labels['frame_time'].text = f"{self.frame.time - self.first_time: >10.2f}s"
            self.labels['frame_latency'].text = f"{self.frame.time - time.time():.2f}s"

+            if self.frame.time - self.first_time > 30 and (not hasattr(self, 'has_snap') or self.has_snap == False):
+                snapshot = tracemalloc.take_snapshot()
+                display_top(snapshot, 'traceback', 15)
+                tracemalloc.stop()
+                self.has_snap = True
+
-        if self.tracker_frame:
+        if self.tracker_frame and self.frame:
            self.labels['tracker_idx'].text = f"{self.tracker_frame.index - self.frame.index}"
            self.labels['tracker_time'].text = f"{self.tracker_frame.time - time.time():.3f}s"
            self.labels['track_len'].text = f"{len(self.tracker_frame.tracks)} tracks"

-        if self.prediction_frame:
+        if self.prediction_frame and self.frame:
            self.labels['pred_idx'].text = f"{self.prediction_frame.index - self.frame.index}"
            self.labels['pred_time'].text = f"{self.prediction_frame.time - time.time():.3f}s"
            # self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"
@@ -297,6 +319,9 @@ class AnimationRenderer:


    def check_frames(self, dt):
+        if self.pause:
+            return
+
        new_tracks = False
        try:
            self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
@@ -305,15 +330,20 @@ class AnimationRenderer:
            img = self.frame.img
            # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
            img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
-            img = cv2.warpPerspective(img, self.config.camera.H, (self.config.camera.w, self.config.camera.h))
+            img = cv2.warpPerspective(img, convert_world_space_to_img_space(self.config.camera.H), (self.config.camera.w, self.config.camera.h))
            # img = cv2.GaussianBlur(img, (15, 15), 0)
            img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
            img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
            # don't draw in batch, so that it is the background
+            if hasattr(self, 'video_sprite') and self.video_sprite:
+                self.video_sprite.delete()
+                self.frame.img = None
+
            self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
            # transform to flipped coordinate system for pyglet
            self.video_sprite.y = self.window.height - self.video_sprite.height
-            self.video_sprite.opacity = 70
+            # self.frame.img = np.array([]) # clearing memory?
+            # self.video_sprite.opacity = 70
        except zmq.ZMQError as e:
            # idx = frame.index if frame else "NONE"
            # logger.debug(f"reuse video frame {idx}")
@@ -370,11 +400,20 @@ class AnimationRenderer:
            self.window.set_fullscreen(not self.window.fullscreen)
        if symbol == ord('h'):
            self.hide_stats = not self.hide_stats
+        if symbol == ord('d'):
+            self.render_debug_shapes = not self.render_debug_shapes
+        if symbol == ord('p'):
+            self.pause = not self.pause
+        if symbol == ord('b'):
+            self.hide_bg = not self.hide_bg
+        if symbol == ord('l'):
+            self.render_lines = not self.render_lines

    def check_running(self, dt):
        if not self.is_running.is_set():
            self.window.close()
            self.event_loop.exit()
+            print('quit animation renderer')

    def on_close(self):
        self.is_running.clear()
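Note on the new on_key_press handler: it wires up the playtest toggles, with 'h' hiding the stats overlay, 'd' the debug shapes, 'p' pausing frame updates, 'b' hiding the background video and 'l' the track/prediction lines. pyglet calls the handler as on_key_press(symbol, modifiers), and its key symbols for plain letters equal the lowercase ASCII codes, which is why comparing symbol against ord('h') works. A minimal standalone sketch of the same pattern (a throwaway window and flag, not the project's renderer):

import pyglet

window = pyglet.window.Window()
state = {"hide_stats": False}

def on_key_press(symbol, modifiers):
    # toggle one flag per key, as the renderer does for h/d/p/b/l
    if symbol == ord('h'):
        state["hide_stats"] = not state["hide_stats"]
        print("hide_stats:", state["hide_stats"])

window.set_handler('on_key_press', on_key_press)
pyglet.app.run()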
@@ -389,6 +428,8 @@ class AnimationRenderer:

        self.refresh_labels(dt)
+
+


        # self.shape1 = shapes.Circle(700, 150, 100, color=(50, 0, 30), batch=self.batch_anim)
        # self.shape3 = shapes.Circle(800, 150, 100, color=(100, 225, 30), batch=self.batch_anim)
        pass
@@ -396,24 +437,29 @@ class AnimationRenderer:
    def on_draw(self):
        self.window.clear()

-        self.batch_bg.draw()
-        for track in self.drawn_tracks.values():
-            for shape in track.shapes:
-                shape.draw() # for some reason the batches don't work
-        for track in self.drawn_tracks.values():
-            for shapes in track.pred_shapes:
-                for shape in shapes:
-                    shape.draw()
-        # self.batch_anim.draw()
-        self.batch_overlay.draw()
+        if not self.hide_bg:
+            self.batch_bg.draw()

-        if self.config.render_debug_shapes:
+        if self.render_debug_shapes:
+            self.batch_debug.draw()
            self.pins.draw()

-        # pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))
+        if self.render_lines:
+            for track in self.drawn_tracks.values():
+                for shape in track.shapes:
+                    shape.draw() # for some reason the batches don't work
+            for track in self.drawn_tracks.values():
+                for shapes in track.pred_shapes:
+                    for shape in shapes:
+                        shape.draw()
+            # self.batch_anim.draw()
+
+

+        # pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))

        if not self.hide_stats:
+            self.batch_overlay.draw()
            self.fps_display.draw()

        # if streaming, capture buffer and send
@@ -499,6 +545,7 @@ class AnimationRenderer:
            # cv2.waitKey(1)
        logger.info('Stopping')
        logger.info(f'used corner pins {self.pins.pin_positions}')
+        print(self.pins.pin_positions)


        # if i>2:
@@ -342,6 +342,9 @@ render_parser.add_argument("--render-animation",
render_parser.add_argument("--render-debug-shapes",
                    help="Lines and points for debugging/mapping",
                    action='store_true')
+render_parser.add_argument("--render-hide-stats",
+                    help="Default toggle to hide (switch with 'h')",
+                    action='store_true')
render_parser.add_argument("--full-screen",
                    help="Set Window full screen",
                    action='store_true')
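argparse derives the attribute name from the long option by turning dashes into underscores, so the new flag surfaces as config.render_hide_stats, which is what the animation renderer now reads into self.hide_stats above. A small standalone sketch of that mapping (a throwaway parser, not the project's config module):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--render-hide-stats",
                    help="Default toggle to hide (switch with 'h')",
                    action='store_true')

config = parser.parse_args(["--render-hide-stats"])
print(config.render_hide_stats)  # True; '--render-hide-stats' becomes the attribute render_hide_stats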
@@ -342,7 +342,9 @@ class CvRenderer:
        i=0
        first_time = None

-        cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
+        cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
+        # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
+        cv2.moveWindow("frame", 1920, -1)
        cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)

        while self.is_running.is_set():
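The namedWindow change follows the trick from the gist linked in the diff: create the window as a resizable WINDOW_NORMAL window, move it onto the target display first, and only then flip it to fullscreen, so the fullscreen output lands on the second monitor instead of the primary one. A minimal sketch of that sequence (the 1920 px offset assumes a 1920-wide primary display to the left, as in the code above):

import cv2
import numpy as np

cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
cv2.moveWindow("frame", 1920, -1)  # place the window on the second display before going fullscreen
cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow("frame", np.zeros((720, 1280, 3), dtype=np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()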
@@ -467,8 +469,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
    # Fill image with red color(set each pixel to red)
    overlay[:] = (0, 0, 0)

-    img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)
-    # img = frame.img.copy()
+    # img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)
+    img = dst_img.copy()

    # all not working:
    # if i == 1:
@@ -506,13 +508,14 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,

    cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
    cv2.putText(img, f"{frame.time - first_time: >10.2f}s", (150,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+    cv2.putText(img, f"{frame.time - time.time():.2f}s", (250,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)

    if prediction_frame:
        # render Δt and Δ frames
        cv2.putText(img, f"{tracker_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
        cv2.putText(img, f"{prediction_frame.index - frame.index}", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
-        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (230,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
-        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (290,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
+        cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (310,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
+        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (380,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
        cv2.putText(img, f"{len(tracker_frame.tracks)} tracks", (620,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (700,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (780,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
@@ -87,7 +87,7 @@ class DrawnTrack:

        self.track = track
        # self.H = H
-        self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(self.H, self.camera)
+        self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(None, self.camera)

        # perhaps only do in constructor:
        self.inv_H = np.linalg.pinv(self.H)
@@ -125,8 +125,11 @@ class DrawnTrack:

        # 1. track history
        for i, pos in enumerate(self.drawn_positions):
-            self.drawn_positions[i][0] = int_or_not(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
-            self.drawn_positions[i][1] = int_or_not(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
+            self.drawn_positions[i][0] = self.coords[i][0]
+            self.drawn_positions[i][1] = self.coords[i][1]
+            # self.drawn_positions[i][0] = int_or_not(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
+            # self.drawn_positions[i][1] = int_or_not(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
+            # print(self.drawn_positions)

        if len(self.coords) > len(self.drawn_positions):
            self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
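For context on what this hunk switches off: exponentialDecay is not defined anywhere in this diff, but judging from its call sites it is the usual framerate-independent exponential smoothing of a value towards a target. A sketch of the assumed behaviour, not the project's exact code:

import math

def exponentialDecay(a, b, decay, dt):
    # move a towards b; larger decay or dt means closer to b (signature assumed from the call sites above)
    return b + (a - b) * math.exp(-decay * dt)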
@@ -172,7 +175,7 @@ class DrawnTrack:

    def update_shapes(self, dt):

-        drawn_positions = convert_world_points_to_img_points(self.drawn_positions)
+        drawn_positions = convert_world_points_to_img_points(self.coords[:500]) # TODO)) Glitch in self.drawn_positions, now also capped
        drawn_pred_history = convert_world_points_to_img_points(self.drawn_pred_history)
        drawn_predictions = [convert_world_points_to_img_points(p) for p in self.drawn_predictions]
        # positions = convert_world_points_to_img_points(self.drawn_predictions)
@@ -185,6 +188,9 @@ class DrawnTrack:
        self.shapes = self.shapes[:len(drawn_positions)]

        # for i, pos in drawn_positions.enumerate():
+        draw_dot = False # if False, draw line
+        for_laser = True
+
        if True:
            for ci in range(1, len(drawn_positions)):
                x, y = [int(p) for p in drawn_positions[ci-1]]
@@ -196,28 +202,33 @@ class DrawnTrack:

                if ci >= len(self.shapes):
                    # TODO: add color2
-                    # line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
+                    if draw_dot:
                        line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
-                    line.opacity = 20
+                    else:
+                        line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
+                    line.opacity = 20 if not for_laser else 255
                    self.shapes.append(line)

                else:
                    line = self.shapes[ci-1]
                    line.x, line.y = x, y
-                    # line.x2, line.y2 = x2, y2
+                    if draw_dot:
                        line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
+                    else:
+                        line.x2, line.y2 = x2, y2
                    line.color = color
-                    line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
+                    if not for_laser:
+                        line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))

        # TODO: basically a duplication of the above, do this smarter?
        # TODO: add intermediate segment
        color = colorset[self.track_id % len(colorset)]

-        if len(self.pred_history_shapes) > len(drawn_pred_history):
-            self.pred_history_shapes = self.pred_history_shapes[:len(drawn_pred_history)]
+        if False:
+            if len(self.pred_history_shapes) > len(drawn_pred_history):
+                self.pred_history_shapes = self.pred_history_shapes[:len(drawn_pred_history)]

            # for i, pos in drawn_pred_history.enumerate():
-        if False:
            for ci in range(1, len(drawn_pred_history)):
                x, y = [int(p) for p in drawn_pred_history[ci-1]]
                x2, y2 = [int(p) for p in drawn_pred_history[ci]]
@@ -239,48 +250,48 @@ class DrawnTrack:
                line.color = color
                line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))

+        if True:
            for a, drawn_prediction in enumerate(drawn_predictions):
                if len(self.pred_shapes) <= a:
                    self.pred_shapes.append([])

                if len(self.pred_shapes[a]) > (len(drawn_prediction) +1):
                    self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_prediction)]

                # for i, pos in drawn_predictions.enumerate():
                for ci in range(0, len(drawn_prediction)):
                    if ci == 0:
                        continue
                        # x, y = [int(p) for p in drawn_positions[-1]]
-                    else:
-                        x, y = [int(p) for p in drawn_prediction[ci-1]]
-
-                    x2, y2 = [int(p) for p in drawn_prediction[ci]]
-
-                    y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
-                    # color = [255,0,0]
-                    # print(x,y,x2,y2,color)
-
-                    if ci >= len(self.pred_shapes[a]):
-                        # TODO: add color2
-                        # line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
-                        line = pyglet.shapes.Line(x,y ,x2, y2, 1.5, color, batch=self.renderer.batch_anim)
-                        # line = pyglet.shapes.Arc(x,y ,1.5, thickness=1.5, color=color, batch=self.renderer.batch_anim)
-                        line.opacity = 5
-                        self.pred_shapes[a].append(line)
-
-                    else:
-                        line = self.pred_shapes[a][ci-1]
-                        line.x, line.y = x, y
-                        line.x2, line.y2 = x2, y2
-                        line.color = color
-                        decay = (16/ci) if ci else 16
-                        half = len(drawn_prediction) / 2
-                        if ci < half:
-                            target_opacity = 60
-                        else:
-                            target_opacity = (1 - ((ci - half) / half)) * 60
-                        line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
+
+                    x, y = [int(p) for p in drawn_prediction[ci-1]]
+                    x2, y2 = [int(p) for p in drawn_prediction[ci]]
+
+                    y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
+                    # color = [255,0,0]
+                    # print(x,y,x2,y2,color)
+
+                    if ci >= len(self.pred_shapes[a]):
+                        # TODO: add color2
+                        # line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
+                        line = pyglet.shapes.Line(x,y ,x2, y2, 1.5, color, batch=self.renderer.batch_anim)
+                        # line = pyglet.shapes.Arc(x,y ,1.5, thickness=1.5, color=color, batch=self.renderer.batch_anim)
+                        line.opacity = 5
+                        self.pred_shapes[a].append(line)
+
+                    else:
+                        line = self.pred_shapes[a][ci-1]
+                        line.x, line.y = x, y
+                        line.x2, line.y2 = x2, y2
+                        line.color = color
+                        decay = (16/ci) if ci else 16
+                        half = len(drawn_prediction) / 2
+                        if ci < half:
+                            target_opacity = 60
+                        else:
+                            target_opacity = (1 - ((ci - half) / half)) * 60
+                        line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))


class FrameWriter:
@@ -1,4 +1,7 @@
# lerp & inverse lerp from https://gist.github.com/laundmo/b224b1f4c8ef6ca5fe47e132c8deab56
+import linecache
+import os
+import tracemalloc
from typing import Iterable
import cv2
import numpy as np
@@ -40,4 +43,29 @@ def convert_world_points_to_img_points(points: Iterable):
    the points to img space"""
    if isinstance(points, np.ndarray):
        return np.array(points) * 100
    return [[p[0]*100, p[1]*100] for p in points]
+
+def display_top(snapshot: tracemalloc.Snapshot, key_type='lineno', limit=5):
+    snapshot = snapshot.filter_traces((
+        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
+        tracemalloc.Filter(False, "<unknown>"),
+    ))
+    top_stats = snapshot.statistics(key_type)
+
+    print("Top %s lines" % limit)
+    for index, stat in enumerate(top_stats[:limit], 1):
+        frame = stat.traceback[0]
+        # replace "/path/to/module/file.py" with "module/file.py"
+        filename = os.sep.join(frame.filename.split(os.sep)[-2:])
+        print("#%s: %s:%s: %.1f KiB"
+              % (index, filename, frame.lineno, stat.size / 1024))
+        line = linecache.getline(frame.filename, frame.lineno).strip()
+        if line:
+            print('    %s' % line)
+
+    other = top_stats[limit:]
+    if other:
+        size = sum(stat.size for stat in other)
+        print("%s other: %.1f KiB" % (len(other), size / 1024))
+    total = sum(stat.size for stat in top_stats)
+    print("Total allocated size: %.1f KiB" % (total / 1024))
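Taken together, the tracemalloc additions in this commit form a one-shot memory profile: tracemalloc.start() in the animation renderer's __init__, a single snapshot roughly 30 seconds into the run, display_top() to print the largest allocation sites grouped by traceback, then tracemalloc.stop(). A standalone sketch of that flow using the helper added above (the loop stands in for the render loop):

import time
import tracemalloc

from trap.utils import display_top

tracemalloc.start()
start = time.time()
has_snap = False

while not has_snap:
    # ... render work would happen here ...
    if time.time() - start > 30:
        snapshot = tracemalloc.take_snapshot()
        display_top(snapshot, 'traceback', 15)  # top 15 allocation sites, grouped by traceback
        tracemalloc.stop()
        has_snap = True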