Compare commits


4 commits

Author            SHA1        Message                                                 Date
Ruben van de Ven  cebe102e74  Tracker more sensitive and cv_renderer animates lines   2024-12-23 15:35:51 +01:00
Ruben van de Ven  888ef7ff93  fixes during playtest                                   2024-12-20 16:22:31 +01:00
Ruben van de Ven  dc5e2ff28c  change render switches                                  2024-12-18 15:24:42 +01:00
Ruben van de Ven  8bcca04ecc  fixes for projection trial                              2024-12-18 12:04:29 +01:00
12 changed files with 492 additions and 252 deletions

custom_bytetrack.yaml · View file

@@ -2,10 +2,10 @@
# Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.05 # threshold for the first association
track_low_thresh: 0.01 # threshold for the second association
new_track_thresh: 0.1 # threshold for init new track if the detection does not match any tracks
track_buffer: 35 # buffer to calculate the time when to remove tracks
match_thresh: 0.9 # threshold for matching tracks
track_high_thresh: 0.0001 # threshold for the first association
track_low_thresh: 0.0001 # threshold for the second association
new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
track_buffer: 50 # buffer to calculate the time when to remove tracks
match_thresh: 0.95 # threshold for matching tracks
fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
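Note: the new values drop the association thresholds by two to three orders of magnitude, so ByteTrack will try to match nearly every detection. They only take effect when this YAML is handed to Ultralytics' `track()` call, which trap/tracker.py does further down in this diff. A minimal sketch of that consumption (the weights file and frame are placeholders):

```python
# Sketch only: mirrors the model.track() call in trap/tracker.py below.
import numpy as np
from ultralytics import YOLO

model = YOLO("yolov8x.pt")  # placeholder weights
frame_img = np.zeros((640, 1152, 3), dtype=np.uint8)  # stand-in for a camera frame
results = model.track(
    frame_img,
    persist=True,
    tracker="custom_bytetrack.yaml",  # the file above
    conf=0.00001,                     # matches the near-zero association thresholds
    classes=[0, 15, 16],              # COCO: person, cat, dog
    verbose=False,
)
```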

trap/animation_renderer.py · View file

@@ -2,6 +2,7 @@
from __future__ import annotations
import time
import tracemalloc
import ffmpeg
from argparse import Namespace
import datetime
@@ -26,14 +27,19 @@ import json
from trap.frame_emitter import DetectionState, Frame, Track
from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
from trap.utils import convert_world_space_to_img_space, display_top
logger = logging.getLogger("trap.renderer")
COLOR_PRIMARY = (0,0,0,255)
# COLOR_PRIMARY = (0,0,0,255)
COLOR_PRIMARY = (255,255,255, 255)
class AnimationRenderer:
def __init__(self, config: Namespace, is_running: BaseEvent):
tracemalloc.start()
self.config = config
self.is_running = is_running
@@ -48,10 +54,10 @@ class AnimationRenderer:
self.tracker_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.tracker_sock.connect(config.zmq_trajectory_addr)
self.frame_sock = context.socket(zmq.SUB)
self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
self.frame_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.frame_sock.connect(config.zmq_frame_addr)
self.frame_noimg_sock = context.socket(zmq.SUB)
self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. NB. make sure this comes BEFORE connect, otherwise it's ignored!!
self.frame_noimg_sock.setsockopt(zmq.SUBSCRIBE, b'')
self.frame_noimg_sock.connect(config.zmq_frame_noimg_addr)
self.H = self.config.H
@@ -61,7 +67,9 @@ class AnimationRenderer:
# self.out = cv2.VideoWriter(str(filename), fourcc, 23.97, (1280,720))
self.fps = 60
self.frame_size = (self.config.camera.w,self.config.camera.h)
self.hide_stats = False
self.hide_stats = self.config.render_hide_stats
self.hide_bg = True
self.pause = False
self.out_writer = None # self.start_writer() if self.config.render_file else None
self.streaming_process = self.start_streaming() if self.config.render_url else None
@@ -77,7 +85,9 @@ class AnimationRenderer:
# , fullscreen=self.config.render_window
display = pyglet.canvas.get_display()
screen = display.get_screens()[0]
idx = -1 if self.config.render_window else 0
screen = display.get_screens()[idx]
print(display.get_screens())
if self.streaming_process is not None:
self.window = pyglet.window.Window(width=self.frame_size[0], height=self.frame_size[1], config=config, fullscreen=False, screen=screen)
@@ -86,6 +96,7 @@ class AnimationRenderer:
self.window.set_handler('on_draw', self.on_draw)
self.window.set_handler('on_refresh', self.on_refresh)
self.window.set_handler('on_close', self.on_close)
self.window.set_handler('on_key_press', self.on_key_press)
# don't know why, but importing this before window leads to "x connection to :1 broken (explicit kill or server shutdown)"
from pyglet_cornerpin import PygletCornerPin
@@ -96,12 +107,16 @@ class AnimationRenderer:
self.pins = PygletCornerPin(
self.window,
# source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
# corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]]
source_points=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]],
# corners=[[540, 670-360], [1380,670-360], [540,760-360], [1380,760-360]], # original test: short throw?
# corners=[[396, 442], [1644, 734], [350, 516], [1572, 796]], # beamer downstairs
# corners=[[270, 452], [1698, 784], [314, 568], [1572, 860]], # ??
# corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]] # ??
# corners=[[576, 706], [1790, 696], [588, 794], [1728, 796]], # beamer boven
)
self.window.push_handlers(self.pins)
pyglet.gl.glClearColor(255,255,255,255)
# pyglet.gl.glClearColor(255,255,255,255)
self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=COLOR_PRIMARY)
self.fps_display.label.x = self.window.width - 50
self.fps_display.label.y = self.window.height - 17
@@ -120,14 +135,18 @@ class AnimationRenderer:
self.batch_bg = pyglet.graphics.Batch()
self.batch_overlay = pyglet.graphics.Batch()
self.batch_anim = pyglet.graphics.Batch()
self.batch_debug = pyglet.graphics.Batch()
# if self.config.render_debug_shapes:
self.render_debug_shapes = self.config.render_debug_shapes
self.render_lines = True
if self.config.render_debug_shapes:
self.debug_lines = [
pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay),#v
pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #v
pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_overlay), #h
pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug),#v
pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
pyglet.shapes.Line(1140, 760-360, 1140, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
pyglet.shapes.Line(540, 760-360,540, 675-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #v
pyglet.shapes.Line(0, 770-360, 1380, 770-360, 2, COLOR_PRIMARY, batch=self.batch_debug), #h
]
@@ -146,7 +165,7 @@ class AnimationRenderer:
dst_img_points = np.reshape(dst_img_points, (dst_img_points.shape[0], 2))
self.debug_points = [
pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_overlay) for p in dst_img_points
pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_debug) for p in dst_img_points
]
@@ -260,12 +279,18 @@ class AnimationRenderer:
self.labels['frame_time'].text = f"{self.frame.time - self.first_time: >10.2f}s"
self.labels['frame_latency'].text = f"{self.frame.time - time.time():.2f}s"
if self.tracker_frame:
if self.frame.time - self.first_time > 30 and (not hasattr(self, 'has_snap') or self.has_snap == False):
snapshot = tracemalloc.take_snapshot()
display_top(snapshot, 'traceback', 15)
tracemalloc.stop()
self.has_snap = True
if self.tracker_frame and self.frame:
self.labels['tracker_idx'].text = f"{self.tracker_frame.index - self.frame.index}"
self.labels['tracker_time'].text = f"{self.tracker_frame.time - time.time():.3f}s"
self.labels['track_len'].text = f"{len(self.tracker_frame.tracks)} tracks"
if self.prediction_frame:
if self.prediction_frame and self.frame:
self.labels['pred_idx'].text = f"{self.prediction_frame.index - self.frame.index}"
self.labels['pred_time'].text = f"{self.prediction_frame.time - time.time():.3f}s"
# self.labels['track_len'].text = f"{len(self.prediction_frame.tracks)} tracks"
@@ -294,23 +319,32 @@ class AnimationRenderer:
def check_frames(self, dt):
if self.pause:
return
new_tracks = False
try:
self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
self.frame: Frame = self.frame_noimg_sock.recv_pyobj(zmq.NOBLOCK)
if not self.first_time:
self.first_time = self.frame.time
if self.frame.img:
img = self.frame.img
# newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.config.camera.mtx, self.config.camera.dist, (self.frame.img.shape[1], self.frame.img.shape[0]), 1, (self.frame.img.shape[1], self.frame.img.shape[0]))
img = cv2.undistort(img, self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
img = cv2.warpPerspective(img, self.config.camera.H, (self.config.camera.w, self.config.camera.h))
img = cv2.warpPerspective(img, convert_world_space_to_img_space(self.config.camera.H), (self.config.camera.w, self.config.camera.h))
# img = cv2.GaussianBlur(img, (15, 15), 0)
img = cv2.flip(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), 0)
img = pyglet.image.ImageData(self.frame_size[0], self.frame_size[1], 'RGB', img.tobytes())
# don't draw in batch, so that it is the background
if hasattr(self, 'video_sprite') and self.video_sprite:
self.video_sprite.delete()
self.frame.img = None
self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
# transform to flipped coordinate system for pyglet
self.video_sprite.y = self.window.height - self.video_sprite.height
self.video_sprite.opacity = 70
# self.frame.img = np.array([]) # clearing memory?
# self.video_sprite.opacity = 70
except zmq.ZMQError as e:
# idx = frame.index if frame else "NONE"
# logger.debug(f"reuse video frame {idx}")
@@ -367,11 +401,20 @@ class AnimationRenderer:
self.window.set_fullscreen(not self.window.fullscreen)
if symbol == ord('h'):
self.hide_stats = not self.hide_stats
if symbol == ord('d'):
self.render_debug_shapes = not self.render_debug_shapes
if symbol == ord('p'):
self.pause = not self.pause
if symbol == ord('b'):
self.hide_bg = not self.hide_bg
if symbol == ord('l'):
self.render_lines = not self.render_lines
def check_running(self, dt):
if not self.is_running.is_set():
self.window.close()
self.event_loop.exit()
print('quit animation renderer')
def on_close(self):
self.is_running.clear()
@@ -386,6 +429,8 @@ class AnimationRenderer:
self.refresh_labels(dt)
# self.shape1 = shapes.Circle(700, 150, 100, color=(50, 0, 30), batch=self.batch_anim)
# self.shape3 = shapes.Circle(800, 150, 100, color=(100, 225, 30), batch=self.batch_anim)
pass
@@ -393,8 +438,14 @@ class AnimationRenderer:
def on_draw(self):
self.window.clear()
if not self.hide_bg:
self.batch_bg.draw()
if self.render_debug_shapes:
self.batch_debug.draw()
self.pins.draw()
if self.render_lines:
for track in self.drawn_tracks.values():
for shape in track.shapes:
shape.draw() # for some reason the batches don't work
@@ -403,14 +454,13 @@ class AnimationRenderer:
for shape in shapes:
shape.draw()
# self.batch_anim.draw()
self.batch_overlay.draw()
if self.config.render_debug_shapes:
self.pins.draw()
# pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))
if not self.hide_stats:
self.batch_overlay.draw()
self.fps_display.draw()
# if streaming, capture buffer and send
@@ -496,6 +546,7 @@ class AnimationRenderer:
# cv2.waitKey(1)
logger.info('Stopping')
logger.info(f'used corner pins {self.pins.pin_positions}')
print(self.pins.pin_positions)
# if i>2:
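The renderer now reads frames from the new image-free feed. As the inline comments stress, ZMQ_CONFLATE must be set before connect() or it is silently ignored; the pattern in isolation (address is the --zmq-frame-noimg-addr default added below):

```python
import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.setsockopt(zmq.CONFLATE, 1)     # keep only the newest message
sock.setsockopt(zmq.SUBSCRIBE, b'')  # no topic filtering
sock.connect("ipc:///tmp/feeds_frame2")  # options BEFORE connect, or CONFLATE is ignored

try:
    frame = sock.recv_pyobj(zmq.NOBLOCK)
except zmq.Again:
    frame = None  # no new frame yet; keep rendering the previous one
```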

View file

@@ -253,6 +253,11 @@ connection_parser.add_argument('--zmq-frame-addr',
type=str,
default="ipc:///tmp/feeds_frame")
connection_parser.add_argument('--zmq-frame-noimg-addr',
help='Manually specify communication addr for the frame messages',
type=str,
default="ipc:///tmp/feeds_frame2")
connection_parser.add_argument('--ws-port',
help='Port to listen for incoming websocket connections. Also serves the testing html-page.',
@@ -336,12 +341,15 @@ render_parser.add_argument("--render-file",
render_parser.add_argument("--render-window",
help="Render a previewing to a window",
action='store_true')
render_parser.add_argument("--render-no-preview",
help="No preview, but only animation",
render_parser.add_argument("--render-animation",
help="Render animation (pyglet)",
action='store_true')
render_parser.add_argument("--render-debug-shapes",
help="Lines and points for debugging/mapping",
action='store_true')
render_parser.add_argument("--render-hide-stats",
help="Default toggle to hide (switch with 'h')",
action='store_true')
render_parser.add_argument("--full-screen",
help="Set Window full screen",
action='store_true')

trap/cv_renderer.py · View file

@@ -27,6 +27,7 @@ from PIL import Image
from trap.frame_emitter import DetectionState, Frame, Track, Camera
from trap.preview_renderer import FrameWriter
from trap.tools import draw_track, draw_track_predictions, draw_track_projected, draw_trackjectron_history, to_point
from trap.utils import convert_world_points_to_img_points, convert_world_space_to_img_space
@@ -333,7 +334,7 @@ class CvRenderer:
def run(self):
def run(self, timer_counter):
frame = None
prediction_frame = None
tracker_frame = None
@@ -341,11 +342,15 @@ class CvRenderer:
i=0
first_time = None
cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
# https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
cv2.moveWindow("frame", 1920, -1)
cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
while self.is_running.is_set():
i+=1
with timer_counter.get_lock():
timer_counter.value+=1
# zmq_ev = self.frame_sock.poll(timeout=2000)
@@ -403,10 +408,10 @@ class CvRenderer:
# clear out old tracks & predictions:
for track_id, track in list(self.tracks.items()):
if get_opacity(track, frame) == 0:
if get_animation_position(track, frame) == 1:
self.tracks.pop(track_id)
for prediction_id, track in list(self.predictions.items()):
if get_opacity(track, frame) == 0:
if get_animation_position(track, frame) == 1:
self.predictions.pop(prediction_id)
logger.info('Stopping')
@@ -440,26 +445,13 @@ colorset = [
# (0,0,0),
# ]
def get_opacity(track: Track, current_frame: Frame):
def get_animation_position(track: Track, current_frame: Frame):
fade_duration = current_frame.camera.fps * 3
diff = current_frame.index - track.history[-1].frame_nr
return max(0, 1 - diff / fade_duration)
return max(0, min(1, diff / fade_duration))
# track.history[-1].frame_nr < (current_frame.index - current_frame.camera.fps * 3)
def convert_world_space_to_img_space(H: cv2.Mat):
"""Transform the given matrix so that it immediately converts
the points to img space"""
new_H = H.copy()
new_H[:2] = H[:2] * 100
return new_H
def convert_world_points_to_img_points(points: Iterable):
"""Transform the given matrix so that it immediately converts
the points to img space"""
if isinstance(points, np.ndarray):
return np.array(points) * 100
return [[p[0]*100, p[1]*100] for p in points]
@@ -479,8 +471,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
# Fill image with red color(set each pixel to red)
overlay[:] = (0, 0, 0)
img = cv2.addWeighted(dst_img, .1, overlay, .3, 0)
# img = frame.img.copy()
# img = cv2.addWeighted(dst_img, .2, overlay, .3, 0)
img = dst_img.copy()
# all not working:
# if i == 1:
@@ -507,8 +499,8 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
inv_H = np.linalg.pinv(prediction_frame.H)
# draw_track(img, track, int(track_id))
draw_trackjectron_history(img, track, int(track.track_id), convert_world_points_to_img_points)
opacity = get_opacity(track, frame)
draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, opacity=opacity)
anim_position = get_animation_position(track, frame)
draw_track_predictions(img, track, int(track.track_id)+1, config.camera, convert_world_points_to_img_points, anim_position=anim_position)
cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)
base_color = (255,)*3
@@ -518,13 +510,14 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
cv2.putText(img, f"{frame.time - first_time: >10.2f}s", (150,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
cv2.putText(img, f"{frame.time - time.time():.2f}s", (250,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
if prediction_frame:
# render Δt and Δ frames
cv2.putText(img, f"{tracker_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
cv2.putText(img, f"{prediction_frame.index - frame.index}", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (230,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (290,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
cv2.putText(img, f"{tracker_frame.time - time.time():.2f}s", (310,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (380,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
cv2.putText(img, f"{len(tracker_frame.tracks)} tracks", (620,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()]):.2f}", (700,17), cv2.FONT_HERSHEY_PLAIN, 1, tracker_color, 1)
cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()]):.2f}", (780,17), cv2.FONT_HERSHEY_PLAIN, 1, predictor_color, 1)
@@ -541,6 +534,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
return img
def run_cv_renderer(config: Namespace, is_running: BaseEvent):
def run_cv_renderer(config: Namespace, is_running: BaseEvent, timer_counter):
renderer = CvRenderer(config, is_running)
renderer.run()
renderer.run(timer_counter)
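get_opacity (a 1-to-0 fade) becomes get_animation_position, a 0-to-1 progress value over three seconds of frames since the track was last seen; tracks are now pruned when it reaches 1 instead of 0. A standalone restatement with a worked value:

```python
# Standalone restatement of get_animation_position (values are examples).
def get_animation_position(last_frame_nr: int, current_index: int, fps: float) -> float:
    fade_duration = fps * 3
    diff = current_index - last_frame_nr
    return max(0, min(1, diff / fade_duration))

# At 12 fps, a track last seen 18 frames ago is halfway through its animation:
assert get_animation_position(100, 118, 12) == 0.5  # pruned once this hits 1.0
```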

trap/frame_emitter.py · View file

@@ -35,10 +35,23 @@ class DataclassJSONEncoder(json.JSONEncoder):
if isinstance(o, np.ndarray):
return o.tolist()
if dataclasses.is_dataclass(o):
d = dataclasses.asdict(o)
if isinstance(o, Frame):
# Don't send images over JSON
del d['img']
tracks = {}
for track_id, track in o.tracks.items():
track_obj = dataclasses.asdict(track)
track_obj['history'] = track.get_projected_history(None, o.camera)
tracks[track_id] = track_obj
d = {
'index': o.index,
'time': o.time,
'tracks': tracks,
'camera': dataclasses.asdict(o.camera),
}
else:
d = dataclasses.asdict(o)
# if isinstance(o, Frame):
# # Don't send images over JSON
# del d['img']
return d
return super().default(o)
@@ -407,6 +420,9 @@ class Frame:
} for t in self.tracks.values()
}
def without_img(self):
return Frame(self.index, None, self.time, self.tracks, self.H, self.camera)
def video_src_from_config(config) -> UrlOrPath:
if config.video_loop:
video_srcs: Iterable[UrlOrPath] = cycle(config.video_src)
@@ -429,12 +445,17 @@ class FrameEmitter:
self.frame_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
self.frame_sock.bind(config.zmq_frame_addr)
self.frame_noimg_sock = context.socket(zmq.PUB)
self.frame_noimg_sock.setsockopt(zmq.CONFLATE, 1) # only keep latest frame. make sure to set BEFORE connect/bind
self.frame_noimg_sock.bind(config.zmq_frame_noimg_addr)
logger.info(f"Connection socket {config.zmq_frame_addr}")
logger.info(f"Connection socket {config.zmq_frame_noimg_addr}")
self.video_srcs = video_src_from_config(self.config)
def emit_video(self):
def emit_video(self, timer_counter):
i = 0
delay_generation = False
for video_path in self.video_srcs:
@@ -449,7 +470,7 @@ class FrameEmitter:
video.set(cv2.CAP_PROP_FPS, 5)
fps=5
elif video_path.url.scheme == 'rtsp':
gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
gst = f"rtspsrc location={video_path} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
fps=12
@@ -483,7 +504,11 @@ class FrameEmitter:
prev_time = time.time()
while self.is_running.is_set():
with timer_counter.get_lock():
timer_counter.value += 1
ret, img = video.read()
# seek to 0 if video has finished. Infinite loop
@@ -502,6 +527,7 @@ class FrameEmitter:
frame = Frame(index=i, img=img, H=self.config.H, camera=self.config.camera)
# TODO: this is very dirty, need to find another way.
# perhaps multiprocessing Array?
self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
self.frame_sock.send(pickle.dumps(frame))
# only delay consuming the next frame when using a file.
@@ -527,7 +553,7 @@ class FrameEmitter:
def run_frame_emitter(config: Namespace, is_running: Event):
def run_frame_emitter(config: Namespace, is_running: Event, timer_counter: int):
router = FrameEmitter(config, is_running)
router.emit_video()
router.emit_video(timer_counter)
is_running.clear()
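Each frame is now published twice: the full pickle on the original socket, and a copy stripped by without_img() on the new feeds_frame2 socket, so metadata-only consumers (like the animation renderer above) never deserialize pixel data. A reduced sketch of the pattern, with a simplified stand-in Frame:

```python
import pickle
from dataclasses import dataclass, replace
from typing import Optional

import numpy as np
import zmq

@dataclass
class Frame:  # simplified stand-in for trap.frame_emitter.Frame
    index: int
    img: Optional[np.ndarray]

    def without_img(self) -> "Frame":
        return replace(self, img=None)

context = zmq.Context()
frame_sock = context.socket(zmq.PUB)
frame_sock.setsockopt(zmq.CONFLATE, 1)
frame_sock.bind("ipc:///tmp/feeds_frame")

noimg_sock = context.socket(zmq.PUB)
noimg_sock.setsockopt(zmq.CONFLATE, 1)
noimg_sock.bind("ipc:///tmp/feeds_frame2")

frame = Frame(index=0, img=np.zeros((480, 640, 3), dtype=np.uint8))
noimg_sock.send(pickle.dumps(frame.without_img()))  # cheap metadata-only feed
frame_sock.send(pickle.dumps(frame))                # full feed with pixels
```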

View file

@@ -13,6 +13,7 @@ from trap.prediction_server import run_prediction_server
from trap.preview_renderer import run_preview_renderer
from trap.animation_renderer import run_animation_renderer
from trap.socket_forwarder import run_ws_forwarder
from trap.timer import TimerCollection
from trap.tracker import run_tracker
from setproctitle import setproctitle, setthreadtitle
@@ -83,29 +84,43 @@ def start():
# queue_listener.handlers.append(socket_handler)
timers = TimerCollection()
timer_fe = timers.new('frame_emitter')
timer_tracker = timers.new('tracker')
# instantiating process with arguments
procs = [
# ExceptionHandlingProcess(target=run_ws_forwarder, kwargs={'config': args, 'is_running': isRunning}, name='forwarder'),
ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning}, name='frame_emitter'),
ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
ExceptionHandlingProcess(target=run_frame_emitter, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_fe.iterations}, name='frame_emitter'),
ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_tracker.iterations}, name='tracker'),
]
if args.render_file or args.render_url or args.render_window:
if not args.render_no_preview: #or args.render_file or args.render_url:
# if args.render_file or args.render_url or args.render_window:
if args.render_window or args.render_file or args.render_url:
timer_preview = timers.new('preview')
procs.append(
# ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning}, name='preview')
ExceptionHandlingProcess(target=run_cv_renderer, kwargs={'config': args, 'is_running': isRunning, 'timer_counter': timer_preview.iterations}, name='preview')
)
if args.render_no_preview:
if args.render_animation:
procs.append(
ExceptionHandlingProcess(target=run_animation_renderer, kwargs={'config': args, 'is_running': isRunning}, name='renderer')
)
if not args.bypass_prediction:
timer_predict = timers.new('predict')
procs.append(
ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning}, name='inference'),
ExceptionHandlingProcess(target=run_prediction_server, kwargs={'config': args, 'is_running':isRunning, 'timer_counter': timer_predict.iterations}, name='inference'),
)
def timer_process(timers: TimerCollection, is_running: Event):
while is_running.is_set():
time.sleep(1)
timers.snapshot()
print(timers.to_string())
procs.append(
ExceptionHandlingProcess(target=timer_process, kwargs={'is_running':isRunning, 'timers': timers}, name='timer'),
)
try:
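Each worker process receives a Timer's shared iterations counter and increments it under its lock; the new timer process snapshots every timer once per second and prints throughput. The counter handshake in isolation:

```python
import time
from multiprocessing import Event, Process, Value

def worker(timer_counter, is_running):
    while is_running.is_set():
        with timer_counter.get_lock():
            timer_counter.value += 1
        time.sleep(0.05)  # stand-in for one real iteration of work

if __name__ == "__main__":
    is_running = Event()
    is_running.set()
    counter = Value('i', 0)  # what Timer.iterations wraps (see trap/timer.py below)
    p = Process(target=worker, args=(counter, is_running))
    p.start()
    time.sleep(1)
    print(counter.value, "iterations in ~1s")  # snapshots turn this delta into fps
    is_running.clear()
    p.join()
```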

trap/prediction_server.py · View file

@@ -166,11 +166,11 @@ class PredictionServer:
def send_frame(self, frame: Frame):
if self.external_predictions:
# data = json.dumps(frame, cls=DataclassJSONEncoder)
self.prediction_socket.send_json(frame, cls=DataclassJSONEncoder)
self.prediction_socket.send_json(obj=frame, cls=DataclassJSONEncoder)
else:
self.prediction_socket.send_pyobj(frame)
def run(self):
def run(self, timer_counter):
print(self.config)
if self.config.seed is not None:
random.seed(self.config.seed)
@@ -247,6 +247,8 @@ class PredictionServer:
prev_run_time = 0
while self.is_running.is_set():
timestep += 1
with timer_counter.get_lock():
timer_counter.value+=1
# this_run_time = time.time()
# logger.debug(f'test {prev_run_time - this_run_time}')
@@ -477,7 +479,7 @@ class PredictionServer:
def run_prediction_server(config: Namespace, is_running: Event):
def run_prediction_server(config: Namespace, is_running: Event, timer_counter):
# attempt to trace the warnings coming from pytorch
# def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
@@ -488,4 +490,4 @@ def run_prediction_server(config: Namespace, is_running: Event):
# warnings.showwarning = warn_with_traceback
s = PredictionServer(config, is_running)
s.run()
s.run(timer_counter)
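pyzmq's send_json forwards extra keyword arguments to json.dumps, so cls=DataclassJSONEncoder rides along with the payload; spelling obj=frame just makes the positional argument explicit. The encoder's effect in isolation (Point is a hypothetical payload, not the real Frame):

```python
import dataclasses
import json

import numpy as np

class DataclassJSONEncoder(json.JSONEncoder):  # reduced from trap.frame_emitter
    def default(self, o):
        if isinstance(o, np.ndarray):
            return o.tolist()
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        return super().default(o)

@dataclasses.dataclass
class Point:  # hypothetical payload
    x: float
    y: float

print(json.dumps({"track": [Point(1.0, 2.0)]}, cls=DataclassJSONEncoder))
# {"track": [{"x": 1.0, "y": 2.0}]}
```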

trap/preview_renderer.py · View file

@@ -18,12 +18,13 @@ import tempfile
from pathlib import Path
import shutil
import math
from typing import Optional
from typing import List, Optional
from pyglet import shapes
from PIL import Image
from trap.utils import convert_world_points_to_img_points
from trap.frame_emitter import DetectionState, Frame, Track, Camera
@@ -86,7 +87,7 @@ class DrawnTrack:
self.track = track
# self.H = H
self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(self.H, self.camera)
self.coords = [d.get_foot_coords() for d in track.history] if self.draw_projection == PROJECTION_IMG else track.get_projected_history(None, self.camera)
# perhaps only do in constructor:
self.inv_H = np.linalg.pinv(self.H)
@@ -111,16 +112,24 @@ class DrawnTrack:
# color = (128,0,128) if pred_i else (128,
def update_drawn_positions(self, dt) -> []:
def update_drawn_positions(self, dt) -> List:
'''
use dt to lerp the drawn positions in the direction of current prediction
'''
# TODO: make lerp, currently quick way to get results
def int_or_not(v):
"""quick wrapper to toggle int'ing"""
return v
# return int(v)
# 1. track history
for i, pos in enumerate(self.drawn_positions):
self.drawn_positions[i][0] = int(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
self.drawn_positions[i][1] = int(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
self.drawn_positions[i][0] = self.coords[i][0]
self.drawn_positions[i][1] = self.coords[i][1]
# self.drawn_positions[i][0] = int_or_not(exponentialDecay(self.drawn_positions[i][0], self.coords[i][0], 16, dt))
# self.drawn_positions[i][1] = int_or_not(exponentialDecay(self.drawn_positions[i][1], self.coords[i][1], 16, dt))
# print(self.drawn_positions)
if len(self.coords) > len(self.drawn_positions):
self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
@@ -128,8 +137,8 @@ class DrawnTrack:
# 2. history as seen by predictor (Trajectron)
for i, pos in enumerate(self.drawn_pred_history):
if len(self.pred_history_coords) > i:
self.drawn_pred_history[i][0] = int(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
self.drawn_pred_history[i][1] = int(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
self.drawn_pred_history[i][0] = int_or_not(exponentialDecay(self.drawn_pred_history[i][0], self.pred_history_coords[i][0], 16, dt))
self.drawn_pred_history[i][1] = int_or_not(exponentialDecay(self.drawn_pred_history[i][1], self.pred_history_coords[i][1], 16, dt))
if len(self.pred_history_coords) > len(self.drawn_pred_history):
self.drawn_pred_history.extend(self.coords[len(self.drawn_pred_history):])
@@ -147,7 +156,7 @@ class DrawnTrack:
r = exponentialDecay(drawn_r, pred_r, decay, dt)
angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
x, y = relativePolarToPoint(origin, r, angle)
self.drawn_predictions[a][i] = int(x), int(y)
self.drawn_predictions[a][i] = int_or_not(x), int_or_not(y)
# self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
# self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
@@ -159,49 +168,70 @@ class DrawnTrack:
# self.drawn_positions = self.coords
# finally: update shapes from coordinates
self.update_shapes(dt)
return self.drawn_positions
def update_shapes(self, dt):
if len(self.shapes) > len(self.drawn_positions):
self.shapes = self.shapes[:len(self.drawn_positions)]
# for i, pos in self.drawn_positions.enumerate():
for ci in range(1, len(self.drawn_positions)):
x, y = [int(p) for p in self.drawn_positions[ci-1]]
x2, y2 = [int(p) for p in self.drawn_positions[ci]]
drawn_positions = convert_world_points_to_img_points(self.coords[:500]) # TODO)) Glitch in self.drawn_positions, now also capped
drawn_pred_history = convert_world_points_to_img_points(self.drawn_pred_history)
drawn_predictions = [convert_world_points_to_img_points(p) for p in self.drawn_predictions]
# positions = convert_world_points_to_img_points(self.drawn_predictions)
# print("drawn",
# drawn_positions,'self', self.drawn_positions
# )
if len(self.shapes) > len(drawn_positions):
self.shapes = self.shapes[:len(drawn_positions)]
# for i, pos in drawn_positions.enumerate():
draw_dot = False # if False, draw line
for_laser = True
if True:
for ci in range(1, len(drawn_positions)):
x, y = [int(p) for p in drawn_positions[ci-1]]
x2, y2 = [int(p) for p in drawn_positions[ci]]
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
color = [100+155*ci // len(self.drawn_positions)]*3
color = [100+155*ci // len(drawn_positions)]*3
# print(x,y,x2,y2,color)
if ci >= len(self.shapes):
# TODO: add color2
line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
if draw_dot:
line = pyglet.shapes.Arc(x2, y2, 10, thickness=2, color=color, batch=self.renderer.batch_anim)
line.opacity = 20
else:
line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
line.opacity = 20 if not for_laser else 255
self.shapes.append(line)
else:
line = self.shapes[ci-1]
line.x, line.y = x, y
line.x2, line.y2 = x2, y2
if draw_dot:
line.radius = int(exponentialDecay(line.radius, 1.5, 3, dt))
else:
line.x2, line.y2 = x2, y2
line.color = color
if not for_laser:
line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
# TODO: basically a duplication of the above, do this smarter?
# TODO: add intermediate segment
color = colorset[self.track_id % len(colorset)]
if len(self.pred_history_shapes) > len(self.drawn_pred_history):
self.pred_history_shapes = self.pred_history_shapes[:len(self.drawn_pred_history)]
if False:
if len(self.pred_history_shapes) > len(drawn_pred_history):
self.pred_history_shapes = self.pred_history_shapes[:len(drawn_pred_history)]
# for i, pos in self.drawn_pred_history.enumerate():
for ci in range(1, len(self.drawn_pred_history)):
x, y = [int(p) for p in self.drawn_pred_history[ci-1]]
x2, y2 = [int(p) for p in self.drawn_pred_history[ci]]
# for i, pos in drawn_pred_history.enumerate():
for ci in range(1, len(drawn_pred_history)):
x, y = [int(p) for p in drawn_pred_history[ci-1]]
x2, y2 = [int(p) for p in drawn_pred_history[ci]]
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
@@ -220,23 +250,23 @@ class DrawnTrack:
line.color = color
line.opacity = int(exponentialDecay(line.opacity, 180, 8, dt))
for a, drawn_predictions in enumerate(self.drawn_predictions):
if True:
for a, drawn_prediction in enumerate(drawn_predictions):
if len(self.pred_shapes) <= a:
self.pred_shapes.append([])
if len(self.pred_shapes[a]) > (len(drawn_predictions) +1):
self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_predictions)]
if len(self.pred_shapes[a]) > (len(drawn_prediction) +1):
self.pred_shapes[a] = self.pred_shapes[a][:len(drawn_prediction)]
# for i, pos in drawn_predictions.enumerate():
for ci in range(0, len(drawn_predictions)):
for ci in range(0, len(drawn_prediction)):
if ci == 0:
continue
# x, y = [int(p) for p in self.drawn_positions[-1]]
# x, y = [int(p) for p in drawn_positions[-1]]
else:
x, y = [int(p) for p in drawn_predictions[ci-1]]
x, y = [int(p) for p in drawn_prediction[ci-1]]
x2, y2 = [int(p) for p in drawn_predictions[ci]]
x2, y2 = [int(p) for p in drawn_prediction[ci]]
y, y2 = self.renderer.window.height - y, self.renderer.window.height - y2
# color = [255,0,0]
@@ -256,7 +286,7 @@ class DrawnTrack:
line.x2, line.y2 = x2, y2
line.color = color
decay = (16/ci) if ci else 16
half = len(drawn_predictions) / 2
half = len(drawn_prediction) / 2
if ci < half:
target_opacity = 60
else:
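The easing throughout this file relies on exponentialDecay, which the diff does not show. A common frame-rate-independent form consistent with its call sites, exponentialDecay(current, target, decay, dt), is sketched below; treat it as an assumption about the helper, not its actual source:

```python
import math

def exponentialDecay(a: float, b: float, decay: float, dt: float) -> float:
    """Ease a toward b; larger decay or dt moves further (assumed implementation)."""
    return b + (a - b) * math.exp(-decay * dt)

x = 0.0
for _ in range(3):                        # three frames at 60 fps
    x = exponentialDecay(x, 10.0, 16, 1 / 60)
print(round(x, 2))                        # ≈ 5.51, easing toward the target of 10
```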

trap/timer.py · View file

@@ -1,6 +1,9 @@
import collections
from re import A
import time
from multiprocessing.sharedctypes import RawValue, Value, Array
from ctypes import c_double
from typing import MutableSequence, Tuple
class Timer():
@@ -8,47 +11,58 @@ class Timer():
Measure two independent things: the frequency of tic, and the duration of tic->toc.
Note that these need not be equal.
"""
def __init__(self) -> None:
self.last_tic = RawValue(c_double, -1)
self.last_toc = RawValue(c_double, -1)
self.fps = RawValue(c_double, -1)
self.processing_duration = RawValue(c_double, -1)
def __init__(self, name = 'timer') -> None:
self.name = name
self.tocs: MutableSequence[Tuple[float, int]] = collections.deque(maxlen=5)
self.iterations = Value('i', 0)
self.smoothing = .1
# def tic(self):
# now = time.time()
# if self.last_tic is None:
def tic(self):
now = time.time()
if self.last_tic is None:
# self.last_tic = now
# return
self.last_tic = now
return
# duration = now - self.last_tic
# self.last_tic = now
duration = now - self.last_tic
self.last_tic = now
current_fps = 1 / duration
if not self.fps:
self.fps = current_fps
else:
self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
# current_fps = 1 / duration
# if not self.fps:
# self.fps = current_fps
# else:
# self.fps = self.fps * (1-self.smoothing) + current_fps * self.smoothing
def toc(self):
self.last_toc = time.time()
duration = self.last_toc - self.last_tic
self.processing_duration = self.processing_duration * (1-self.smoothing) + duration * self.smoothing
self.iterations.value += 1 # Value has no __iadd__; increment the wrapped value
def snapshot(self):
self.tocs.append((time.perf_counter(), self.iterations.value))
@property
def fps(self):
if len(self.tocs) < 2:
return 0
dt = self.tocs[-1][0] - self.tocs[0][0]
di = self.tocs[-1][1] - self.tocs[0][1]
return di/dt
class TimerCollection():
def __init__(self) -> None:
self._timers = set()
def print(self)->None:
print('Update', end='\r')
def snapshot(self):
for timer in self._timers:
timer.snapshot()
def to_string(self)->str:
strs = [f"{t.name} {t.fps:.2f}" for t in self._timers]
return " ".join(strs)
def new(self, name='timer'):
t = Timer(name)
self._timers.add(t)
return t
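Instead of smoothing per-tic durations, the rewritten Timer derives fps from snapshots: each snapshot() appends (perf_counter, iterations) to a five-entry deque, and the fps property divides the iteration delta by the time delta across that window. Usage mirrors timer_process in the start() changes above:

```python
import time
from trap.timer import TimerCollection

timers = TimerCollection()
t = timers.new('tracker')
for _ in range(3):
    for _ in range(10):                  # a worker would do this ~10x per second
        with t.iterations.get_lock():
            t.iterations.value += 1
    time.sleep(1)
    timers.snapshot()
    print(timers.to_string())            # e.g. "tracker 10.02" once two snapshots exist
```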

trap/tools.py · View file

@@ -22,7 +22,7 @@ from ultralytics import YOLO
from ultralytics.engine.results import Results as YOLOResult
import tqdm
from trap.utils import lerp
from trap.utils import inv_lerp, lerp
@@ -185,40 +185,82 @@ def tracker_compare():
bar.set_description(f"[{frames.video_nr}/{len(frames.video_srcs)}] [{frames.frame_idx}/{frames.frame_count}] {str(frames.video_path)}")
def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], opacity=1):
def transition_path_points(path: np.ndarray, t: float):
"""
t: 0-1, the fraction of the path (by cumulative arc length) to keep
"""
if t >= 1:
return path
if t <= 0:
return np.array([path[0]])
# new_path = np.array([])
lengths = np.sqrt(np.sum(np.diff(path, axis=0)**2, axis=1))
cum_lengths = np.cumsum(lengths)
# distance = cum_lengths[-1] * t
ts = np.concatenate((np.array([0.]), cum_lengths / cum_lengths[-1]))
new_path = [path[0]]
for a, b, t_a, t_b in zip(path[:-1], path[1:], ts[:-1], ts[1:]):
if t_b < t:
new_path.append(b)
continue
# interpolate
relative_t = inv_lerp(t_a, t_b, t)
x = lerp(a[0], b[0], relative_t)
y = lerp(a[1], b[1], relative_t)
print(relative_t, a , b, x, y)
new_path.append([x,y])
break
return np.array(new_path)
def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:Camera, convert_points: Optional[Callable], anim_position=.8):
"""
anim_position: 0-1
"""
if not track.predictions:
return
current_point = track.get_projected_history(camera=camera)[-1]
if convert_points:
current_point = convert_points([current_point])[0]
opacity = 1-min(1, max(0, inv_lerp(0.8, 1, anim_position))) # fade out
slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position
# if convert_points:
# current_point = convert_points([current_point])[0]
for pred_i, pred in enumerate(track.predictions):
pred_coords = pred #cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
# line_points = np.concatenate(([current_point], pred_coords)) # 'current point' is a moving target
line_points = pred_coords
# print(pred_coords, current_point, line_points)
line_points = transition_path_points(line_points, slide_t)
if convert_points:
pred_coords = convert_points(pred_coords)
line_points = convert_points(line_points)
# color = (128,0,128) if pred_i else (128,128,0)
color = bgr_colors[color_index % len(bgr_colors)]
color = tuple([int(c*opacity) for c in color])
for ci in range(0, len(pred_coords)):
if ci == 0:
# TODO)) prev point
# continue
start = [int(p) for p in current_point]
# start = [int(p) for p in coords[-1]]
# start = [0,0]?
# print(start)
else:
start = [int(p) for p in pred_coords[ci-1]]
end = [int(p) for p in pred_coords[ci]]
cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
for start, end in zip(line_points[:-1], line_points[1:]):
# for ci in range(0, len(pred_coords)):
# if ci == 0:
# # TODO)) prev point
# # continue
# start = [int(p) for p in current_point]
# # start = [int(p) for p in coords[-1]]
# # start = [0,0]?
# # print(start)
# else:
# start = [int(p) for p in pred_coords[ci-1]]
# end = [int(p) for p in pred_coords[ci]]
# print(np.rint(start),np.rint(end).tolist())
cv2.line(img, np.rint(start).astype(int), np.rint(end).astype(int), color, 1, lineType=cv2.LINE_AA)
# cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)
def draw_trackjectron_history(img: cv2.Mat, track: Track, color_index: int, convert_points: Optional[Callable]):
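transition_path_points clips a polyline to the first t of its cumulative arc length, which is what slides predictions out during the first 80% of anim_position before the final fade. A worked example on an L-shaped path:

```python
import numpy as np
from trap.tools import transition_path_points

path = np.array([[0., 0.], [1., 0.], [1., 1.]])  # two unit-length segments
clipped = transition_path_points(path, 0.75)
# Segment fractions ts = [0, 0.5, 1]; t=0.75 keeps the first segment whole
# and stops halfway along the second, so clipped is:
# [[0.   0. ]
#  [1.   0. ]
#  [1.   0.5]]
```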

trap/tracker.py · View file

@@ -9,7 +9,7 @@ from multiprocessing import Event
from pathlib import Path
import pickle
import time
from typing import Optional, List
from typing import Dict, Optional, List
import jsonlines
import numpy as np
import torch
@@ -61,7 +61,7 @@ RCNN_SCALE = .4 # seems to have no impact on detections in the corners
def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:
results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, **kwargs))
results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))
if results[0].boxes is None or results[0].boxes.id is None:
# work around https://github.com/ultralytics/ultralytics/issues/5968
return []
@@ -99,6 +99,10 @@ class TrackFilter:
def apply(self, tracks: List[Track], camera: Camera):
return [t for t in tracks if self.filter(t, camera)]
def apply_to_dict(self, tracks: Dict[str, Track], camera: Camera):
tracks = self.apply(tracks.values(), camera)
return {t.track_id: t for t in tracks}
class FinalDisplacementFilter(TrackFilter):
def __init__(self, min_displacement):
self.min_displacement = min_displacement
@@ -444,7 +448,7 @@ class Tracker:
def track_frame(self, frame: Frame):
if self.config.detector == DETECTOR_YOLOv8:
detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0], imgsz=[1152, 640])
detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
else :
detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)
@@ -458,7 +462,7 @@ class Tracker:
return detections
def track(self, is_running: Event):
def track(self, is_running: Event, timer_counter: int = 0):
"""
Live tracking of frames coming in over zmq
"""
@@ -500,7 +504,11 @@ class Tracker:
end_time = None
tracker_dt = None
w_time = None
displacement_filter = FinalDisplacementFilter(.2)
while self.is_running.is_set():
with timer_counter.get_lock():
timer_counter.value += 1
# this waiting for target_dt causes frame loss. E.g. with target_dt at .1, it
# skips exactly 1 frame on a 10 fps video (which it obviously should not do)
# so for now, timing should move to emitter
@@ -566,6 +574,9 @@ class Tracker:
# }
active_track_ids = [d.track_id for d in detections]
active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera) # filter out tracks that never really moved (static objects)
# active_tracks = {t.track_id: t for t in self.tracks.values() if t.track_id in active_track_ids}
# logger.info(f"{trajectories}")
@@ -579,7 +590,7 @@ class Tracker:
frame = self.smoother.smooth_frame_tracks(frame)
# print(f"send to {self.trajectory_socket}, {self.config.zmq_trajectory_addr}")
self.trajectory_socket.send_pyobj(frame)
self.trajectory_socket.send_pyobj(frame.without_img()) # ditch image for faster passthrough
end_time = time.time()
tracker_dt = end_time - start_time
@@ -665,9 +676,9 @@ class Tracker:
return [([d[0], d[1], d[2]-d[0], d[3]-d[1]], d[4], d[5]) for d in detections]
def run_tracker(config: Namespace, is_running: Event):
def run_tracker(config: Namespace, is_running: Event, timer_counter):
router = Tracker(config)
router.track(is_running)
router.track(is_running, timer_counter)
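FinalDisplacementFilter's predicate is not part of this diff; presumably it compares the net movement of a track's projected (world-space) history against the 0.2 threshold, along these lines (hypothetical implementation):

```python
import numpy as np

def final_displacement_ok(track, camera, min_displacement=0.2) -> bool:
    # world-space history, as used elsewhere in this diff
    history = np.array(track.get_projected_history(None, camera))
    if len(history) < 2:
        return False
    return float(np.linalg.norm(history[-1] - history[0])) >= min_displacement
```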

trap/utils.py · View file

@@ -1,4 +1,12 @@
# lerp & inverse lerp from https://gist.github.com/laundmo/b224b1f4c8ef6ca5fe47e132c8deab56
import linecache
import os
import tracemalloc
from typing import Iterable
import cv2
import numpy as np
def lerp(a: float, b: float, t: float) -> float:
"""Linear interpolate on the scale given by a to b, using t as the point on that scale.
Examples
@@ -21,3 +29,43 @@ def inv_lerp(a: float, b: float, v: float) -> float:
def get_bins(bin_size: float):
return [[bin_size, 0], [bin_size, bin_size], [0, bin_size], [-bin_size, bin_size], [-bin_size, 0], [-bin_size, -bin_size], [0, -bin_size], [bin_size, -bin_size]]
def convert_world_space_to_img_space(H: cv2.Mat):
"""Transform the given matrix so that it immediately converts
the points to img space"""
new_H = H.copy()
new_H[:2] = H[:2] * 100
return new_H
def convert_world_points_to_img_points(points: Iterable):
"""Transform the given matrix so that it immediately converts
the points to img space"""
if isinstance(points, np.ndarray):
return np.array(points) * 100
return [[p[0]*100, p[1]*100] for p in points]
def display_top(snapshot: tracemalloc.Snapshot, key_type='lineno', limit=5):
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
))
top_stats = snapshot.statistics(key_type)
print("Top %s lines" % limit)
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
# replace "/path/to/module/file.py" with "module/file.py"
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
print("#%s: %s:%s: %.1f KiB"
% (index, filename, frame.lineno, stat.size / 1024))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
print(' %s' % line)
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
print("%s other: %.1f KiB" % (len(other), size / 1024))
total = sum(stat.size for stat in top_stats)
print("Total allocated size: %.1f KiB" % (total / 1024))