Run tracker with smoother enabled

This commit is contained in:
Ruben van de Ven 2024-04-29 14:46:44 +02:00
parent 7c05c060c3
commit c9f573fcdd
6 changed files with 261 additions and 107 deletions

File diff suppressed because one or more lines are too long

View File

@@ -152,6 +152,9 @@ inference_parser.add_argument('--predict_training_data',
help='Ignore tracker and predict data from the training dataset',
action='store_true')
inference_parser.add_argument("--smooth-predictions",
help="Smooth the predicted tracks",
action='store_true')
# Internal connections.
@@ -214,6 +217,9 @@ tracker_parser.add_argument("--detector",
help="Specify the detector to use",
type=str,
choices=DETECTORS)
tracker_parser.add_argument("--smooth-tracks",
help="Smooth the tracker tracks before sending them to the predictor",
action='store_true')
# Renderer
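
Both new options are plain store_true switches, one on the inference parser and one on the tracker parser. A minimal standalone sketch (not the project's actual combined parser) of how they end up on the parsed Namespace that the prediction server and tracker read as config.smooth_predictions / config.smooth_tracks:

import argparse

# Standalone sketch: the real project wires these flags onto its own sub-parsers.
parser = argparse.ArgumentParser()
parser.add_argument("--smooth-predictions", action='store_true',
                    help="Smooth the predicted tracks")
parser.add_argument("--smooth-tracks", action='store_true',
                    help="Smooth the tracker tracks before sending them to the predictor")

config = parser.parse_args(["--smooth-tracks"])
# argparse maps the dashes to underscores on the Namespace
assert config.smooth_tracks is True
assert config.smooth_predictions is False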

View File

@@ -42,6 +42,7 @@ class Detection:
h: int # height - image space
conf: float # object detector probability
state: DetectionState
frame_nr: int
def get_foot_coords(self):
return [self.l + 0.5 * self.w, self.t+self.h]
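
With the new frame_nr field the dataclass roughly looks as follows. This is a hedged reconstruction: the field order matches the Detection(...) constructor calls later in this diff, while the DetectionState members other than Confirmed are assumptions.

from dataclasses import dataclass
from enum import Enum

class DetectionState(Enum):
    # Only Confirmed is visible in this diff; the other members are assumed.
    Tentative = 1
    Confirmed = 2
    Lost = 3

@dataclass
class Detection:
    track_id: str          # id of the track this detection belongs to
    l: int                 # left - image space
    t: int                 # top - image space
    w: int                 # width - image space
    h: int                 # height - image space
    conf: float            # object detector probability
    state: DetectionState
    frame_nr: int          # new: index of the frame this detection was made in

    def get_foot_coords(self) -> list:
        # centre of the bottom edge of the bounding box
        return [self.l + 0.5 * self.w, self.t + self.h]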

View File

@@ -27,7 +27,7 @@ import matplotlib.pyplot as plt
import zmq
from trap.frame_emitter import Frame
from trap.tracker import Track
from trap.tracker import Track, Smoother
logger = logging.getLogger("trap.prediction")
@@ -120,6 +120,9 @@ class PredictionServer:
if self.config.eval_device == 'cpu':
logger.warning("Running on CPU. Specifying --eval_device cuda:0 should dramatically speed up prediction")
if self.config.smooth_predictions:
self.smoother = Smoother()
context = zmq.Context()
self.trajectory_socket: zmq.Socket = context.socket(zmq.SUB)
@@ -387,6 +390,10 @@ class PredictionServer:
logger.info(f"Frame prediction: {len(trajectron.nodes)} nodes & {trajectron.scene_graph.get_num_edges()} edges. Trajectron: {end - start}s")
else:
logger.info(f"Total frame delay = {time.time()-frame.time}s ({len(trajectron.nodes)} nodes & {trajectron.scene_graph.get_num_edges()} edges. Trajectron: {end - start}s)")
if self.config.smooth_predictions:
frame = self.smoother.smooth_frame_predictions(frame)
self.prediction_socket.send_pyobj(frame)
logger.info('Stopping')
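
The (optionally smoothed) frame is shipped to the next stage as a pickled Python object over ZeroMQ. A minimal sketch of that pyzmq pattern; only the SUB socket appears in the hunk above, so the PUB side and the endpoint here are assumptions:

import zmq

context = zmq.Context()

pub = context.socket(zmq.PUB)
pub.bind("ipc:///tmp/trajectories")      # hypothetical endpoint

sub = context.socket(zmq.SUB)
sub.connect("ipc:///tmp/trajectories")
sub.setsockopt(zmq.SUBSCRIBE, b"")       # subscribe to every message

# per frame:
# pub.send_pyobj(frame)        # sender side (after optional smoothing)
# frame = sub.recv_pyobj()     # receiver side gets the unpickled Frame back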

View File

@@ -107,9 +107,9 @@ class Renderer:
logger.debug(f"write frame {frame.time - first_time:.3f}s")
if self.out_writer:
self.out_writer.write(img)
self.out_writer.write(frame.img)
if self.streaming_process:
self.streaming_process.stdin.write(img.tobytes())
self.streaming_process.stdin.write(frame.img.tobytes())
logger.info('Stopping')
if i>2:
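
The change above makes both sinks consume frame.img rather than a local img variable. A rough sketch, under assumptions, of what such sinks typically look like: an OpenCV VideoWriter and the stdin of a raw-video streaming subprocess. The ffmpeg command, output path and frame size below are made up, not taken from the renderer's actual setup.

import subprocess
import cv2

# Hypothetical sinks; the real Renderer builds its own out_writer / streaming_process.
out_writer = cv2.VideoWriter("/tmp/out.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 25, (1280, 720))
streaming_process = subprocess.Popen(
    ["ffmpeg", "-f", "rawvideo", "-pix_fmt", "bgr24", "-s", "1280x720",
     "-i", "-", "-f", "null", "-"],
    stdin=subprocess.PIPE)

# per rendered frame (frame.img is an HxWx3 uint8 array):
# out_writer.write(frame.img)
# streaming_process.stdin.write(frame.img.tobytes())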

View File

@@ -24,6 +24,9 @@ from ultralytics.engine.results import Results as YOLOResult
from trap.frame_emitter import DetectionState, Frame, Detection, Track
from tsmoothie.smoother import KalmanSmoother, ConvolutionSmoother
# Detection = [int, int, int, int, float, int]
# Detections = [Detection]
@@ -103,6 +106,13 @@ class Tracker:
self.H = np.loadtxt(self.config.homography, delimiter=',')
if self.config.smooth_tracks:
logger.info("Smoother enabled")
self.smoother = Smoother()
else:
logger.info("Smoother Disabled (enable with --smooth-tracks)")
logger.debug("Set up tracker")
@@ -160,7 +170,7 @@ class Tracker:
if self.config.detector == DETECTOR_YOLOv8:
detections: [Detection] = self._yolov8_track(frame.img)
detections: [Detection] = self._yolov8_track(frame)
else :
detections: [Detection] = self._resnet_track(frame.img, scale = 1)
@@ -199,6 +209,9 @@ class Tracker:
# self.trajectory_socket.send_string(json.dumps(trajectories))
# else:
# self.trajectory_socket.send(pickle.dumps(frame))
if self.config.smooth_tracks:
frame = self.smoother.smooth_frame_tracks(frame)
self.trajectory_socket.send_pyobj(frame)
current_time = time.time()
@@ -249,12 +262,12 @@ class Tracker:
logger.info('Stopping')
def _yolov8_track(self, img) -> [Detection]:
results: [YOLOResult] = self.model.track(img, persist=True)
def _yolov8_track(self, frame: Frame,) -> [Detection]:
results: [YOLOResult] = self.model.track(frame.img, persist=True, tracker="bytetrack.yaml", verbose=False)
if results[0].boxes is None or results[0].boxes.id is None:
# work around https://github.com/ultralytics/ultralytics/issues/5968
return []
return [Detection(track_id, *bbox) for bbox, track_id in zip(results[0].boxes.xywh.cpu(), results[0].boxes.id.int().cpu().tolist())]
return [Detection(track_id, bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3], bbox[2], bbox[3], 1, DetectionState.Confirmed, frame.index) for bbox, track_id in zip(results[0].boxes.xywh.cpu(), results[0].boxes.id.int().cpu().tolist())]
def _resnet_track(self, img, scale: float = 1) -> [Detection]:
if scale != 1:
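
The new _yolov8_track return statement converts each ultralytics box from the centre-based (x, y, w, h) layout of boxes.xywh to the top-left layout Detection stores, and stamps it with a confidence of 1, a Confirmed state and the frame index. The coordinate shift in isolation:

# Sketch of the box conversion used above: the box centre is shifted by half
# the size to obtain the top-left corner.
def xywh_to_ltwh(cx, cy, w, h):
    return cx - 0.5 * w, cy - 0.5 * h, w, h

assert xywh_to_ltwh(50, 40, 20, 10) == (40.0, 35.0, 20, 10)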
@@ -303,4 +316,55 @@ class Tracker:
def run_tracker(config: Namespace, is_running: Event):
router = Tracker(config, is_running)
router.track()
router.track()
class Smoother:
def __init__(self):
self.smoother = ConvolutionSmoother(window_len=20, window_type='ones', copy=None)
def smooth_frame_tracks(self, frame: Frame) -> Frame:
new_tracks = []
for track in frame.tracks.values():
ls = [d.l for d in track.history]
ts = [d.t for d in track.history]
ws = [d.w for d in track.history]
hs = [d.h for d in track.history]
self.smoother.smooth(ls)
ls = self.smoother.smooth_data[0]
self.smoother.smooth(ts)
ts = self.smoother.smooth_data[0]
self.smoother.smooth(ws)
ws = self.smoother.smooth_data[0]
self.smoother.smooth(hs)
hs = self.smoother.smooth_data[0]
new_history = [Detection(d.track_id, l, t, w, h, d.conf, d.state, d.frame_nr) for l, t, w, h, d in zip(ls,ts,ws,hs, track.history)]
new_track = Track(track.track_id, new_history, track.predictor_history, track.predictions)
new_tracks.append(new_track)
frame.tracks = {t.track_id: t for t in new_tracks}
return frame
def smooth_frame_predictions(self, frame) -> Frame:
for track in frame.tracks.values():
new_predictions = []
if not track.predictions:
continue
for prediction in track.predictions:
xs = [d[0] for d in prediction]
ys = [d[1] for d in prediction]
self.smoother.smooth(xs)
xs = self.smoother.smooth_data[0]
self.smoother.smooth(ys)
ys = self.smoother.smooth_data[0]
smooth_prediction = [[x,y] for x, y in zip(xs, ys)]
new_predictions.append(smooth_prediction)
track.predictions = new_predictions
return frame
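
The Smoother wraps tsmoothie's ConvolutionSmoother and runs it over one coordinate series at a time (l/t/w/h per track history, x/y per predicted trajectory). A small self-contained example of that library call on synthetic data, using the same window settings as above:

import numpy as np
from tsmoothie.smoother import ConvolutionSmoother

noisy = np.cumsum(np.random.randn(200))          # a jittery 1-D coordinate series
smoother = ConvolutionSmoother(window_len=20, window_type='ones')
smoother.smooth(noisy)
smoothed = smoother.smooth_data[0]               # smoothed series, same length as the input
assert smoothed.shape == noisy.shape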