Tweaks for running live with lidar

Ruben van de Ven 2025-11-07 12:07:05 +01:00
parent 858bb91244
commit 3a73806a52
15 changed files with 840 additions and 199 deletions


@@ -2,12 +2,12 @@
     "batch_size": 512,
     "grad_clip": 1.0,
     "learning_rate_style": "exp",
-    "learning_rate": 0.01,
+    "learning_rate": 0.0001,
     "min_learning_rate": 1e-05,
     "learning_decay_rate": 0.9999,
-    "prediction_horizon": 30,
+    "prediction_horizon": 60,
     "minimum_history_length": 5,
-    "maximum_history_length": 50,
+    "maximum_history_length": 150,
     "map_encoder": {
         "PEDESTRIAN": {
             "heading_state_index": [2, 3],


@@ -21,9 +21,14 @@ These are roughly the steps to go from datagathering to training
 2. Lidar: `uv run trap_lidar --min-box-area 0 --pi LOCAL_IP --smooth-tracks`
 4. Save the tracks emitted by the video or lidar tracker: `uv run trap_track_writer --output-dir EXPERIMENTS/raw/hof-lidar`
    * Each recording adds a new txt file to the `raw` folder.
-4. Parse tracker data to Trajectron format: `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME` Optionally, smooth tracks: `--smooth-tracks`
+4. Parse tracker data to Trajectron format: `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME`
+   * Optionally, smooth tracks: `--smooth-tracks`
+   * Optionally, add variations with noise: `--noise-tracks 2` (creates 2 variations)
+   * Optionally, add variations at a random offset: `--offset-tracks 2` (creates 2 variations)
    * Optionally, add a map: ideally an RGB png: 3 layers of 0-255
    * `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME --smooth-tracks --camera-fps 12 --homography ../DATASETS/NAME/homography.json --calibration ../DATASETS/NAME/calibration.json --filter-displacement 2 --map-img-path ../DATASETS/NAME/map.png`
+   * See [[tests/trajectron_maps.ipynb]] for more info on how to do so (e.g. the homography map/scale settings, which are also set in process_data)
 5. Train Trajectron model `uv run trajectron_train --eval_every 10 --vis_every 1 --train_data_dict NAME_train.pkl --eval_data_dict NAME_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir EXPERIMENTS/models --log_tag _NAME --train_epochs 100 --conf EXPERIMENTS/config.json --batch_size 256 --data_dir EXPERIMENTS/trajectron-data `
 6. Then run:
    * `uv run supervisord`
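Several of these steps consume the same calibration/homography pair. A minimal sketch of loading it in Python, using the Camera.from_paths call that appears in tests/trajectron_maps.ipynb further down this commit (the paths are placeholders, and the trailing 12 is presumably the fps, matching --camera-fps 12 above):

from pathlib import Path
from trap.base import Camera

camera = Camera.from_paths(
    Path("../DATASETS/NAME/calibration.json"),
    Path("../DATASETS/NAME/homography.json"),
    12,  # fps
)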


@@ -28,10 +28,24 @@ command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropp
 directory=%(here)s
 
 [program:tracker]
-# command=uv run trap_tracker --smooth-tracks
-command=uv run trap_lidar --min-box-area 0 --viz --smooth-tracks
+command=uv run trap_tracker --smooth-tracks
+# command=uv run trap_lidar --min-box-area 0 --viz --smooth-tracks
+# environment=DISPLAY=":0"
+directory=%(here)s
+autostart=false
+
+[program:lidar]
+command=uv run trap_lidar --min-box-area 0.1 --viz --smooth-tracks
 environment=DISPLAY=":0"
 directory=%(here)s
+autostart=false
+
+[program:track_writer]
+command=uv run trap_track_writer --output-dir EXPERIMENTS/raw/hof-lidar
+# environment=DISPLAY=":0"
+directory=%(here)s
+autostart=false
+stopwaitsecs=60
 
 [program:stage]
 # command=uv run trap_stage
@@ -46,7 +60,7 @@ directory=%(here)s
 
 [program:predictor]
 command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof3-m2-ud-split-nostep-conv12-f2.0-map-2024-12-29_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
+# command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20251106_11_51_00_hof-lidar-m2-ud-nostep-kalsmooth-noise2-offsets2-f2.0-map-2025-11-06/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof-lidar-m2-ud-nostep-kalsmooth-noise2-offsets2-f2.0-map-2025-11-06_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
 # uv run trajectron_train --continue_training_from EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --eval_every 5 --train_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_train.pkl --eval_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir EXPERIMENTS/models --log_tag _hof3-conv12-f2.0-map-2024-12-27 --train_epochs 10 --conf EXPERIMENTS/config.json --data_dir EXPERIMENTS/trajectron-data --map_encoding
 directory=%(here)s
@@ -58,6 +72,22 @@ autostart=false
 ; can be long to quit if rendering to video file
 stopwaitsecs=60
 
+[program:render_cv]
+command=uv run trap_render_cv
+directory=%(here)s
+environment=DISPLAY=":0"
+autostart=false
+; can be long to quit if rendering to video file
+stopwaitsecs=60
+
+[program:laserspace]
+command=cargo run --release tcp://127.0.0.1:99174
+directory=%(here)s/../laserspace
+environment=DISPLAY=":0"
+autostart=false
+; can be long to quit if rendering to video file
+stopwaitsecs=60
+
 # during development auto restart some services when the code changes
 [program:superfsmon]
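The new lidar, track_writer, and render_cv programs are all autostart=false, so they are started on demand. Besides supervisorctl, that can be scripted over supervisord's XML-RPC interface; a sketch, assuming an [inet_http_server] block on port 9001 that this diff does not show:

from xmlrpc.client import ServerProxy

server = ServerProxy("http://127.0.0.1:9001/RPC2")
for program in ("lidar", "track_writer", "render_cv"):
    server.supervisor.startProcess(program)  # autostart=false, so start explicitly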

tests/stage_lines.ipynb (new file, 248 lines): diff suppressed because one or more lines are too long


@@ -36,9 +36,9 @@
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "\n",
-    "path = Path(\"EXPERIMENTS/raw/hof3/\")\n",
-    "calibration_path = Path(\"../DATASETS/hof3/calibration.json\")\n",
-    "homography_path = Path(\"../DATASETS/hof3/homography.json\")\n",
+    "path = Path(\"../EXPERIMENTS/raw/hof3/\")\n",
+    "calibration_path = Path(\"../../DATASETS/hof3/calibration.json\")\n",
+    "homography_path = Path(\"../../DATASETS/hof3/homography.json\")\n",
     "\n",
     "camera = Camera.from_paths(calibration_path, homography_path, 12)\n"
    ]
@@ -52,13 +52,13 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "../DATASETS/hof-lidar/map-undistorted-H-3.png True\n"
+      "../../DATASETS/hof-lidar/map-undistorted-H-3.png True\n"
      ]
     }
    ],
    "source": [
-    "image_path = Path(\"../DATASETS/hof3/map-undistorted-H-2.png\")\n",
-    "image_path = Path(\"../DATASETS/hof-lidar/map-undistorted-H-3.png\")\n",
+    "image_path = Path(\"../../DATASETS/hof3/map-undistorted-H-2.png\")\n",
+    "image_path = Path(\"../../DATASETS/hof-lidar/map-undistorted-H-3.png\")\n",
     "print(image_path, image_path.exists())\n"
    ]
   },
@@ -79,7 +79,7 @@
    {
     "data": {
      "text/plain": [
-      "<matplotlib.image.AxesImage at 0x7ff399835e10>"
+      "<matplotlib.image.AxesImage at 0x7f4d6e049e10>"
      ]
     },
     "execution_count": 3,
@@ -142,7 +142,7 @@
    {
     "data": {
      "text/plain": [
-      "<matplotlib.image.AxesImage at 0x7ff399703d60>"
+      "<matplotlib.image.AxesImage at 0x7f4d6df23ac0>"
      ]
     },
     "execution_count": 5,


@@ -274,7 +274,7 @@ class CvRenderer(Node):
         render_parser.add_argument('--debug-map',
                             help='specify a map (svg-file) from which to load lines which will be overlayed',
                             type=str,
-                            default="../DATASETS/hof3/map_hof.svg")
+                            default="../DATASETS/hof-lidar/map_hof.svg")
 
         return render_parser
 
     def click_print_position(self, event,x,y,flags,param):
@@ -410,8 +410,9 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
             inv_H = np.linalg.pinv(prediction_frame.H)
             # For debugging:
             # draw_trackjectron_history(img, track, int(track.track_id), conversion)
-            anim_position = get_animation_position(track, frame)
-            draw_track_predictions(img, track, int(track.track_id)+1, frame.camera, conversion, anim_position=anim_position, as_clusters=as_clusters)
+            # anim_position = get_animation_position(track, frame)
+            anim_position = 1
+            draw_track_predictions(img, track, int(track.track_id)+1, prediction_frame.camera, conversion, anim_position=anim_position, as_clusters=as_clusters)
             cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)
             if prediction_frame.maps:
                 for i, m in enumerate(prediction_frame.maps):


@@ -4,18 +4,21 @@ from copy import copy
 from pathlib import Path
 import select
 import socket
-from typing import DefaultDict, Dict, List
+from turtle import update
+from typing import DefaultDict, Dict, List, Tuple
 import dpkt
 import numpy as np
 import open3d as o3d
 import shapely
-from sklearn.cluster import DBSCAN
+from sklearn.cluster import DBSCAN, OPTICS, Birch
 from scipy.spatial.distance import cdist
 import logging
 import time
+from sklearn.decomposition import PCA
 
 from trap.base import Camera, Detection, Frame, Track, UndistortedCamera
 from trap.counter import CounterSender
-from trap.lines import load_lines_from_svg
+from trap.lines import Coordinate, load_lines_from_svg
 from trap.node import Node
 from bytetracker import BYTETracker
 import velodyne_decoder as vd
@@ -196,7 +199,7 @@ def filter_pcd_points_below_z(pcd: o3d.geometry.PointCloud, z_threshold: float)
     return pcd_sel
 
+ASSUMED_FPS = 12
 
 class Lidar(Node):
     def setup(self):
@@ -220,18 +223,27 @@ class Lidar(Node):
         self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())
 
-        self.room_filter = VoxelOccupancyFilter(
+        self.room_filter = AdaptiveVoxelFilter(
             voxel_size=.5,
-            history_size=80,
-            history_interval=8, # not every frame needs to impact history
-            ready_for_action_at=25,
-            static_threshold=0.3
+            nr_of_frames_to_static=120 * ASSUMED_FPS, # (confidence_threshold * update_interval) = nr of frames needed to count as static
+            nr_of_frames_to_unstatic=10 * ASSUMED_FPS,
+            update_interval=20, # not every frame needs to impact history
+            decay_rate=1
         )
+        # self.room_filter = VoxelOccupancyFilter(
+        #     voxel_size=.5,
+        #     history_size=80,
+        #     history_interval=20, # not every frame needs to impact history
+        #     ready_for_action_at=25,
+        #     static_threshold=0.3
+        # )
 
         self.debug_lines = load_lines_from_svg(self.config.debug_map, 100, '') if self.config.debug_map else []
 
         self.map_outline = None
         self.map_outline_volume = None
+        self.map_outline_geom = None
 
         if self.config.map_outlines:
             lines = load_lines_from_svg(self.config.map_outlines, 100, '')
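A quick check of the comment on nr_of_frames_to_static, using the constructor math from the AdaptiveVoxelFilter added further down in this diff (a sketch, not repo code):

ASSUMED_FPS = 12
update_interval, decay_rate = 20, 1

nr_of_frames_to_static = 120 * ASSUMED_FPS  # 1440 frames, i.e. 2 minutes of scans
confidence_threshold = (nr_of_frames_to_static / update_interval) * decay_rate
print(confidence_threshold)  # 72.0: a voxel must be occupied in ~72 sampled scans to turn static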
@@ -240,22 +252,34 @@
             else:
                 if len(lines) > 1:
                     logger.warning("Multiple lines in outline file, using first only")
-                self.map_outline = shapely.Polygon([p.position for p in lines[0].points])
+
+                polygon_points = np.array([[*p.position, 0] for p in lines[0].points])
+
+                # self.map_outline = shapely.Polygon([p.position for p in lines[0].points])
+                boundary_lines = [[i, (i+1) % len(lines[0].points)] for i in range(len(lines[0].points))]
+                line_set = o3d.geometry.LineSet(
+                    points=o3d.utility.Vector3dVector(polygon_points),
+                    lines=o3d.utility.Vector2iVector(boundary_lines)
+                )
+                # Optional: set color
+                line_set.colors = o3d.utility.Vector3dVector([[1, 0, 0] for _ in lines[0].points])
+                self.map_outline_geom = line_set
+
                 self.map_outline_volume = o3d.visualization.SelectionPolygonVolume()
                 self.map_outline_volume.orthogonal_axis = "Z" # extrusion direction of polygon
                 self.map_outline_volume.axis_min = .3 # filter from slightly above ground
                 self.map_outline_volume.axis_max = 2.2
-                polygon_points = np.array([[*p.position, 0] for p in lines[0].points])
                 self.map_outline_volume.bounding_polygon = o3d.utility.Vector3dVector(polygon_points)
 
         if self.config.smooth_tracks:
-            fps = 12 # TODO)) make configurable, or get from cam
-            logger.info(f"Smoother enabled, assuming {fps} fps")
-            self.smoother = Smoother(window_len=fps*5, convolution=False)
+            # TODO)) make configurable
+            logger.info(f"Smoother enabled, assuming {ASSUMED_FPS} fps")
+            self.smoother = Smoother(window_len=ASSUMED_FPS*5, convolution=False)
         else:
             logger.info("Smoother Disabled (enable with --smooth-tracks)")
@@ -334,6 +358,8 @@
             )
             ground_lines.paint_uniform_color([0,1,0])
             vis.add_geometry(ground_lines, False)
 
+        if self.map_outline_geom:
+            vis.add_geometry(self.map_outline_geom, False)
+
         for line in self.debug_lines:
             coords = [[*p.position, 0] for p in line.points]
@@ -455,14 +481,12 @@
         points_2d = project_to_xy(filtered_pcd)
 
-        labels = cluster_2d(
+        clusters = self.cluster_2d(
             points_2d,
-            self.get_setting('lidar.eps', .3),
-            self.get_setting('lidar.min_samples', 8)
         )
 
         # boxes, centroids = get_cluster_boxes(points_2d, labels, min_area= 0.3*0.3)
-        boxes, centroids = get_cluster_boxes(points_2d, labels, min_area= self.get_setting('lidar.min_box_area', .1))
+        boxes, centroids = get_cluster_boxes(clusters, min_area=self.get_setting('lidar.min_box_area', .1))
 
         # append confidence and class (placeholders)
@@ -633,13 +657,62 @@
     argparser.add_argument('--debug-map',
                         help='specify a map (svg-file) from which to load lines which will be overlayed',
                         type=str,
-                        default="../DATASETS/hof3/map_hof.svg")
+                        default="../DATASETS/hof-lidar/map_hof.svg")
     argparser.add_argument('--map-outlines',
                         help='specify a map (svg-file) from which to load the boundary of the map. Any points outside will be filtered (removing floor and walls)',
                         type=Path,
-                        default="../DATASETS/hof3/map_hof_lidar_boundaries.svg")
+                        default="../DATASETS/hof-lidar/map_hof_lidar_boundaries.svg")
 
     return argparser
 
+    # ==== Clustering and Centroids ====
+    def cluster_2d(self, points_xy) -> List[np.ndarray]:
+        """
+        Returns a list of clusters
+        """
+        # skip when there are only a few points
+        # this is a good idea regardless of clustering method
+        if len(points_xy) < self.get_setting('lidar.min_samples', 8):
+            return []
+
+        method = self.get_setting('lidar.cluster.method', default='dbscan')
+        if method == 'dbscan':
+            labels = DBSCAN(
+                eps=self.get_setting('lidar.eps', .132),
+                min_samples=self.get_setting('lidar.min_samples', 6)
+            ).fit_predict(points_xy)
+        elif method == 'optics':
+            labels = OPTICS(
+                max_eps=self.get_setting('lidar.eps', .132),
+                min_samples=self.get_setting('lidar.min_samples', 6), algorithm="ball_tree"
+            ).fit_predict(points_xy)
+        elif method == 'birch':
+            process_subclusters = self.get_setting('lidar.birch_process_subclusters', True)
+            labels = Birch(
+                threshold=self.get_setting('lidar.birch_threshold', 1), # "The radius of the subcluster obtained by merging a new sample" (Birch has no max_eps parameter)
+                branching_factor=self.get_setting('lidar.birch_branching_factor', 50),
+                n_clusters=None if not process_subclusters else DBSCAN(
+                    eps=self.get_setting('lidar.eps', .132),
+                    min_samples=self.get_setting('lidar.min_samples', 6)
+                )
+            ).fit_predict(points_xy)
+        else:
+            self.logger.critical("Unknown clustering method")
+            return []
+
+        # group the per-point labels into one point array per cluster
+        # (iterate unique labels only, otherwise each cluster is appended once per member point)
+        clusters = []
+        for label in np.unique(labels):
+            if label == -1:
+                continue # Skip noise
+            clusters.append(points_xy[labels == label])
+        return clusters
 
 # ==== Ground Removal ====
 def find_ground(pcd: o3d.geometry.PointCloud, distance_threshold=0.1):
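A sketch of what the labels-to-clusters step produces, on synthetic points (blob positions and sizes are made up):

import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
points_xy = np.vstack([
    rng.normal([0, 0], 0.05, (20, 2)),  # one person-sized blob
    rng.normal([2, 1], 0.05, (20, 2)),  # another blob
    [[10.0, 10.0]],                     # a lone outlier, assigned label -1 (noise)
])
labels = DBSCAN(eps=0.132, min_samples=6).fit_predict(points_xy)
clusters = [points_xy[labels == l] for l in np.unique(labels) if l != -1]
print(len(clusters))  # typically 2: the outlier is dropped as noise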
@@ -651,6 +724,39 @@ def find_ground(pcd: o3d.geometry.PointCloud, distance_threshold=0.1):
     non_ground = pcd.select_by_index(inliers, invert=True)
     return np.asarray(non_ground.points), np.asarray(ground)
 
+# def fit_ellipse_pca(points):
+#     """
+#     Fit an ellipse approximation to 2D points using PCA.
+#     Returns: (center_x, center_y), (major_axis_length, minor_axis_length), angle (in radians)
+#     """
+#     # Center the data
+#     center = np.mean(points, axis=0)
+#     centered_points = points - center
+
+#     # Perform PCA
+#     pca = PCA(n_components=2)
+#     pca.fit(centered_points)
+
+#     # Get the principal axes (eigenvectors) and their lengths (eigenvalues)
+#     axes = pca.components_
+#     lengths = np.sqrt(pca.explained_variance_)
+
+#     # The angle of the major axis
+#     angle = np.arctan2(axes[0, 1], axes[0, 0])
+
+#     dx = typical_person_depth * np.cos(angle)
+#     dy = typical_person_depth * np.sin(angle)
+#     adjusted_center = center + np.array([dx, dy])
+
+#     return center, lengths, angle
+
+# def weighted_mean(points, weights):
+#     return np.average(points, axis=0, weights=weights)
+
+# # Example: Assign higher weights to points farther from the LiDAR
+# weights = 1 / np.linalg.norm(points - lidar_position, axis=1) # Inverse distance weighting
+# weighted_center = weighted_mean(points, weights)
+# print("Weighted center:", weighted_center)
 
 def segment_planes(pcd, num_planes = 5, num_iterations=100, distance_threshold=0.1) -> List[o3d.geometry.PointCloud]:
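For reference, the RANSAC call that find_ground() and segment_planes() wrap is Open3D's segment_plane; a self-contained sketch on a synthetic cloud (not repo data):

import numpy as np
import open3d as o3d

pts = np.random.rand(500, 3)
pts[:400, 2] = 0.0  # 400 points on a flat "floor", 100 scattered above it
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(pts))

plane_model, inliers = pcd.segment_plane(distance_threshold=0.1, ransac_n=3, num_iterations=100)
non_ground = pcd.select_by_index(inliers, invert=True)
print(plane_model)             # [a, b, c, d] of ax + by + cz + d = 0, roughly the z=0 plane
print(len(non_ground.points))  # the points left once the floor plane is removed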
@@ -689,15 +795,6 @@ def segment_planes(pcd, num_planes = 5, num_iterations=100, distance_threshold=0
     return planes
 
-# ==== Clustering and Centroids ====
-def cluster_2d(points_xy, eps=0.5, min_samples=5):
-    if len(points_xy) < min_samples:
-        return []
-    db = DBSCAN(eps=eps, min_samples=min_samples).fit(points_xy)
-    return db.labels_
-
 def split_cluster_by_convex_hull(points, max_hull_area):
     """
     Splits a cluster of points based on its convex hull area.
@@ -743,18 +840,13 @@ def split_cluster_by_convex_hull(points, max_hull_area):
-def get_cluster_boxes(points_xy, labels, min_area = 0):
-    if not len(labels):
+def get_cluster_boxes(clusters, min_area = 0):
+    if not len(clusters):
         return np.empty([0,4]), np.empty([0,2])
 
     boxes = [] # Each box: [x1, y1, x2, y2]
     centroids = []
 
-    unique_labels = set(labels)
-    for label in unique_labels:
-        if label == -1:
-            continue # Skip noise
-        cluster = points_xy[labels == label]
+    for cluster in clusters:
         x_min, y_min = cluster.min(axis=0)
         x_max, y_max = cluster.max(axis=0)
@@ -928,57 +1020,94 @@ class VoxelOccupancyFilter:
         return filtered_pcd, dropped_indexes
 
-class DecayingVoxelOccupancyFilter:
-    def __init__(self, voxel_size=0.1, decay_rate=0.01, appear_increment=0.01, static_threshold=0.2):
-        """
-        WIP!! Initializes the voxel occupancy filter with decay.
-
-        Parameters:
-            voxel_size (float): Size of each voxel in meters.
-            decay_rate (float): Amount to decay occupancy per frame.
-            appear_increment (float): Value added when a voxel appears in current frame.
-            static_threshold (float): Threshold to consider voxel as static.
-        """
-        self.voxel_size = voxel_size
-        self.decay_rate = decay_rate
-        self.appear_increment = appear_increment
-        self.static_threshold = static_threshold
-        self.voxel_occupancy = {} # dict: voxel tuple -> occupancy score
-
-    def _point_cloud_to_voxel_set(self, pcd):
-        points = np.asarray(pcd.points)
-        voxel_coords = np.floor(points / self.voxel_size).astype(np.int32)
-        return set(map(tuple, voxel_coords))
-
-    def remove_static(self, current_pcd):
-        current_voxels = self._point_cloud_to_voxel_set(current_pcd)
-
-        # Decay all existing voxel scores
-        for voxel in list(self.voxel_occupancy.keys()):
-            self.voxel_occupancy[voxel] *= (1.0 - self.decay_rate)
-            if self.voxel_occupancy[voxel] < self.appear_increment:
-                del self.voxel_occupancy[voxel] # clean up low-score entries
-
-        # Update occupancy with current frame
-        for voxel in current_voxels:
-            self.voxel_occupancy[voxel] = self.voxel_occupancy.get(voxel, 0.0) + self.appear_increment
-            self.voxel_occupancy[voxel] = min(self.voxel_occupancy[voxel], 1.0) # clamp to max=1.0
-
-        # Identify static voxels
-        static_voxels = {v for v, score in self.voxel_occupancy.items() if score >= self.static_threshold}
-
-        # Filter points
-        filtered_points = []
-        for pt in np.asarray(current_pcd.points):
-            voxel = tuple(np.floor(pt / self.voxel_size).astype(np.int32))
-            if voxel not in static_voxels:
-                filtered_points.append(pt)
-
-        filtered_pcd = o3d.geometry.PointCloud()
-        if filtered_points:
-            filtered_pcd.points = o3d.utility.Vector3dVector(np.array(filtered_points))
-
-        return filtered_pcd
+class AdaptiveVoxelFilter:
+    def __init__(
+        self,
+        voxel_size=0.1,
+        nr_of_frames_to_static=720,
+        nr_of_frames_to_unstatic=720,
+        decay_rate=1,
+        update_interval=10,
+    ):
+        """
+        Initialize the adaptive voxel filter.
+
+        Parameters:
+            voxel_size (float): Size of each voxel in meters.
+            nr_of_frames_to_static (int): nr of frames a voxel must be occupied to count as static (internally: confidence_threshold = (nr_of_frames_to_static / update_interval) * decay_rate)
+            nr_of_frames_to_unstatic (int): nr of consecutive empty frames needed for a static voxel to become unstatic again (sets max_occupancy_score)
+            decay_rate (int): How much to decrement confidence for unoccupied voxels per update.
+            update_interval (int): Update static map every N scans.
+        """
+        self.confidence_threshold = (nr_of_frames_to_static / update_interval) * decay_rate
+        self.max_occupancy_score = (nr_of_frames_to_unstatic / update_interval) * decay_rate + self.confidence_threshold
+        self.voxel_size = voxel_size
+        self.decay_rate = decay_rate
+        self.update_interval = update_interval
+
+        # Static map: {voxel: confidence}
+        self.static_map: Dict[Tuple[int, int], int] = defaultdict(int)
+
+        # Counter for update interval
+        self.scan_counter = 0
+        self.initialised = False
+
+    def remove_static(self, current_pcd):
+        """
+        Remove static points from the current point cloud.
+
+        Parameters:
+            current_pcd (open3d.geometry.PointCloud): Current point cloud.
+
+        Returns:
+            filtered_pcd (open3d.geometry.PointCloud): Point cloud with static points removed.
+            dropped_indices (list): Indices of dropped (static) points.
+        """
+        points = np.asarray(current_pcd.points)
+
+        filtered_points = []
+        dropped_indices = []
+        current_voxel_set = set()
+
+        # Check each point in the current scan
+        for i, pt in enumerate(points):
+            voxel = tuple(np.floor(pt / self.voxel_size).astype(np.int32))
+            current_voxel_set.add(voxel)
+            if voxel in self.static_map and self.static_map[voxel] > self.confidence_threshold:
+                dropped_indices.append(i)
+            else:
+                filtered_points.append(pt)
+
+        # Update static map and confidence
+        self.scan_counter += 1
+        if self.scan_counter % self.update_interval == 0:
+            if not self.initialised and self.scan_counter > self.confidence_threshold:
+                self.initialised = True
+
+            # Add new voxels to static map
+            for voxel in current_voxel_set:
+                if self.static_map[voxel] < self.max_occupancy_score:
+                    self.static_map[voxel] += 1
+
+            # Decay confidence for voxels not in current scan
+            for voxel in list(self.static_map.keys()):
+                if voxel not in current_voxel_set:
+                    self.static_map[voxel] -= self.decay_rate
+                    if self.static_map[voxel] <= 0:
+                        del self.static_map[voxel]
+
+        # Create filtered point cloud
+        filtered_pcd = o3d.geometry.PointCloud()
+        if filtered_points:
+            filtered_pcd.points = o3d.utility.Vector3dVector(np.array(filtered_points))
+
+        return filtered_pcd, dropped_indices
 
 # def point_cloud_to_voxel_set(pcd, voxel_size):
 #     points = np.asarray(pcd.points)
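A usage sketch of the new filter under the defaults from setup() above (synthetic scans, not repo data): a voxel that stays occupied long enough crosses the confidence threshold and its points start being dropped.

import numpy as np
import open3d as o3d

filt = AdaptiveVoxelFilter(voxel_size=.5, nr_of_frames_to_static=120 * 12,
                           nr_of_frames_to_unstatic=10 * 12, update_interval=20, decay_rate=1)
wall = np.tile([[5.0, 0.0, 1.0]], (50, 1))  # a wall that never moves
for _ in range(1500):                       # more than 120 * 12 frames: enough to turn static
    scan = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(wall))
    filtered, dropped = filt.remove_static(scan)
print(len(dropped))  # 50: the wall is now classified as static and filtered out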


@@ -589,6 +589,20 @@ class FadeOutLine(LineAnimator):
         return target_line
 
+class SimplifyLine(LineAnimator):
+    """
+    Simplify the line
+    """
+    def __init__(self, target_line = None, factor: float = .003):
+        super().__init__(target_line)
+        self.factor = factor
+        self.ready = True # filter holds no state, so always ready
+
+    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        return target_line.as_simplified(SimplifyMethod.RDP, self.factor)
+
 class FadeOutJitterLine(LineAnimator):
     """
     Fade the line providing an alpha, 1 by default
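SimplifyLine delegates to RenderableLine.as_simplified(SimplifyMethod.RDP, factor); for intuition, the same Ramer-Douglas-Peucker idea sketched with shapely (illustration only, not the repo's implementation):

from shapely.geometry import LineString

line = LineString([(0, 0), (0.001, 0.5), (0, 1), (0, 2)])
print(len(line.simplify(0.003).coords))  # 2: near-collinear vertices are dropped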
@@ -978,7 +992,46 @@ class DashedLine(LineAnimator):
         return RenderableLine.from_multilinestring(multilinestring, color)
 
+class StartFromClosestPoint(LineAnimator):
+    """
+    Draw the line starting from the point closest to a given point
+    """
+    def __init__(self, target_line = None, from_point: Optional[Tuple[float,float]] = None):
+        super().__init__(target_line)
+        self.from_point = from_point
+        self.ready = True # static filter, always ready
+
+    def disable(self):
+        self.from_point = None
+
+    def set_point(self, point: Tuple[float,float]):
+        self.from_point = point
+
+    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        """
+        Drop all points before the one closest to from_point
+        """
+        if len(target_line) < 1 or self.from_point is None:
+            return target_line
+
+        distance = math.inf
+        idx = 0
+        from_point = np.array(self.from_point)
+        for i, point in enumerate(target_line.points):
+            to_point = np.array(point.position)
+            new_distance = np.linalg.norm(from_point - to_point)
+            if new_distance < distance:
+                distance = new_distance
+                idx = i
+            else:
+                break
+
+        # print(idx, target_line.points[idx])
+        return RenderableLine(target_line.points[idx:])
+
 class RotatingLine(LineAnimator):
     """
@@ -1013,6 +1066,8 @@ class RotatingLine(LineAnimator):
             # associated_diff = self.prediction_diffs[a]
             # progress = associated_diff.nr_of_passed_points()
 
+        is_ready: List[bool] = []
+
         for i, (target_point, drawn_point) in enumerate(zip(target_line.points, list(self.drawn_points))):
             # TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
             decay = max(3, (18/i) if i else 10) # points further away move with more delay
@@ -1026,18 +1081,77 @@
             if abs(drawn_angle - pred_angle) > math.pi:
                 pred_angle -= math.pi * 2
             angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
-            x, y = relativePolarToPoint(origin, r, angle)
+            x, y = relativePolarToPoint(origin.position, r, angle)
             r = exponentialDecay(drawn_point.color.red, target_point.color.red, decay, dt)
             g = exponentialDecay(drawn_point.color.green, target_point.color.green, decay, dt)
             b = exponentialDecay(drawn_point.color.blue, target_point.color.blue, decay, dt)
             a = exponentialDecay(drawn_point.color.alpha, target_point.color.alpha, decay, dt)
             self.drawn_points[i].position = (x, y)
             self.drawn_points[i].color = SrgbaColor(r, g, b, a)
+            is_ready.append(np.isclose(drawn_point.position, target_point.position, atol=.05).all())
+
+        self.ready = all(is_ready)
 
         return RenderableLine(self.drawn_points)
 
+class NoodleWiggler(LineAnimator):
+    """When a line is 'noodling' don't draw it as a whole, but sway it animated
+    """
+    def __init__(self, target_line = None):
+        super().__init__(target_line)
+
+    def apply(self, target_line, dt):
+        return target_line
+
+    @classmethod
+    def detect_noodling_sections(cls, line, segment_length=0.1, ratio_threshold=2.0):
+        """
+        Detects noodling sections in a LineString using the ratio of actual length to straight-line distance.
+
+        Args:
+            line (LineString): Input LineString.
+            segment_length (float): Length of each segment as a fraction of the total line length.
+            ratio_threshold (float): Threshold for the ratio (actual_length / straight_line_distance).
+
+        Returns:
+            list: List of noodling segments (as LineStrings).
+        """
+        noodling_sections = []
+        segment_start = 0.0
+
+        while segment_start < 1.0:
+            segment_end = min(segment_start + segment_length, 1.0)
+            segment = substring(line, segment_start, segment_end, normalized=True)
+
+            # Calculate straight-line distance between start and end points
+            start_point = Point(segment.coords[0])
+            end_point = Point(segment.coords[-1])
+            straight_line_distance = start_point.distance(end_point)
+
+            # Calculate actual length of the segment
+            actual_length = segment.length
+
+            # Check if the ratio exceeds the threshold
+            if straight_line_distance > 0 and (actual_length / straight_line_distance) > ratio_threshold:
+                noodling_sections.append(segment)
+
+            segment_start = segment_end
+
+        return noodling_sections
+
+# # Example usage:
+# line = LineString([(0, 0), (0.1, 0.1), (0.2, 0.2), (0.3, 0.1), (0.4, 0.2), (1, 1)])
+# noodling_segments = detect_noodling_sections(line, segment_length=0.2, ratio_threshold=2.0)
+# for i, segment in enumerate(noodling_segments):
+#     print(f"Noodling segment {i+1}: {segment}")
 
 IndexAndOffset = Tuple[int, float]


@@ -87,6 +87,7 @@ def get_maps_for_input(input_dict, scene: Scene, hyperparams, device):
         heading_angle = None
 
         scene_map: ImageMap = scene.map[node.type]
+        scene_map.set_bounds() # update old pickled maps
         # map_point = x[-1, :2]
         map_point = x[:2]
         # map_point = x[:2].clip(0) # prevent crash for out of map point.


@@ -8,6 +8,7 @@ import time
 from xml.dom.pulldom import default_bufsize
 from attr import dataclass
 import cv2
+import noise
 import numpy as np
 import pandas as pd
 import dill
@@ -18,7 +19,7 @@ from typing import Dict, List, Optional
 from trap.base import Track
 from trap.config import CameraAction, HomographyAction
 from trap.frame_emitter import Camera
-from trap.tracker import FinalDisplacementFilter, Smoother, TrackReader
+from trap.tracker import FinalDisplacementFilter, Noiser, RandomOffset, Smoother, TrackReader
 
 #sys.path.append("../../")
 from trajectron.environment import Environment, Scene, Node
@@ -73,22 +74,29 @@ class TrackIteration:
     smooth: bool
     step_size: int
     step_offset: int
+    noisy: bool = False
+    offset: bool = False
 
     @classmethod
-    def iteration_variations(cls, smooth = True, toggle_smooth=True, sample_step_size=1):
+    def iteration_variations(cls, smooth = True, toggle_smooth=True, sample_step_size=1, noisy_variations=0, offset_variations=0):
         iterations: List[TrackIteration] = []
         for i in range(sample_step_size):
-            iterations.append(TrackIteration(smooth, sample_step_size, i))
-            if toggle_smooth:
-                iterations.append(TrackIteration(not smooth, sample_step_size, i))
+            for n in range(noisy_variations+1):
+                for f in range(offset_variations+1):
+                    iterations.append(TrackIteration(smooth, sample_step_size, i, noisy=bool(n), offset=bool(f)))
+                    if toggle_smooth:
+                        iterations.append(TrackIteration(not smooth, sample_step_size, i, noisy=bool(n), offset=bool(f)))
         return iterations
 
 # maybe_makedirs('trajectron-data')
 # for desired_source in [ 'hof2', ]:# ,'hof-maskrcnn', 'hof-yolov8', 'VIRAT-0102-parsed', 'virat-resnet-keypoints-full']:
 
-def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, cm_to_m: bool, center_data: bool, bin_positions: bool, camera: Camera, step_size: int, filter_displacement:float, map_img_path: Optional[Path]):
+def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, noise_tracks: int, offset_tracks: int, center_data: bool, bin_positions: bool, camera: Camera, step_size: int, filter_displacement:float, map_img_path: Optional[Path]):
     name += f"-nostep" if step_size == 1 else f"-step{step_size}"
-    name += f"-conv{smooth_window}" if smooth_tracks else f"-nosmooth"
+    # name += f"-conv{smooth_window}" if smooth_tracks else f"-nosmooth"
+    name += f"-kalsmooth" if smooth_tracks else f"-nosmooth"
+    name += f"-noise{noise_tracks}" if noise_tracks else f""
+    name += f"-offsets{offset_tracks}" if offset_tracks else f""
    name += f"-f{filter_displacement}" if filter_displacement > 0 else ""
    name += "-map" if map_img_path else "-nomap"
    name += f"-{datetime.date.today()}"
@@ -128,8 +136,10 @@
     skipped_for_error = 0
     created = 0
 
-    smoother = Smoother(window_len=smooth_window, convolution=True) if smooth_tracks else None
+    # smoother = Smoother(window_len=smooth_window, convolution=True) if smooth_tracks else None
+    smoother = Smoother(convolution=False) if smooth_tracks else None
+    noiser = Noiser(amplitude=.1) if noise_tracks else None
 
     reader = TrackReader(src_dir, camera.fps)
     tracks = [t for t in reader]
     print(f"Unfiltered total: {len(tracks)} tracks")
@@ -142,9 +152,9 @@
     bar = tqdm.tqdm(total=total)
 
     destinations = {
-        'train': int(total * .8),
-        'val': int(total * .12),
-        'test': int(total * .08),
+        'train': int(total * .91),
+        'val': int(total * .08),
+        'test': int(total * .01), # I don't really care about this
     }
 
     max_track = reader.get(str(max([int(k) for k in reader._tracks.keys()])))
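Both the old and the new fractions sum to 1.0, but int() truncates each share separately, so a few tracks can end up in no split; a small check:

total = 57
shares = [int(total * f) for f in (.91, .08, .01)]
print(shares, sum(shares))  # [51, 4, 0] 55: 2 of 57 tracks fall outside every split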
@@ -170,7 +180,7 @@
     print(f"Camera FPS: {camera.fps}, actual fps: {camera.fps/step_size} (or {(1/camera.fps)*step_size})")
 
-    names = {}
+    names: Dict[str, Path] = {}
     max_pos = 0
 
     for data_class, nr_of_items in destinations.items():
@@ -190,7 +200,9 @@
         # scene = None
 
         scene_nodes = defaultdict(lambda: [])
-        variations = TrackIteration.iteration_variations(smooth_tracks, False, step_size)
+        variations = TrackIteration.iteration_variations(smooth_tracks, True, step_size, noise_tracks, offset_tracks)
+
+        print(f"Create {len(variations)} variations")
 
         for i, track in enumerate(sets[data_class]):
             bar.update()
@@ -221,12 +233,17 @@
             for variation_nr, iteration_settings in enumerate(variations):
+                track = interpolated_track
+                if iteration_settings.noisy:
+                    track = noiser.apply_track(track)
+                if iteration_settings.offset:
+                    offset = RandomOffset(amplitude=.1)
+                    track = offset.apply_track(track)
 
                 if iteration_settings.smooth:
-                    track = smoother.smooth_track(interpolated_track)
+                    track = smoother.smooth_track(track)
                     # track = Smoother(smooth_window, False).smooth_track(track)
-                else:
-                    track = interpolated_track # TODO)) Copy & move smooth outside iter loop
 
                 c = time.time()
 
                 if iteration_settings.step_size > 1:
@@ -310,9 +327,26 @@
         with open(data_dict_path, 'wb') as f:
             dill.dump(env, f, protocol=dill.HIGHEST_PROTOCOL)
 
+    bar.close()
+
     # print(f"Linear: {l}")
     # print(f"Non-Linear: {nl}")
     print(f"error: {skipped_for_error}, used: {created}")
+    print("Run with")
+    # set eval_every very high, because we're not interested in theoretical evaluations, and we don't mind overfitting
+    print(f"""
+        uv run trajectron_train --eval_every 200 \\
+        --train_data_dict {names['train'].name} \\
+        --eval_data_dict {names['val'].name} \\
+        --offline_scene_graph no --preprocess_workers 8 \\
+        --log_dir EXPERIMENTS/models \\
+        --log_tag _{name} \\
+        --train_epochs 100 \\
+        --conf EXPERIMENTS/config.json \\
+        --data_dir {dst_dir} \\
+        {"--map_encoding" if map_img_path else ""}
+    """)
 
     return names
 
 def main():
def main(): def main():
@ -321,6 +355,8 @@ def main():
parser.add_argument("--dst-dir", "-d", type=Path, required=True, help="Destination directory to store parsed .pkl files (typically 'trajectron-data')") parser.add_argument("--dst-dir", "-d", type=Path, required=True, help="Destination directory to store parsed .pkl files (typically 'trajectron-data')")
parser.add_argument("--name", "-n", type=str, required=True, help="Identifier to prefix the output .pkl files with (result is NAME-train.pkl, NAME-test.pkl)") parser.add_argument("--name", "-n", type=str, required=True, help="Identifier to prefix the output .pkl files with (result is NAME-train.pkl, NAME-test.pkl)")
parser.add_argument("--smooth-tracks", action='store_true', help=f"Enable smoother. Set to {smooth_window} frames") parser.add_argument("--smooth-tracks", action='store_true', help=f"Enable smoother. Set to {smooth_window} frames")
parser.add_argument("--noise-tracks", type=int, default=0, help=f"Enable Noiser. provide number for how many noisy variations")
parser.add_argument("--offset-tracks", type=int, default=0, help=f"Enable Offset. provide number for how many random offset variations")
parser.add_argument("--cm-to-m", action='store_true', help=f"If homography is in cm, convert tracked points to meter for beter results") parser.add_argument("--cm-to-m", action='store_true', help=f"If homography is in cm, convert tracked points to meter for beter results")
parser.add_argument("--center-data", action='store_true', help=f"Normalise around center") parser.add_argument("--center-data", action='store_true', help=f"Normalise around center")
parser.add_argument("--bin-positions", action='store_true', help=f"Experiment to put round positions to a grid") parser.add_argument("--bin-positions", action='store_true', help=f"Experiment to put round positions to a grid")
@@ -358,7 +394,8 @@
         args.dst_dir,
         args.name,
         args.smooth_tracks,
-        args.cm_to_m,
+        args.noise_tracks,
+        args.offset_tracks,
         args.center_data,
         args.bin_positions,
         args.camera,


@@ -38,13 +38,27 @@ class Settings(Node):
         dpg.add_text(f"Settings from {self.config.settings_file}")
         dpg.add_button(label="Save", callback=self.save)
 
-        with dpg.window(label="Lidar", pos=(0, 150)):
+        with dpg.window(label="Stage", pos=(0, 150)):
+            self.register_setting(f'stage.fps', dpg.add_checkbox(label="FPS cap", default_value=self.get_setting(f'stage.fps', 30), callback=self.on_change))
+
+        with dpg.window(label="Lidar", pos=(0, 250)):
             self.register_setting(f'lidar.crop_map_boundaries', dpg.add_checkbox(label="crop_map_boundaries", default_value=self.get_setting(f'lidar.crop_map_boundaries', True), callback=self.on_change))
             self.register_setting(f'lidar.viz_cropping', dpg.add_checkbox(label="viz_cropping", default_value=self.get_setting(f'lidar.viz_cropping', True), callback=self.on_change))
             self.register_setting(f'lidar.tracking_enabled', dpg.add_checkbox(label="tracking_enabled", default_value=self.get_setting(f'lidar.tracking_enabled', True), callback=self.on_change))
+            dpg.add_separator(label="Clustering")
+            cluster_methods = ("birch", "optics", "dbscan")
+            self.register_setting('lidar.cluster.method', dpg.add_combo(label="Method", items=cluster_methods, default_value=self.get_setting('lidar.cluster.method', default='dbscan'), callback=self.on_change))
             self.register_setting(f'lidar.eps', dpg.add_slider_float(label="DBSCAN epsilon", default_value=self.get_setting(f'lidar.eps', 0.3), max_value=1, callback=self.on_change))
             self.register_setting(f'lidar.min_samples', dpg.add_slider_int(label="DBSCAN min_samples", default_value=self.get_setting(f'lidar.min_samples', 8), max_value=30, callback=self.on_change))
-            self.register_setting(f'lidar.min_box_area', dpg.add_slider_float(label="DBSCAN min_samples", default_value=self.get_setting(f'lidar.min_box_area', .1), min_value=0, max_value=1, callback=self.on_change))
+            dpg.add_text("When using BIRCH, the resulting subclusters can be postprocessed by DBSCAN:")
+            self.register_setting('lidar.birch_process_subclusters', dpg.add_checkbox(label="Process subclusters", default_value=self.get_setting('lidar.birch_process_subclusters', True), callback=self.on_change))
+            self.register_setting('lidar.birch_threshold', dpg.add_slider_float(label="Threshold", default_value=self.get_setting('lidar.birch_threshold', 1), max_value=2.5, callback=self.on_change))
+            self.register_setting('lidar.birch_branching_factor', dpg.add_slider_int(label="Branching factor", default_value=self.get_setting('lidar.birch_branching_factor', 50), max_value=100, callback=self.on_change))
+            dpg.add_separator(label="Cluster filter")
+            self.register_setting(f'lidar.min_box_area', dpg.add_slider_float(label="min_box_area", default_value=self.get_setting(f'lidar.min_box_area', .1), min_value=0, max_value=1, callback=self.on_change))
 
         for lidar in ["192.168.0.16", "192.168.0.10"]:
             name = lidar.replace(".", "_")

@@ -22,7 +22,7 @@ import zmq
 from trap.anomaly import DiffSegment, calc_anomaly, calculate_loitering_scores
 from trap.base import CameraAction, DataclassJSONEncoder, Frame, HomographyAction, ProjectedTrack, Track
 from trap.counter import CounterSender
-from trap.lines import AppendableLine, AppendableLineAnimator, Coordinate, CoordinateSpace, CropAnimationLine, CropLine, DashedLine, DeltaT, FadeOutJitterLine, FadeOutLine, FadedEndsLine, FadedTailLine, LineAnimationStack, LineAnimator, NoiseLine, RenderableLayers, RenderableLine, RenderableLines, RotatingLine, SegmentLine, SimplifyMethod, SrgbaColor, StaticLine, layers_to_message, load_lines_from_svg
+from trap.lines import AppendableLine, AppendableLineAnimator, Coordinate, CoordinateSpace, CropAnimationLine, CropLine, DashedLine, DeltaT, FadeOutJitterLine, FadeOutLine, FadedEndsLine, FadedTailLine, LineAnimationStack, LineAnimator, NoiseLine, RenderableLayers, RenderableLine, RenderableLines, RotatingLine, SegmentLine, SimplifyLine, SimplifyMethod, SrgbaColor, StartFromClosestPoint, StaticLine, layers_to_message, load_lines_from_svg
 from trap.node import Node
 from trap.track_history import TrackHistory
@@ -316,6 +316,7 @@ def build_line_others():
     others_color = SrgbaColor(1,1,0,1)
     line_others = LineAnimationStack(StaticLine([], others_color))
     # line_others.add(SegmentLine(line_others.tail, duration=3, anim_f=partial(SegmentLine.anim_grow, in_and_out=True, max_len=5)))
+    line_others.add(SimplifyLine(line_others.tail, 0.001)) # Simplify before effects, so they don't distort
     line_others.add(CropAnimationLine(line_others.tail, lambda dt: 10 + math.sin(dt/4) * 70, assume_fps=TRACK_ASSUMED_FPS*2)) # speed up
     line_others.add(NoiseLine(line_others.tail, amplitude=0, t_factor=.3))
     # line_others.add(DashedLine(line_others.tail, t_factor=4, loop_offset=True))
@@ -332,7 +333,7 @@ class DrawnScenario(Scenario):
     This distinction is only for ordering the code
     """
-    MAX_HISTORY = 200 # points of history of trajectory to display (preventing too long lines)
+    MAX_HISTORY = 130 # points of history of trajectory to display (preventing too long lines)
     CUT_GAP = 5 # when adding a new prediction, keep the existing prediction until that point + this CUT_GAP margin
 
     def __init__(self, track_id):
@@ -343,9 +344,9 @@
         history_color = SrgbaColor(1.,0.,1.,1.)
         history = StaticLine([], history_color)
         self.line_history = LineAnimationStack(history)
-        self.line_history.add(AppendableLineAnimator(self.line_history.tail, draw_decay_speed=25))
+        self.line_history.add(AppendableLineAnimator(self.line_history.tail, draw_decay_speed=120))
         self.line_history.add(CropLine(self.line_history.tail, self.MAX_HISTORY))
+        self.line_history.add(SimplifyLine(self.line_history.tail, 0.003)) # Simplify before effects, so they don't distort
         self.line_history.add(FadedTailLine(self.line_history.tail, TRACK_FADE_AFTER_DURATION * TRACK_ASSUMED_FPS, TRACK_END_FADE))
         self.line_history.add(NoiseLine(self.line_history.tail, amplitude=0, t_factor=.3))
         self.line_history.add(FadeOutJitterLine(self.line_history.tail, frequency=5, t_factor=.5))
@@ -353,10 +354,13 @@
         self.prediction_color = SrgbaColor(0,1,0,1)
         self.line_prediction = LineAnimationStack(StaticLine([], self.prediction_color))
         self.line_prediction.add(RotatingLine(self.line_prediction.tail, decay_speed=16))
-        self.line_prediction.get(RotatingLine).skip = True
+        self.line_prediction.get(RotatingLine).skip = False
         self.line_prediction.add(SegmentLine(self.line_prediction.tail, duration=.5))
-        self.line_prediction.add(DashedLine(self.line_prediction.tail, t_factor=4, loop_offset=True))
-        self.line_prediction.get(DashedLine).skip = True
+        self.line_prediction.get(SegmentLine).skip = True
+        self.line_prediction.add(SimplifyLine(self.line_prediction.tail, 0.003)) # Simplify before effects, so they don't distort
+        self.line_prediction.add(DashedLine(self.line_prediction.tail, dash_len=.6, gap_len=0.9, t_factor=2, loop_offset=True))
+        self.line_prediction.get(DashedLine).skip = False
+        self.line_prediction.add(StartFromClosestPoint(self.line_prediction.tail))
         self.line_prediction.add(FadeOutLine(self.line_prediction.tail))
 
         # when rendering tracks from others similar/close to the current one
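Order matters in these stacks, since each animator wraps the previous tail. A sketch of the resulting flow for the prediction line (names taken from this diff; the arrows are illustration, not repo code):

# StaticLine (raw prediction points)
#   -> RotatingLine          (eases points toward new targets in polar space)
#   -> SegmentLine           (skipped)
#   -> SimplifyLine(0.003)   (simplify before effects, so they don't distort)
#   -> DashedLine            (dash_len=.6, gap_len=0.9)
#   -> StartFromClosestPoint (truncate to the walker's current position)
#   -> FadeOutLine           (alpha tied to lost_factor, see below)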
@@ -374,41 +378,55 @@ class DrawnScenario(Scenario):
         self.line_history.root.points = self.track.projected_history

         if len(self.prediction_tracks):
+            now_p = np.array(self.line_history.root.points[-1])
+            prev_p = np.array(self.line_history.root.points[-1 * min(4, len(self.line_history.root.points))])
+            diff = now_p - prev_p
+            self.line_prediction.get(StartFromClosestPoint).set_point(now_p + diff)
+            # print("set origin", self.line_history.root.points[-1])

             # TODO: only when animation is ready for it? or collect lines
             if not self.active_ptrack:
+                # draw the first prediction
                 self.active_ptrack = self.prediction_tracks[-1]
+                self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
                 self.line_prediction.start() # reset positions
             elif self.active_ptrack._track.updated_at < self.prediction_tracks[-1]._track.updated_at:
                 # stale prediction
                 # switch only if drawing animation is ready
-                if self.line_prediction.is_ready():
-                    self.active_ptrack = self.prediction_tracks[-1]
-                    self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_arrive, length=.3)
-                    self.line_prediction.get(SegmentLine).duration = .5
-                    self.line_prediction.get(DashedLine).skip = True
-                    # print('restart')
-                    self.line_prediction.start() # reset positions
-                    # print(self.line_prediction.get(SegmentLine).running_for())
-                else:
-                    if self.line_prediction.is_ready():
-                        # little hack: check if dashedline skips, to only run this once per animation:
-                        if self.line_prediction.get(DashedLine).skip:
-                            # no new yet, but ready with anim, start stage 2
-                            self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow)
-                            self.line_prediction.get(SegmentLine).duration = 1
-                            # self.line_prediction.get(SegmentLine).skip = True
-                            self.line_prediction.get(DashedLine).skip = False
-                            self.line_prediction.start()
-                    elif self.line_prediction.get(SegmentLine).duration != 2: # hack to only play once
-                        self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow, reverse=True)
-                        self.line_prediction.get(SegmentLine).duration = 2
-                        self.line_prediction.get(SegmentLine).start()
+                # if self.line_prediction.is_ready():
+                self.active_ptrack = self.prediction_tracks[-1]
+                self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
+                self.line_prediction.start() # reset positions
+                # self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_arrive, length=.3)
+                # self.line_prediction.get(SegmentLine).duration = .5
+                # self.line_prediction.get(DashedLine).skip = True
+                # # print('restart')
+                # self.line_prediction.start() # reset positions
+                # # print(self.line_prediction.get(SegmentLine).running_for())
+                # else:
+                #     if self.line_prediction.is_ready():
+                #         # little hack: check if dashedline skips, to only run this once per animation:
+                #         if self.line_prediction.get(DashedLine).skip:
+                #             # no new yet, but ready with anim, start stage 2
+                #             self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow)
+                #             self.line_prediction.get(SegmentLine).duration = 1
+                #             # self.line_prediction.get(SegmentLine).skip = True
+                #             self.line_prediction.get(DashedLine).skip = False
+                #             self.line_prediction.start()
+                #     elif self.line_prediction.get(SegmentLine).duration != 2: # hack to only play once
+                #         self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow, reverse=True)
+                #         self.line_prediction.get(SegmentLine).duration = 2
+                #         self.line_prediction.get(SegmentLine).start()

             # self.line_prediction_dashed.set_offset_t(self.active_ptrack._track.track_update_dt() * 4)
-            self.line_prediction.root.points = self.active_ptrack._track.predictions[0]

         # special case: LOITERING
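Note: the `now_p + diff` anchor above attaches the prediction line slightly ahead of the walker instead of at (or behind) the last tracked point: the displacement over the last few history points is added once more to the newest point, a simple linear extrapolation. A minimal standalone sketch of that idea (names are illustrative, not from the codebase):

```python
import numpy as np

def extrapolated_anchor(points, lookback: int = 4) -> np.ndarray:
    """Anchor a prediction line one 'step' ahead of the newest point,
    where a step is the displacement over the last few history points."""
    now_p = np.array(points[-1])
    prev_p = np.array(points[-min(lookback, len(points))])
    return now_p + (now_p - prev_p)

# e.g. a walker moving along x: the anchor lands ahead of the last point
print(extrapolated_anchor([(0, 0), (1, 0), (2, 0), (3, 0)]))  # [6. 0.]
```

Looking up to four points back smooths the estimate against per-frame tracker jitter.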
@@ -428,8 +446,6 @@ class DrawnScenario(Scenario):
             # print(transition > .999, self.is_running, current_position_rounded, time_diff)
             if transition > .999 and self.is_running and not all(self.tracks_to_self_pos == current_position_rounded) and time_diff > 5: # only do these expensive calls when running
-                logger.info(f"Fetch similar tracks for {self.track_id}")
-                t = time.perf_counter()
                 self.tracks_to_self_pos = current_position_rounded
                 self.tracks_to_self_fetched_at = time.perf_counter()
@@ -438,7 +454,7 @@ class DrawnScenario(Scenario):
                 self.track_ids_to_self = iter(track_ids)
                 self.tracks_to_self = stage.history.ids_as_trajectory(track_ids)
-                print(time.perf_counter() - t, "fetch delay")
+                stage.logger.info(f"Fetched similar tracks for {self.track_id}. (Took {time.perf_counter() - self.tracks_to_self_fetched_at}s)")

             # if self.tracks_to_self and not len(self.line_others.root.points):
             if self.tracks_to_self and not self.line_others.is_running():
@@ -479,9 +495,10 @@ class DrawnScenario(Scenario):
         # 1) history, fade out when lost
         # self.line_history.get(StaticLine).color = SrgbaColor(1, 0, 1-self.anomaly_factor(), 1)
         self.line_history.get(FadeOutJitterLine).set_alpha(1-self.lost_factor())
         self.line_prediction.get(FadeOutLine).set_alpha(1-self.lost_factor())
-        self.line_history.get(NoiseLine).amplitude = self.lost_factor()
+        self.line_history.get(NoiseLine).amplitude = self.lost_factor() * 1.5

         # fade out history after max duration, given in frames
         track_age_in_frames = self.track_age() * TRACK_ASSUMED_FPS
@@ -559,12 +576,16 @@ class DatasetDrawer():
         line_color = SrgbaColor(0,1,1,1)
         self.track_line = LineAnimationStack(StaticLine([], line_color))
-        self.track_line.add(CropAnimationLine(self.track_line.tail, 100, assume_fps=TRACK_ASSUMED_FPS*30)) # speed up
+        self.track_line.add(SimplifyLine(self.track_line.tail, 0.003)) # Simplify before cropping, to get less noodling
+        self.track_line.add(CropAnimationLine(self.track_line.tail, 50, assume_fps=TRACK_ASSUMED_FPS*20)) # speed up
         # self.track_line.add(DashedLine(self.track_line.tail, t_factor=4, loop_offset=True))
         # self.track_line.get(DashedLine).skip = True
-        self.track_line.add(FadedEndsLine(self.track_line.tail, 10, 10))
+        # self.track_line.add(FadedEndsLine(self.track_line.tail, 10, 10))
+        self.track_line.add(FadeOutJitterLine(self.track_line.tail, t_factor=3))
         # self.track_line.add(FadeOutLine(self.track_line.tail))
-        # self.track_line.get(FadeOutLine).set_alpha(1)
+        self.track_line.get(FadeOutJitterLine).set_alpha(np.random.random()*.3+.7)

     def to_renderable_lines(self, dt: DeltaT):
         lines = RenderableLines([], CoordinateSpace.WORLD)
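A note on ordering in this stack: `SimplifyLine` now runs before `CropAnimationLine`, so the crop's fixed point budget spans a longer stretch of path once jittery micro-segments are gone. A sketch of the effect using shapely's RDP `simplify` as a stand-in (illustrative only; not the project's line-animation classes):

```python
from shapely.geometry import LineString

# a dense, jittery track: 1000 points zigzagging by +-0.001 along x
raw = [(x * 0.01, 0.001 * (-1) ** x) for x in range(1000)]

crop_first = LineString(raw[-50:]).simplify(0.003)
simplify_first = LineString(list(LineString(raw).simplify(0.003).coords)[-50:])

print(crop_first.length)      # ~0.49: 50 raw points cover only a short span
print(simplify_first.length)  # ~9.99: the same 50-point budget spans the whole simplified path
```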
@@ -587,12 +608,12 @@ class Stage(Node):

 class Stage(Node):
-    FPS = 60
+    FALLBACK_FPS = 30 # we render to lasers, no need to go faster!

     def setup(self):
         self.active_scenarios: List[DrawnScenario] = [] # List of currently running Scenario instances
         self.scenarios: Dict[str, DrawnScenario] = DefaultDictKeyed(lambda key: DrawnScenario(key))
         self.frame_noimg_sock = self.sub(self.config.zmq_frame_noimg_addr)
         self.trajectory_sock = self.sub(self.config.zmq_trajectory_addr)
@@ -616,8 +637,8 @@ class Stage(Node):
     def run(self):
-        while self.run_loop_capped_fps(self.FPS, warn_below_fps=10):
-            dt = max(1/ self.FPS, self.dt_since_last_tick) # never dt of 0
+        while self.run_loop_capped_fps(self.get_setting('stage.fps', self.FALLBACK_FPS), warn_below_fps=10):
+            dt = max(1/ self.get_setting('stage.fps', self.FALLBACK_FPS), self.dt_since_last_tick) # never dt of 0
             # t1 = time.perf_counter()

             self.loop_receive()
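The frame rate thus moves from a class constant to a runtime setting (`stage.fps`) with `FALLBACK_FPS` as the default, and `dt` is clamped so a first or unusually fast tick never yields a zero delta. A hedged sketch of the pattern, with stand-ins for `get_setting` and the capped-fps loop (not the actual Node API):

```python
import time

settings = {"stage.fps": 30}  # assumed to be mutable at runtime

def get_setting(key: str, fallback: float) -> float:
    return settings.get(key, fallback)

def run(render_tick, fallback_fps: float = 30.0) -> None:
    last = time.perf_counter()
    while True:
        fps = get_setting("stage.fps", fallback_fps)  # re-read every tick
        now = time.perf_counter()
        dt = max(1 / fps, now - last)  # never a dt of 0
        last = now
        render_tick(dt)
        sleep_for = (1 / fps) - (time.perf_counter() - now)
        if sleep_for > 0:
            time.sleep(sleep_for)  # cap the loop at the configured rate
```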
@@ -720,18 +741,18 @@ class Stage(Node):
             lines = training_lines

         t2b = time.perf_counter()
-        rl_scenario = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
-        rl_training = training_lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
+        # rl_scenario = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
+        # rl_training = training_lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
         self.counter.set("stage.lines", len(lines.lines))
-        self.counter.set("stage.points_orig", lines.point_count())
-        self.counter.set("stage.points", rl_scenario.point_count())
+        # self.counter.set("stage.points_orig", lines.point_count())
+        self.counter.set("stage.points", lines.point_count())

         t3 = time.perf_counter()
         layers: RenderableLayers = {
-            1: rl_scenario,
+            1: lines,
             2: self.debug_lines,
-            3: rl_training,
+            3: training_lines,
         }
         t4 = time.perf_counter()
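With `SimplifyLine` now applied per line inside the animation stacks, the global `as_simplified(SimplifyMethod.RDP, .003)` pass at render time becomes redundant and is commented out. For reference, a sketch of the Ramer-Douglas-Peucker reduction such a call performs (an illustration, not the project's `SimplifyMethod.RDP` code):

```python
import numpy as np

def rdp(points: np.ndarray, epsilon: float = 0.003) -> np.ndarray:
    """Ramer-Douglas-Peucker: keep only points deviating > epsilon from the chord."""
    if len(points) < 3:
        return points
    start, end = points[0], points[-1]
    chord = end - start
    norm = np.linalg.norm(chord)
    if norm == 0:
        dists = np.linalg.norm(points - start, axis=1)
    else:
        # perpendicular distance of every point to the start-end chord
        dists = np.abs(chord[0] * (points[:, 1] - start[1])
                       - chord[1] * (points[:, 0] - start[0])) / norm
    i = int(np.argmax(dists))
    if dists[i] > epsilon:
        left = rdp(points[:i + 1], epsilon)
        right = rdp(points[i:], epsilon)
        return np.vstack([left[:-1], right])
    return np.vstack([start, end])
```

For example, `rdp(np.array([[0, 0], [1, 0.001], [2, 0]]))` keeps only the two endpoints, since the middle point deviates less than the tolerance.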
@@ -784,7 +805,7 @@ class Stage(Node):
     argparser.add_argument('--debug-map',
                            help='specify a map (svg-file) from which to load lines which will be overlaid',
                            type=str,
-                           default="../DATASETS/hof3/map_hof.svg")
+                           default="../DATASETS/hof-lidar/map_hof.svg")
     argparser.add_argument('--max-active-scenarios',
                            help='Maximum number of active scenarios that can be drawn at once (to not overload the laser)',
                            type=int,

@@ -330,6 +330,7 @@ def track_predictions_to_lines(track: Track, camera:Camera, anim_position=.8):
         return

     current_point = track.get_projected_history(camera=camera)[-1]
     slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position

     lines = []
@@ -371,6 +372,7 @@ def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:
     """
     lines = track_predictions_to_lines(track, camera, anim_position)
     if not lines:
         return

@@ -1,3 +1,4 @@
+from abc import ABC, abstractmethod
 import argparse
 import csv
 import json
@@ -776,7 +777,54 @@ def run():

     is_running.clear()

-class Smoother:
+class TrackPointFilter(ABC):
+    @abstractmethod
+    def apply(self, points: List[float]):
+        pass
+
+    def apply_track(self, track: Track) -> Track:
+        ls = [d.l for d in track.history]
+        ts = [d.t for d in track.history]
+        ws = [d.w for d in track.history]
+        hs = [d.h for d in track.history]
+
+        ls = self.apply(ls)
+        ts = self.apply(ts)
+        ws = self.apply(ws)
+        hs = self.apply(hs)
+
+        new_history = [Detection(d.track_id, l, t, w, h, d.conf, d.state, d.frame_nr, d.det_class) for l, t, w, h, d in zip(ls,ts,ws,hs, track.history)]
+        return track.get_with_new_history(new_history)
+
+    def apply_to_frame_tracks(self, frame: Frame) -> Frame:
+        new_tracks = []
+        for track in frame.tracks.values():
+            new_track = self.apply_track(track)
+            new_tracks.append(new_track)
+        frame.tracks = {t.track_id: t for t in new_tracks}
+        return frame
+
+    def apply_to_frame_predictions(self, frame: Frame) -> Frame:
+        for track in frame.tracks.values():
+            new_predictions = []
+            if not track.predictions:
+                continue
+            for prediction in track.predictions:
+                xs = [d[0] for d in prediction]
+                ys = [d[1] for d in prediction]
+
+                xs = self.apply(xs)
+                ys = self.apply(ys)
+
+                filtered_prediction = [[x,y] for x, y in zip(xs, ys)]
+                new_predictions.append(filtered_prediction)
+            track.predictions = new_predictions
+        return frame
+
+
+class Smoother(TrackPointFilter):

     def __init__(self, window_len=6, convolution=False):
         # for some reason this smoother messes up the predictions. Probably skews the points too much??
@@ -789,54 +837,45 @@ class Smoother:

-    def smooth(self, points: List[float]):
+    def apply(self, points: List[float]):
         self.smoother.smooth(points)
         return self.smoother.smooth_data[0]

+    # aliases, for historic reasons
+    def smooth(self, points: List[float]):
+        return self.apply(points)
+
     def smooth_track(self, track: Track) -> Track:
-        ls = [d.l for d in track.history]
-        ts = [d.t for d in track.history]
-        ws = [d.w for d in track.history]
-        hs = [d.h for d in track.history]
-        self.smoother.smooth(ls)
-        ls = self.smoother.smooth_data[0]
-        self.smoother.smooth(ts)
-        ts = self.smoother.smooth_data[0]
-        self.smoother.smooth(ws)
-        ws = self.smoother.smooth_data[0]
-        self.smoother.smooth(hs)
-        hs = self.smoother.smooth_data[0]
-        new_history = [Detection(d.track_id, l, t, w, h, d.conf, d.state, d.frame_nr, d.det_class) for l, t, w, h, d in zip(ls,ts,ws,hs, track.history)]
-        return track.get_with_new_history(new_history)
-        # return Track(track.track_id, new_history, track.predictor_history, track.predictions, track.fps)
+        return self.apply_track(track)

     def smooth_frame_tracks(self, frame: Frame) -> Frame:
-        new_tracks = []
-        for track in frame.tracks.values():
-            new_track = self.smooth_track(track)
-            new_tracks.append(new_track)
-        frame.tracks = {t.track_id: t for t in new_tracks}
-        return frame
+        return self.apply_to_frame_tracks(frame)

     def smooth_frame_predictions(self, frame: Frame) -> Frame:
-        for track in frame.tracks.values():
-            new_predictions = []
-            if not track.predictions:
-                continue
-            for prediction in track.predictions:
-                xs = [d[0] for d in prediction]
-                ys = [d[1] for d in prediction]
-                self.smoother.smooth(xs)
-                xs = self.smoother.smooth_data[0]
-                self.smoother.smooth(ys)
-                ys = self.smoother.smooth_data[0]
-                smooth_prediction = [[x,y] for x, y in zip(xs, ys)]
-                new_predictions.append(smooth_prediction)
-            track.predictions = new_predictions
-        return frame
+        return self.apply_to_frame_predictions(frame)
+
+
+class Noiser(TrackPointFilter):
+    def __init__(self, amplitude=.1):
+        self.amplitude = amplitude
+
+    def apply(self, points: List[float]):
+        return np.random.normal(points, scale=self.amplitude).tolist()
+
+
+class RandomOffset(TrackPointFilter):
+    """
+    A bit hacky way to offset the whole track. Does x & y & w & h with the same value
+    """
+    def __init__(self, amplitude=.1):
+        self.amplitude = np.random.normal(scale=amplitude)
+
+    def apply(self, points: List[float]):
+        return [p + self.amplitude for p in points]
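Because `Noiser` and `RandomOffset` share the `TrackPointFilter` interface with `Smoother`, augmented variations of a recording can be generated uniformly. A hypothetical usage sketch (`track` is assumed to be a loaded Track instance):

```python
# Hypothetical sketch: generating augmented variations of a recorded track.
variations = []
for _ in range(2):
    variations.append(Noiser(amplitude=.1).apply_track(track))  # per-point jitter
    # RandomOffset samples its offset once in __init__, so a fresh instance
    # is needed per variation; each shifts the whole track rigidly.
    variations.append(RandomOffset(amplitude=.1).apply_track(track))
```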

@@ -158,7 +158,7 @@ class ImageMap(GeometricMap): # TODO Implement for image maps -> watch flipped
         """
         Use homography and image to calculate the limits of positions in world coordinates
         """
-        print(self.data.shape)
+        # print(self.data.shape)
         max_x = self.data.shape[1]
         max_y = self.data.shape[2]
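This method derives world-coordinate limits from the map image's pixel dimensions, so only the leftover debug print goes. As a hedged sketch of how such limits can be computed when a 3x3 pixel-to-world homography is available (illustrative; the actual ImageMap math may differ):

```python
import numpy as np

def world_limits(H: np.ndarray, max_x: int, max_y: int):
    """Map the image's corner pixels through homography H (pixel -> world)."""
    corners = np.array([[0, 0], [max_x, 0], [0, max_y], [max_x, max_y]], dtype=float)
    homog = np.hstack([corners, np.ones((4, 1))])  # to homogeneous coordinates
    world = (H @ homog.T).T
    world = world[:, :2] / world[:, 2:3]           # perspective divide
    return world.min(axis=0), world.max(axis=0)    # (min_xy, max_xy)
```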