This is starting to look like something (prep for U? @ hof)

Ruben van de Ven 2025-11-07 23:57:54 +01:00
parent 3a73806a52
commit 3b9a7c033f
11 changed files with 2058 additions and 147 deletions


@ -2,7 +2,7 @@
"batch_size": 512,
"grad_clip": 1.0,
"learning_rate_style": "exp",
"learning_rate": 0.0001,
"learning_rate": 0.001,
"min_learning_rate": 1e-05,
"learning_decay_rate": 0.9999,
"prediction_horizon": 60,

File diff suppressed because one or more lines are too long

tests/track_history.ipynb (new file, 776 lines)

File diff suppressed because one or more lines are too long


@ -168,8 +168,8 @@ def calculate_loitering_scores(track: ProjectedTrack, min_duration_to_linger, li
linger_frames +=1
else:
# decay if moving faster
linger_duration = max(linger_duration - 3, 0)
linger_frames = max(linger_frames - 3, 0)
linger_duration = max(linger_duration - 1.5, 0)
linger_frames = max(linger_frames - 1.5, 0)
# Calculate loitering score
if total_frames > 0:
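
Below, a minimal standalone sketch of the linger-counter update shown above, assuming the speed check reduces to a boolean; the 3 → 1.5 change halves how quickly lingering evidence erodes once a track speeds up again.

# Hedged sketch of the linger counter; 1.5 is the new decay value from the hunk above.
def update_linger(linger_frames: float, is_lingering: bool, decay: float = 1.5) -> float:
    if is_lingering:
        return linger_frames + 1          # accumulate while the track lingers
    return max(linger_frames - decay, 0)  # decay once it moves faster (was 3, now 1.5)

frames = 0.0
for lingering in [True] * 40 + [False] * 20:
    frames = update_linger(frames, lingering)
print(frames)  # 10.0 with decay=1.5; the old decay of 3 would already be back at 0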


@ -1,7 +1,9 @@
from argparse import ArgumentParser
from collections import defaultdict, deque, namedtuple
from copy import copy
import json
from pathlib import Path
import pickle
import select
import socket
from turtle import update
@ -219,25 +221,27 @@ class Lidar(Node):
else:
self.sequence_generator = read_live_data(self.config.ip, self.config.port, config)
self.tracker = BYTETracker(frame_rate=10)
self.tracker = BYTETracker(frame_rate=ASSUMED_FPS)
self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())
self.room_filter = AdaptiveVoxelFilter(
voxel_size=.5,
nr_of_frames_to_static=120 * ASSUMED_FPS , # (confidence_threshold * update_interval) = nr of frames needed to count as static
nr_of_frames_to_unstatic=10 * ASSUMED_FPS,
update_interval=20, #not every frame needs to impact history
decay_rate=1
)
# self.room_filter = VoxelOccupancyFilter(
# voxel_size=.5,
# history_size=80,
# history_interval=20, #not every frame needs to impact history
# ready_for_action_at=25,
# static_threshold=0.3
# self.room_filter = AdaptiveVoxelFilter(
# voxel_size=.25,
# nr_of_frames_to_static=120 * ASSUMED_FPS , # (confidence_threshold * update_interval) = nr of frames needed to count as static
# nr_of_frames_to_unstatic=120 * ASSUMED_FPS, # 1/3
# update_interval=20, #not every frame needs to impact history
# grow_rate=4,
# decay_rate=1,
# )
self.room_filter = VoxelOccupancyFilter(
voxel_size=.4,
history_size=80,
history_interval=20, #not every frame needs to impact history
ready_for_action_at=25,
static_threshold=0.3
)
self.debug_lines = load_lines_from_svg(self.config.debug_map, 100, '') if self.config.debug_map else []
@ -442,6 +446,7 @@ class Lidar(Node):
# a = time.perf_counter()
filtered_pcd, dropped_indexes = self.room_filter.remove_static(current_pcd)
# print(len(filtered_pcd.points), len(dropped_indexes))
if self.config.viz and len(dropped_indexes):
@ -480,13 +485,18 @@ class Lidar(Node):
points_2d = project_to_xy(filtered_pcd)
# with open('/tmp/points.pcl', 'wb') as fp:
# pickle.dump(points_2d, fp)
clusters = self.cluster_2d(
points_2d,
)
# print(len(clusters))
# boxes, centroids = get_cluster_boxes(points_2d, labels, min_area= 0.3*0.3)
boxes, centroids = get_cluster_boxes(clusters, min_area= self.get_setting('lidar.min_box_area', .1))
# print(centroids)
# append confidence and class (placeholders)
@ -558,8 +568,12 @@ class Lidar(Node):
for line_set in line_sets:
vis.add_geometry(line_set, False)
elif self.config.viz:
print('initializing')
if hasattr(self.room_filter, 'scan_counter'):
print(self.room_filter.scan_counter, self.room_filter.nr_of_frames_to_init)
line_sets = []
filtered_pcd.paint_uniform_color([.5]*3)
dropped_pcds.append(filtered_pcd)
live_filtered_pcd.points = o3d.utility.Vector3dVector(np.empty([0,3]))
@ -584,9 +598,9 @@ class Lidar(Node):
live_pcd.points = dropped_pcd.points
live_pcd.colors = dropped_pcd.colors
color = [0.5, 0.5, 0.7] if self.room_filter.initialised else [0.7, 0.7, 0.7]
color = [0.2, 0.7, 0.2] if self.room_filter.initialised else [0.7, 0.7, 0.7]
# live_pcd.paint_uniform_color(color)
live_filtered_pcd.paint_uniform_color([0.2, 0.7, 0.2])
live_filtered_pcd.paint_uniform_color(color)
if not geom_added:
vis.add_geometry(live_filtered_pcd, True)
@ -693,7 +707,7 @@ class Lidar(Node):
elif method == 'birch':
process_subclusters = self.get_setting('lidar.birch_process_subclusters', True)
labels = Birch(
max_eps=self.get_setting('lidar.birch_threshold', 1), # "The radius of the subcluster obtained by merging a new sample"
threshold=self.get_setting('lidar.birch_threshold', 1), # "The radius of the subcluster obtained by merging a new sample"
branching_factor=self.get_setting('lidar.birch_branching_factor', 50),
n_clusters=None if not process_subclusters else DBSCAN(
eps=self.get_setting('lidar.eps', .132),
@ -705,9 +719,9 @@ class Lidar(Node):
return []
# db.labels_
clusters = []
for label in labels:
for label in np.unique(labels):
if label == -1:
continue # Skip noise
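
A small illustration, with made-up labels, of why the loop now iterates np.unique(labels): the clusterer emits one label per point, so looping over the raw array would build one "cluster" per point instead of one per label.

import numpy as np

labels = np.array([0, 0, 1, 1, 1, -1, 0])   # made-up per-point cluster ids, -1 marks noise
print(len(labels))                          # 7: looping the raw array gives one iteration per point
print(list(np.unique(labels)))              # [-1, 0, 1]: one iteration per cluster, noise still skipped by the -1 check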
@ -1019,6 +1033,7 @@ class VoxelOccupancyFilter:
filtered_pcd.points = o3d.utility.Vector3dVector(np.array(filtered_points))
return filtered_pcd, dropped_indexes
AdaptiveVoxelFilterCache = namedtuple("AdaptiveVoxelFilterCache", ("scan_counter", "static_map", "initialised"))
class AdaptiveVoxelFilter:
def __init__(
@ -1027,6 +1042,7 @@ class AdaptiveVoxelFilter:
nr_of_frames_to_static=720,
nr_of_frames_to_unstatic=720,
decay_rate=1,
grow_rate=3,
update_interval=10,
):
"""
@ -1039,22 +1055,50 @@ class AdaptiveVoxelFilter:
decay_rate (int): How much to decrement confidence for unoccupied voxels per scan.
update_interval (int): Update static map every N scans.
"""
self.nr_of_frames_to_static = nr_of_frames_to_static
self.nr_of_frames_to_unstatic = nr_of_frames_to_unstatic
self.nr_of_frames_to_init = self.nr_of_frames_to_static + self.nr_of_frames_to_unstatic
self.confidence_threshold = (nr_of_frames_to_static / update_interval) * decay_rate
self.max_occupancy_score = (nr_of_frames_to_unstatic / update_interval) * decay_rate + self.confidence_threshold
self.voxel_size = voxel_size
# self.confidence_threshold = confidence_threshold
# self.max_occupancy_score = max_occupancy_score
self.decay_rate = decay_rate
self.grow_rate = grow_rate
self.update_interval = update_interval
# Static map: {voxel: confidence}
self.static_map: Dict[Tuple[int, int], int] = defaultdict(int)
self.static_map: Dict[Tuple[int, int, int], int] = defaultdict(int)
# Counter for update interval
self.scan_counter = 0
self.cache_load_if_exists()
self.initialised = False
def cache_save(self):
data = AdaptiveVoxelFilterCache(self.scan_counter, self.static_map, self.initialised)
with self.cache_fname().open('wb') as fp:
pickle.dump(data, fp)
logger.info(f"Wrote voxel filter to cache {self.cache_fname()}")
def cache_load_if_exists(self):
# data = AdaptiveVoxelFilterCache(self.scan_counter, self.static_map)
if self.cache_fname().exists():
logger.info(f"Load voxel filter from cache {self.cache_fname()}")
with self.cache_fname().open('rb') as fp:
data = pickle.load(fp)
assert type(data) == AdaptiveVoxelFilterCache
self.static_map = data.static_map
self.scan_counter = data.scan_counter
self.initialised = data.initialised
def cache_fname(self):
props = f"{self.voxel_size}-{self.nr_of_frames_to_static}-{self.nr_of_frames_to_unstatic}"
return Path(f"/tmp/voxel_filter-{props}.pcl")
def remove_static(self, current_pcd):
@ -1086,14 +1130,16 @@ class AdaptiveVoxelFilter:
# Update static map and confidence
self.scan_counter += 1
if self.scan_counter % self.update_interval == 0:
if not self.initialised and self.scan_counter > self.confidence_threshold:
if not self.initialised or self.scan_counter % self.update_interval == 0:
if not self.initialised and self.scan_counter > self.nr_of_frames_to_init:
self.initialised = True
logger.info('Initialised static filter')
self.cache_save()
# Add new voxels to static map
for voxel in current_voxel_set:
if self.static_map[voxel] < self.max_occupancy_score:
self.static_map[voxel] += 1
self.static_map[voxel] += self.grow_rate
# Decay confidence for voxels not in current scan
for voxel in list(self.static_map.keys()):


@ -22,7 +22,7 @@ import svgpathtools
from noise import snoise2
from trap import renderable_pb2
from trap.utils import exponentialDecay, exponentialDecayRounded, inv_lerp, relativePointToPolar, relativePolarToPoint
from trap.utils import easeInOutQuad, exponentialDecay, exponentialDecayRounded, inv_lerp, lerp, relativePointToPolar, relativePolarToPoint
"""
@ -57,6 +57,9 @@ class SrgbaColor():
def __eq__(self, other):
return math.isclose(self.red, other.red) and math.isclose(self.green, other.green) and math.isclose(self.blue, other.blue) and math.isclose(self.alpha, other.alpha)
def as_array(self):
return [self.red, self.green, self.blue, self.alpha]
@dataclass
@ -311,24 +314,43 @@ class AppendableLine(LineGenerator):
# create origin
self.drawn_points.append(self.target_points[0])
# and drawing head
self.drawn_points.append(self.target_points[0])
# self.drawn_points.append(self.target_points[0])
target_l = shapely.geometry.LineString(self.target_points).length
current_l = shapely.geometry.LineString(self.drawn_points).length if len(self.drawn_points) > 1 else 0
req_l = exponentialDecayRounded(self.current_l, target_l, self.draw_decay_speed, dt, .05)
if np.isclose(req_l, target_l):
self.drawn_points = self.target_points
else:
distance_to_do = req_l - current_l
idx = len(self.drawn_points) - 1
while distance_to_do:
target = self.target_points[idx]
this_distance = np.array(self.drawn_points[-1] ) - np.array(target)
if this_distance > distance_to_do:
break
distance_to_do - this_distance
idx+=1
self.drawn_points.append(target)
idx = len(self.drawn_points) - 1
target = self.target_points[idx]
if np.isclose(self.drawn_points[-1], target, atol=.05).all():
# TODO: might want to migrate to np.isclose()
if len(self.drawn_points) == len(self.target_points):
self.ready = True
return # done until a new point is added
# add new point as drawing head
self.drawn_points.append(self.drawn_points[-1])
self.ready = False
x = exponentialDecayRounded(self.drawn_points[-1][0], target[0], self.draw_decay_speed, dt, .05)
y = exponentialDecayRounded(self.drawn_points[-1][1], target[1], self.draw_decay_speed, dt, .05)
self.drawn_points[-1] = (float(x), float(y))
if np.isclose(self.drawn_points[-1], self.target_points[idx], atol=.05).all():
# TODO: might want to migrate to np.isclose()
if len(self.drawn_points) == len(self.target_points):
self.ready = True
return # done until a new point is added
# add new point as drawing head
self.drawn_points.append(self.drawn_points[-1])
self.ready = False
x = exponentialDecayRounded(self.drawn_points[-1][0], target[0], self.draw_decay_speed, dt, .05)
y = exponentialDecayRounded(self.drawn_points[-1][1], target[1], self.draw_decay_speed, dt, .05)
self.drawn_points[-1] = (float(x), float(y))
class ProceduralChain(LineGenerator):
"""A line that can be 'dragged' to a target. In which
@ -640,13 +662,21 @@ class CropLine(LineAnimator):
Crop the line at a max nr of points (thus not actual length!)
Keeps the tail, removes the start
"""
def __init__(self, target_line = None, max_points = 200):
def __init__(self, target_line = None, max_points: Optional[int] = None, start_offset: Optional[int] = None):
super().__init__(target_line)
self.start_offset = start_offset
self.max_points = max_points
self.ready = True # static filter, always ready
def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
target_line.points = target_line.points[-1 * self.max_points:]
if self.start_offset:
if len(target_line) <= self.start_offset:
return RenderableLine([])
target_line.points = target_line.points[self.start_offset:]
if self.max_points:
target_line.points = target_line.points[-1 * self.max_points:]
return target_line
@ -834,7 +864,7 @@ class NoiseLine(LineAnimator):
# noise_y = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_y
noise = snoise2(i * frequency, t % 1000, octaves=4)
use_amp = amplitude
use_amp = amplitude[i] if hasattr(amplitude, "__getitem__") else amplitude
if fade_over_n_points > 0:
rev_step = len(drawable_points) - i
amp_factor = rev_step / fade_over_n_points
@ -905,6 +935,19 @@ class SegmentLine(LineAnimator):
return (ls.length * t, ls.length)
else:
return (0, ls.length * t)
@classmethod
def anim_follow_in_front(cls, t: float, ls: shapely.geometry.LineString):
keypoints = [
(0.0, 0, 0), #
(0.07, 1.0, 2.0), #
(0.3, 1.5, 2.0), # At t=0.7, value=0.5
(0.6, 1, 1.5), # At t=0.7, value=0.5
(0.7, 1.2, 1.5), # At t=0.7, value=0.5
(0.7, 1, 1.8), # At t=0.7, value=0.5
(1.0, .5, ls.length), # At t=1, value=0
]
return KeyframeAnimator(keypoints, easeInOutQuad).get_value(t)
def apply(self, target_line: RenderableLine, dt: DeltaT):
@ -925,6 +968,57 @@ class SegmentLine(LineAnimator):
return RenderableLine.from_linestring(line, target_line.points[0].color)
class KeyframeAnimator:
def __init__(
self,
keypoints: List[Tuple[float, float, float]],
easing_func: Callable[[float], float] = None,
):
"""
Initialize the animator with keypoints and an optional easing function.
Args:
keypoints: List of (t_value, value_a, value_b) tuples, where t_value is in [0, 1].
easing_func: Optional function to apply easing to `t` (e.g., easeInOutQuad).
"""
self.keypoints = sorted(keypoints, key=lambda x: x[0])
self.easing_func = easing_func
def get_value(self, t: float) -> float:
"""
Get the interpolated value at time `t` (0 <= t <= 1).
Args:
t: Normalized time (0 to 1).
Returns:
Interpolated (value_a, value_b) pair at time `t`.
"""
if self.easing_func:
t = self.easing_func(t)
# Handle edge cases
if t <= self.keypoints[0][0]:
return self.keypoints[0][1], self.keypoints[0][2]
if t >= self.keypoints[-1][0]:
return self.keypoints[-1][1], self.keypoints[-1][2]
# Find the two keypoints surrounding `t`
for i in range(len(self.keypoints) - 1):
t0, val0, valb0 = self.keypoints[i]
t1, val1, valb1 = self.keypoints[i + 1]
if t0 <= t <= t1:
# Normalize `t` between t0 and t1
local_t = inv_lerp(t0, t1, t)
# Interpolate between val0 and val1
return lerp(val0, val1, local_t), lerp(valb0, valb1, local_t)
return self.keypoints[-1][1], self.keypoints[-1][2] # fallback
class DashedLine(LineAnimator):
@ -980,9 +1074,12 @@ class DashedLine(LineAnimator):
if len(target_line) < 2:
self.ready = True
return target_line
gap_len = gap_len if not callable(self.gap_len) else self.gap_len(dt, self.running_for())
dash_len = dash_len if not callable(self.dash_len) else self.dash_len(dt, self.running_for())
ls = target_line.as_linestring()
multilinestring = self.dashed_line(ls, self.dash_len, self.gap_len, self.t_factor * self.running_for(), self.loop_offset)
multilinestring = self.dashed_line(ls, dash_len, gap_len, self.t_factor * self.running_for(), self.loop_offset)
# when looping, it is always ready, otherwise, only if totally gone
@ -1019,17 +1116,54 @@ class StartFromClosestPoint(LineAnimator):
distance = math.inf
idx = 0
from_point = np.array(self.from_point)
# print(from_point)
for i, point in enumerate(target_line.points):
if i < 2:
continue # skip the first to avoid jitter
to_point = np.array(point.position)
new_distance = np.linalg.norm(from_point-to_point)
if new_distance < distance:
distance = new_distance
idx = i
idx = i+1
else:
break
# print(idx, target_line.points[idx])
return RenderableLine(target_line.points[idx:])
if idx >= len(target_line.points):
logger.warning("Empty line")
return RenderableLine([])
points = []
if idx > 0:
p = target_line.points[idx]
p.position = self.from_point
points.append(p)
points.extend(target_line.points[idx:])
# print(from_point, idx, [p.position for p in target_line.points[max(0,idx-5):idx+5]])
return RenderableLine(points)
@classmethod
def find_closest_to_point(cls, point: Tuple[Coordinate], points: List[Coordinate]):
from_point = np.array(point)
# idx = 0
# distance = math.inf
if len(points) == 0:
return None
# print(points)
# print(points- from_point)
# print(np.array(points) - from_point)
idx = np.argmin(np.linalg.norm(np.array(points) - from_point, axis=1))
# for i, to_point in enumerate(points):
# to_point = np.array(to_point)
# new_distance = np.linalg.norm(from_point-to_point)
# if new_distance < distance:
# distance = new_distance
# idx = i+1
# else:
# break
return idx
@ -1053,11 +1187,19 @@ class RotatingLine(LineAnimator):
self.ready = True
return target_line
origin = target_line.points[0]
if len(self.drawn_points) < 1:
self.drawn_points = target_line.points
# find closest point to start from:
origin = target_line.points[0]
# closest_idx = StartFromClosestPoint.find_closest_to_point(origin.position, [p.position for p in self.drawn_points])
# if closest_idx:
# print('cut at', closest_idx)
# self.drawn_points = self.drawn_points[closest_idx:] # hard cutof
diff_length = len(target_line) - len(self.drawn_points)
if diff_length < 0: # drawn points is larger
self.drawn_points = self.drawn_points[:len(target_line)]
@ -1095,56 +1237,218 @@ class RotatingLine(LineAnimator):
is_ready.append(np.isclose(drawn_point.position, target_point.position, atol=.05).all())
self.ready = all(is_ready)
return RenderableLine(self.drawn_points)
from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks
class NoodleWiggler(LineAnimator):
"""When a line is 'noodling' don't draw it as a whole, but sway it animated
Work in progress
"""
def __init__(self, target_line = None):
super().__init__(target_line)
self.range = 10
self.threshold = 1.8
self.smoothing_sigma = 2
self.t = 0
self.t_factor = 4
def apply(self, target_line, dt):
return target_line
if len(target_line) < 2:
return target_line
self.t += dt
scores = []
distances = [np.linalg.norm(np.array(a.position)- np.array(b.position)) for a,b in zip(target_line.points, target_line.points[1:])]
# 1) find points with high distance traveled relative to displacement. This is noodling
for i, point in enumerate(target_line.points):
if i < self.range:
scores.append(0)
if len(target_line.points) < i + self.range:
scores.append(0)
a = target_line.points[i-self.range]
b = target_line.points[i+self.range]
net_distance = np.linalg.norm(np.array(a.position)- np.array(b.position))
gross_distance = sum(distances[i-self.range:i-self.range-1])
self.scores.append(max(0, gross_distance/net_distance - self.threshold))
# 2) smooth the curve
smoothed_scores = gaussian_filter1d(scores, sigma=self.smoothing_sigma)
# 3) find the peaks ; most intense noodling points
peak_idxs, _ = find_peaks(smoothed_scores, height=0.5)
# 4) for peaks, find start-peak-end indexes
segments = self.connect_peaks_to_segments(smoothed_scores, peak_idxs)
points, scores = self.replace_noodling_segments(scores, target_line, segments, 4)
# 5) apply noise according to score
new_points = NoiseLine.apply_perlin_noise_to_line_normal(
drawable_points=[p.position for p in points],
amplitude=scores,
t = self.t * self.t_factor,
frequency= 2
)
# 6) rebuild as line
points = []
for point, pos in zip(target_line.points, new_positions):
p = copy.deepcopy(point)
p.position = pos
points.append(p)
return RenderableLine(points)
@classmethod
def connect_peaks_to_segments(cls, scores, peaks, threshold=0.3, min_segment_length=1) -> List [Tuple[int, int, int]]:
"""
returns a list of tuples: (start_idx, peak_idx, end_idx)
"""
segments = []
i = 0
n = len(scores)
while i < len(peaks):
peak = peaks[i]
start = peak
end = peak
# Expand left
while start > 0 and scores[start - 1] > threshold:
start -= 1
# Expand right
while end < n - 1 and scores[end + 1] > threshold:
end += 1
# Merge with next peak if close
if i < len(peaks) - 1 and peaks[i + 1] - end < min_segment_length:
i += 1
continue
segments.append((start, peak, end))
i += 1
return segments
@classmethod
def detect_noodling_sections(cls, line, segment_length=0.1, ratio_threshold=2.0):
def replace_noodling_segments(cls, scores: List[float], target_line: RenderableLine, segments, num_interpolated_points = 4):
"""
Detects noodling sections in a LineString using the ratio of actual length to straight-line distance.
Replace noodling segments in target_line with a fixed number of interpolated points,
while preserving all non-noodling sections.
Args:
line (LineString): Input LineString.
segment_length (float): Length of each segment as a fraction of the total line length.
ratio_threshold (float): Threshold for the ratio (actual_length / straight_line_distance).
target_line: Object with a `points` attribute (list of point objects).
segments: List of tuples (start_index, end_index) for noodling segments.
num_interpolated_points: Number of intermediate points to insert between start and end.
Returns:
list: List of noodling segments (as LineStrings).
A new list of points with noodling segments replaced.
"""
noodling_sections = []
total_length = line.length
segment_start = 0.0
new_points = []
new_scores = []
i = 0
n = len(target_line.points)
while segment_start < 1.0:
segment_end = min(segment_start + segment_length, 1.0)
segment = substring(line, segment_start, segment_end, normalized=True)
for start, peak, end in segments:
# Add all points up to the start of the noodling segment
while i < start:
new_points.append(target_line.points[i])
new_scores.append(scores[i])
i += 1
# Calculate straight-line distance between start and end points
start_point = Point(segment.coords[0])
end_point = Point(segment.coords[-1])
straight_line_distance = start_point.distance(end_point)
# Skip the noodling segment and add interpolated points
start_point = target_line.points[start]
peak_point = target_line.points[peak]
end_point = target_line.points[end]
# Calculate actual length of the segment
actual_length = segment.length
start_score = scores[start]
peak_score = scores[peak]
end_score = scores[end]
# Check if the ratio exceeds the threshold
if straight_line_distance > 0 and (actual_length / straight_line_distance) > ratio_threshold:
noodling_sections.append(segment)
# Interpolate between start and peak
for j in range(num_interpolated_points + 2): # +2 to include start and peak
t = inv_lerp(0, num_interpolated_points + 1, j)
new_x = lerp(start_point.position[0], peak_point.position[0], t)
new_y = lerp(start_point.position[1], peak_point.position[1], t)
new_score = lerp(start_score, peak_score, t)
new_point = RenderablePoint(position=(new_x, new_y), color=start_point.color)
new_points.append(new_point)
new_scores.append(new_score)
segment_start = segment_end
# Interpolate between peak and end (skip peak to avoid duplication)
for j in range(1, num_interpolated_points + 2): # +2 to include peak and end
t = inv_lerp(0, num_interpolated_points + 1, j)
new_x = lerp(peak_point.position[0], end_point.position[0], t)
new_y = lerp(peak_point.position[1], end_point.position[1], t)
new_score = lerp(peak_score, end_score, t)
new_point = RenderablePoint(position=(new_x, new_y), color=start_point.color)
new_points.append(new_point)
new_scores.append(new_score)
return noodling_sections
i = end + 1 # Skip to the end of the noodling segment
# Add remaining points after the last noodling segment
while i < n:
new_points.append(target_line.points[i])
new_scores.append(scores[i])
i += 1
return new_points, scores
# @classmethod
# def detect_noodling_sections(cls, line, segment_length=0.1, ratio_threshold=2.0):
# """
# Detects noodling sections in a LineString using the ratio of actual length to straight-line distance.
# Args:
# line (LineString): Input LineString.
# segment_length (float): Length of each segment as a fraction of the total line length.
# ratio_threshold (float): Threshold for the ratio (actual_length / straight_line_distance).
# Returns:
# list: List of noodling segments (as LineStrings).
# """
# noodling_sections = []
# total_length = line.length
# segment_start = 0.0
# while segment_start < 1.0:
# segment_end = min(segment_start + segment_length, 1.0)
# segment = substring(line, segment_start, segment_end, normalized=True)
# # Calculate straight-line distance between start and end points
# start_point = Point(segment.coords[0])
# end_point = Point(segment.coords[-1])
# straight_line_distance = start_point.distance(end_point)
# # Calculate actual length of the segment
# actual_length = segment.length
# # Check if the ratio exceeds the threshold
# if straight_line_distance > 0 and (actual_length / straight_line_distance) > ratio_threshold:
# noodling_sections.append(segment)
# segment_start = segment_end
# return noodling_sections
# # Example usage:
# line = LineString([(0, 0), (0.1, 0.1), (0.2, 0.2), (0.3, 0.1), (0.4, 0.2), (1, 1)])
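
The KeyframeAnimator added above interpolates a pair of values between sorted (t, value_a, value_b) keypoints, optionally easing t first. Below is a self-contained sketch of that interpolation; lerp, inv_lerp and easeInOutQuad are re-declared so it runs outside the trap package, and ls.length is replaced by a made-up line length. The keypoints are the ones anim_follow_in_front passes in.

def lerp(a, b, t): return a + (b - a) * t
def inv_lerp(a, b, v): return (v - a) / (b - a)
def ease_in_out_quad(t): return 2 * t * t if t < 0.5 else 1 - (-2 * t + 2) ** 2 / 2

def keyframe_value(keypoints, t, easing=ease_in_out_quad):
    # keypoints: sorted (t, value_a, value_b) triples; returns the interpolated pair
    if easing:
        t = easing(t)
    if t <= keypoints[0][0]:
        return keypoints[0][1], keypoints[0][2]
    if t >= keypoints[-1][0]:
        return keypoints[-1][1], keypoints[-1][2]
    for (t0, a0, b0), (t1, a1, b1) in zip(keypoints, keypoints[1:]):
        if t0 <= t <= t1:
            local_t = inv_lerp(t0, t1, t)
            return lerp(a0, a1, local_t), lerp(b0, b1, local_t)
    return keypoints[-1][1], keypoints[-1][2]

line_length = 12.0  # placeholder for ls.length
keypoints = [(0.0, 0, 0), (0.07, 1.0, 2.0), (0.3, 1.5, 2.0), (0.6, 1, 1.5),
             (0.7, 1.2, 1.5), (0.7, 1, 1.8), (1.0, .5, line_length)]
print(keyframe_value(keypoints, 0.5))  # ≈ (1.17, 1.67): the eased t lands between the 0.3 and 0.6 keyframes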


@ -449,7 +449,9 @@ class PredictionServer(Node):
# print('preds', len(predictions[0][0]))
if not len(history) or np.isnan(history[-1]).any():
logger.warning('skip for no history')
logger.warning(f'skip for no history @ {ts_key} [{len(prediction_dict)=}, {len(histories_dict)=}, {len(futures_dict)=}]')
logger.info(f"{preds=}")
continue
# response[node.id] = {


@ -148,6 +148,18 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, n
tracks = filter.apply(tracks, camera)
print(f"Filtered: {len(tracks)} tracks")
skip_idxs = []
for idx, track in enumerate(tracks):
track_history = track.get_projected_history(camera=camera)
distances = np.sqrt(np.sum(np.diff(track_history, axis=0)**2, axis=1))
# print(trajectory_org)
# print(distances)
if any(distances > 3):
skip_idxs.append(idx)
for idx in skip_idxs:
tracks.pop(idx)
print(f"Filtered {len(skip_idxs)} tracks which contained leaps")
total = len(tracks)
bar = tqdm.tqdm(total=total)
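
A toy check, with made-up coordinates, of the leap filter added above: consecutive-point distances come from np.diff, and any step above 3 (presumably metres in the projected space) marks the track for removal.

import numpy as np

track_history = np.array([[0.0, 0.0], [1.0, 0.2], [5.0, 0.2]])  # made-up projected positions
distances = np.sqrt(np.sum(np.diff(track_history, axis=0) ** 2, axis=1))
print(distances)           # [≈1.02, 4.0]: the second step is a 4-unit leap
print(any(distances > 3))  # True, so this track would be dropped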


@ -30,7 +30,7 @@ class Settings(Node):
self.load()
dpg.create_context()
dpg.create_viewport(title='Trap settings', width=600, height=200)
dpg.create_viewport(title='Trap settings', width=600, height=1200)
dpg.setup_dearpygui()
@ -38,10 +38,11 @@ class Settings(Node):
dpg.add_text(f"Settings from {self.config.settings_file}")
dpg.add_button(label="Save", callback=self.save)
with dpg.window(label="Stage", pos=(0, 150)):
self.register_setting(f'stage.fps', dpg.add_checkbox(label="FPS cap", default_value=self.get_setting(f'stage.fps', 30), callback=self.on_change))
with dpg.window(label="Stage", pos=(150, 0)):
self.register_setting(f'stage.fps', dpg.add_slider_int(label="FPS cap", default_value=self.get_setting(f'stage.fps', 30), callback=self.on_change))
self.register_setting(f'stage.prediction_interval', dpg.add_slider_int(label="prediction interval", default_value=self.get_setting('stage.prediction_interval', 18), callback=self.on_change))
with dpg.window(label="Lidar", pos=(0, 250)):
with dpg.window(label="Lidar", pos=(0, 100), autosize=True):
self.register_setting(f'lidar.crop_map_boundaries', dpg.add_checkbox(label="crop_map_boundaries", default_value=self.get_setting(f'lidar.crop_map_boundaries', True), callback=self.on_change))
self.register_setting(f'lidar.viz_cropping', dpg.add_checkbox(label="viz_cropping", default_value=self.get_setting(f'lidar.viz_cropping', True), callback=self.on_change))
self.register_setting(f'lidar.tracking_enabled', dpg.add_checkbox(label="tracking_enabled", default_value=self.get_setting(f'lidar.tracking_enabled', True), callback=self.on_change))
@ -60,9 +61,9 @@ class Settings(Node):
dpg.add_separator(label="Cluster filter")
self.register_setting(f'lidar.min_box_area', dpg.add_slider_float(label="min_box_area", default_value=self.get_setting(f'lidar.min_box_area', .1), min_value=0, max_value=1, callback=self.on_change))
for lidar in ["192.168.0.16", "192.168.0.10"]:
for i, lidar in enumerate(["192.168.0.16", "192.168.0.10"]):
name = lidar.replace(".", "_")
with dpg.window(label=f"Lidar {lidar}", pos=(200, 0),autosize=True):
with dpg.window(label=f"Lidar {lidar}", pos=(i * 300, 450),autosize=True):
# dpg.add_text("test")
# dpg.add_input_text(label="string", default_value="Quick brown fox")
self.register_setting(f'lidar.{name}.enabled', dpg.add_checkbox(label="enabled", default_value=self.get_setting(f'lidar.{name}.enabled', True), callback=self.on_change))


@ -25,6 +25,7 @@ from trap.counter import CounterSender
from trap.lines import AppendableLine, AppendableLineAnimator, Coordinate, CoordinateSpace, CropAnimationLine, CropLine, DashedLine, DeltaT, FadeOutJitterLine, FadeOutLine, FadedEndsLine, FadedTailLine, LineAnimationStack, LineAnimator, NoiseLine, RenderableLayers, RenderableLine, RenderableLines, RotatingLine, SegmentLine, SimplifyLine, SimplifyMethod, SrgbaColor, StartFromClosestPoint, StaticLine, layers_to_message, load_lines_from_svg
from trap.node import Node
from trap.track_history import TrackHistory
from trap.utils import lerp
logger = logging.getLogger('trap.stage')
@ -39,13 +40,13 @@ TRACK_ASSUMED_FPS = 12
LOST_FADEOUT = 2 # seconds
PREDICTION_INTERVAL: float|None = 20 # frames
PREDICTION_INTERVAL: int|None = int(TRACK_ASSUMED_FPS * 1.2) # frames
PREDICTION_FADE_IN: float = 3
PREDICTION_FADE_SLOPE: float = -10
PREDICTION_FADE_AFTER_DURATION: float = 8 # seconds
PREDICTION_END_FADE = 2 #frames
# TRACK_MAX_POINTS = 100
TRACK_FADE_AFTER_DURATION = 15. # seconds
TRACK_FADE_AFTER_DURATION = 9. # seconds
TRACK_END_FADE = 30 # points
TRACK_FADE_ASSUME_FPS = TRACK_ASSUMED_FPS
@ -70,15 +71,16 @@ class SceneInfo:
priority: int
description: str = ""
takeover_possible: bool = False # whether to allow for other scenarios to steal the stage
takeover_possible_after: float = -1
class ScenarioScene(Enum):
DETECTED = SceneInfo(4, "First detection")
SUBSTANTIAL = SceneInfo(6, "Multiple detections")
FIRST_PREDICTION = SceneInfo(10, "Prediction is ready")
CORRECTED_PREDICTION = SceneInfo(11, "Multiple predictions")
LOITERING = SceneInfo(7, "Found to be loitering", takeover_possible=True) # TODO: create "possible after"
PLAY = SceneInfo(7, description="After many predictions; just fooling around", takeover_possible=True)
LOST = SceneInfo(-1, description="Track lost", takeover_possible=True)
LOITERING = SceneInfo(7, "Found to be loitering", takeover_possible=True, takeover_possible_after=10) # TODO: create "possible after"
PLAY = SceneInfo(7, description="After many predictions; just fooling around", takeover_possible=True, takeover_possible_after=10)
LOST = SceneInfo(-1, description="Track lost", takeover_possible=True, takeover_possible_after=0)
Time = float
@ -131,8 +133,9 @@ class PrioritySlotItem():
class Scenario(PrioritySlotItem):
def __init__(self, track_id):
def __init__(self, track_id, stage: Stage):
super().__init__(track_id)
self.stage = stage
self.track_id = track_id
self.scene: ScenarioScene = ScenarioScene.DETECTED
@ -161,7 +164,10 @@ class Scenario(PrioritySlotItem):
return self.scene.value.priority
def can_be_taken_over(self):
return self.scene.value.takeover_possible
if self.scene.value.takeover_possible:
if time.perf_counter() - self.state_change_at > self.scene.value.takeover_possible_after:
return True
return False
def track_age(self):
if not self.track:
@ -291,8 +297,8 @@ class Scenario(PrioritySlotItem):
# in case of the unlikely event that prediction was received sooner
self.recv_track(track)
if PREDICTION_INTERVAL is not None and len(self.prediction_tracks) and (track.frame_index - self.prediction_tracks[-1].frame_index) < PREDICTION_INTERVAL:
interval = self.stage.get_setting('stage.prediction_interval', PREDICTION_INTERVAL)
if interval is not None and len(self.prediction_tracks) and (track.frame_index - self.prediction_tracks[-1].frame_index) < interval:
# just drop tracks if the predictions come in too quickly
return
@ -317,7 +323,7 @@ def build_line_others():
line_others = LineAnimationStack(StaticLine([], others_color))
# line_others.add(SegmentLine(line_others.tail, duration=3, anim_f=partial(SegmentLine.anim_grow, in_and_out=True, max_len=5)))
line_others.add(SimplifyLine(line_others.tail, 0.001)) # Simplify before effects, so they don't distort
line_others.add(CropAnimationLine(line_others.tail, lambda dt: 10 + math.sin(dt/4) * 70, assume_fps=TRACK_ASSUMED_FPS*2)) # speed up
line_others.add(CropAnimationLine(line_others.tail, 70, assume_fps=TRACK_ASSUMED_FPS*2)) # speed up
line_others.add(NoiseLine(line_others.tail, amplitude=0, t_factor=.3))
# line_others.add(DashedLine(line_others.tail, t_factor=4, loop_offset=True))
# line_others.get(DashedLine).skip = True
@ -336,8 +342,8 @@ class DrawnScenario(Scenario):
MAX_HISTORY = 130 # points of history of trajectory to display (preventing too long lines)
CUT_GAP = 5 # when adding a new prediction, keep the existing prediction until that point + this CUT_GAP margin
def __init__(self, track_id):
super().__init__(track_id)
def __init__(self, track_id, stage: Stage):
super().__init__(track_id, stage)
self.last_update_t = time.perf_counter()
self.active_ptrack: Optional[ProjectedTrack] = None
@ -353,14 +359,25 @@ class DrawnScenario(Scenario):
self.prediction_color = SrgbaColor(0,1,0,1)
self.line_prediction = LineAnimationStack(StaticLine([], self.prediction_color))
self.line_prediction.add(CropLine(self.line_prediction.tail, start_offset=0))
self.line_prediction.add(StartFromClosestPoint(self.line_prediction.tail))
self.line_prediction.get(StartFromClosestPoint).skip=True
self.line_prediction.add(RotatingLine(self.line_prediction.tail, decay_speed=16))
self.line_prediction.get(RotatingLine).skip = False
self.line_prediction.add(SegmentLine(self.line_prediction.tail, duration=.5))
self.line_prediction.get(SegmentLine).skip = True
self.line_prediction.add(SegmentLine(self.line_prediction.tail, duration=7, anim_f=SegmentLine.anim_follow_in_front))
self.line_prediction.get(SegmentLine).skip = False
self.line_prediction.add(SimplifyLine(self.line_prediction.tail, 0.003)) # Simplify before effects, so they don't distort
self.line_prediction.add(DashedLine(self.line_prediction.tail, dash_len=.6, gap_len=0.9, t_factor=2, loop_offset=True))
self.line_prediction.get(DashedLine).skip = False
self.line_prediction.add(StartFromClosestPoint(self.line_prediction.tail))
GAP_DURATION = 5
def dash_len(dt, t):
t=min(1, t/GAP_DURATION)
return lerp(.99, .6, t)
def gap_len(dt, t):
t=min(1, t/GAP_DURATION)
return lerp(0.01, .9, t)
self.line_prediction.add(DashedLine(self.line_prediction.tail, dash_len=dash_len, gap_len=gap_len, t_factor=2, loop_offset=True))
self.line_prediction.get(DashedLine).skip = True
self.line_prediction.add(FadeOutLine(self.line_prediction.tail))
# when rendering tracks from others similar/close to the current one
@ -372,18 +389,24 @@ class DrawnScenario(Scenario):
# self.line_prediction_drawn = self.line_prediction_faded
def update(self, stage: Stage):
def update(self):
super().update()
if self.track:
self.line_history.root.points = self.track.projected_history
lf = self.lost_factor()
self.line_history.get(FadeOutJitterLine).set_alpha(1-lf)
self.line_prediction.get(FadeOutLine).set_alpha(1-lf)
self.line_history.get(NoiseLine).amplitude = lf * 1.8
if len(self.prediction_tracks):
now_p = np.array(self.line_history.root.points[-1])
prev_p = np.array(self.line_history.root.points[-1 * min(4, len(self.line_history.root.points))])
diff = now_p - prev_p
self.line_prediction.get(StartFromClosestPoint).set_point(now_p + diff)
# now_p = np.array(self.line_history.root.points[-1])
# prev_p = np.array(self.line_history.root.points[-1 * min(4, len(self.line_history.root.points))])
# diff = now_p - prev_p
self.line_prediction.get(StartFromClosestPoint).set_point(self.line_history.root.points[-1])
# print("set origin", self.line_history.root.points[-1])
# TODO: only when animation is ready for it? or collect lines
if not self.active_ptrack:
# draw the first prediction
@ -396,8 +419,11 @@ class DrawnScenario(Scenario):
# stale prediction
# switch only if drawing animation is ready
# if self.line_prediction.is_ready():
self.active_ptrack = self.prediction_tracks[-1]
self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
self.active_ptrack = self.prediction_tracks[-1]
self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
if self.line_prediction.is_ready() and self.line_prediction.get(DashedLine).skip == True:
self.line_prediction.get(SegmentLine).skip = True
self.line_prediction.get(DashedLine).skip = False
self.line_prediction.start() # reset positions
@ -421,7 +447,10 @@ class DrawnScenario(Scenario):
# self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow, reverse=True)
# self.line_prediction.get(SegmentLine).duration = 2
# self.line_prediction.get(SegmentLine).start()
if self.active_ptrack:
# TODO: this should crop by distance/length
self.line_prediction.get(CropLine).start_offset = self.track._track.frame_index - self.active_ptrack._track.frame_index
@ -430,9 +459,10 @@ class DrawnScenario(Scenario):
# special case: LOITERING
if self.scene is ScenarioScene.LOITERING: # or self.state_change_at:
if False and self.scene is ScenarioScene.LOITERING: # or self.state_change_at:
# logger.info('loitering')
transition = min(1, (time.perf_counter() - self.state_change_at)/1.4)
# print('loitering factor', transition)
# TODO: transition fade, using to_alpha(), so it can fade back in again:
@ -450,21 +480,24 @@ class DrawnScenario(Scenario):
self.tracks_to_self_fetched_at = time.perf_counter()
# fetch lines nearby
track_ids = stage.history.get_nearest_tracks(current_position, 30)
track_ids = self.stage.history.get_nearest_tracks(current_position, 30)
self.track_ids_to_self = iter(track_ids)
self.tracks_to_self = stage.history.ids_as_trajectory(track_ids)
self.tracks_to_self = self.stage.history.ids_as_trajectory(track_ids)
stage.logger.info(f"Fetched similar tracks for {self.track_id}. (Took {time.perf_counter() - self.tracks_to_self_fetched_at}s)")
self.stage.logger.info(f"Fetched similar tracks for {self.track_id}. (Took {time.perf_counter() - self.tracks_to_self_fetched_at}s)")
# if self.tracks_to_self and not len(self.line_others.root.points):
if self.tracks_to_self and not self.line_others.is_running():
try:
current_history = next(self.tracks_to_self)
current_history_id = next(self.track_ids_to_self)
self.line_others.get(CropAnimationLine).assume_fps += TRACK_ASSUMED_FPS*1.5 # faster each time
self.line_others.get(CropAnimationLine).assume_fps = min(
self.line_others.get(CropAnimationLine).assume_fps + TRACK_ASSUMED_FPS*1.5 , # faster each time
TRACK_ASSUMED_FPS * 6 # capped at 6x
)
self.line_others.get(NoiseLine).amplitude = .05
logger.info(f"play history item: {current_history_id}")
self.line_others.get(FadeOutLine).set_alpha(1)
@ -496,9 +529,7 @@ class DrawnScenario(Scenario):
# 1) history, fade out when lost
# self.line_history.get(StaticLine).color = SrgbaColor(1, 0, 1-self.anomaly_factor(), 1)
self.line_history.get(FadeOutJitterLine).set_alpha(1-self.lost_factor())
self.line_prediction.get(FadeOutLine).set_alpha(1-self.lost_factor())
self.line_history.get(NoiseLine).amplitude = self.lost_factor() * 1.5
# fade out history after max duration, given in frames
track_age_in_frames = self.track_age() * TRACK_ASSUMED_FPS
@ -506,8 +537,6 @@ class DrawnScenario(Scenario):
t2 = time.perf_counter()
# 2) also fade-out when moving into loitering mode.
# when fading out is done, start drawing historical data
history_line = self.line_history.as_renderable_line(dt)
t3 = time.perf_counter()
@ -576,7 +605,7 @@ class DatasetDrawer():
line_color = SrgbaColor(0,1,1,1)
self.track_line = LineAnimationStack(StaticLine([], line_color))
self.track_line.add(SimplifyLine(self.track_line.tail, 0.003)) # Simplify before cropping, to get less noodling
self.track_line.add(SimplifyLine(self.track_line.tail, 0.004)) # Simplify before cropping, to get less noodling
self.track_line.add(CropAnimationLine(self.track_line.tail, 50, assume_fps=TRACK_ASSUMED_FPS*20)) # speed up
# self.track_line.add(DashedLine(self.track_line.tail, t_factor=4, loop_offset=True))
@ -614,7 +643,7 @@ class Stage(Node):
self.active_scenarios: List[DrawnScenario] = [] # List of currently running Scenario instances
self.scenarios: Dict[str, DrawnScenario] = DefaultDictKeyed(lambda key: DrawnScenario(key))
self.scenarios: Dict[str, DrawnScenario] = DefaultDictKeyed(lambda key: DrawnScenario(key, self))
self.frame_noimg_sock = self.sub(self.config.zmq_frame_noimg_addr)
self.trajectory_sock = self.sub(self.config.zmq_trajectory_addr)
self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
@ -676,7 +705,7 @@ class Stage(Node):
"""Update active scenarios and handle pauses/completions."""
# 1) process timestep for all scenarios
for s in self.scenarios.values():
s.update(self)
s.update()
# 2) Remove stale tracks and take-overs
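
For orientation, a standalone sketch of the time-varying dash pattern set up in DrawnScenario.__init__ above (lerp re-declared locally, values copied from the diff): over GAP_DURATION seconds the prediction line opens up from nearly solid to clearly dashed.

def lerp(a, b, t): return a + (b - a) * t

GAP_DURATION = 5
def dash_len(dt, t):
    t = min(1, t / GAP_DURATION)
    return lerp(.99, .6, t)
def gap_len(dt, t):
    t = min(1, t / GAP_DURATION)
    return lerp(0.01, .9, t)

for running_for in (0, 2.5, 5, 10):
    print(f"{running_for}s dash={dash_len(0, running_for):.2f} gap={gap_len(0, running_for):.2f}")
# 0 s: dash 0.99, gap 0.01 (practically solid); 2.5 s: dash ≈0.80, gap ≈0.46; 5 s and later: dash 0.60, gap 0.90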


@ -30,6 +30,12 @@ def inv_lerp(a: float, b: float, v: float) -> float:
"""
return (v - a) / (b - a)
def easeInOutQuad(t: float) -> float:
"""Quadratic easing in/out - smoothing the transition."""
if t < 0.5:
return 2 * t * t
else:
return 1 - np.power(-2 * t + 2, 2) / 2
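
A quick sanity check of the easing curve with example values, assuming it lives in trap/utils.py as the other imports in this commit suggest: it is symmetric around t=0.5 and keeps the endpoints fixed.

from trap.utils import easeInOutQuad  # module path inferred from the other imports in this commit

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(t, easeInOutQuad(t))
# 0.0 -> 0.0, 0.25 -> 0.125, 0.5 -> 0.5, 0.75 -> 0.875, 1.0 -> 1.0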