preliminary Multi lidar support
parent c6747ac8e6
commit acdb869430
9 changed files with 360 additions and 49 deletions
@@ -42,6 +42,7 @@ dependencies = [
     "nptyping>=2.5.0",
     "py-to-proto>=0.6.0",
     "grpcio-tools>=1.76.0",
+    "dearpygui>=2.1.0",
 ]

 [project.scripts]
@@ -64,6 +65,7 @@ trap_prediction = "trap.prediction_server:PredictionServer.parse_and_start"
 trap_render_cv = "trap.cv_renderer:CvRenderer.parse_and_start"
 trap_monitor = "trap.monitor:Monitor.parse_and_start" # migrate timer
 trap_laser_calibration = "trap.laser_calibration:LaserCalibration.parse_and_start" # migrate timer
+trap_settings = "trap.settings:Settings.parse_and_start" # migrate timer

 [tool.uv]

@@ -23,8 +23,8 @@ directory=%(here)s
 autostart=false

 [program:video]
-command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
-# command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
+# command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
+command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
 directory=%(here)s
 directory=%(here)s

@@ -25,7 +25,7 @@ from bytetracker.basetrack import TrackState as ByteTrackTrackState
 import pandas as pd
 from shapely import Point

-from trap.anomaly import calculate_loitering_scores
+
 from trap.utils import get_bins, inv_lerp, lerp
 from trajectron.environment import Environment, Node, Scene
 from urllib.parse import urlparse
@@ -37,7 +37,6 @@ class FrameEmitter(node.Node):
         video_gen = enumerate(source, start = offset)

         # writer = FrameWriter(self.config.record, None, None) if self.config.record else nullcontext
-        print(self.config.record)
         writer = FrameWriter(str(self.config.record), None, None) if self.config.record else None
         try:
             processor = ImgMovementFilter()
@@ -4,14 +4,16 @@ from copy import copy
 from pathlib import Path
 import select
 import socket
-from typing import DefaultDict, List
+from typing import DefaultDict, Dict, List
 import numpy as np
 import open3d as o3d
+import shapely
 from sklearn.cluster import DBSCAN
 from scipy.spatial.distance import cdist
 import logging
 import time
 from trap.base import Camera, Detection, Frame, Track, UndistortedCamera
+from trap.lines import load_lines_from_svg
 from trap.node import Node
 from bytetracker import BYTETracker
 import velodyne_decoder as vd
@@ -30,7 +32,8 @@ ResultTuple = namedtuple("StampCloudTuple", ("stamp", "points"))
 # like velodyne_decoder.read_pcap but with live streamed data
 # see https://github.com/valgur/velodyne_decoder/issues/4
 def read_live_data(ip, port, config, as_pcl_structs=False):
-    decoder = vd.StreamDecoder(config)
+    decoders = defaultdict(lambda: vd.StreamDecoder(config))
+    # decoder = vd.StreamDecoder(config)
     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     # To increase the buffer at network device, you might want to run sudo sysctl -w net.core.rmem_max=4194304 && sudo sysctl -w net.core.rmem_default=4194304
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 4 * 1024 * 1024) # 4MB
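Note: the key change here is that each sending lidar gets its own StreamDecoder, keyed by the source IP of the incoming UDP packet. A minimal sketch of that pattern in isolation, mirroring the calls used in this hunk (the packet/address values are whatever recvfrom returns):

    from collections import defaultdict
    import time
    import velodyne_decoder as vd

    calibration = vd.Calibration.read("VLP-16.yaml")
    config = vd.Config(model=vd.Model.VLP16, calibration=calibration)

    # One decoder per lidar; created lazily the first time that IP is seen.
    decoders = defaultdict(lambda: vd.StreamDecoder(config))

    def decode_packet(data: bytes, addr: tuple):
        """Route a raw UDP payload to the decoder of the lidar that sent it."""
        ip = addr[0]  # source IP identifies the physical sensor
        # Returns None until a full rotation has been assembled for that sensor.
        return decoders[ip].decode(time.time(), data, False)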
@@ -38,6 +41,8 @@ def read_live_data(ip, port, config, as_pcl_structs=False):
     sock.setblocking(False)
     inputs = [sock]

+    remotes = []
+

     while True:
         readable, _, _ = select.select([sock], [], [], 1.0)
@@ -49,45 +54,87 @@ def read_live_data(ip, port, config, as_pcl_structs=False):
            while True:
                try:
                    data, addr = sock.recvfrom(vd.PACKET_SIZE * 2)
+                   ip = addr[0]
+                   if ip not in remotes:
+                       logger.info(f"New remote {ip}")
+                       remotes.append(ip)

                    recv_stamp = time.time()
-                   result = decoder.decode(recv_stamp, data, as_pcl_structs)
+                   result = decoders[ip].decode(recv_stamp, data, as_pcl_structs)
                    if result is not None:
-                       latest_result[addr] = ResultTuple(*result)
+                       latest_result[ip] = ResultTuple(*result)
                except BlockingIOError:
                    break # No more packets in the queue

-           for addr, data in latest_result.items():
-               yield data
+           if len(latest_result):
+               yield latest_result
+               # for ip, data in latest_result.items():
+               #     yield data
        else:
            logger.warning("No LiDAR data in the last second.")



-def filter_raw_points_by_distance(points: np.ndarray, center: np.ndarray, radius):
+def filter_raw_points_by_distance(pcd: o3d.geometry.PointCloud, distance_on_axis):
    """
    Filters an Open3D point cloud based on distance from a center point.

    Args:
        pointcloud: An Open3D PointCloud object.
        center: A NumPy array representing the center point (x, y, z).
-       radius: The maximum distance from the center point to keep points.
+       distance_on_axis: the distance in positive and negative direction (so no distance is calculated)

    Returns:
        A new Open3D PointCloud object containing only the points within the specified radius.
    """

-   distances = np.linalg.norm(points - center, axis=1) # Calculate distance from each point to the center
-   indices = np.where(distances <= radius)[0] # Find indices of points within the radius
-   filtered_points = points[indices]
-
-   return filtered_points
+   points = np.asarray(pcd.points)
+   mask = points[:, 0] > distance_on_axis | points[:, 0] < -distance_on_axis | points[:, 1] > distance_on_axis | points[:, 1] < -distance_on_axis | points[:, 2] > distance_on_axis | points[:, 2] < -distance_on_axis
+   indices = np.where(mask)[0]
+   pcd_sel = pcd.select_by_index(np.where(mask)[0])
+
+   return pcd_sel
+
+
+def filter_pcd_points_below_z(pcd: o3d.geometry.PointCloud, z_threshold: float) -> o3d.geometry.PointCloud:
+   """
+   Removes all points from a point cloud that have a z-coordinate
+   smaller than a given threshold (ground?).
+
+   Args:
+       point_cloud (open3d.geometry.PointCloud): The input point cloud.
+       z_threshold (float): The z-coordinate threshold. Points with
+                            z < z_threshold will be removed.
+
+   Returns:
+       open3d.geometry.PointCloud: The filtered point cloud.
+   """
+
+   points = np.asarray(pcd.points)
+
+   pcd_sel = pcd.select_by_index(np.where(points[:, 2] > z_threshold)[0])
+   return pcd_sel
+
+
+def filter_pcd_points_outside_bounds(pcd: o3d.geometry.PointCloud, polygon: shapely.Polygon) -> o3d.geometry.PointCloud:
+   points = np.asarray(pcd.points)
+   geoms = np.array([shapely.Point(p[0], p[1]) for p in points])
+
+   inside_idxs = shapely.contains(polygon, geoms)
+   return pcd.select_by_index(np.where(inside_idxs)[0])


 class LidarTracker(Node):
     def setup(self):
         if self.config.smooth_tracks:
             self.logger.warning("Smoothing not yet implemented")
         self.track_sock = self.pub(self.config.zmq_trajectory_addr)
+        self.detection_sock = self.pub(self.config.zmq_detection_addr)
+
         calibration = vd.Calibration.read( "VLP-16.yaml")
         config = vd.Config(model=vd.Model.VLP16, calibration=calibration)
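Note: in the new mask expression of filter_raw_points_by_distance, Python's | operator binds more tightly than the comparisons, so e.g. distance_on_axis | points[:, 0] is evaluated first and raises a TypeError for float arrays; each comparison needs its own parentheses. The mask as written also selects points outside the per-axis bound while the (unchanged) docstring promises to keep points within it. A hedged sketch of the presumably intended axis-aligned check, using an illustrative helper name and following the docstring's keep-inside semantics:

    import numpy as np
    import open3d as o3d

    def filter_points_within_box(pcd: o3d.geometry.PointCloud, distance_on_axis: float) -> o3d.geometry.PointCloud:
        """Illustrative helper: keep points whose |x|, |y| and |z| all stay within distance_on_axis."""
        points = np.asarray(pcd.points)
        # Parenthesised per-axis comparisons; np.abs collapses the six checks into one.
        mask = np.all(np.abs(points) <= distance_on_axis, axis=1)
        return pcd.select_by_index(np.where(mask)[0])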
@@ -110,6 +157,54 @@ class LidarTracker(Node):
             ready_for_action_at=25,
             static_threshold=0.3
         )
+
+        self.debug_lines = load_lines_from_svg(self.config.debug_map, 100, '') if self.config.debug_map else []
+
+        self.map_outline = None
+        if self.config.map_outlines:
+
+            lines = load_lines_from_svg(self.config.map_outlines, 100, '')
+            if not len(lines):
+                logger.warning("No lines in loaded outlines svg")
+            else:
+                if len(lines) > 1:
+                    logger.warning("Multiple lines in outline file, using first only")
+                self.map_outline = shapely.Polygon([p.position for p in lines[0].points])
+
+
+        self.remotes = {}
+
+    @classmethod
+    def ip_to_name(cls, lidar_ip: str) -> str:
+        return lidar_ip.replace('.', '_')
+
+    def build_cloud(self, lidar_items: Dict[str, np.array]):
+        total_pcd = o3d.geometry.PointCloud()
+        for ip, (stamp, lidar_points) in lidar_items.items():
+            name = self.ip_to_name(ip)
+            raw_frame_points = velodyne_to_points(lidar_points)
+
+            # raw_frame_points = filter_raw_points_by_distance(raw_frame_points, np.array([0,0,0]), 15)
+
+            pcd = o3d.geometry.PointCloud()
+            pcd.points = points_to_o3d(raw_frame_points)
+            R = pcd.get_rotation_matrix_from_xyz((
+                self.get_setting(f'lidar.{name}.rot_x', 0),
+                self.get_setting(f'lidar.{name}.rot_y', 0),
+                self.get_setting(f'lidar.{name}.rot_z', 0))
+            )
+            pcd.rotate(R, center=(0,0,0))
+            pcd.translate( [
+                self.get_setting(f'lidar.{name}.trans_x', 0),
+                self.get_setting(f'lidar.{name}.trans_y', 0),
+                self.get_setting(f'lidar.{name}.trans_z', 0)])
+            pcd = flip_points_along_axis(pcd, 1)
+
+            total_pcd += pcd
+
+        return total_pcd
+
+
     def run(self):

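Note: build_cloud merges the clouds of all lidars into one frame, aligning each sensor with a per-lidar rigid transform read from live settings; the keys follow the lidar.<ip_with_underscores>.<param> scheme produced by ip_to_name. A small sketch of how those keys resolve (the IPs match the ones used in trap/settings.py below, the values are illustrative):

    # Illustrative: settings as they might arrive from the Settings GUI over ZMQ.
    settings = {
        "lidar.192_168_0_16.rot_z": 0.8,    # radians, fed into get_rotation_matrix_from_xyz
        "lidar.192_168_0_16.trans_x": 9.7,  # metres, fed into PointCloud.translate
    }

    def ip_to_name(lidar_ip: str) -> str:
        return lidar_ip.replace('.', '_')   # mirrors LidarTracker.ip_to_name

    name = ip_to_name("192.168.0.16")
    rot_z = settings.get(f"lidar.{name}.rot_z", 0)  # get_setting() falls back to 0 when unset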
@@ -153,12 +248,23 @@ class LidarTracker(Node):
             ground_lines.paint_uniform_color([0,1,0])
             vis.add_geometry(ground_lines, False)

+            for line in self.debug_lines:
+                coords = [[*p.position, 0] for p in line.points]
+                vertexes = [[i, i+1] for i in range(len(line.points)-1)]
+                # print(coords, vertexes)
+                debug_line = o3d.geometry.LineSet(
+                    o3d.utility.Vector3dVector(coords),
+                    o3d.utility.Vector2iVector(vertexes)
+                )
+                debug_line.paint_uniform_color([0,1,0])
+                vis.add_geometry(debug_line, False)
+
+
         ground_normal = np.array([1,1,0])

         found_planes = False

-        transform_matrix = np.array([[0.8457653099655785, -0.4625540400071917, -0.26594134791689383, -6.176546342430921], [-0.4412543343066861, -0.886591089552013, 0.13874744099455633, 4.588060710249307], [-0.29995941877926136, -4.340500985936626e-17, -0.9539519626719197, 0.6514765464071752], [0.0, 0.0, 0.0, 1.0]])
+        # transform_matrix = np.array([[0.8457653099655785, -0.4625540400071917, -0.26594134791689383, -6.176546342430921], [-0.4412543343066861, -0.886591089552013, 0.13874744099455633, 4.588060710249307], [-0.29995941877926136, -4.340500985936626e-17, -0.9539519626719197, 0.6514765464071752], [0.0, 0.0, 0.0, 1.0]])
         # transform_matrix = np.array([[0.8393535624356702, -0.4568202093538991, -0.2946199137064719, 7.480984770326982], [-0.46710069069557, -0.8833469220220505, 0.03892505755796417, 1.2182001838057144], [-0.2780333468817559, 0.1049452794556715, -0.9548214211786936, -1.598860001424085], [0.0, 0.0, 0.0, 1.0]])

         camera = UndistortedCamera()
@@ -167,20 +273,22 @@ class LidarTracker(Node):
         while self.run_loop():
             frame_idx += 1

-            stamp, lidar_points = next(self.sequence_generator)
+            lidar_items = next(self.sequence_generator)
+
+            current_pcd = self.build_cloud(lidar_items)
+            current_pcd = filter_pcd_points_below_z(current_pcd, .1)
+            current_pcd = filter_raw_points_by_distance(current_pcd, 10)
+            if self.map_outline and self.get_setting('lidar.crop_map_boundaries', False):
+                current_pcd = filter_pcd_points_outside_bounds(current_pcd, self.map_outline)
+            # a = list(lidar_items.keys())
+            # stamp, lidar_points = lidar_items[a[0]]
+
+            # stamp, lidar_points = next(self.sequence_generator)
+            # stamp2, lidar_points2 = next(self.sequence_generator2)
             # print(len(lidar_points))
             # print(stamp)

-            raw_frame_points = velodyne_to_points(lidar_points)
-
-            raw_frame_points = filter_raw_points_by_distance(raw_frame_points, np.array([0,0,0]), 15)
-
-            current_pcd = o3d.geometry.PointCloud()
-            current_pcd.points = points_to_o3d(raw_frame_points)
-            R = current_pcd.get_rotation_matrix_from_xyz((0,0, np.pi / 4+.1))
-            current_pcd.rotate(R)
-            current_pcd.translate( [9.7, -11.14, 0])
-            current_pcd = flip_points_along_axis(current_pcd, 1)
             # current_pcd.scale(np.array([1,-1, 1]), )
             # current_pcd.transform(transform_matrix)

@@ -217,7 +325,7 @@ class LidarTracker(Node):


             # down sample
-            filtered_pcd = filtered_pcd.voxel_down_sample(voxel_size=0.05)
+            filtered_pcd = filtered_pcd.voxel_down_sample(voxel_size=0.15)

             if self.config.viz:
                 live_filtered_pcd.points = filtered_pcd.points
@@ -260,6 +368,8 @@ class LidarTracker(Node):
             # active_stracks = [track for track in self.tracker.tracked_stracks if track.is_activated]
             active_stracks = [track for track in self.tracker.tracked_stracks if track.is_activated]
             detections = [Detection.from_bytetrack(track, frame_idx) for track in active_stracks]
+
+            self.detection_sock.send_pyobj(detections)

             for detection in detections:
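Note: per-frame detections are now also published as pickled Python objects (send_pyobj) on the new --zmq-detection-addr socket, alongside the existing trajectory feed; Stage subscribes to it further down. A minimal standalone subscriber sketch using pyzmq directly (the in-repo consumers go through Node.sub() instead; the endpoint is the default declared in the argparser):

    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.connect("ipc:///tmp/feeds_dets")   # default of --zmq-detection-addr
    sock.setsockopt(zmq.SUBSCRIBE, b"")     # no topic filter, receive everything

    while True:
        detections = sock.recv_pyobj()      # the list sent with send_pyobj()
        print(f"received {len(detections)} detections")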
@@ -368,6 +478,10 @@ class LidarTracker(Node):
                             help='Manually specity communication addr for the trajectory messages',
                             type=str,
                             default="ipc:///tmp/feeds_traj")
+        argparser.add_argument('--zmq-detection-addr',
+                            help='Manually specity communication addr for the detection messages',
+                            type=str,
+                            default="ipc:///tmp/feeds_dets")
         argparser.add_argument('--ip',
                             help='IP of this computer on which to listen for IP packets broadcast by lidar (so NOT the ip of the Lidar itself)',
                             type=str,
@@ -398,6 +512,14 @@ class LidarTracker(Node):
         argparser.add_argument("--viz",
                             help="Render pointclouds in open3d",
                             action='store_true')
+        argparser.add_argument('--debug-map',
+                            help='specify a map (svg-file) from which to load lines which will be overlayed',
+                            type=str,
+                            default="../DATASETS/hof3/map_hof.svg")
+        argparser.add_argument('--map-outlines',
+                            help='specify a map (svg-file) from which to load boundary of the map. Any points outside will be filtered (removing floor and walls)',
+                            type=Path,
+                            default="../DATASETS/hof3/map_hof_boundaries.svg")

         return argparser

@@ -412,6 +534,7 @@ def find_ground(pcd: o3d.geometry.PointCloud, distance_threshold=0.1):
     return np.asarray(non_ground.points), np.asarray(ground)


+
 def segment_planes(pcd, num_planes = 5, num_iterations=100, distance_threshold=0.1) -> List[o3d.geometry.PointCloud]:
     """
     Segments a point cloud into multiple planes using RANSAC.
@@ -451,21 +574,56 @@ def segment_planes(pcd, num_planes = 5, num_iterations=100, distance_threshold=0

 # ==== Clustering and Centroids ====
 def cluster_2d(points_xy, eps=0.5, min_samples=5):
-    if not len(points_xy):
+    if len(points_xy) < min_samples:
         return []

     db = DBSCAN(eps=eps, min_samples=min_samples).fit(points_xy)
     return db.labels_

-def compute_centroids(points_xy, labels):
-    centroids = []
-    for label in set(labels):
-        if label == -1:
-            continue
-        cluster = points_xy[labels == label]
-        centroid = cluster.mean(axis=0)
-        centroids.append(centroid)
-    return np.array(centroids)
+def split_cluster_by_convex_hull(points, max_hull_area):
+    """
+    Splits a cluster of points based on its convex hull area.
+
+    Args:
+        points: A NumPy array of shape (N, 3) representing the cluster points.
+        max_hull_area: The maximum allowed area for the convex hull.
+
+    Returns:
+        A list of NumPy arrays, each representing a sub-cluster.
+    """
+
+    if len(points) == 0:
+        return []
+
+    point_cloud = o3d.geometry.PointCloud()
+    point_cloud.points = o3d.utility.Vector3dVector(points)
+
+    # Calculate the convex hull
+    hull, _ = point_cloud.compute_convex_hull()
+
+    # Get the area of the convex hull
+    hull_area = hull.get_area()
+
+    if hull_area <= max_hull_area:
+        return [points] # No splitting needed
+
+    # If the hull area exceeds the limit, split the cluster
+    print(f"Splitting cluster - Convex Hull Area: {hull_area}")
+
+    # Simple splitting strategy: Use K-Means within the cluster
+    from sklearn.cluster import KMeans
+    kmeans = KMeans(n_clusters=2, random_state=0, n_init='auto') # Increase n_init for stability
+    kmeans.fit(points)
+    labels = kmeans.labels_
+
+    sub_clusters = []
+    for label in np.unique(labels):
+        sub_cluster_points = points[labels == label]
+        sub_clusters.append(sub_cluster_points)
+
+    return sub_clusters
+
+

 def get_cluster_boxes(points_xy, labels, min_area = 0):
     if not len(labels):
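Note: split_cluster_by_convex_hull replaces compute_centroids and is meant to break an oversized DBSCAN cluster (e.g. two people merged into one blob) into two K-Means sub-clusters. A hedged usage sketch combining it with cluster_2d above; the input array and the 0.8 area threshold are illustrative, not values from this commit:

    import numpy as np

    points_xyz = np.random.rand(200, 3) * 5          # stand-in for one frame of filtered points
    labels = cluster_2d(points_xyz[:, :2], eps=0.5, min_samples=5)

    sub_clusters = []
    for label in set(labels) - {-1}:                  # -1 is DBSCAN noise
        cluster = points_xyz[labels == label]
        sub_clusters += split_cluster_by_convex_hull(cluster, max_hull_area=0.8)

    # One centroid per (possibly split) cluster, roughly what compute_centroids used to return.
    centroids = [c.mean(axis=0) for c in sub_clusters]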
trap/node.py (36 changed lines)

@@ -1,3 +1,4 @@
+from collections import defaultdict
 import logging
 from logging.handlers import QueueHandler, QueueListener, SocketHandler
 import multiprocessing
@@ -24,6 +25,9 @@ class Node():

         self.dt_since_last_tick = 0

+        self.config_sock = self.sub(self.config.zmq_config_addr)
+        self.settings = defaultdict(None)
+
         self.setup()

     @classmethod
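Note: defaultdict(None) has no default factory, so it behaves like a plain dict and still raises KeyError for unknown keys; that is presumably why get_setting() below guards with an explicit membership check instead of indexing directly. A tiny illustration:

    from collections import defaultdict

    settings = defaultdict(None)          # default_factory is None, so no values are auto-created
    settings["lidar.crop_map_boundaries"] = True

    def get_setting(name, default):
        # mirrors Node.get_setting(): membership check instead of settings[name]
        return settings[name] if name in settings else default

    print(get_setting("lidar.crop_map_boundaries", False))  # True
    print(get_setting("lidar.unknown", 0))                   # 0, no KeyError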
@@ -41,13 +45,32 @@ class Node():
     def run(self):
         raise RuntimeError("Not implemented run()")

+    def stop(self):
+        pass
+
     def run_loop(self):
         """Use in run(), to check if it should keep looping
        Takes care of tick()'ing the iterations/second counter
         """
         self.tick()
+        self.check_config()
         return self.is_running.is_set()

+    def check_config(self):
+        try:
+            config = self.config_sock.recv_json(zmq.NOBLOCK)
+            for field, value in config.items():
+                self.settings[field] = value
+        except zmq.ZMQError as e:
+            # no msgs
+            pass
+
+    def get_setting(self, name: str, default):
+        if name in self.settings:
+            return self.settings[name]
+        return default
+
+
     def run_loop_capped_fps(self, max_fps: float, warn_below_fps: float = 0.):
         """Use in run(), to check if it should keep looping
        Takes care of tick()'ing the iterations/second counter
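Note: every Node now subscribes to a shared config endpoint; check_config() drains any pending JSON message non-blockingly on each loop iteration and get_setting() then reads the latest value. A minimal sketch of the publishing side of that handshake, using pyzmq directly and the default --zmq-config-addr endpoint (the Settings node below does this through its own pub helper; keys shown are ones used elsewhere in this commit):

    import zmq

    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind("ipc:///tmp/feeds_config")   # default of --zmq-config-addr

    # Each message is a flat {setting_name: value} dict; nodes merge it into self.settings.
    pub.send_json({"lidar.192_168_0_16.rot_z": 1.57})
    pub.send_json({"lidar.crop_map_boundaries": True})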
@@ -92,6 +115,10 @@ class Node():
                             type=int,
                             default=19996
                             )
+        parser.add_argument('--zmq-config-addr',
+                            help='Manually specity communication addr for the config messages',
+                            type=str,
+                            default="ipc:///tmp/feeds_config")
         return parser

@@ -113,7 +140,11 @@ class Node():
     @classmethod
     def start(cls, config: Namespace, is_running: BaseEvent, timer_counter: Optional[Timer]):
         instance = cls(config, is_running, timer_counter)
-        instance.run()
+        try:
+            instance.run()
+        except Exception as e:
+            instance.logger.exception(f"{e}")
+        instance.stop()
         instance.logger.info("Stopping")

     @classmethod
@@ -139,7 +170,8 @@ def setup_logging(config: Namespace):
     logging.captureWarnings(True)
     # root_logger.setLevel(logging.NOTSET) # to send all records to cutelog
     socket_handler = SocketHandler(config.remote_log_addr, config.remote_log_port)
-    print(socket_handler.host, socket_handler.port)
+
+    # print(socket_handler.host, socket_handler.port)
     socket_handler.setLevel(logging.NOTSET)
     log_handlers.append(socket_handler)
trap/settings.py (new file, 102 lines)

@@ -0,0 +1,102 @@
+from argparse import ArgumentParser
+import json
+import math
+from pathlib import Path
+from trap.node import Node
+
+import dearpygui.dearpygui as dpg
+
+class Settings(Node):
+    """
+    Quickndirty gui to change some settings ad-hoc
+    no storage of values, no defaults. No detection of lost nodes, or sending config on them starting
+
+    """
+    def setup(self):
+        self.config_sock.close() # setup by default for all nodes, but we want to publish
+        self.config_sock = self.pub(self.config.zmq_config_addr)
+
+        self.settings_fields = {}
+        self.settings = {}
+
+
+        self.load()
+
+        dpg.create_context()
+        dpg.create_viewport(title='Trap settings', width=600, height=200)
+        dpg.setup_dearpygui()
+
+
+        with dpg.window(label="General", pos=(0, 0)):
+            dpg.add_text(f"Settings from {self.config.settings_file}")
+            dpg.add_button(label="Save", callback=self.save)
+
+        with dpg.window(label="Lidar", pos=(0, 150)):
+            self.register_setting(f'lidar.crop_map_boundaries', dpg.add_checkbox(label="crop_map_boundaries", default_value=self.get_setting(f'lidar.crop_map_boundaries', False), callback=self.on_change))
+        for lidar in ["192.168.0.16", "192.168.0.10"]:
+            name = lidar.replace(".", "_")
+            with dpg.window(label=f"Lidar {lidar}", pos=(200, 0),autosize=True):
+                # dpg.add_text("test")
+                # dpg.add_input_text(label="string", default_value="Quick brown fox")
+                self.register_setting(f'lidar.{name}.rot_x', dpg.add_slider_float(label="rot_x", default_value=self.get_setting(f'lidar.{name}.rot_x', 0), max_value=math.pi * 2, callback=self.on_change))
+                self.register_setting(f'lidar.{name}.rot_y', dpg.add_slider_float(label="rot_y", default_value=self.get_setting(f'lidar.{name}.rot_y', 0), max_value=math.pi * 2, callback=self.on_change))
+                self.register_setting(f'lidar.{name}.rot_z', dpg.add_slider_float(label="rot_z", default_value=self.get_setting(f'lidar.{name}.rot_z', 0), max_value=math.pi * 2, callback=self.on_change))
+                self.register_setting(f'lidar.{name}.trans_x', dpg.add_slider_float(label="trans_x", default_value=self.get_setting(f'lidar.{name}.trans_x', 0), min_value=-15, max_value=15, callback=self.on_change))
+                self.register_setting(f'lidar.{name}.trans_y', dpg.add_slider_float(label="trans_y", default_value=self.get_setting(f'lidar.{name}.trans_y', 0), min_value=-15, max_value=15, callback=self.on_change))
+                self.register_setting(f'lidar.{name}.trans_z', dpg.add_slider_float(label="trans_z", default_value=self.get_setting(f'lidar.{name}.trans_z', 0), min_value=-15, max_value=15, callback=self.on_change))
+
+        dpg.show_viewport()
+
+    def stop(self):
+
+        dpg.destroy_context()
+
+
+    def check_config(self):
+        # override node function to disable it
+        pass
+
+    def register_setting(self, name: str, field: int):
+        self.settings_fields[field] = name
+
+    def on_change(self, sender, value, user_data = None):
+        # print(sender, app_data, user_data)
+        setting = self.settings_fields[sender]
+        self.settings[setting] = value
+        self.config_sock.send_json({setting: value})
+
+
+    def save(self):
+        with self.config.settings_file.open('w') as fp:
+            self.logger.info(f"Save to {self.config.settings_file}")
+            json.dump(self.settings, fp)
+
+
+    def load(self):
+        if not self.config.settings_file.exists():
+            self.logger.info(f"No config at {self.config.settings_file}")
+            return {}
+
+        self.logger.info(f"Loading from {self.config.settings_file}")
+        with self.config.settings_file.open('r') as fp:
+            self.settings = json.load(fp)
+
+    def run(self):
+        # below replaces, start_dearpygui()
+        while self.run_loop() and dpg.is_dearpygui_running():
+            dpg.render_dearpygui_frame()
+
+
+    @classmethod
+    def arg_parser(cls):
+        argparser = ArgumentParser()
+        argparser.add_argument('--settings-file',
+                            help='Where to store settings',
+                            type=Path,
+                            default=Path("./settings.json"))
+
+        return argparser
@@ -492,6 +492,7 @@ class Stage(Node):
         self.frame_noimg_sock = self.sub(self.config.zmq_frame_noimg_addr)
         self.trajectory_sock = self.sub(self.config.zmq_trajectory_addr)
         self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
+        self.detection_sock = self.sub(self.config.zmq_detection_addr)
         self.stage_sock = self.pub(self.config.zmq_stage_addr)
         self.counter = CounterSender()

@@ -659,6 +660,10 @@ class Stage(Node):
                             help='Manually specity communication addr for the prediction messages',
                             type=str,
                             default="ipc:///tmp/feeds_preds")
+        argparser.add_argument('--zmq-detection-addr',
+                            help='Manually specity communication addr for the detection messages',
+                            type=str,
+                            default="ipc:///tmp/feeds_dets")
         argparser.add_argument('--zmq-stage-addr',
                             help='Manually specity communication addr for the stage messages (the rendered lines)',
                             type=str,
uv.lock (13 changed lines)

@@ -417,6 +417,17 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d3/36/e0010483ca49b9bf6f389631ccea07b3ff6b678d14d8c7a0a4357860c36a/dash-3.2.0-py3-none-any.whl", hash = "sha256:4c1819588d83bed2cbcf5807daa5c2380c8c85789a6935a733f018f04ad8a6a2", size = 7900661 },
 ]

+[[package]]
+name = "dearpygui"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2e/26/93db234a69d01ae84ed61abb991a3da5555410abdf38d5932fb7ed594e12/dearpygui-2.1.0-cp310-cp310-macosx_10_6_x86_64.whl", hash = "sha256:374d4c605affdf8a49eef4cf434f76e17df374590e51745b62c67025d1d41ec3", size = 2101011 },
+    { url = "https://files.pythonhosted.org/packages/ef/13/0301fd7fd3ac01ed23003873681c835f18d14267953c54ff6889cb1d0212/dearpygui-2.1.0-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:074985fa9d1622edda89c7f113d7a25ae5543f2e3f684bba3601e688c873936f", size = 1874384 },
+    { url = "https://files.pythonhosted.org/packages/5d/54/5e53d99a1d352f387bd18250d8bcfe9e60594eefc062f246f61810c1bd80/dearpygui-2.1.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:49808389f1d1acfb4f5cbd9f1b1ec188fbcd2d828414094864f7035e27158db2", size = 2636636 },
+    { url = "https://files.pythonhosted.org/packages/34/44/2508c4ba08b28cc9e827a04ae00fc73dbe6820531241eb43ba28f370372b/dearpygui-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:792c1cd34dd0d03bf15838cc1e66ad01282c672ef0d2b9981368b6dcd37e67d3", size = 1810184 },
+]
+
 [[package]]
 name = "debugpy"
 version = "1.8.13"

@@ -2757,6 +2768,7 @@ source = { editable = "." }
 dependencies = [
     { name = "baumer-neoapi" },
     { name = "bytetracker" },
+    { name = "dearpygui" },
     { name = "deep-sort-realtime" },
     { name = "facenet-pytorch" },
     { name = "ffmpeg-python" },

@@ -2798,6 +2810,7 @@ dependencies = [
 requires-dist = [
     { name = "baumer-neoapi", path = "../../Downloads/Baumer_neoAPI_1.5.0_lin_x86_64_python/wheel/baumer_neoapi-1.5.0-cp34.cp35.cp36.cp37.cp38.cp39.cp310.cp311.cp312-none-linux_x86_64.whl" },
     { name = "bytetracker", git = "https://github.com/rubenvandeven/bytetrack-pip" },
+    { name = "dearpygui", specifier = ">=2.1.0" },
     { name = "deep-sort-realtime", specifier = ">=1.3.2,<2" },
     { name = "facenet-pytorch", specifier = ">=2.5.3" },
     { name = "ffmpeg-python", specifier = ">=0.2.0,<0.3" },