Compare commits
No commits in common. "main" and "cluster_predictions" have entirely different histories.
main ... cluster_predictions
21 changed files with 176 additions and 1475 deletions

README.md (28 changes)
@@ -7,7 +7,7 @@
 ## How to
 
-> See also the sibling repo [traptools](https://git.rubenvandeven.com/security_vision/traptools) for camera calibration and homography tools that are needed for this repo. Also, [laserspace](https://git.rubenvandeven.com/security_vision/laserspace) is used to map the shapes (which are generated by `stage.py`) to lasers, so as to use specific optimization techniques for the paths before sending them to the DAC.
+> See also the sibling repo [traptools](https://git.rubenvandeven.com/security_vision/traptools) for camera calibration and homography tools that are needed for this repo.
 
 These are roughly the steps to go from datagathering to training
@@ -25,29 +25,3 @@ These are roughly the steps to go from datagathering to training
 <!-- * On a video file (you can use a wildcard) `DISPLAY=:1 uv run trapserv --remote-log-addr 100.69.123.91 --eval_device cuda:0 --detector ultralytics --homography ../DATASETS/NAME/homography.json --eval_data_dict EXPERIMENTS/trajectron-data/hof2s-m_test.pkl --video-src ../DATASETS/NAME/*.mp4 --model_dir EXPERIMENTS/models/models_DATE_NAME/ --smooth-predictions --smooth-tracks --num-samples 3 --render-window --calibration ../DATASETS/NAME/calibration.json` (the DISPLAY environment variable is used here to run over an SSH connection and display on the local monitor)
 * or on the RTSP stream, which uses gstreamer to substantially reduce latency compared to the default ffmpeg bindings in OpenCV.
 * To just have a single trajectory pulled from the distribution use `--full-dist`. Also try `--z_mode`. -->
-
-## Testnight 2025-06-13
-
-Step-by-step plan:
-
-* Hang lasers. Connect all cables etc.
-* `DISPLAY=:0 cargo run --example laser_frame_stream_gui`
-  * Use numbers to pick a nice shape. Use this to make sure both lasers cover the right area. (If it doesn't work, flip some switches in the gui; the laser output should now start.)
-* In the trap folder: `uv run supervisorctl start video`
-* In the laserspace folder: `DISPLAY=:0 cargo run --bin render_lines_gui` and use the gui to draw and tweak the projection area
-  * Use the save button to store the configuration
-/*
-* in the trap folder: `DISPLAY=:0 uv run trap_laser_calibration`
-* follow instructions:
-  * camera points: 1-9 or cursor to create/select/move points
-  * move laser: vim movement keys: hjkl, use shift to move faster
-  * `c` to calibrate. Matrix is output to cli.
-  * `q` to quit
-* saved to `laser_calib.json`, copy H field to `trap_rust/src/trap/laser.rs` (to e.g. TMP_STUDIO_CM_8)
-* Restart `render_lines_gui` with new homographies
-  * `DISPLAY=:0 cargo run --bin render_lines_gui`
-*/
-
-* change video source in `supervisord.conf` and run `uv run supervisorctl update` to switch
-* **If tracking is slow and there's no prediction:**
-  * `uv run python -c "import torch;print(torch.cuda.is_available())"`
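The last step of the removed checklist is the usual culprit check: without CUDA, the ultralytics detector and the prediction model fall back to CPU. A slightly longer diagnostic in the same spirit (standard torch API; a sketch, not part of the repo):

```python
import torch

print(torch.__version__, torch.version.cuda)  # wheel build / driver mismatches show up here
print(torch.cuda.is_available())              # False -> tracking runs on CPU and predictions lag
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
```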
@@ -2,10 +2,10 @@
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
 
 tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.000001 # threshold for the first association
-track_low_thresh: 0.000001 # threshold for the second association
-new_track_thresh: 0.000001 # threshold for init new track if the detection does not match any tracks
-track_buffer: 10 # buffer to calculate the time when to remove tracks
-match_thresh: 0.99 # threshold for matching tracks
+track_high_thresh: 0.0001 # threshold for the first association
+track_low_thresh: 0.0001 # threshold for the second association
+new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
+track_buffer: 50 # buffer to calculate the time when to remove tracks
+match_thresh: 0.95 # threshold for matching tracks
 fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
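The hunk above tunes a ByteTrack config for the YOLO tracker. A minimal sketch of how such a YAML is consumed, assuming the `ultralytics` package used elsewhere in this repo (this config's file name is not shown in the diff, so the path below is hypothetical):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # any ultralytics detection model
# Near-zero track_high_thresh keeps almost every detection in the first
# association stage; a larger track_buffer keeps lost tracks alive longer.
results = model.track(source="video.mp4", tracker="custom_bytetrack.yaml")
```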
pyproject.toml

@@ -34,9 +34,6 @@ dependencies = [
     "facenet-pytorch>=2.5.3",
     "simplification>=0.7.12",
     "supervisor>=4.2.5",
-    "superfsmon>=1.2.3",
-    "noise>=1.2.2",
-    "svgpathtools>=1.7.1",
 ]
 
 [project.scripts]
@@ -48,16 +45,12 @@ process_data = "trap.process_data:main"
 blacklist = "trap.tools:blacklist_tracks"
 rewrite_tracks = "trap.tools:rewrite_raw_track_files"
 
 model_train = "trap.models.train:train"
 
 trap_video_source = "trap.frame_emitter:FrameEmitter.parse_and_start"
-trap_video_writer = "trap.frame_writer:FrameWriter.parse_and_start"
 trap_tracker = "trap.tracker:Tracker.parse_and_start"
 trap_stage = "trap.stage:Stage.parse_and_start"
 trap_prediction = "trap.prediction_server:PredictionServer.parse_and_start"
 trap_render_cv = "trap.cv_renderer:CvRenderer.parse_and_start"
-trap_monitor = "trap.monitor:Monitor.parse_and_start" # migrate timer
-trap_laser_calibration = "trap.laser_calibration:LaserCalibration.parse_and_start" # migrate timer
-
 [tool.uv]
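Each `[project.scripts]` entry maps a console command to a module entry point, which is how the supervisord programs further down launch the nodes. For example, `uv run trap_tracker` is roughly equivalent to:

```python
from trap.tracker import Tracker

Tracker.parse_and_start()  # parses CLI args, then starts the node
```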
supervisord.conf

@@ -4,7 +4,7 @@ port = *:8293
 # password = 123
 
 [supervisord]
-nodaemon = false
+nodaemon = True
 
 
 ; The rpcinterface:supervisor section must remain in the config file for
@@ -20,16 +20,15 @@ serverurl = http://localhost:8293
 command=uv run trap_monitor
 numprocs=1
 directory=%(here)s
 autostart=false
 
 [program:video]
-# command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
-command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
+command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
+# command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer/homography.json --video-src gige://../DATASETS/hof3-cam-baumer/gige_config.json --calibration ../DATASETS/hof3-cam-baumer/calibration.json
 directory=%(here)s
 
 [program:tracker]
-command=uv run trap_tracker --smooth-tracks
+command=uv run trap_tracker
 directory=%(here)s
 
 [program:stage]
@@ -38,20 +37,12 @@ directory=%(here)s
 
 [program:predictor]
 command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof3-m2-ud-split-nostep-conv12-f2.0-map-2024-12-29_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
 
 # uv run trajectron_train --continue_training_from EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --eval_every 5 --train_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_train.pkl --eval_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir EXPERIMENTS/models --log_tag _hof3-conv12-f2.0-map-2024-12-27 --train_epochs 10 --conf EXPERIMENTS/config.json --data_dir EXPERIMENTS/trajectron-data --map_encoding
 directory=%(here)s
 
 [program:render_cv]
 command=uv run trap_render_cv
 directory=%(here)s
 environment=DISPLAY=":0"
-autostart=false
 ; can be long to quit if rendering to video file
 stopwaitsecs=60
-
-
-# during development auto restart some services when the code changes
-[program:superfsmon]
-command=superfsmon trap/stage.py stage
-directory=%(here)s
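The config above also exposes supervisord's inet HTTP server on port 8293, so besides `uv run supervisorctl` the programs can be driven over supervisor's XML-RPC API. A hedged sketch (standard supervisor API; the endpoint follows from the `port = *:8293` line):

```python
from xmlrpc.client import ServerProxy

server = ServerProxy("http://localhost:8293/RPC2")
print(server.supervisor.getState())       # e.g. {'statecode': 1, 'statename': 'RUNNING'}
server.supervisor.stopProcess("video")    # switch sources by editing supervisord.conf,
server.supervisor.startProcess("video")   # then restarting the program
```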
@@ -36,7 +36,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -151,7 +151,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -161,7 +161,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -187,7 +187,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -196,34 +196,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "0"
-      ]
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "len(tracks)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 14,
    "metadata": {},
    "outputs": [],
    "source": [
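The hunks above are pure notebook churn: execution counts and a stale `len(tracks)` output. A sketch of clearing these before committing, using `nbformat` (the notebook's file name is not shown in this diff, so the path is hypothetical):

```python
import nbformat

nb = nbformat.read("notebook.ipynb", as_version=4)
for cell in nb.cells:
    if cell.cell_type == "code":
        cell.execution_count = None  # avoids count-only diffs like those above
        cell.outputs = []
nbformat.write(nb, "notebook.ipynb")
```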
trap/base.py (27 changes)

@@ -156,13 +156,11 @@ class DistortedCamera(ABC):
     def from_calibfile(cls, calibration_path, H, fps):
         with calibration_path.open('r') as fp:
             data = json.load(fp)
-        camera = cls.from_calibdata(data, H, fps)
-
-        return camera
+        return cls.from_calibdata(data, H, fps)
 
 
     @classmethod
-    def from_paths(cls, calibration_path: Path, h_path: Path, fps: float):
+    def from_paths(cls, calibration_path, h_path, fps):
         H = H_from_path(h_path)
         with calibration_path.open('r') as fp:
             calibdata = json.load(fp)
@@ -170,7 +168,6 @@ class DistortedCamera(ABC):
             camera = FisheyeCamera.from_calibdata(calibdata, H, fps)
         else:
             camera = Camera.from_calibdata(calibdata, H, fps)
-
         return camera
 
         # return cls.from_calibfile(calibration_path, H, fps)
@@ -181,8 +178,6 @@ class DistortedCamera(ABC):
 
         coords = self.project_points(coords, scale)
         return coords
-
-
 
 class FisheyeCamera(DistortedCamera):
     def __init__(self, dim1, dim2, dim3, K, D, new_K, scaled_K, balance, H, fps):
@@ -203,24 +198,8 @@ class FisheyeCamera(DistortedCamera):
 
 
         self.map1, self.map2 = cv2.fisheye.initUndistortRectifyMap(self.scaled_K, self.D, self._R, self.new_K, self.dim3, cv2.CV_16SC2)
-        # self.map1, self.map2 = cv2.fisheye.initUndistortRectifyMap(self.scaled_K, self.D, self._R, self.new_K, self.dim3, cv2.CV_32FC1)
 
     def undistort_img(self, img: MatLike):
-        # map1, map2 = adjust_remap_maps(self.map1, self.map2, 2, (0,0))
-        # this only works on the undistort, but screws up when doing subsequent homography,
-        # there needs to be a way to combine both this remap and warpPerspective into a
-        # single remap call...
-        # scale = 0.3
-        # cx = self.dim3[0] / 2
-        # cy = self.dim3[1] / 2
-
-        # map1 = (self.map1 - cx) / scale + cx
-        # map2 = (self.map2 - cy) / scale + cy
-
-        # map1 += 900 #translate x (>0 left, <0 right)
-        # map2 += 1500 #translate y (>0 up, <0 down)
-
-
         return cv2.remap(img, self.map1, self.map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
 
     def undistort_points(self, distorted_points: PointList):
@@ -310,7 +289,7 @@ class Detection:
 
     @classmethod
     def from_deepsort(cls, dstrack: DeepsortTrack, frame_nr: int):
-        return cls(dstrack.track_id, *dstrack.to_ltwh(), dstrack.det_conf or 0, DetectionState.from_deepsort_track(dstrack), frame_nr, dstrack.det_class)
+        return cls(dstrack.track_id, *dstrack.to_ltwh(), dstrack.det_conf, DetectionState.from_deepsort_track(dstrack), frame_nr, dstrack.det_class)
 
 
     @classmethod
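The `from_deepsort` change is more than cosmetic: deep-sort tracks can carry `det_conf = None` (e.g. for coasted tracks), and the renderer later formats the confidence as a float. A minimal illustration of the failure the `or 0` guard prevents:

```python
det_conf = None           # what deep-sort may report for an unmatched track
# f"{det_conf:.02f}"      # TypeError: unsupported format string for NoneType
conf = det_conf or 0      # the guard on the removed side of the hunk
print(f"{conf:.02f}")     # "0.00"
```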
trap/cv_renderer.py

@@ -2,15 +2,12 @@
 from __future__ import annotations
 
 import datetime
-import json
 import logging
-from pathlib import Path
 import time
 from argparse import ArgumentParser, Namespace
-from multiprocessing.synchronize import Event as BaseEvent
-from typing import Dict, List, Optional
+from typing import Dict
 
 from charset_normalizer import detect
 import cv2
 import ffmpeg
 import numpy as np
@@ -18,10 +15,8 @@ import pyglet
 import zmq
 from pyglet import shapes
 
-from trap.base import Detection
 from trap.counter import CounterListerner
 from trap.frame_emitter import Frame, Track
-from trap.lines import load_lines_from_svg
 from trap.node import Node
 from trap.preview_renderer import FrameWriter
 from trap.tools import draw_track_predictions, draw_track_projected, to_point
@@ -33,7 +28,6 @@ class CvRenderer(Node):
     def setup(self):
         self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
         self.tracker_sock = self.sub(self.config.zmq_trajectory_addr)
-        self.detector_sock = self.sub(self.config.zmq_detection_addr)
         self.frame_sock = self.sub(self.config.zmq_frame_addr)
 
         # self.H = self.config.H
@@ -52,15 +46,10 @@ class CvRenderer(Node):
         self.frame: Frame|None= None
         self.tracker_frame: Frame|None = None
         self.prediction_frame: Frame|None = None
-        self.detections: List[Detection]|None = None
 
         self.tracks: Dict[str, Track] = {}
         self.predictions: Dict[str, Track] = {}
 
-        self.scale = 100
-        self.debug_lines = debug_lines = load_lines_from_svg(self.config.debug_map, self.scale, '') if self.config.debug_map else []
-
-
     def refresh_labels(self, dt: float):
         """Every frame"""
 
@@ -126,7 +115,7 @@ class CvRenderer(Node):
 
         cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
         # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
-        cv2.moveWindow("frame", 0, -1)
+        cv2.moveWindow("frame", 1920, -1)
         if self.config.full_screen:
             cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
         # bgsub = cv2.createBackgroundSubtractorMOG2(120, 50, detectShadows=True)
@@ -170,35 +159,21 @@ class CvRenderer(Node):
             except zmq.ZMQError as e:
                 logger.debug(f'reuse tracks')
 
-            try:
-                self.detections = self.detector_sock.recv_pyobj(zmq.NOBLOCK)
-                # print('detections')
-            except zmq.ZMQError as e:
-                # print('no detections')
-                # idx = frame.index if frame else "NONE"
-                # logger.debug(f"reuse video frame {idx}")
-                pass
-
             if first_time is None:
                 first_time = frame.time
 
-            # img = frame.img
-            # save_file = Path("videos/snap.png")
-            # if not save_file.exists():
-            #     img = frame.camera.img_to_world(frame.img, 100)
-            #     cv2.imwrite(save_file, img)
-
-            img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.detections, self.config.render_clusters, self.debug_lines, self.scale)
+            img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.config.render_clusters)
 
             logger.debug(f"write frame {frame.time - first_time:.3f}s")
             if self.out_writer:
                 self.out_writer.write(img)
             if self.streaming_process:
                 self.streaming_process.stdin.write(img.tobytes())
-            if not self.config.no_window:
+            if self.config.render_window:
                 cv2.imshow('frame',cv2.resize(img, (1920, 1080)))
                 # cv2.imshow('frame',img)
-                cv2.waitKey(10)
+                cv2.waitKey(1)
 
             # clear out old tracks & predictions:
 
@@ -235,12 +210,6 @@ class CvRenderer(Node):
                              help='Manually specify communication addr for the trajectory messages',
                              type=str,
                              default="ipc:///tmp/feeds_traj")
-
-        render_parser.add_argument('--zmq-detection-addr',
-                             help='Manually specify communication addr for the detection messages',
-                             type=str,
-                             default="ipc:///tmp/feeds_dets")
 
         render_parser.add_argument('--zmq-prediction-addr',
                              help='Manually specify communication addr for the prediction messages',
                              type=str,
@@ -249,8 +218,8 @@ class CvRenderer(Node):
         render_parser.add_argument("--render-file",
                              help="Render a video file previewing the prediction, and its delay compared to the current frame",
                              action='store_true')
-        render_parser.add_argument("--no-window",
-                             help="Disable previewing to a window",
+        render_parser.add_argument("--render-window",
+                             help="Render a preview to a window",
                              action='store_true')
 
         render_parser.add_argument("--full-screen",
@@ -269,10 +238,7 @@ class CvRenderer(Node):
                              """,
                              type=str,
                              default=None)
-        render_parser.add_argument('--debug-map',
-                             help='specify a map (svg-file) from which to load lines which will be overlayed',
-                             type=str,
-                             default="../DATASETS/hof3/map_hof.svg")
 
         return render_parser
 
 # colorset = itertools.product([0,255], repeat=3) # but remove white
@@ -304,8 +270,8 @@ def get_animation_position(track: Track, current_frame: Frame):
 
 
 
-def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], detections: Optional[List[Detection]], as_clusters = True, debug_lines = [], scale: float = 100) -> np.array:
-
+def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], as_clusters = True) -> np.array:
+    scale = 100
     # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
     # or https://github.com/pygobject/pycairo?tab=readme-ov-file
     # or https://pyglet.readthedocs.io/en/latest/programming_guide/shapes.html
@@ -338,22 +304,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     # cv2.imwrite(str(self.config.output_dir / "orig.png"), warpedFrame)
     cv2.rectangle(img, (0,0), (img.shape[1],25), (0,0,0), -1)
 
-    if detections:
-        for detection in detections:
-            points = [
-                detection.get_foot_coords(),
-                [detection.l, detection.t],
-                [detection.l + detection.w, detection.t + detection.h],
-            ]
-            points = frame.camera.points_img_to_world(points, scale)
-            points = [to_point(p) for p in points] # to int
-
-            w = points[1][0]-points[2][0]
-            feet = [int(points[2][0] + .5 * w), points[2][1]]
-            cv2.rectangle(img, points[1], points[2], (255,255,0), 2)
-            cv2.circle(img, points[0], 5, (255,255,0), 2)
-            cv2.putText(img, f"{detection.conf:.02f}", (points[0][0], points[0][1]+20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
-
-
     def conversion(points):
         return convert_world_points_to_img_points(points, scale)
@@ -365,29 +315,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
         inv_H = np.linalg.pinv(tracker_frame.H)
         draw_track_projected(img, track, int(track_id), frame.camera, conversion)
 
-    for line in debug_lines:
-        for rp1, rp2 in zip(line.points, line.points[1:]):
-            p1 = (
-                int(rp1.position[0]*scale),
-                int(rp1.position[1]*scale),
-            )
-            p2 = (
-                int(rp2.position[0]*scale),
-                int(rp2.position[1]*scale),
-            )
-            cv2.line(img, p1, p2, (255,0,0), 2)
-            # points = [(int(point[0]*scale), int(point[1]*scale)) for point in points]
-
-    # for num, points in enumerate(frame.camera.debug_lines):
-    #     cv2.line(img, points[0], points[1], (255,0,0), 2)
-
-
-
-    # if hasattr(frame.camera, 'debug_points'):
-    #     for num, point in enumerate(frame.camera.debug_points):
-    #         cv2.circle(img, (int(point[0]*scale), int(point[1]*scale)), 5, (255,0,0), 2)
-    #         cv2.putText(img, f"{num}", (int(point[0]*scale)+20, int(point[1]*scale)), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0), 1)
-
     if not prediction_frame:
         cv2.putText(img, f"Waiting for prediction...", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
         # continue
@@ -444,9 +371,9 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     for option, value in prediction_frame.log['predictor'].items():
         options.append(f"{option}: {value}")
 
-    if len(options):
-        cv2.putText(img, options.pop(-1), (20,img.shape[0]-30), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-        cv2.putText(img, " | ".join(options), (20,img.shape[0]-10), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-
+    cv2.putText(img, options.pop(-1), (20,img.shape[0]-30), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+    cv2.putText(img, " | ".join(options), (20,img.shape[0]-10), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
 
     return img
 
trap/frame_emitter.py

@@ -1,16 +1,18 @@
 from __future__ import annotations
 
 import logging
 import multiprocessing
 import pickle
 from argparse import ArgumentParser, Namespace
 from multiprocessing import Event
 from pathlib import Path
 
 import zmq
 
 from trap import node
 from trap.base import *
-from trap.gemma import ImgMovementFilter
-from trap.preview_renderer import FrameWriter
+from trap.base import LambdaParser
 from trap.timer import Timer
 from trap.video_sources import get_video_source
 
 logger = logging.getLogger('trap.frame_emitter')
@@ -30,39 +32,27 @@ class FrameEmitter(node.Node):
 
         self.video_srcs = self.config.video_src
 
 
 
     def run(self):
         offset = int(self.config.video_offset or 0)
         source = get_video_source(self.video_srcs, self.config.camera, offset, self.config.video_end, self.config.video_loop)
         video_gen = enumerate(source, start = offset)
-
-        # writer = FrameWriter(self.config.record, None, None) if self.config.record else nullcontext
-        print(self.config.record)
-        writer = FrameWriter(str(self.config.record), None, None) if self.config.record else None
-        try:
-            processor = ImgMovementFilter()
-            while self.run_loop():
-
-                try:
-                    i, img = next(video_gen)
-                except StopIteration as e:
-                    logger.info("Video source ended")
-                    break
-
-                frame = Frame(i, img=img, H=self.config.camera.H, camera=self.config.camera)
-
-                # frame.img = processor.apply(frame.img)
-
-                # TODO: this is very dirty, need to find another way.
-                # perhaps multiprocessing Array?
-                self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
-                self.frame_sock.send(pickle.dumps(frame))
-
-                if writer:
-                    writer.write(frame.img)
-        finally:
-            if writer:
-                writer.release()
+        while self.run_loop():
+
+            try:
+                i, img = next(video_gen)
+            except StopIteration as e:
+                logger.info("Video source ended")
+                break
+
+            frame = Frame(i, img=img, H=self.config.camera.H, camera=self.config.camera)
+
+            # TODO: this is very dirty, need to find another way.
+            # perhaps multiprocessing Array?
+            self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
+            self.frame_sock.send(pickle.dumps(frame))
 
 
         logger.info("Stopping")
@@ -94,10 +84,6 @@ class FrameEmitter(node.Node):
                              help="End (or loop) playback at given frame.",
                              default=None,
                              type=int)
-        argparser.add_argument("--record",
-                             help="Record source video to given filename",
-                             default=None,
-                             type=Path)
 
         argparser.add_argument("--video-loop",
                              help="By default the emitter will run only once. This allows it to loop the video file to keep testing.",
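On the removed side, the try/finally around the source loop guarantees the recording is flushed even when the video generator raises. The same idea as a reusable context manager, a hedged sketch reusing the FrameWriter signature seen above:

```python
from contextlib import contextmanager

@contextmanager
def recording(path):
    writer = FrameWriter(str(path), None, None)  # assumes trap.preview_renderer.FrameWriter
    try:
        yield writer
    finally:
        writer.release()  # flush/close even if the frame loop raises
```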
trap/frame_writer.py (deleted)

@@ -1,97 +0,0 @@
-# used for "Forward Referencing of type annotations"
-from __future__ import annotations
-
-import datetime
-import logging
-import time
-from argparse import ArgumentParser
-from pathlib import Path
-
-import zmq
-
-from trap.frame_emitter import Frame
-from trap.node import Node
-from trap.preview_renderer import FrameWriter as CvFrameWriter
-
-logger = logging.getLogger("trap.simple_renderer")
-
-class FrameWriter(Node):
-    def setup(self):
-        self.frame_sock = self.sub(self.config.zmq_frame_addr)
-
-        self.out_writer = self.start_writer()
-
-    def start_writer(self):
-        if not self.config.output_dir.exists():
-            raise FileNotFoundError("Path does not exist")
-
-        date_str = datetime.datetime.now().isoformat(timespec="minutes")
-        filename = self.config.output_dir / f"render-source-{date_str}.mp4"
-        logger.info(f"Write to {filename}")
-
-        return CvFrameWriter(str(filename), None, None)
-
-        # fourcc = cv2.VideoWriter_fourcc(*'vp09')
-
-        # return cv2.VideoWriter(str(filename), fourcc, self.fps, self.frame_size)
-
-    def run(self):
-        i=0
-        try:
-            while self.run_loop():
-                i += 1
-
-                # zmq_ev = self.frame_sock.poll(timeout=2000)
-                # if not zmq_ev:
-                #     # when no data comes in, loop so that is_running is checked
-                #     continue
-
-                try:
-                    frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
-
-                    # else:
-                    #     logger.debug(f'new video frame {frame.index}')
-
-                    if frame is None:
-                        # might need to wait a few iterations before first frame comes available
-                        time.sleep(.1)
-                        continue
-
-                    self.logger.debug(f"write frame {frame.time:.3f}")
-                    self.out_writer.write(frame.img)
-
-                except zmq.ZMQError as e:
-                    # idx = frame.index if frame else "NONE"
-                    # logger.debug(f"reuse video frame {idx}")
-                    pass
-        except KeyboardInterrupt as e:
-            print('stopping on interrupt')
-
-        self.logger.info('Stopping')
-
-        # if i>2:
-        if self.out_writer:
-            self.out_writer.release()
-            self.logger.info(f'Wrote to {self.out_writer.filename}')
-
-        self.logger.info('stopped')
-
-
-    @classmethod
-    def arg_parser(cls):
-        argparser = ArgumentParser()
-        argparser.add_argument('--zmq-frame-addr',
-                             help='Manually specify communication addr for the frame messages',
-                             type=str,
-                             default="ipc:///tmp/feeds_frame")
-
-        argparser.add_argument("--output-dir",
-                             help="Directory to save the video in",
-                             required=True,
-                             type=Path)
-        return argparser
trap/laser_calibration.py (deleted)

@@ -1,292 +0,0 @@
-
-
-from argparse import ArgumentParser
-import enum
-import json
-from pathlib import Path
-import time
-from typing import Optional
-
-import cv2
-import numpy as np
-
-from trap.base import DataclassJSONEncoder, DistortedCamera, Frame
-from trap.lines import CoordinateSpace, RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SrgbaColor, cross_points
-from trap.node import Node
-from trap.stage import Coordinate
-
-
-class Modes(enum.Enum):
-    POINTS = 1
-    TEST_LINE = 2
-
-class LaserCalibration(Node):
-    """
-    A calibrated camera can be used to reverse-map the points of the laser to world coordinates.
-    Note, it publishes on the address of the stage node, so they cannot run at the same time.
-
-    1. Draw points with the laser (use 1-9 to create/select, then position them with arrow keys)
-    2. Use the cursor on the camera stream to create an image point for it.
-       - Locate a nearby point to select and drag
-    3. Using the image coordinate of the point: undistort + homography gives the world coordinate.
-    4. Perform homography on world coordinates + laser coordinates
-    """
-
-    def setup(self):
-        # self.scenarios: List[DrawnScenario] = []
-
-        self.frame_sock = self.sub(self.config.zmq_frame_addr)
-        self.laser_sock = self.pub(self.config.zmq_stage_addr)
-
-        self.camera: Optional[DistortedCamera] = None
-
-        self._selected_point = None
-        self._is_dragging = False
-        self.laser_points = {}
-        self.image_points = {}
-        self.mode = Modes.POINTS
-        self.H = None
-
-        self.img_size = (1920,1080)
-        self.frame_img_factor = (1,1)
-
-        if self.config.calibfile.exists():
-            with self.config.calibfile.open('r') as fp:
-                calibdata = json.load(fp)
-                self.laser_points = calibdata['laser_points']
-                self.image_points = calibdata['image_points']
-                self.H = calibdata['H']
-
-    def run(self):
-        cv2.namedWindow("laser_calib", cv2.WINDOW_NORMAL)
-        # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
-        # cv2.moveWindow("laser_calib", 0, -1)
-        cv2.setMouseCallback('laser_calib',self.mouse_event)
-        cv2.setWindowProperty("laser_calib",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
-
-        # arrow up (82), down (84), arrow left(81)
-
-        frame = None
-        while self.run_loop_capped_fps(60):
-            if self.frame_sock.poll(0):
-                frame: Frame = self.frame_sock.recv_pyobj()
-                if not self.camera:
-                    self.camera = frame.camera
-
-            if frame is None:
-                continue
-
-            self.frame_img_factor = frame.img.shape[1] / self.img_size[0], frame.img.shape[0] / self.img_size[1]
-
-            img = frame.img
-            img = cv2.resize(img, self.img_size)
-
-            cv2.putText(img, 'press 1-0 to create/edit points', (10,20), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
-            if len(self.laser_points) < 4:
-                cv2.putText(img, 'add points to calculate homography', (10,40), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
-            else:
-                cv2.putText(img, 'press c to calculate homography', (10,40), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,0))
-
-            cv2.putText(img, str(self.config.calibfile), (10,self.img_size[1]-30), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,0))
-
-            if self._selected_point:
-                color = (0,255,255)
-                cv2.putText(img, f'selected {self._selected_point}', (10,60), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
-                cv2.putText(img, 'press d to delete', (10,80), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
-                cv2.putText(img, 'use arrows to position laser for this point', (10,100), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
-                target = self.camera.points_img_to_world([self.image_points[self._selected_point]])[0].tolist()
-                target = round(target[0], 2), round(target[1], 2)
-                cv2.putText(img, f'map {self.laser_points[self._selected_point]} to {target} ({self.image_points[self._selected_point]})', (10,120), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
-
-            for k, coord in self.image_points.items():
-                color = (0,0,255) if self._selected_point == k else (255,0,0)
-                coord = int(coord[0] / self.frame_img_factor[0]), int(coord[1] / self.frame_img_factor[1])
-                cv2.circle(img, coord, 4, color, thickness=2)
-                cv2.putText(img, str(k), (coord[0]+10, coord[1]), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
-
-            key = cv2.waitKey(5) # or for arrows: full_key_code = cv2.waitKeyEx(0)
-            self.key_event(key)
-            # nr_keys = [ord(i) for i in range(10)] # select/add point
-            # cv2.
-            cv2.imshow('laser_calib', img)
-
-            lines = []
-            if self.mode == Modes.TEST_LINE:
-                lines.append(RenderableLine([
-                    RenderablePoint((i,time.time()%18), SrgbaColor(0,1,0,1)) for i in range(-15, 40)
-                ]))
-                # render in laser space
-                rl = RenderableLines(lines, CoordinateSpace.WORLD)
-                self.laser_sock.send_json(rl, cls=DataclassJSONEncoder)
-            else:
-                if self._selected_point:
-                    point = self.laser_points[self._selected_point]
-                    lines.extend(cross_points(point[0], point[1], .5, SrgbaColor(0,1,0,1)))
-
-                # render in laser space
-                rl = RenderableLines(lines, CoordinateSpace.LASER)
-                self.laser_sock.send_json(rl, cls=DataclassJSONEncoder)
-
-            # print(json.dumps(rl, cls=DataclassJSONEncoder))
-
-    def key_event(self, key: int):
-        if key < 0:
-            return
-
-        if key == ord('q'):
-            exit()
-
-        if key == 27: # esc
-            self._selected_point = None
-
-        if key == ord('c'):
-            self.calculate_homography()
-            self.save()
-
-        if key == ord('d') and self._selected_point:
-            self.delete_point(self._selected_point)
-
-        if key == ord('t'):
-            self.mode = Modes.TEST_LINE if self.mode == Modes.POINTS else Modes.POINTS
-            print(self.mode)
-
-        # arrow up (82), down (84), arrow left(81)
-        if self._selected_point and key in [81, 84, 82, 83,
-                                            ord('h'), ord('j'), ord('k'), ord('l'),
-                                            ord('H'), ord('J'), ord('K'), ord('L'),
-                                            ]:
-            diff = [0,0]
-            if key in [81, ord('h')]:
-                diff[0] -= 1
-            if key == ord('H'):
-                diff[0] -= 10
-            if key in [83, ord('l')]:
-                diff[0] += 1
-            if key == ord('L'):
-                diff[0] += 10
-
-            if key in [82, ord('k')]:
-                diff[1] += 1
-            if key == ord('K'):
-                diff[1] += 10
-            if key in [84, ord('j')]:
-                diff[1] -= 1
-            if key == ord('J'):
-                diff[1] -= 10
-
-            self.laser_points[self._selected_point] = (
-                self.laser_points[self._selected_point][0] + diff[0],
-                self.laser_points[self._selected_point][1] + diff[1],
-            )
-
-        nr_keys = [ord(str(i)) for i in range(10)]
-        if key in nr_keys:
-            select = str(nr_keys.index(key))
-            self.create_or_select(select)
-
-    def mouse_event(self, event,x,y,flags,param):
-        x *= self.frame_img_factor[0]
-        y *= self.frame_img_factor[1]
-        if event == cv2.EVENT_MOUSEMOVE:
-            if not self._is_dragging or not self._selected_point:
-                return
-            self.image_points[self._selected_point] = (x, y)
-
-        if event == cv2.EVENT_LBUTTONDOWN:
-            # select or create
-            self._selected_point = None
-            for i, p in self.image_points.items():
-                d = (p[0]-x)**2 + (p[1]-y)**2
-                if d < 30:
-                    self._selected_point = i
-                    break
-            if self._selected_point is None:
-                self._selected_point = self.new_point((x,y), None)
-
-            self._is_dragging = True
-
-        if event == cv2.EVENT_LBUTTONUP:
-            self._is_dragging = False
-            # ... point stays selected to tweak laser
-
-    def create_or_select(self, nr: str):
-        if nr not in self.image_points:
-            self.new_point(None, None, nr)
-        self._selected_point = nr
-        return nr
-
-    def new_point(self, img_coord: Optional[Coordinate], laser_coord: Optional[Coordinate], nr: Optional[str]=None):
-        if nr:
-            new_nr = nr
-        else:
-            new_nr = None
-            for i in range(100):
-                k = str(i)
-                if k not in self.image_points:
-                    new_nr = k
-                    break
-            if not new_nr:
-                new_nr = 0 # cover unlikely case
-
-        self.image_points[new_nr] = img_coord or (100,100)
-        self.laser_points[new_nr] = laser_coord or (100,100)
-        return new_nr
-
-    def delete_point(self, point: str):
-        del self.image_points[point]
-        del self.laser_points[point]
-        self._selected_point = None
-
-    def calculate_homography(self):
-        if len(self.image_points) < 4:
-            return
-
-        world_points = self.camera.points_img_to_world(list(self.image_points.values()))
-        laser_points = np.array(list(self.laser_points.values()))
-        print('from', world_points)
-        print('to', laser_points)
-        self.H, status = cv2.findHomography(world_points, laser_points)
-
-        print('Found')
-        print(self.H)
-
-    def save(self):
-        with self.config.calibfile.open('w') as fp:
-            json.dump({
-                'laser_points': self.laser_points,
-                'image_points': self.image_points,
-                'H': self.H.tolist()
-            }, fp)
-
-    @classmethod
-    def arg_parser(cls) -> ArgumentParser:
-        argparser = ArgumentParser()
-        argparser.add_argument('--zmq-frame-addr',
-                             help='Manually specify communication addr for the frame messages',
-                             type=str,
-                             default="ipc:///tmp/feeds_frame")
-        argparser.add_argument('--zmq-stage-addr',
-                             help='Manually specify communication addr for the stage messages (the rendered lines)',
-                             type=str,
-                             default="tcp://0.0.0.0:99174")
-        argparser.add_argument('--calibfile',
-                             help='specify file to save & load points with',
-                             type=Path,
-                             default=Path("./laser_calib.json"))
-
-        return argparser
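The deleted calibration tool reduces to one mapping: image points go through the camera model to world coordinates, and `cv2.findHomography` then fits world to laser space. A minimal sketch with made-up point pairs (the real H is the one copied into `trap_rust/src/trap/laser.rs`):

```python
import cv2
import numpy as np

world = np.array([[0, 0], [4, 0], [4, 3], [0, 3]], dtype=np.float32)                # world units
laser = np.array([[120, 80], [900, 95], [880, 700], [140, 690]], dtype=np.float32)  # laser/DAC units
H, status = cv2.findHomography(world, laser)

# Project a world point into laser space; OpenCV expects shape (N, 1, 2).
pt = np.array([[[2.0, 1.5]]], dtype=np.float32)
print(cv2.perspectiveTransform(pt, H))
```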
trap/lines.py (109 changes)

@@ -1,26 +1,18 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from enum import Enum, IntEnum
+from enum import Enum
 import math
-from pathlib import Path
-from typing import Dict, List, Tuple
+from typing import List, Tuple
 import numpy as np
 
 from simplification.cutil import simplify_coords_idx, simplify_coords_vw_idx
-import svgpathtools
 
 """
 See [notebook](../test_path_transforms.ipynb) for examples
 """
 
 RenderablePosition = Tuple[float,float]
 
-class CoordinateSpace(IntEnum):
-    CAMERA = 1
-    UNDISTORTED_CAMERA = 2
-    WORLD = 3
-    LASER = 4
-
 @dataclass
 class SrgbaColor():
@@ -63,12 +55,12 @@ class SimplifyMethod(Enum):
 class RenderableLine():
     points: List[RenderablePoint]
 
-    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP, factor = SIMPLIFY_FACTOR_RDP):
+    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP):
         linestring = [p.position for p in self.points]
         if method == SimplifyMethod.RDP:
-            indexes = simplify_coords_idx(linestring, factor)
+            indexes = simplify_coords_idx(linestring, SIMPLIFY_FACTOR_RDP)
         elif method == SimplifyMethod.VW:
-            indexes = simplify_coords_vw_idx(linestring, factor)
+            indexes = simplify_coords_vw_idx(linestring, SIMPLIFY_FACTOR_VW)
         points = [self.points[i] for i in indexes]
         return RenderableLine(points)
 
@@ -76,12 +68,11 @@ class RenderableLine():
 @dataclass
 class RenderableLines():
     lines: List[RenderableLine]
-    space: CoordinateSpace = CoordinateSpace.WORLD
 
-    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP, factor = SIMPLIFY_FACTOR_RDP):
-        """Wraps RenderableLine simplification, smaller factor is more detailed"""
+    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP):
+        """Wraps RenderableLine simplification"""
         return RenderableLines(
-            [line.as_simplified(method, factor) for line in self.lines]
+            [line.as_simplified(method) for line in self.lines]
        )
 
     def append(self, rl: RenderableLine):
@@ -90,12 +81,9 @@ class RenderableLines():
     def append_lines(self, rls: RenderableLines):
         self.lines.extend(rls.lines)
 
-    def point_count(self):
-        return sum([len(l.points) for l in self.lines])
 
     # def merge(self, rl: RenderableLines):
 
-RenderableLayers = Dict[int, RenderableLines]
 
 
 def circle_arc(cx, cy, r, t, l, c: SrgbaColor):
     """
@@ -103,7 +91,7 @@ def circle_arc(cx, cy, r, t, l, c: SrgbaColor):
     for l*2pi, offset by t. Both t and l are 0<= [t,l] <= 1
     """
 
-    resolution = 30
+    resolution = 40
     steps = int(resolution * l)
     offset = int(resolution * t)
     pointlist: list[RenderablePoint] = []
@@ -114,79 +102,4 @@ def circle_arc(cx, cy, r, t, l, c: SrgbaColor):
         pointlist.append(RenderablePoint((x, y), c))
 
 
     return RenderableLine(pointlist)
-
-def cross_points(cx, cy, r, c: SrgbaColor):
-    # r = 100
-    steps = 3
-    pointlist: list[RenderablePoint] = []
-    for i in range(steps):
-        x = int(cx)
-        y = int(cy + r - i * 2 * r/steps)
-        pos = (x, y)
-        pointlist.append(RenderablePoint(pos, c))
-    path = RenderableLine(pointlist)
-    pointlist: list[RenderablePoint] = []
-    for i in range(steps):
-        y = int(cy)
-        x = int(cx + r - i * 2 * r/steps)
-        pos = (x, y)
-        pointlist.append(RenderablePoint(pos, c))
-    path2 = RenderableLine(pointlist)
-
-    return [path, path2]
-
-
-def load_lines_from_svg(svg_path: Path, scale: float, c: SrgbaColor) -> List[RenderableLine]:
-    lines = []
-    paths, attributes = svgpathtools.svg2paths(svg_path)
-
-    for path in paths:
-        try:
-            # segments = path.segments
-            coordinates = []
-            for i, segment in enumerate(path):
-                if isinstance(segment, svgpathtools.Line):
-                    if i == 0:
-                        # avoid duplicate coords
-                        coordinates.append((segment.start.real, segment.start.imag))
-                    coordinates.append((segment.end.real, segment.end.imag))
-                elif isinstance(segment, svgpathtools.Arc):
-                    # Approximating arcs with line segments (adjust steps for precision)
-                    steps = 10
-                    for i in range(steps + 1):
-                        t = i / steps
-                        x = segment.point(t).real
-                        y = segment.point(t).imag
-                        coordinates.append((x, y))
-                elif isinstance(segment, svgpathtools.CubicBezier):
-                    steps = 10
-                    for i in range(steps + 1):
-                        t = i / steps
-                        x = segment.point(t).real
-                        y = segment.point(t).imag
-                        coordinates.append((x, y))
-                elif isinstance(segment, svgpathtools.QuadraticBezier):
-                    steps = 10
-                    for i in range(steps + 1):
-                        t = i / steps
-                        x = segment.point(t).real
-                        y = segment.point(t).imag
-                        coordinates.append((x, y))
-                else:
-                    print(f"Unsupported segment type: {type(segment)}")
-
-            # Create LineString from coordinates
-            if len(coordinates) > 1:
-                coordinates = (np.array(coordinates) / scale).tolist()
-                points = [RenderablePoint(pos, c) for pos in coordinates]
-                line = RenderableLine(points)
-                lines.append(line)
-                # linestring = shapely.geometry.LineString(coordinates)
-                # linestrings.append(linestring)
-
-        except Exception as e:
-            print(f"Error processing path: {e}")
-    return lines
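The removed `factor` parameter is the Ramer-Douglas-Peucker epsilon: smaller values keep more detail, larger values drop near-collinear points, which is useful to cap the point count sent to a laser DAC. A quick illustration with the `simplification` package from pyproject.toml:

```python
from simplification.cutil import simplify_coords_idx

line = [[0.0, 0.0], [1.0, 0.02], [2.0, 0.0], [3.0, 0.5]]
print(simplify_coords_idx(line, 0.01))  # small epsilon: keeps the slight bend
print(simplify_coords_idx(line, 0.1))   # larger epsilon: drops it
```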
trap/monitor.py (deleted)

@@ -1,65 +0,0 @@
-
-from argparse import ArgumentParser
-import time
-from trap.counter import CounterListerner
-from trap.node import Node
-
-
-class Monitor(Node):
-    """
-    Render a stage, on which different TrackScenarios take place, to a
-    single image of lines, which can be passed to different renderers,
-    e.g. the laser or image renderers.
-    """
-
-    FPS = 1
-
-    def setup(self):
-        # self.scenarios: List[DrawnScenario] = []
-        self.counter_listener = CounterListerner()
-
-    def run(self):
-        prev_time = time.perf_counter()
-        while self.is_running.is_set():
-            # self.tick() # don't pollute it with own data
-
-            self.counter_listener.snapshot()
-            stats = self.counter_listener.to_string()
-            if len(stats):
-                self.logger.info(stats)
-            # else:
-            #     self.logger.info("no stats")
-
-            # for i, (k, v) in enumerate(self.counter_listener.get_latest().items()):
-            #     print(k,v)
-            #     cv2.putText(img, f"{k} {v.value()}", (20,img.shape[0]-(40*i)-40), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
-
-            # 3) calculate latency for desired FPS
-            now = time.perf_counter()
-            time_diff = (now - prev_time)
-            if time_diff < 1/self.FPS:
-                # print(f"sleep {1/self.FPS - time_diff}")
-                time.sleep(1/self.FPS - time_diff)
-                now += 1/self.FPS - time_diff
-
-            prev_time = now
-
-    @classmethod
-    def arg_parser(cls) -> ArgumentParser:
-        argparser = ArgumentParser()
-        # argparser.add_argument('--zmq-trajectory-addr',
-        #                      help='Manually specify communication addr for the trajectory messages',
-        #                      type=str,
-        #                      default="ipc:///tmp/feeds_traj")
-        # argparser.add_argument('--zmq-prediction-addr',
-        #                      help='Manually specify communication addr for the prediction messages',
-        #                      type=str,
-        #                      default="ipc:///tmp/feeds_preds")
-        # argparser.add_argument('--zmq-stage-addr',
-        #                      help='Manually specify communication addr for the stage messages (the rendered lines)',
-        #                      type=str,
-        #                      default="tcp://0.0.0.0:99174")
-        return argparser
trap/node.py (69 changes)

@@ -1,9 +1,7 @@
 import logging
 from logging.handlers import QueueHandler, QueueListener, SocketHandler
 import multiprocessing
 from multiprocessing.synchronize import Event as BaseEvent
 from argparse import ArgumentParser, Namespace
-import time
-from typing import Optional
 
 import zmq
@@ -20,8 +18,6 @@ class Node():
         self.zmq_context = zmq.Context()
         self.logger = self._logger()
 
-        self._prev_loop_time = 0
-
         self.setup()
 
     @classmethod
@@ -46,49 +42,9 @@ class Node():
         self.tick()
         return self.is_running.is_set()
 
-    def run_loop_capped_fps(self, max_fps: float):
-        """Use in run(), to check if it should keep looping
-        Takes care of tick()'ing the iterations/second counter
-        """
-
-        now = time.perf_counter()
-        time_diff = (now - self._prev_loop_time)
-        if time_diff < 1/max_fps:
-            # print(f"sleep {1/max_fps - time_diff}")
-            time.sleep(1/max_fps - time_diff)
-            now += 1/max_fps - time_diff
-        self._prev_loop_time = now
-
-        return self.run_loop()
-
     @classmethod
     def arg_parser(cls) -> ArgumentParser:
         raise RuntimeError("Not implemented arg_parser()")
 
-    @classmethod
-    def _get_arg_parser(cls) -> ArgumentParser:
-        parser = cls.arg_parser()
-        # add some defaults
-        parser.add_argument(
-            '--verbose',
-            '-v',
-            help="Increase verbosity. Add multiple times to increase further.",
-            action='count', default=0
-        )
-        parser.add_argument(
-            '--remote-log-addr',
-            help="Connect to a remote logger like cutelog. Specify the ip",
-            type=str,
-            default="100.72.38.82"
-        )
-        parser.add_argument(
-            '--remote-log-port',
-            help="Connect to a remote logger like cutelog. Specify the port",
-            type=int,
-            default=19996
-        )
-        return parser
-
-
     def sub(self, addr: str):
         "Default zmq sub configuration"
@@ -113,32 +69,11 @@ class Node():
 
     @classmethod
     def parse_and_start(cls):
         """To start the node from CLI/supervisor"""
-        config = cls._get_arg_parser().parse_args()
-        setup_logging(config) # running from cli, we need to setup logging
+        config = cls.arg_parser().parse_args()
         is_running = multiprocessing.Event()
         is_running.set()
         statsender = CounterSender()
         counter = CounterFpsSender(f"trap.{cls.__name__}", statsender)
         # timer_counter = Timer(cls.__name__)
 
         cls.start(config, is_running, counter)
-
-
-def setup_logging(config: Namespace):
-    loglevel = logging.NOTSET if config.verbose > 1 else logging.DEBUG if config.verbose > 0 else logging.INFO
-    stream_handler = logging.StreamHandler()
-    log_handlers = [stream_handler]
-
-    if config.remote_log_addr:
-        logging.captureWarnings(True)
-        # root_logger.setLevel(logging.NOTSET) # to send all records to cutelog
-        socket_handler = SocketHandler(config.remote_log_addr, config.remote_log_port)
-        print(socket_handler.host, socket_handler.port)
-        socket_handler.setLevel(logging.NOTSET)
-        log_handlers.append(socket_handler)
-
-    logging.basicConfig(
-        level=loglevel,
-        handlers=log_handlers # [queue_handler]
-    )
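The removed `run_loop_capped_fps` is a plain frame-rate cap: sleep away whatever remains of the frame budget, then carry the corrected timestamp into the next iteration. A standalone sketch of the same timing logic:

```python
import time

def cap_fps(prev: float, max_fps: float) -> float:
    """Sleep out the remainder of the frame budget; returns the new `prev`."""
    now = time.perf_counter()
    time_diff = now - prev
    if time_diff < 1 / max_fps:
        time.sleep(1 / max_fps - time_diff)
        now += 1 / max_fps - time_diff
    return now
```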
@@ -86,8 +86,7 @@ def get_maps_for_input(input_dict, scene, hyperparams, device):
 
         scene_map = scene.map[node.type]
         # map_point = x[-1, :2]
-        # map_point = x[:2]
-        map_point = x[:2].clip(0) # prevent crash for out of map point.
+        map_point = x[:2]
 
         patch_size = hyperparams['map_encoder'][node.type]['patch_size']
 
@@ -103,7 +102,6 @@ def get_maps_for_input(input_dict, scene, hyperparams, device):
         heading_angles = torch.Tensor(heading_angles)
 
-    # print(scene_maps, patch_sizes, heading_angles)
     # print(scene_pts)
     maps = scene_maps[0].get_cropped_maps_from_scene_map_batch(scene_maps,
                                                                scene_pts=torch.Tensor(scene_pts),
                                                                patch_size=patch_sizes[0],
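The dropped `.clip(0)` was the crash guard here: when a position falls outside the scene map, negative map coordinates would index the map array from the wrong end (or out of range) in the patch cropper. A two-line illustration:

```python
import numpy as np

x = np.array([-0.7, 3.2])   # position slightly off the mapped area
print(x[:2].clip(0))        # [0.  3.2] -> stays inside the map
```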
@@ -300,7 +300,7 @@ class FrameWriter:
     """
     def __init__(self, filename: str, fps: float, frame_size: Optional[tuple] = None) -> None:
         self.filename = filename
-        self._fps = fps
+        self.fps = fps
         self.frame_size = frame_size
 
         self.tmp_dir = tempfile.TemporaryDirectory(prefix="trap-output-")
trap/process_data.py

@@ -163,8 +163,6 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
 
     print(f"Camera FPS: {camera.fps}, actual fps: {camera.fps/step_size} (or {(1/camera.fps)*step_size})")
 
-    names = {}
-
     for data_class, nr_of_items in destinations.items():
         env = Environment(node_type_list=['PEDESTRIAN'], standardization=standardization)
         attention_radius = dict()
@@ -174,7 +172,6 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
         scenes = []
         split_id = f"{name}_{data_class}"
         data_dict_path = dst_dir / (split_id + '.pkl')
-        names[data_class] = data_dict_path
         # subpath = src_dir / data_class
 
 
@@ -301,7 +298,6 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
     # print(f"Linear: {l}")
     # print(f"Non-Linear: {nl}")
     print(f"error: {skipped_for_error}, used: {created}")
-    return names
 
 def main():
     parser = argparse.ArgumentParser()
479
trap/stage.py
479
trap/stage.py
|
@ -13,10 +13,7 @@ import time
|
|||
from typing import Dict, List, Optional, Tuple
|
||||
from matplotlib.pyplot import isinteractive
|
||||
import numpy as np
|
||||
from shapely import LineString, MultiLineString, line_locate_point, linestrings
|
||||
|
||||
import shapely
|
||||
from shapely.ops import substring
|
||||
from shapely import LineString, line_locate_point, linestrings
|
||||
from statemachine import Event, State, StateMachine
|
||||
from statemachine.exceptions import TransitionNotAllowed
|
||||
import zmq
|
||||
|
@ -27,23 +24,20 @@ from trap import shapes
|
|||
from trap.base import Camera, DataclassJSONEncoder, DistortedCamera, Frame, ProjectedTrack, Track
|
||||
from trap.counter import CounterSender
|
||||
from trap.laser_renderer import circle_points, rotateMatrix
|
||||
from trap.lines import RenderableLayers, RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SimplifyMethod, SrgbaColor, circle_arc, load_lines_from_svg
|
||||
from trap.lines import RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SrgbaColor, circle_arc
|
||||
from trap.node import Node
|
||||
from trap.timer import Timer
|
||||
from trap.utils import exponentialDecay, exponentialDecayRounded, lerp, relativePointToPolar, relativePolarToPoint
|
||||
from trap.utils import exponentialDecay, exponentialDecayRounded, relativePointToPolar, relativePolarToPoint
|
||||
|
||||
from noise import snoise2
|
||||
|
||||
logger = logging.getLogger('trap.stage')
|
||||
|
||||
Coordinate = Tuple[float, float]
|
||||
DeltaT = float # delta_t in seconds
|
||||
|
||||
OPTION_RENDER_DEBUG = False
|
||||
OPTION_POSITION_MARKER = False
|
||||
OPTION_GROW_ANOMALY_CIRCLE = False
|
||||
# OPTION_RENDER_DIFF_SEGMENT = True
|
||||
OPTION_TRACK_NOISE = False
|
||||
# current_fraction = line_locate_point(new_line_string, Point(old_ls.coords[-1]), normalized=True)
|
||||
# new_fraction = current_fraction + stepsize
|
||||
# grown_string = shapely.ops.substring(new_line_string, 0, new_fraction, normalized=True)
|
||||
|
||||
class LineGenerator(ABC):
|
||||
@abstractmethod
|
||||
|
@ -66,10 +60,6 @@ class AppendableLine(LineGenerator):
|
|||
self.ready = len(self.points) == 0
|
||||
self.draw_decay_speed = draw_decay_speed
|
||||
|
||||
def nr_of_passed_points(self):
|
||||
"""The number of points passed in the animation"""
|
||||
return len(self._drawn_points) - 1
|
||||
|
||||
def update_drawn_positions(self, dt: DeltaT):
|
||||
if len(self.points) == 0:
|
||||
# nothing to draw yet
|
||||
|
@ -100,8 +90,6 @@ class AppendableLine(LineGenerator):
|
|||
self._drawn_points[-1] = (float(x), float(y))
|
||||
|
||||
class ProceduralChain(LineGenerator):
|
||||
"""A line that can be 'dragged' to a target. In which
|
||||
it disappears."""
|
||||
MOVE_DECAY_SPEED = 80 # speed at which the drawing head should approach the next point
|
||||
VELOCITY_DAMPING = 10
|
||||
VELOCITY_FACTOR = 2
|
||||
|
@ -169,15 +157,7 @@ class ProceduralChain(LineGenerator):
|
|||
|
||||
|
||||
class DiffSegment():
|
||||
"""
|
||||
A segment of a prediction track, that can be diffed
|
||||
with a track. The track is continously update.
|
||||
If a new prediction comes in, the diff is marked as
|
||||
finished. After which it is animated and added to the
|
||||
Scenario's anomaly score.
|
||||
"""
|
||||
DRAW_DECAY_SPEED = 25
|
||||
POINT_INTERVAL = 4
|
||||
|
||||
def __init__(self, prediction: ProjectedTrack):
|
||||
self.ptrack = prediction
|
||||
|
@ -191,12 +171,6 @@ class DiffSegment():
|
|||
|
||||
def finish(self):
|
||||
self.finished = True
|
||||
|
||||
def nr_of_passed_points(self):
|
||||
if isinstance(self.line, AppendableLine):
|
||||
return self.line.nr_of_passed_points() * self.POINT_INTERVAL
|
||||
else:
|
||||
return len(self.points) * self.POINT_INTERVAL
|
||||
|
||||
# run on each track update received
|
||||
def update_track(self, track: ProjectedTrack):
|
||||
|
@ -224,7 +198,7 @@ class DiffSegment():
|
|||
line = []
|
||||
for i, (p1, p2) in enumerate(zip(trajectory_range, prediction_range)):
|
||||
offset_from_start = (pred_diff_steps_forward + i)
|
||||
if offset_from_start % self.POINT_INTERVAL == 0:
|
||||
if offset_from_start % 4 == 0:
|
||||
self.line.points.extend([p1, p2])
|
||||
self.points.extend([p1, p2])
|
||||
|
||||
|
@@ -242,6 +216,33 @@ class DiffSegment():
            if isinstance(self.line, ProceduralChain):
                self.line.target = self._target_track.projected_history[-1]

        # if len(self.points) == 0:
        #     # nothing to draw yet
        #     return

        # # self._drawn_points = self.points

        # if len(self._drawn_points) == 0:
        #     # create origin
        #     self._drawn_points.append(self.points[0])
        #     # and drawing head
        #     self._drawn_points.append(self.points[0])

        # idx = len(self._drawn_points) - 1
        # target = self.points[idx]

        # if np.isclose(self._drawn_points[-1], target, atol=.05).all():
        #     # TODO: might want to migrate to np.isclose()
        #     if len(self._drawn_points) == len(self.points):
        #         self.ready = True
        #         return  # done until a new point is added
        #     # add new point as drawing head
        #     self._drawn_points.append(self._drawn_points[-1])
        #     self.ready = False

        # x = exponentialDecayRounded(self._drawn_points[-1][0], target[0], self.DRAW_DECAY_SPEED, dt, .05)
        # y = exponentialDecayRounded(self._drawn_points[-1][1], target[1], self.DRAW_DECAY_SPEED, dt, .05)
        # self._drawn_points[-1] = (float(x), float(y))

        # if not self.finished or not self.line.ready:
        self.line.update_drawn_positions(dt)
@@ -262,69 +263,6 @@ class DiffSegment():
        return RenderableLines([])


class DiffSegmentScan(DiffSegment):
    """
    Provides an alternative diff rendering, in the form of a sort of scan line.
    Should be faster with the laser.
    TODO: This is work in progress, does not work yet!
    """

    def __init__(self, prediction: ProjectedTrack):
        self.ptrack = prediction
        self._target_track = prediction
        self.finished = False
        self._last_diff_frame_idx = 0

    def finish(self):
        self.finished = True

    def prediction_offset(self):
        """Difference in starting moment between track and prediction"""
        return self.ptrack.frame_index - self._target_track.frame_index

    def nr_of_passed_points(self):
        """Number of points of the given ptrack that have passed"""
        return len(self._target_track.projected_history) - 1 - self.prediction_offset()
        # len(self.points) * self.POINT_INTERVAL

    # run on each track update received
    def update_track(self, track: ProjectedTrack):
        self._target_track = track

        if self.finished:
            # don't add new points if finished
            return

        start_frame_idx = max(self.ptrack.frame_index, self._last_diff_frame_idx)
        traj_diff_steps_back = track.frame_index - start_frame_idx  # positive value
        pred_diff_steps_forward = start_frame_idx - self.ptrack.frame_index  # positive value
        self._last_diff_frame_idx = track.frame_index

    # run each render tick
    def update_drawn_positions(self, dt: DeltaT, scenario: DrawnScenario):
        # if not self.finished or not self.line.ready:
        #     self.line.update_drawn_positions(dt)
        pass  # TODO: use easing

    def as_renderable(self) -> RenderableLines:
        if self.finished:
            return RenderableLines([])
        color = SrgbaColor(0,0,1,1)
        # steps_diff = self.nr_of_passed_points()
        idx = self.nr_of_passed_points()
        if len(self.ptrack.predictions[0]) < idx+1:
            self.finish()
            return RenderableLines([])
        points = [self._target_track.projected_history[-1], self.ptrack.predictions[0][idx]]

        points = [RenderablePoint(pos, color) for pos in points]
        line = RenderableLine(points)

        return RenderableLines([line])


class ScenarioScene(Enum):
    DETECTED = 1
    FIRST_PREDICTION = 2
@@ -337,11 +275,11 @@ LOST_FADEOUT = 3
PREDICTION_INTERVAL: float|None = 20  # frames
PREDICTION_FADE_IN: float = 3
PREDICTION_FADE_SLOPE: float = -10
PREDICTION_FADE_AFTER_DURATION: float = 8  # seconds
PREDICTION_FADE_AFTER_DURATION: float = 10  # seconds
PREDICTION_END_FADE = 2  # frames
# TRACK_MAX_POINTS = 100
TRACK_FADE_AFTER_DURATION = 15.  # seconds
TRACK_END_FADE = 30  # points
TRACK_FADE_AFTER_DURATION = 10.  # seconds
TRACK_END_FADE = 50  # points
TRACK_FADE_ASSUME_FPS = 12

# Don't render the first n points of the prediction,
@@ -365,7 +303,6 @@ class TrackScenario(StateMachine):

    receive_prediction = detected.to(first_prediction) | substantial.to(first_prediction) | first_prediction.to(corrected_prediction, cond="prediction_is_stale") | corrected_prediction.to(play, cond="prediction_is_playing")

    def __init__(self):
        self.track: ProjectedTrack = None
        self.camera: Optional[Camera] = None

@@ -398,7 +335,7 @@ class TrackScenario(StateMachine):
        return False

    def check_lost(self):
        if self.current_state is not self.lost and self.track and self.track.updated_at < time.time() - 5:
        if self.current_state is not self.lost and self.track and self.track.created_at < time.time() - 5:
            self.mark_lost()

    def set_track(self, track: ProjectedTrack):
@@ -440,10 +377,6 @@ class TrackScenario(StateMachine):
        if PREDICTION_INTERVAL is not None and len(self.predictions) and (track.frame_index - self.predictions[-1].frame_index) < PREDICTION_INTERVAL:
            # just drop the track if predictions come in too quickly
            return

        if track._track.predictions is None or not len(track._track.predictions):
            # don't count towards predictions if no prediction is set on the given track (e.g. young tracks)
            return

        self.predictions.append(track)
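
# Illustrative sketch of the rate limiting above: a new prediction is only
# accepted once at least PREDICTION_INTERVAL frames have passed since the last
# accepted one. `accept_prediction` is a hypothetical helper, not from this codebase.
def accept_prediction(new_frame_index: int, last_frame_index: int | None, interval: float | None) -> bool:
    if interval is None or last_frame_index is None:
        return True
    return (new_frame_index - last_frame_index) >= interval

assert accept_prediction(120, 105, 20) is False  # 15 frames later: dropped
assert accept_prediction(126, 105, 20) is True   # 21 frames later: accepted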
@@ -451,7 +384,6 @@ class TrackScenario(StateMachine):
            self.prediction_diffs[-1].finish()  # existing diffing can end
            # and create a new one
        self.prediction_diffs.append(DiffSegment(track))
        # self.prediction_diffs.append(DiffSegmentScan(track))

        # check to change state
        try:
@@ -526,8 +458,8 @@ class DrawnScenario(TrackScenario):
    """

    ANOMALY_DECAY = .2  # speed with which the circle shrinks over time
    DISTANCE_ANOMALY_FACTOR = .03  # the amount to which the difference counts towards the anomaly score
    MAX_HISTORY = 200  # points of history of trajectory to display (preventing too long lines)
    DISTANCE_ANOMALY_FACTOR = .05  # the amount to which the difference counts towards the anomaly score
    MAX_HISTORY = 80  # points of history of trajectory to display (preventing too long lines)
    CUT_GAP = 5  # when adding a new prediction, keep the existing prediction until that point + this CUT_GAP margin

    def __init__(self):

@@ -535,7 +467,6 @@ class DrawnScenario(TrackScenario):
        # self.track_id = track_id
        self.last_update_t = time.perf_counter()

        self.drawn_position: Optional[Coordinate] = None
        self.drawn_positions: List[Coordinate] = []
        self.drawn_pred_history: List[Coordinate] = []
        self.drawn_predictions: List[List[Coordinate]] = []
@@ -582,45 +513,35 @@ class DrawnScenario(TrackScenario):
        # 0. Update anomaly, slowly decreasing it over time
        self.decay_anomaly_score(dt)

        # for diff in self.prediction_diffs:
        #     diff.update_drawn_positions(dt, self)
        for diff in self.prediction_diffs:
            diff.update_drawn_positions(dt, self)

        # 1. track history, direct update

        # positions = self._track.get_projected_history(None, self.camera)[-MAX_HISTORY:]
        # self.drawn_positions = self.track.projected_history[-self.MAX_HISTORY:]
        self.drawn_positions = self.track.projected_history
        if self.drawn_position is None:
            self.drawn_position = self.drawn_positions[-1]
        else:
            self.drawn_position[0] = exponentialDecay(self.drawn_position[0], self.drawn_positions[-1][0], 13, dt)
            self.drawn_position[1] = exponentialDecay(self.drawn_position[1], self.drawn_positions[-1][1], 13, dt)
        self.drawn_positions = self.track.projected_history[-self.MAX_HISTORY:]
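
# A minimal sketch of the easing used for drawn_position above, assuming
# trap.utils.exponentialDecay follows the common framerate-independent
# exp(-decay * dt) form; the real helper may differ in details.
import math

def exponential_decay(current: float, target: float, decay: float, dt: float) -> float:
    # the remaining distance to the target shrinks by exp(-decay * dt) each
    # update, so motion looks the same at any render framerate
    return target + (current - target) * math.exp(-decay * dt)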

        # 3. predictions
        if len(self.drawn_predictions) < len(self.predictions):
            # first prediction
            if len(self.drawn_predictions) == 0:
                last_pred = self.predictions[-1]
                self.drawn_predictions.append(last_pred.predictions[0])
                self.drawn_predictions.append(self.predictions[-1].predictions[0])
            else:
                # if a new prediction has arisen, transition from the existing one.
                # First, cut the existing prediction.
                # CUT_GAP means some of the line is lost in the transition, to prevent glitches when the person's velocity changes
                end_step = self.predictions[-1].frame_index - self.predictions[-2].frame_index + self.CUT_GAP
                # cut existing prediction
                end_step = self.predictions[-1].frame_index - self.predictions[-2].frame_index + self.CUT_GAP
                # print(end_step)
                keep = self.drawn_predictions[-1][end_step:]
                last_item: Coordinate = (keep)[-1]
                self.drawn_predictions[-1] = self.drawn_predictions[-1][:end_step]  # cut the old part
                last_item: Coordinate = keep[-1]
                self.drawn_predictions[-1] = self.drawn_predictions[-1][:end_step]
                # print(self.predictions[-1].frame_index, self.predictions[-2].frame_index, end_step, len(keep))
                # duplicate last item, so the new one has the same nr. of points as the incoming prediction (so it can actually transition)
                ext = [last_item] * (len(self.predictions[-1].predictions[0]) - len(keep))
                ext = [last_item] * (len(self.predictions[-1].predictions[0]) - len(keep))
                # print(ext)
                keep.extend(ext)
                self.drawn_predictions.append(keep)
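
# Sketch of the cut-and-pad transition above, using hypothetical plain lists:
# the previously drawn prediction is cut at end_step, and the kept tail is
# padded with its own last point so it matches the incoming prediction's
# length and can ease into it point by point.
old_drawn = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)]
incoming  = [(2.2, 0.1), (3.2, 0.1), (4.2, 0.1), (5.2, 0.1)]
end_step = 2  # frames between both predictions + CUT_GAP
keep = old_drawn[end_step:]
keep = keep + [keep[-1]] * (len(incoming) - len(keep))  # duplicate the last item
assert len(keep) == len(incoming)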

        for a, drawn_prediction in enumerate(self.drawn_predictions):
            # origin = self.predictions[a].predictions[0][0]
            origin = self.predictions[a].predictions[0][0]
            # associated_diff = self.prediction_diffs[a]
            # progress = associated_diff.nr_of_passed_points()
            for i, pos in enumerate(drawn_prediction):
                # TODO: this should be done in polar space starting from origin (i.e. self.drawn_position[-1])
                decay = max(3, (18/i) if i else 10)  # points further away move with more delay
@@ -697,81 +618,38 @@ class DrawnScenario(TrackScenario):

    def to_renderable_lines(self) -> RenderableLines:
        t = time.time()
        track_age = t - self.track.updated_at  # Should be beginning
        track_age = t - self.track.created_at
        lines = RenderableLines([])

        # track_age_in_frames = int(track_age * TRACK_FADE_ASSUME_FPS)
        # track_max_points = TRACK_FADE_AFTER_DURATION * TRACK_FADE_ASSUME_FPS - track_age_in_frames

        # 1. Trajectory history
        # drawable_points, alphas = self.drawn_positions[:self.MAX_HISTORY], [1]*len(self.drawn_positions)

        # perlin/simplex noise
        # dt: change speed. Divide to make slower
        # amp: amplitude of noise
        # frequency: make smaller to make longer waves
        if OPTION_TRACK_NOISE:
            noisy_points = apply_perlin_noise_to_line_normal(self.drawn_positions, t/5, .3, .02)
        else:
            noisy_points = self.drawn_positions

        drawable_points, alphas = points_fade_out_alpha_mask(noisy_points, track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE)
        color = SrgbaColor(1.,0.,1.,1.-self.lost_factor())

        # TODO: effect configuration

        drawable_points, alphas = points_fade_out_alpha_mask(self.drawn_positions, track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE)
        color = SrgbaColor(1.,0.,0.,1.-self.lost_factor())
        points = [RenderablePoint(pos, color.as_faded(a)) for pos, a in zip(drawable_points, alphas)]
        # points = [RenderablePoint(pos, color.as_faded(a)) for pos, a in zip(drawable_points, alphas)]

        lines.append(RenderableLine(points))
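
# points_fade_out_alpha_mask itself is not shown in this diff; a plausible
# sketch under that assumption: the oldest TRACK_END_FADE points always ramp
# towards alpha 0, and the whole line fades once the track age exceeds the
# fade duration.
import numpy as np

def fade_out_alpha_mask(points, age: float, fade_after: float, end_fade: int):
    alphas = np.ones(len(points))
    ramp = np.linspace(0.0, 1.0, min(end_fade, len(points)))
    alphas[:len(ramp)] = ramp  # the tail of the history fades out first
    if age > fade_after:
        alphas *= max(0.0, 1.0 - (age - fade_after))  # whole line fades once stale
    return points, alphas.tolist()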

        # 2. Position Marker / anomaly score

        if OPTION_POSITION_MARKER:
            anomaly_marker_color = SrgbaColor(0.,0.,1, 1.-self.lost_factor())  # fadeout
            # lines.append(circle_arc(self.drawn_positions[-1][0], self.drawn_positions[-1][1], 1, t, self.anomaly_score, anomaly_marker_color))
            # last point (but this draws a line in a circle, requiring a 'jump back' for the laser)
            cx, cy = self.drawn_position[0], self.drawn_position[1],

            radius = max(.1, self._drawn_anomaly_score * 1.) if OPTION_GROW_ANOMALY_CIRCLE else .1

            steps = 0
            if len(self.drawn_positions) >= steps:
                dx, dy = self.drawn_positions[-1][0] - self.drawn_positions[-steps][0], self.drawn_positions[-1][1] - self.drawn_positions[-steps][1],
                diff = np.array([dx,dy])
                diff = diff/np.linalg.norm(diff) * radius * 1.1
                cx += diff[0]
                cy += diff[1]

            lines.append(circle_arc(
                cx, cy,
                radius,
                0, 1,
                anomaly_marker_color)
            )
            anomaly_marker_color = SrgbaColor(0.,0.,1, 1.-self.lost_factor())  # fadeout
            # lines.append(circle_arc(self.drawn_positions[-1][0], self.drawn_positions[-1][1], 1, t, self.anomaly_score, anomaly_marker_color))
            lines.append(circle_arc(
                self.drawn_positions[-1][0], self.drawn_positions[-1][1],
                max(.1, self._drawn_anomaly_score * 1.),
                0, 1,
                anomaly_marker_color)
            )
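
# circle_arc is defined elsewhere in the repo; a hypothetical equivalent that
# samples an arc (t0..t1 given as fractions of a full turn) into a polyline,
# which is roughly the shape the laser needs here:
import numpy as np

def circle_arc_points(cx: float, cy: float, r: float, t0: float, t1: float, steps: int = 32):
    angles = np.linspace(t0, t1, steps) * 2 * np.pi
    return [(cx + r * np.cos(a), cy + r * np.sin(a)) for a in angles]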

        # 3. Predictions
        if len(self.drawn_predictions):
            color = SrgbaColor(0.,1,0.,1.-self.lost_factor())
            prediction_track_age = time.time() - self.predictions[0].created_at
            t_factor = prediction_track_age / PREDICTION_FADE_IN
            # positions = [RenderablePosition.from_list(pos) for pos in self.drawn_positions]
            for a, drawn_prediction in enumerate(self.drawn_predictions):
                if a < (len(self.drawn_predictions) - 1):
                    # not the newest: fade out
                    deprecation_age = t - self.predictions[a+1].updated_at
                    if deprecation_age > PREDICTION_FADE_IN:
                        # old: skip drawing
                        continue
                    else:
                        fade_factor = 1 - (deprecation_age / PREDICTION_FADE_IN)
                        color = color.as_faded(fade_factor)

                prediction_track_age = time.time() - self.predictions[a].updated_at
                t_factor = prediction_track_age / PREDICTION_FADE_IN

                associated_diff = self.prediction_diffs[a]
                progress = associated_diff.nr_of_passed_points()
            for drawn_prediction in self.drawn_predictions:

                # drawn_prediction, alphas1 = points_fade_out_alpha_mask(drawn_prediction, prediction_track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE, no_frame_max=True)

@@ -790,41 +668,7 @@ class DrawnScenario(TrackScenario):

                # points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_prediction[PREDICTION_OFFSET:], colors[PREDICTION_OFFSET:])]
                points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_prediction, colors)]
                points = points[progress//2:]
                ls = LineString(drawn_prediction)
                if t_factor < 1:
                    ls = substring(ls, 0, t_factor*ls.length, ls.length)

                # print(prediction_track_age)

                # Option 1: dashes
                dashed = dashed_line(ls, .8, 1., prediction_track_age, False)
                for line in dashed.geoms:
                    dash_points = [RenderablePoint(point, color) for point in line.coords]
                    lines.append(RenderableLine(dash_points))

                # Option 2: flash
                flash_distance = prediction_track_age * 5
                # flashes = []
                # for i in range(10):
                #     flashes.append(substring(ls, flash_distance*i, flash_distance + .5))

                # flash_multiline = shapely.union_all(flashes)
                # flashes = flash_multiline.geoms if isinstance(flash_multiline, MultiLineString) else [flash_multiline]
                # print(flashes)
                # for flash_ls in flashes:
                #     flash_points = [RenderablePoint(point, color) for point in flash_ls.coords]
                #     if len(flash_points) > 1:
                #         lines.append(RenderableLine(flash_points))

                # flash_points = [RenderablePoint(point, color) for point in flash_ls.coords]
                # if len(flash_points) > 1:
                #     lines.append(RenderableLine(flash_points))

                # lines.append(RenderableLine(points))
                lines.append(RenderableLine(points))

            # 4. Diffs
            # for drawn_diff in self.drawn_diffs:

@@ -832,11 +676,8 @@ class DrawnScenario(TrackScenario):
            #     colors = [color.as_faded(1) for a2 in range(len(drawn_diff))]
            #     points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_diff, colors)]
            #     lines.append(RenderableLine(points))

        # if OPTION_RENDER_DIFF_SEGMENT:
        #     for diff in self.prediction_diffs:
        #         lines.append_lines(diff.as_renderable())
        # pass
        for diff in self.prediction_diffs:
            lines.append_lines(diff.as_renderable())

        # # print(self.current_state)
@@ -953,24 +794,17 @@ class Stage(Node):
    def setup(self):
        # self.scenarios: List[DrawnScenario] = []
        self.scenarios: Dict[str, DrawnScenario] = defaultdict(lambda: DrawnScenario())
        self.frame_noimg_sock = self.sub(self.config.zmq_frame_noimg_addr)
        self.trajectory_sock = self.sub(self.config.zmq_trajectory_addr)
        self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
        self.stage_sock = self.pub(self.config.zmq_stage_addr)

        self.counter = CounterSender()
        self.frame: Optional[Frame] = None

        if self.config.debug_map:
            debug_color = SrgbaColor(0.,0.,1.,1.)
            self.debug_lines = RenderableLines(load_lines_from_svg(self.config.debug_map, 100, debug_color))
        self.camera: Optional[DistortedCamera] = None

    def run(self):
        prev_time = time.perf_counter()
        while self.is_running.is_set():

            self.tick()

            # 1) poll & update

@@ -990,13 +824,6 @@ class Stage(Node):
            prev_time = now

    def loop_receive(self):
        # 1) receive frames
        try:
            camera_frame: Frame = self.frame_noimg_sock.recv_pyobj(zmq.NOBLOCK)
            self.frame = camera_frame
        except zmq.ZMQError as e:
            pass

        # 2) receive predictions
        try:
            prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
|
@ -1028,13 +855,6 @@ class Stage(Node):
|
|||
|
||||
def loop_render(self):
|
||||
lines = RenderableLines([])
|
||||
|
||||
|
||||
# 0. DEBUG lines:
|
||||
|
||||
|
||||
|
||||
# 1. Draw each scenario:
|
||||
for track_id, scenario in self.scenarios.items():
|
||||
scenario.update_drawn_positions()
|
||||
|
||||
|
@ -1044,44 +864,16 @@ class Stage(Node):
|
|||
# rl = RenderableLines(lines)
|
||||
# with open('/tmp/lines.pcl', 'wb') as fp:
|
||||
# pickle.dump(rl, fp)
|
||||
# rl = lines
|
||||
rl = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
|
||||
rl = lines.as_simplified() # or segmentise (see shapely)
|
||||
self.counter.set("stage.lines", len(lines.lines))
|
||||
self.counter.set("stage.points_orig", lines.point_count())
|
||||
self.counter.set("stage.points", rl.point_count())
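
# The RDP simplification above trims near-collinear points before the lines go
# to the laser. Shapely ships the same Ramer-Douglas-Peucker algorithm, which
# this stand-in sketch uses to show the effect:
from shapely.geometry import LineString

line = LineString([(0, 0), (0.001, 0.5), (0, 1), (0, 2)])
simplified = line.simplify(0.003, preserve_topology=False)  # tolerance in world units
print(len(line.coords), '->', len(simplified.coords))  # near-collinear points are dropped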

        # debug_lines = RenderableLines([])
        # if self.frame and hasattr(self.frame.camera, 'debug_lines'):
        #     for points in self.frame.camera.debug_lines:
        #         line_points = []
        #         # interpolate, so the laser can correct the lines
        #         for i in range(20):
        #             t = i / 19
        #             x = lerp(points[0][0], points[1][0], t)
        #             y = lerp(points[0][1], points[1][1], t)
        #             line_points.append(RenderablePoint((x, y), debug_color))

        #         debug_lines.append(RenderableLine(line_points))

        layers: RenderableLayers = {
            1: rl,
            2: self.debug_lines,
        }

        # print(rl.__dict__)

        self.stage_sock.send_json(obj=layers, cls=DataclassJSONEncoder)
        self.stage_sock.send_json(rl, cls=DataclassJSONEncoder)
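
# DataclassJSONEncoder comes from trap.frame_emitter and is not shown in this
# diff; it presumably follows the standard pattern of serialising dataclasses
# via dataclasses.asdict, roughly:
import dataclasses
import json

class DataclassJSONEncoderSketch(json.JSONEncoder):
    def default(self, o):
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        return super().default(o)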

        # print(json.dumps(rl, cls=DataclassJSONEncoder))

    @classmethod
    def arg_parser(cls) -> ArgumentParser:
        argparser = ArgumentParser()
        argparser.add_argument('--zmq-frame-noimg-addr',
                               help='Manually specify communication addr for the frame messages',
                               type=str,
                               default="ipc:///tmp/feeds_frame2")
        argparser.add_argument('--zmq-trajectory-addr',
                               help='Manually specify communication addr for the trajectory messages',
                               type=str,

@@ -1094,133 +886,6 @@ class Stage(Node):
                               help='Manually specify communication addr for the stage messages (the rendered lines)',
                               type=str,
                               default="tcp://0.0.0.0:99174")
        argparser.add_argument('--debug-map',
                               help='specify a map (svg-file) from which to load lines which will be overlaid',
                               type=str,
                               default="../DATASETS/hof3/map_hof.svg")
        return argparser

# TODO: place somewhere else
# Gemma3:27b prompt: "python. Given a list of coordinates, that describes a line: `drawable_points: List[Tuple[float,float]]` apply perlin noise over the normal of the line, that changes over time `dt`."
def apply_perlin_noise_to_line_normal(drawable_points: np.ndarray, dt: float, amplitude: float = 1.0, frequency: float = 1.0, fade_over_n_points = 8) -> np.ndarray:
    """
    Applies Perlin noise to the normals of a line described by a list of coordinates, changing over time.

    Args:
        drawable_points: A list of (x, y) tuples representing the points of the line.
        dt: The time delta, used to animate the Perlin noise.
        amplitude: The strength of the Perlin noise effect.
        frequency: The frequency of the Perlin noise (how many waves per unit).

    Returns:
        A new list of (x, y) tuples representing the line with Perlin noise applied to the normals.
        If drawable_points has fewer than 2 points, it returns the original list unchanged.

    Raises:
        TypeError: If drawable_points is not a list or dt is not a float.
        ValueError: If the input points are not tuples of length 2.
    """

    # if not isinstance(drawable_points, list):
    #     print(drawable_points, type(drawable_points))
    #     raise TypeError("drawable_points must be a list.")
    if not isinstance(dt, float):
        raise TypeError("dt must be a float.")

    if len(drawable_points) < 2:
        return drawable_points  # nothing to do with fewer than 2 points

    # for point in drawable_points:
    #     if not isinstance(point, tuple) or len(point) != 2:
    #         raise ValueError("Each point in drawable_points must be a tuple of length 2.")

    # noise = PerlinNoise(octaves=4)  # you can adjust octaves for different noise patterns

    new_points = []
    for i in range(len(drawable_points)):
        x, y = drawable_points[i]

        # Calculate the normal vector. We'll approximate it using the previous and next points.
        if i == 0:
            # for the first point, use the next point to estimate the normal
            next_x, next_y = drawable_points[i + 1]
            normal_x = next_y - y
            normal_y = -(next_x - x)
        elif i == len(drawable_points) - 1:
            # for the last point, use the previous point
            prev_x, prev_y = drawable_points[i - 1]
            normal_x = y - prev_y
            normal_y = -(x - prev_x)
        else:
            prev_x, prev_y = drawable_points[i - 1]
            next_x, next_y = drawable_points[i + 1]
            normal_x = next_y - prev_y
            normal_y = -(next_x - prev_x)

        # normalize the normal vector
        norm = np.sqrt(normal_x**2 + normal_y**2)
        if norm > 0:
            normal_x /= norm
            normal_y /= norm

        # apply Perlin noise to the normal
        # noise_x = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_x
        # noise_y = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_y
        noise = snoise2(i * frequency, dt % 1000, octaves=4)

        use_amp = amplitude
        if fade_over_n_points > 0:
            rev_step = len(drawable_points) - i
            amp_factor = rev_step / fade_over_n_points
            if amp_factor < 1:
                use_amp *= amp_factor

        noise_x = noise * use_amp * normal_x
        noise_y = noise * use_amp * normal_y

        # print(noise_x, noise_y, dt, frequency, i, dt, snoise2(i * frequency, dt % 1000, octaves=4))

        # add the noise to the point's coordinates
        new_x = x + noise_x
        new_y = y + noise_y

        new_points.append((new_x, new_y))

    # print(drawable_points, new_points)

    return np.array(new_points)
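
# Example use of the function above (values are arbitrary): wobble a straight
# line, animating the noise field by advancing the time argument every frame.
import numpy as np

line = np.array([(x * 0.1, 0.0) for x in range(50)])
wobbly = apply_perlin_noise_to_line_normal(line, dt=0.5, amplitude=0.3, frequency=0.02)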


import math

def distance(p1, p2):
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])


def dashed_line(line: LineString, dash_len: float, gap_len: float, offset: float = 0, loop_offset = True) -> MultiLineString:
    total_length = line.length

    segments = []

    if loop_offset:
        # by default, prepend the skipped gap
        pos = offset % (dash_len + gap_len)

        if pos > gap_len:
            segments.append(substring(line, 0, pos - gap_len))
    else:
        pos = offset

    while pos < total_length:
        end = min(pos + dash_len, total_length)
        if pos < end:
            dash = substring(line, pos, end)
            segments.append(dash)
        pos += dash_len + gap_len

    return MultiLineString(segments)
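
# Example (assuming shapely's LineString is imported as above): advancing the
# offset with elapsed time makes the dashes march along the line, which is how
# to_renderable_lines animates predictions with prediction_track_age.
ls = LineString([(0, 0), (10, 0)])
for t in (0.0, 0.5, 1.0):
    dashes = dashed_line(ls, dash_len=0.8, gap_len=1.0, offset=t, loop_offset=True)
    print(t, len(dashes.geoms))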

@@ -16,7 +16,7 @@ from trap.preview_renderer import DrawnTrack
import trap.tracker
from trap.config import parser
from trap.frame_emitter import Camera, Detection, DetectionState, video_src_from_config, Frame
from trap.tracker import DETECTOR_YOLOv8, FinalDisplacementFilter, Smoother, TrackReader, _ultralytics_track, Track, TrainingDataWriter, Tracker, read_tracks_json
from trap.tracker import DETECTOR_YOLOv8, FinalDisplacementFilter, Smoother, TrackReader, _yolov8_track, Track, TrainingDataWriter, Tracker, read_tracks_json
from collections import defaultdict

import logging

@@ -461,12 +461,9 @@ def draw_track_projected(img: cv2.Mat, track: Track, color_index: int, camera: C
    for j in range(len(history)-1):
        # a = history[j]
        b = history[j+1]
        detection = track.history[j+1]

        color = point_color if detection.state == DetectionState.Confirmed else (100,100,100)

        # cv2.line(img, to_point(a), to_point(b), point_color, 1)
        cv2.circle(img, to_point(b), 3, color, 2)
        cv2.circle(img, to_point(b), 3, point_color, 2)


def draw_track(img: cv2.Mat, track: Track, color_index: int):
@@ -28,14 +28,12 @@ from torchvision.models.detection import (FasterRCNN_ResNet50_FPN_V2_Weights,
                                          keypointrcnn_resnet50_fpn,
                                          maskrcnn_resnet50_fpn_v2)
from tsmoothie.smoother import ConvolutionSmoother, KalmanSmoother
from ultralytics import YOLO, RTDETR
from ultralytics.engine.model import Model as UltralyticsModel
from ultralytics.engine.results import Results as UltralyticsResult
from ultralytics import YOLO
from ultralytics.engine.results import Results as YOLOResult

from trap import timer
from trap.frame_emitter import (Camera, DataclassJSONEncoder, Detection,
                                DetectionState, Frame, Track)
from trap.gemma import ImgMovementFilter
from trap.node import Node

# Detection = [int, int, int, int, float, int]

@@ -53,32 +51,30 @@ DETECTOR_RETINANET = 'retinanet'
DETECTOR_MASKRCNN = 'maskrcnn'
DETECTOR_FASTERRCNN = 'fasterrcnn'
DETECTOR_YOLOv8 = 'ultralytics'
DETECTOR_RTDETR = 'rtdetr'

TRACKER_DEEPSORT = 'deepsort'
TRACKER_BYTETRACK = 'bytetrack'

DETECTORS = [DETECTOR_RETINANET, DETECTOR_MASKRCNN, DETECTOR_FASTERRCNN, DETECTOR_YOLOv8, DETECTOR_RTDETR]
DETECTORS = [DETECTOR_RETINANET, DETECTOR_MASKRCNN, DETECTOR_FASTERRCNN, DETECTOR_YOLOv8]
TRACKERS = [TRACKER_DEEPSORT, TRACKER_BYTETRACK]

TRACKER_CONFIDENCE_MINIMUM = .001
TRACKER_BYTETRACK_MINIMUM = .001  # bytetrack can track items with a lower threshold
TRACKER_CONFIDENCE_MINIMUM = .2
TRACKER_BYTETRACK_MINIMUM = .1  # bytetrack can track items with a lower threshold
NON_MAXIMUM_SUPRESSION = 1
RCNN_SCALE = .4  # seems to have no impact on detections in the corners

def _ultralytics_track(img: cv2.Mat, frame_idx: int, model: UltralyticsModel, **kwargs) -> List[Detection]:
def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:

    results: List[UltralyticsResult] = list(model.track(img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.001, **kwargs))
    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))

    if results[0].boxes is None or results[0].boxes.id is None:
        # work around https://github.com/ultralytics/ultralytics/issues/5968
        return []

    boxes = results[0].boxes.xywh.cpu()
    confidence = results[0].boxes.conf.cpu().tolist()
    track_ids = results[0].boxes.id.int().cpu().tolist()
    classes = results[0].boxes.cls.int().cpu().tolist()
    return [Detection(track_id, bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3], bbox[2], bbox[3], conf, DetectionState.Confirmed, frame_idx, class_id) for bbox, track_id, class_id, conf in zip(boxes, track_ids, classes, confidence)]
    return [Detection(track_id, bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3], bbox[2], bbox[3], 1, DetectionState.Confirmed, frame.index, class_id) for bbox, track_id, class_id in zip(boxes, track_ids, classes)]
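
# Both variants above convert Ultralytics' xywh output (box centre + size)
# into the top-left-anchored Detection this project uses; a quick check of
# that arithmetic with made-up numbers:
cx, cy, w, h = 100.0, 60.0, 40.0, 20.0
x, y = cx - .5 * w, cy - .5 * h
assert (x, y) == (80.0, 50.0)  # top-left corner recovered from the centre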

class Multifile():
    def __init__(self, srcs: List[Path]):
@@ -399,8 +395,6 @@ class Tracker(Node):
        # # TODO: config device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.frame_preprocess = ImgMovementFilter()

        # TODO: support removal
        self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())

@@ -442,17 +436,7 @@ class Tracker(Node):
            self.mot_tracker = TrackerWrapper.init_type(self.config.tracker)
        elif self.config.detector == DETECTOR_YOLOv8:
            # self.model = YOLO('EXPERIMENTS/yolov8x.pt')
            # best from arsen:
            # self.model = YOLO('./tracker/all_yolo11-2-20-15-41/weights')
            # self.model = YOLO('tracker/all_yolo11-2-20-15-41/weights/best.pt')
            # self.model = YOLO('models/yolo11x-pose.pt')
            # self.model = YOLO("models/yolo12l.pt")
            # self.model = YOLO("models/yolo12x.pt", imgsz=self.config.imgsz)  # see https://github.com/orgs/ultralytics/discussions/8812
            self.model = YOLO("models/yolo12x.pt")
            # NOTE: changing the model, also tweak imgsz in
        elif self.config.detector == DETECTOR_RTDETR:
            # self.model = RTDETR('models/rtdetr-x.pt')  # drops frames
            self.model = RTDETR('models/rtdetr-l.pt')  # somewhat less good in corners, but less frame dropping == better tracking
            self.model = YOLO('yolo11x.pt')
        else:
            raise RuntimeError(f"{self.config.detector} is not implemented yet. See --help")
@@ -471,22 +455,14 @@ class Tracker(Node):

        self.frame_sock = self.sub(self.config.zmq_frame_addr)
        self.trajectory_socket = self.pub(self.config.zmq_trajectory_addr)
        self.detection_socket = self.pub(self.config.zmq_detection_addr)

        logger.debug("Set up tracker")

    def track_frame(self, frame: Frame):
        det_img = frame.img
        # det_img = self.frame_preprocess.apply(frame.img)

        if self.config.detector in [DETECTOR_YOLOv8, DETECTOR_RTDETR]:
            # both ultralytics
            detections: List[Detection] = _ultralytics_track(det_img, frame.index, self.model, classes=[0, 15, 16], imgsz=self.config.imgsz)
        if self.config.detector == DETECTOR_YOLOv8:
            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
        else:
            detections: List[Detection] = self._resnet_track(det_img, frame.index, scale = RCNN_SCALE)

            # emit raw detections
            self.detection_socket.send_pyobj(detections)
            detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)

        for detection in detections:
            track = self.tracks[detection.track_id]

@@ -499,7 +475,8 @@ class Tracker(Node):
            track.history.append(detection)  # add to history

        return detections


    def run(self):
        """
        Live tracking of frames coming in over zmq
@@ -634,12 +611,13 @@ class Tracker(Node):
        logger.info('Stopping')


    def _resnet_track(self, img: cv2.Mat, frame_idx: int, scale: float = 1) -> List[Detection]:
    def _resnet_track(self, frame: Frame, scale: float = 1) -> List[Detection]:
        img = frame.img
        if scale != 1:
            dsize = (int(img.shape[1] * scale), int(img.shape[0] * scale))
            img = cv2.resize(img, dsize)
        detections = self._resnet_detect_persons(img)
        tracks: List[Detection] = self.mot_tracker.track_detections(detections, img, frame_idx)
        tracks: List[Detection] = self.mot_tracker.track_detections(detections, img, frame.index)
        # active_tracks = [t for t in tracks if t.is_confirmed()]
        return [d.get_scaled(1/scale) for d in tracks]
|
@ -701,11 +679,6 @@ class Tracker(Node):
|
|||
help='Manually specity communication addr for the trajectory messages',
|
||||
type=str,
|
||||
default="ipc:///tmp/feeds_traj")
|
||||
|
||||
argparser.add_argument('--zmq-detection-addr',
|
||||
help='Manually specity communication addr for the detection messages',
|
||||
type=str,
|
||||
default="ipc:///tmp/feeds_dets")
|
||||
|
||||
argparser.add_argument("--save-for-training",
|
||||
help="Specify the path in which to save",
|
||||
|
@ -724,10 +697,6 @@ class Tracker(Node):
|
|||
argparser.add_argument("--smooth-tracks",
|
||||
help="Smooth the tracker tracks before sending them to the predictor",
|
||||
action='store_true')
|
||||
argparser.add_argument("--imgsz",
|
||||
help="Detector imgsz parameter (applicable to ultralytics detectors)",
|
||||
type=int,
|
||||
default=640)
|
||||
return argparser
|
||||
|
||||
|
||||
|
|
|
@@ -35,14 +35,6 @@ class GigEConfig:
    binning_v: BinningValue = 1
    pixel_format: int = neoapi.PixelFormat_BayerRG8

    # when changing these values, make sure you also tweak the calibration
    width: int = 2448
    height: int = 2048

    # changing these _automatically changes calibration cx and cy_!!
    offset_x: int = 0
    offset_y: int = 0

    post_crop_tl: Optional[Coordinate] = None
    post_crop_br: Optional[Coordinate] = None
@@ -66,92 +58,47 @@ class GigE(VideoSource):
        self.camera.SetImageBufferCycleCount(1)
        self.setPixelFormat(self.config.pixel_format)

        self.cam_is_configured = False

        self.converter_settings = neoapi.ConverterSettings()
        self.converter_settings.SetDebayerFormat('BGR8')  # opencv
        self.converter_settings.SetDemosaicingMethod(neoapi.ConverterSettings.Demosaicing_Baumer5x5)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Global)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Adaptive)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_ActiveNoiseReduction)
        self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Off)
        self.converter_settings.SetSharpeningFactor(1)
        self.converter_settings.SetSharpeningSensitivityThreshold(2)

    def configCam(self):
        if self.camera.IsConnected():
            self.setPixelFormat(self.config.pixel_format)

            # self.camera.f.PixelFormat.Set(neoapi.PixelFormat_RGB8)
            self.camera.f.BinningHorizontal.Set(self.config.binning_h)
            self.camera.f.BinningVertical.Set(self.config.binning_v)
            self.camera.f.Height.Set(self.config.height)
            self.camera.f.Width.Set(self.config.width)
            self.camera.f.OffsetX.Set(self.config.offset_x)
            self.camera.f.OffsetY.Set(self.config.offset_y)

            # print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(20000))  # shutter 1/50 (hence; 1000000/shutter)
            print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(60000))  # otherwise it becomes too blurry in movements
            # print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(20000))  # shutter 1/50
            print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(25000))
            print('brightness target', self.camera.f.BrightnessAutoNominalValue.Get())
            print('brightness target', self.camera.f.BrightnessAutoNominalValue.Set(35))
            print('brightness target', self.camera.f.BrightnessAutoNominalValue.Set(30))
            print('exposure time', self.camera.f.ExposureTime.Get())
            print('Gamma', self.camera.f.Gamma.Set(0.45))

            # neoapi.region
            # self.camera.f.regeo
            print('Gamma', self.camera.f.Gamma.Set(0.39))
            # print('LUT', self.camera.f.LUTIndex.Get())
            # print('LUT', self.camera.f.LUTEnable.Get())
            # print('exposure time max', self.camera.f.ExposureTimeGapMax.Get())
            # print('exposure time min', self.camera.f.ExposureTimeGapMin.Get())
            # self.pixfmt = self.camera.f.PixelFormat.Get()

            self.cam_is_configured = True

    def setPixelFormat(self, pixfmt):
        self.pixfmt = pixfmt
        self.camera.f.PixelFormat.Set(pixfmt)
        # self.pixfmt = self.camera.f.PixelFormat.Get()

    def recv(self):
        while True:
            # print('receive')
            if not self.camera.IsConnected():
                self.cam_is_configured = False
                return

            if not self.cam_is_configured:
                self.configCam()

            i = self.camera.GetImage(0)
            if i.IsEmpty():
                time.sleep(.01)
                continue

            # print(i.GetAvailablePixelFormats())
            i = i.Convert(self.converter_settings)
            imgarray = i.GetNPArray()
            if self.pixfmt == neoapi.PixelFormat_BayerRG12:
                img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            elif self.pixfmt == neoapi.PixelFormat_BayerRG8:
                img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            else:
                img = cv2.cvtColor(imgarray, cv2.COLOR_BGR2RGB)

            if i.IsEmpty():
                time.sleep(.01)
                continue

            img = i.GetNPArray()

            # imgarray = i.GetNPArray()
            # if self.pixfmt == neoapi.PixelFormat_BayerRG12:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            # elif self.pixfmt == neoapi.PixelFormat_BayerRG8:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            # else:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BGR2RGB)

            # if img.dtype == np.uint16:
            #     img = cv2.convertScaleAbs(img, alpha=(255.0/65535.0))
            if img.dtype == np.uint16:
                img = cv2.convertScaleAbs(img, alpha=(255.0/65535.0))
            img = self._crop(img)
            yield img
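
# The uint16 branch above maps 16-bit sensor data into the 8-bit range that
# OpenCV and the detector expect; alpha = 255/65535 rescales the full range:
import numpy as np
import cv2

raw = np.array([[0, 32768, 65535]], dtype=np.uint16)
print(cv2.convertScaleAbs(raw, alpha=255.0 / 65535.0))  # -> [[  0 128 255]]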

@@ -160,6 +107,8 @@ class GigE(VideoSource):
        br = self.config.post_crop_br or (img.shape[1], img.shape[0])

        return img[tl[1]:br[1],tl[0]:br[0],:]


class SingleCvVideoSource(VideoSource):
    def recv(self):

@@ -177,10 +126,7 @@ class SingleCvVideoSource(VideoSource):

class RtspSource(SingleCvVideoSource):
    def __init__(self, video_url: str | Path, camera: Camera = None):
        # keep max 1 frame in the app buffer (0 = unlimited)
        # As of gstreamer 1.28 drop=true is deprecated; use leaky-type=2 to pick which frames to drop: https://gstreamer.freedesktop.org/documentation/applib/gstappsrc.html?gi-language=c

        gst = f"rtspsrc location={video_url} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
        gst = f"rtspsrc location={video_url} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
        logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
        self.video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
        self.frame_idx = 0

@@ -265,7 +211,7 @@ class CameraSource(SingleCvVideoSource):
        self.video.set(cv2.CAP_PROP_FPS, self.camera.fps)
        self.frame_idx = 0

def get_video_source(video_sources: List[UrlOrPath], camera: Optional[Camera] = None, frame_offset=0, frame_end:Optional[int]=None, loop=False):
def get_video_source(video_sources: List[UrlOrPath], camera: Camera, frame_offset=0, frame_end:Optional[int]=None, loop=False):

    if str(video_sources[0]).isdigit():
        # numeric input is a CV camera

@@ -286,7 +232,3 @@ def get_video_source(video_sources: List[UrlOrPath], camera: Optional[Camera] =
        return FilelistSource(video_sources, offset = frame_offset, end=frame_end, loop=loop)
    # os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "fflags;nobuffer|flags;low_delay|avioflags;direct|rtsp_transport;udp"


def get_video_source_from_str(video_sources: List[str]):
    paths = [UrlOrPath(s) for s in video_sources]
    return get_video_source(paths)

91
uv.lock

@@ -1300,12 +1300,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2e/5e/cb3dbdf3ae18e281b8b1b4691bb5d3465b383e04bde2c2a782c893f1ee21/nicegui-2.13.0-py3-none-any.whl", hash = "sha256:2343d37885df2c2e388a4f4c3f0ce9b308be02e16b0303108471a1a38fe3508f", size = 16482500 },
]

[[package]]
name = "noise"
version = "1.2.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/18/29/bb830ee6d934311e17a7a4fa1368faf3e73fbb09c0d80fc44e41828df177/noise-1.2.2.tar.gz", hash = "sha256:57a2797436574391ff63a111e852e53a4164ecd81ad23639641743cd1a209b65", size = 125615 }

[[package]]
name = "notebook"
version = "7.3.3"

@@ -1830,11 +1824,12 @@ wheels = [

[[package]]
name = "pywin32"
version = "306"
version = "310"
source = { registry = "https://pypi.org/simple" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/08/dc/28c668097edfaf4eac4617ef7adf081b9cf50d254672fcf399a70f5efc41/pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d", size = 8506422 },
    { url = "https://files.pythonhosted.org/packages/d3/d6/891894edec688e72c2e308b3243fad98b4066e1839fd2fe78f04129a9d31/pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8", size = 9226392 },
    { url = "https://files.pythonhosted.org/packages/95/da/a5f38fffbba2fb99aa4aa905480ac4b8e83ca486659ac8c95bce47fb5276/pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1", size = 8848240 },
    { url = "https://files.pythonhosted.org/packages/aa/fe/d873a773324fa565619ba555a82c9dabd677301720f3660a731a5d07e49a/pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d", size = 9601854 },
    { url = "https://files.pythonhosted.org/packages/3c/84/1a8e3d7a15490d28a5d816efa229ecb4999cdc51a7c30dd8914f669093b8/pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213", size = 8522963 },
]

[[package]]

@@ -2196,20 +2191,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 },
]

[[package]]
name = "superfsmon"
version = "1.2.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "supervisor", marker = "sys_platform != 'win32'" },
    { name = "supervisor-win", marker = "sys_platform == 'win32'" },
    { name = "watchdog" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/c2/269264babce3c29f5721cdb7c79ab4930562b67786bb6e5cc838e36e3530/superfsmon-1.2.3.tar.gz", hash = "sha256:fe5918872dc258eacff98cd054b28b73531f9897f72f8583fb2bbd448fc33928", size = 5186 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/bd/c5/d8fbf5c3901db69f7b1e25708fc865570712264026d06f75c5d535ec4ab1/superfsmon-1.2.3-py3-none-any.whl", hash = "sha256:da798e2a2c260fa633213df9f2f26d504fe234f78886e5f62ae4d81f0130bdf7", size = 4738 },
]

[[package]]
name = "supervisor"
version = "4.2.5"

@@ -2222,41 +2203,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2c/7a/0ad3973941590c040475046fef37a2b08a76691e61aa59540828ee235a6e/supervisor-4.2.5-py2.py3-none-any.whl", hash = "sha256:2ecaede32fc25af814696374b79e42644ecaba5c09494c51016ffda9602d0f08", size = 319561 },
]

[[package]]
name = "supervisor-win"
version = "4.7.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pywin32", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/98/48/5d6cd1b7492bf2c11452fd638de45519d2c103caed70c5bdb4ecebbac568/supervisor-win-4.7.0.tar.gz", hash = "sha256:c474d92edc7050b55adae2f7c7789d5d69f180dee7868a27673b1d38f8bea484", size = 397342 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/69/4d/3a493f15f5b80608857ef157f382ace494f51d9031e6bee6082437dd1403/supervisor_win-4.7.0-py2.py3-none-any.whl", hash = "sha256:bd98554c2a0878704c3f3fd95e38965d9986eae6a2ad29f34d73d0aee138a481", size = 303996 },
]

[[package]]
name = "svgpathtools"
version = "1.7.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "numpy" },
    { name = "scipy" },
    { name = "svgwrite" },
]
sdist = { url = "https://files.pythonhosted.org/packages/df/5c/27c896f25e794d8eb1e75a1ab04fad3fcc272b5251d20f634a669e858da0/svgpathtools-1.7.1.tar.gz", hash = "sha256:beaef20fd78164aa5f0a7d4fd164ef20cb0d3d015cdec50c8c168e9d6547f041", size = 2135227 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/a6/00/c23f53a9e91092239ff6f1fcc39463626e293f6b24898739996fe2a6eebd/svgpathtools-1.7.1-py2.py3-none-any.whl", hash = "sha256:3cbb8ba0e8d200f9639034608d9c55b68efbc1bef99ea99559a3e7cb024fb738", size = 68280 },
]

[[package]]
name = "svgwrite"
version = "1.4.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/16/c1/263d4e93b543390d86d8eb4fc23d9ce8a8d6efd146f9427364109004fa9b/svgwrite-1.4.3.zip", hash = "sha256:a8fbdfd4443302a6619a7f76bc937fc683daf2628d9b737c891ec08b8ce524c3", size = 189516 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/84/15/640e399579024a6875918839454025bb1d5f850bb70d96a11eabb644d11c/svgwrite-1.4.3-py3-none-any.whl", hash = "sha256:bb6b2b5450f1edbfa597d924f9ac2dd099e625562e492021d7dd614f65f8a22d", size = 67122 },
]

[[package]]
name = "tensorboard"
version = "2.19.0"

@@ -2551,7 +2497,6 @@ dependencies = [
    { name = "gdown" },
    { name = "ipywidgets" },
    { name = "jsonlines" },
    { name = "noise" },
    { name = "opencv-python" },
    { name = "pandas-helper-calc" },
    { name = "pyglet" },

@@ -2562,9 +2507,7 @@ dependencies = [
    { name = "setproctitle" },
    { name = "shapely" },
    { name = "simplification" },
    { name = "superfsmon" },
    { name = "supervisor" },
    { name = "svgpathtools" },
    { name = "tensorboardx" },
    { name = "torch", version = "1.12.1", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
    { name = "torch", version = "1.12.1+cu113", source = { url = "https://download.pytorch.org/whl/cu113/torch-1.12.1%2Bcu113-cp310-cp310-linux_x86_64.whl" }, marker = "sys_platform == 'linux'" },

@@ -2587,7 +2530,6 @@ requires-dist = [
    { name = "gdown", specifier = ">=4.7.1,<5" },
    { name = "ipywidgets", specifier = ">=8.1.5,<9" },
    { name = "jsonlines", specifier = ">=4.0.0,<5" },
    { name = "noise", specifier = ">=1.2.2" },
    { name = "opencv-python", path = "opencv_python-4.10.0.84-cp310-cp310-linux_x86_64.whl" },
    { name = "pandas-helper-calc", git = "https://github.com/scls19fr/pandas-helper-calc" },
    { name = "pyglet", specifier = ">=2.0.15,<3" },

@@ -2598,9 +2540,7 @@ requires-dist = [
    { name = "setproctitle", specifier = ">=1.3.3,<2" },
    { name = "shapely", specifier = ">=2.1" },
    { name = "simplification", specifier = ">=0.7.12" },
    { name = "superfsmon", specifier = ">=1.2.3" },
    { name = "supervisor", specifier = ">=4.2.5" },
    { name = "svgpathtools", specifier = ">=1.7.1" },
    { name = "tensorboardx", specifier = ">=2.6.2.2,<3" },
    { name = "torch", marker = "python_full_version < '3.10' or python_full_version >= '4' or sys_platform != 'linux'", specifier = "==1.12.1" },
    { name = "torch", marker = "python_full_version >= '3.10' and python_full_version < '4' and sys_platform == 'linux'", url = "https://download.pytorch.org/whl/cu113/torch-1.12.1%2Bcu113-cp310-cp310-linux_x86_64.whl" },

@@ -2764,29 +2704,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a6/3d/7b22abbdb059d551507275a2815bc2b1974e3b9f6a13781c1eac9e858965/vbuild-0.8.2-py2.py3-none-any.whl", hash = "sha256:d76bcc976a1c53b6a5776ac947606f9e7786c25df33a587ebe33ed09dd8a1076", size = 9371 },
]

[[package]]
name = "watchdog"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390 },
    { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389 },
    { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020 },
    { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902 },
    { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380 },
    { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079 },
    { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076 },
    { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077 },
    { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077 },
    { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065 },
    { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070 },
    { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067 },
]

[[package]]
name = "watchfiles"
version = "1.0.4"