Compare commits
No commits in common. "main" and "cluster_predictions" have entirely different histories.
17 changed files with 133 additions and 984 deletions
README.md (28 lines changed)
@@ -7,7 +7,7 @@
 ## How to
 
-> See also the sibling repo [traptools](https://git.rubenvandeven.com/security_vision/traptools) for camera calibration and homography tools that are needed for this repo. Also, [laserspace](https://git.rubenvandeven.com/security_vision/laserspace) is used to map the shapes (which are generated by `stage.py`) to lasers, so that specific optimization techniques can be applied to the paths before sending them to the DAC.
+> See also the sibling repo [traptools](https://git.rubenvandeven.com/security_vision/traptools) for camera calibration and homography tools that are needed for this repo.
 
 These are roughly the steps to go from data gathering to training
@@ -25,29 +25,3 @@ These are roughly the steps to go from data gathering to training
 
-<!-- * On a video file (you can use a wildcard) `DISPLAY=:1 uv run trapserv --remote-log-addr 100.69.123.91 --eval_device cuda:0 --detector ultralytics --homography ../DATASETS/NAME/homography.json --eval_data_dict EXPERIMENTS/trajectron-data/hof2s-m_test.pkl --video-src ../DATASETS/NAME/*.mp4 --model_dir EXPERIMENTS/models/models_DATE_NAME/ --smooth-predictions --smooth-tracks --num-samples 3 --render-window --calibration ../DATASETS/NAME/calibration.json` (the DISPLAY environment variable is used here to run over an SSH connection and display on a local monitor)
-* or on the RTSP stream, which uses GStreamer to substantially reduce latency compared to the default ffmpeg bindings in OpenCV.
-* To just have a single trajectory pulled from the distribution use `--full-dist`. Also try `--z_mode`. -->
-
-## Testnight 2025-06-13
-
-Step-by-step plan:
-
-* Hang lasers. Connect all cables etc.
-* `DISPLAY=:0 cargo run --example laser_frame_stream_gui`
-* Use the number keys to pick a nice shape. Use this to make sure both lasers cover the right area. (If it doesn't work, flip some switches in the GUI; the laser output should then start.)
-* In the trap folder: `uv run supervisorctl start video`
-* In the laserspace folder: `DISPLAY=:0 cargo run --bin render_lines_gui` and use the GUI to draw and tweak the projection area
-* Use the save button to store the configuration
-/*
-* In the trap folder: `DISPLAY=:0 uv run trap_laser_calibration`
-* Follow the instructions:
-  * camera points: 1-9 or cursor to create/select/move points
-  * move laser: vim movement keys (hjkl); use shift to move faster
-  * `c` to calibrate. The matrix is output to the CLI.
-  * `q` to quit
-* Saved to `laser_calib.json`; copy the H field to `trap_rust/src/trap/laser.rs` (e.g. to TMP_STUDIO_CM_8)
-* Restart `render_lines_gui` with the new homographies
-  * `DISPLAY=:0 cargo run --bin render_lines_gui`
-*/
-* Change the video source in `supervisord.conf` and run `uv run supervisorctl update` to switch
-* **If tracking is slow and there's no prediction:**
-  * `uv run python -c "import torch;print(torch.cuda.is_available())"`
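For that last troubleshooting step, a slightly more informative variant of the one-liner — a sketch that assumes only that PyTorch is installed in the `uv` environment:

```python
import torch

# If this prints the "unavailable" branch, tracking falls back to CPU
# and predictions will lag behind the video.
if torch.cuda.is_available():
    print(f"CUDA OK: {torch.cuda.get_device_name(0)}")
else:
    print("CUDA unavailable - check the driver and the installed torch build")
```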
(ByteTrack tracker configuration YAML; filename not shown in the capture)

@@ -2,10 +2,10 @@
 # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
 
 tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
-track_high_thresh: 0.000001 # threshold for the first association
-track_low_thresh: 0.000001 # threshold for the second association
-new_track_thresh: 0.000001 # threshold for init new track if the detection does not match any tracks
-track_buffer: 10 # buffer to calculate the time when to remove tracks
-match_thresh: 0.99 # threshold for matching tracks
+track_high_thresh: 0.0001 # threshold for the first association
+track_low_thresh: 0.0001 # threshold for the second association
+new_track_thresh: 0.0001 # threshold for init new track if the detection does not match any tracks
+track_buffer: 50 # buffer to calculate the time when to remove tracks
+match_thresh: 0.95 # threshold for matching tracks
 fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
 # min_box_area: 10 # threshold for min box areas (for tracker evaluation, not used for now)
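For context: Ultralytics loads ByteTrack settings like these from a YAML file passed to its tracking call. A minimal, hedged sketch of how such a config is typically exercised (the model and file names are assumptions, not taken from this repo):

```python
from ultralytics import YOLO

# Run the built-in tracker with the custom ByteTrack thresholds above.
model = YOLO("yolov8n.pt")  # assumed model; the repo's detector may differ
results = model.track(
    source="demo.mp4",                # any video source
    tracker="custom_bytetrack.yaml",  # assumed filename for the YAML above
    persist=True,                     # keep track IDs across frames
)
```

With `track_high_thresh` this low, virtually every detection enters the first association stage; raising `track_buffer` from 10 to 50 keeps lost tracks alive longer before their IDs are dropped.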
pyproject.toml

@@ -34,8 +34,6 @@ dependencies = [
     "facenet-pytorch>=2.5.3",
     "simplification>=0.7.12",
     "supervisor>=4.2.5",
-    "superfsmon>=1.2.3",
-    "noise>=1.2.2",
 ]
 
 [project.scripts]
@@ -53,7 +51,6 @@ trap_stage = "trap.stage:Stage.parse_and_start"
 trap_prediction = "trap.prediction_server:PredictionServer.parse_and_start"
 trap_render_cv = "trap.cv_renderer:CvRenderer.parse_and_start"
 trap_monitor = "trap.monitor:Monitor.parse_and_start" # migrate timer
-trap_laser_calibration = "trap.laser_calibration:LaserCalibration.parse_and_start" # migrate timer
 
 [tool.uv]
supervisord.conf

@@ -4,7 +4,7 @@ port = *:8293
 # password = 123
 
 [supervisord]
-nodaemon = false
+nodaemon = True
 
 
 ; The rpcinterface:supervisor section must remain in the config file for
@@ -20,7 +20,6 @@ serverurl = http://localhost:8293
 command=uv run trap_monitor
 numprocs=1
 directory=%(here)s
-autostart=false
 
 [program:video]
 command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
@@ -29,7 +28,7 @@ directory=%(here)s
 directory=%(here)s
 
 [program:tracker]
-command=uv run trap_tracker --smooth-tracks
+command=uv run trap_tracker
 directory=%(here)s
 
 [program:stage]
@@ -47,9 +46,3 @@ environment=DISPLAY=":0"
 autostart=false
 ; can be long to quit if rendering to video file
 stopwaitsecs=60
 
-
-# during development, auto-restart some services when the code changes
-[program:superfsmon]
-command=superfsmon trap/stage.py stage
-directory=%(here)s
(Jupyter notebook; filename not shown in the capture)

@@ -36,7 +36,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -151,7 +151,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -161,7 +161,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -187,7 +187,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -196,34 +196,7 @@
   },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "0"
-      ]
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "len(tracks)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 14,
    "metadata": {},
    "outputs": [],
    "source": [
trap/cv_renderer.py

@@ -6,9 +6,8 @@ import logging
 import time
 from argparse import ArgumentParser, Namespace
 from multiprocessing.synchronize import Event as BaseEvent
-from typing import Dict, List, Optional
+from typing import Dict
 
-from charset_normalizer import detect
 import cv2
 import ffmpeg
 import numpy as np
@@ -16,7 +15,6 @@ import pyglet
 import zmq
 from pyglet import shapes
 
-from trap.base import Detection
 from trap.counter import CounterListerner
 from trap.frame_emitter import Frame, Track
 from trap.node import Node
@@ -30,7 +28,6 @@ class CvRenderer(Node):
     def setup(self):
         self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
         self.tracker_sock = self.sub(self.config.zmq_trajectory_addr)
-        self.detector_sock = self.sub(self.config.zmq_detection_addr)
         self.frame_sock = self.sub(self.config.zmq_frame_addr)
 
         # self.H = self.config.H
@@ -49,7 +46,6 @@ class CvRenderer(Node):
         self.frame: Frame|None = None
         self.tracker_frame: Frame|None = None
         self.prediction_frame: Frame|None = None
-        self.detections: List[Detection]|None = None
 
         self.tracks: Dict[str, Track] = {}
         self.predictions: Dict[str, Track] = {}
@@ -119,7 +115,7 @@ class CvRenderer(Node):
 
         cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
         # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
-        cv2.moveWindow("frame", 0, -1)
+        cv2.moveWindow("frame", 1920, -1)
         if self.config.full_screen:
             cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
         # bgsub = cv2.createBackgroundSubtractorMOG2(120, 50, detectShadows=True)
@@ -163,30 +159,21 @@ class CvRenderer(Node):
             except zmq.ZMQError as e:
                 logger.debug(f'reuse tracks')
 
-            try:
-                self.detections = self.detector_sock.recv_pyobj(zmq.NOBLOCK)
-                # print('detections')
-            except zmq.ZMQError as e:
-                # print('no detections')
-                # idx = frame.index if frame else "NONE"
-                # logger.debug(f"reuse video frame {idx}")
-                pass
-
             if first_time is None:
                 first_time = frame.time
 
             # img = frame.img
-            img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.detections, self.config.render_clusters)
+            img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.config.render_clusters)
 
             logger.debug(f"write frame {frame.time - first_time:.3f}s")
            if self.out_writer:
                 self.out_writer.write(img)
             if self.streaming_process:
                 self.streaming_process.stdin.write(img.tobytes())
-            if not self.config.no_window:
+            if self.config.render_window:
                 cv2.imshow('frame', cv2.resize(img, (1920, 1080)))
                 # cv2.imshow('frame', img)
-                cv2.waitKey(10)
+                cv2.waitKey(1)
 
         # clear out old tracks & predictions:
@@ -223,12 +210,6 @@ class CvRenderer(Node):
                             help='Manually specify communication addr for the trajectory messages',
                             type=str,
                             default="ipc:///tmp/feeds_traj")
-
-        render_parser.add_argument('--zmq-detection-addr',
-                            help='Manually specify communication addr for the detection messages',
-                            type=str,
-                            default="ipc:///tmp/feeds_dets")
 
         render_parser.add_argument('--zmq-prediction-addr',
                             help='Manually specify communication addr for the prediction messages',
                             type=str,
@@ -237,8 +218,8 @@ class CvRenderer(Node):
         render_parser.add_argument("--render-file",
                             help="Render a video file previewing the prediction, and its delay compared to the current frame",
                             action='store_true')
-        render_parser.add_argument("--no-window",
-                            help="Disable previewing to a window",
+        render_parser.add_argument("--render-window",
+                            help="Render a preview to a window",
                             action='store_true')
 
         render_parser.add_argument("--full-screen",
@@ -289,7 +270,7 @@ def get_animation_position(track: Track, current_frame: Frame):
 
 
-def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], detections: Optional[List[Detection]], as_clusters = True) -> np.array:
+def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame, first_time: float, config: Namespace, tracks: Dict[str, Track], predictions: Dict[str, Track], as_clusters = True) -> np.array:
     scale = 100
     # TODO: replace opencv with QPainter to support alpha? https://doc.qt.io/qtforpython-5/PySide2/QtGui/QPainter.html#PySide2.QtGui.PySide2.QtGui.QPainter.drawImage
     # or https://github.com/pygobject/pycairo?tab=readme-ov-file
@@ -323,19 +304,6 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
     # cv2.imwrite(str(self.config.output_dir / "orig.png"), warpedFrame)
     cv2.rectangle(img, (0,0), (img.shape[1],25), (0,0,0), -1)
 
-    if detections:
-        for detection in detections:
-            points = [
-                detection.get_foot_coords(),
-                [detection.l, detection.t],
-                [detection.l + detection.w, detection.t + detection.h],
-            ]
-            points = frame.camera.points_img_to_world(points, scale)
-            points = [to_point(p) for p in points]  # to int
-
-            cv2.rectangle(img, points[1], points[2], (255,255,0), 2)
-            cv2.circle(img, points[0], 5, (255,255,0), 2)
 
     def conversion(points):
         return convert_world_points_to_img_points(points, scale)
@@ -403,7 +371,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
         for option, value in prediction_frame.log['predictor'].items():
             options.append(f"{option}: {value}")
 
     if len(options):
         cv2.putText(img, options.pop(-1), (20, img.shape[0]-30), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
         cv2.putText(img, " | ".join(options), (20, img.shape[0]-10), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
trap/frame_emitter.py

@@ -1,16 +1,18 @@
 from __future__ import annotations
 
 import logging
 import multiprocessing
 import pickle
 from argparse import ArgumentParser, Namespace
 from multiprocessing import Event
 from pathlib import Path
 
 import zmq
 
 from trap import node
-from trap.base import *
+from trap.base import LambdaParser
 from trap.gemma import ImgMovementFilter
 from trap.preview_renderer import FrameWriter
 from trap.timer import Timer
 from trap.video_sources import get_video_source
 
 logger = logging.getLogger('trap.frame_emitter')
@@ -37,12 +39,6 @@ class FrameEmitter(node.Node):
         offset = int(self.config.video_offset or 0)
         source = get_video_source(self.video_srcs, self.config.camera, offset, self.config.video_end, self.config.video_loop)
         video_gen = enumerate(source, start=offset)
 
         # writer = FrameWriter(self.config.record, None, None) if self.config.record else nullcontext
         print(self.config.record)
         writer = FrameWriter(str(self.config.record), None, None) if self.config.record else None
         try:
             processor = ImgMovementFilter()
             while self.run_loop():
 
                 try:
@@ -53,19 +49,11 @@ class FrameEmitter(node.Node):
 
                     frame = Frame(i, img=img, H=self.config.camera.H, camera=self.config.camera)
 
                     # frame.img = processor.apply(frame.img)
 
                     # TODO: this is very dirty, need to find another way.
                     # perhaps multiprocessing Array?
                     self.frame_noimg_sock.send(pickle.dumps(frame.without_img()))
                     self.frame_sock.send(pickle.dumps(frame))
 
                     if writer:
                         writer.write(frame.img)
         finally:
             if writer:
                 writer.release()
 
             logger.info("Stopping")
 
@@ -96,10 +84,6 @@ class FrameEmitter(node.Node):
                             help="End (or loop) playback at given frame.",
                             default=None,
                             type=int)
-        argparser.add_argument("--record",
-                            help="Record source video to given filename",
-                            default=None,
-                            type=Path)
 
         argparser.add_argument("--video-loop",
                             help="By default the emitter will run only once. This allows it to loop the video file to keep testing.",
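FrameEmitter pushes pickled `Frame` objects over ZeroMQ PUB sockets (one stream with the image, one without). A minimal consumer, as a sketch — the address is the `ipc:///tmp/feeds_frame` default that appears elsewhere in this diff:

```python
import pickle

import zmq

# Subscribe to the frame stream published by FrameEmitter.
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect("ipc:///tmp/feeds_frame")
sock.setsockopt(zmq.SUBSCRIBE, b"")  # no topic filtering

frame = pickle.loads(sock.recv())  # blocks until the emitter sends a frame
print(frame.index, frame.time)
```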
trap/laser_calibration.py (file removed in this comparison)

@@ -1,292 +0,0 @@
from argparse import ArgumentParser
import enum
import json
from pathlib import Path
import time
from typing import Optional

import cv2
import numpy as np

from trap.base import DataclassJSONEncoder, DistortedCamera, Frame
from trap.lines import CoordinateSpace, RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SrgbaColor, cross_points
from trap.node import Node
from trap.stage import Coordinate


class Modes(enum.Enum):
    POINTS = 1
    TEST_LINE = 2

class LaserCalibration(Node):
    """
    A calibrated camera can be used to reverse-map the points of the laser to world coordinates.
    Note, it publishes on the address of the stage node, so they cannot run at the same time.

    1. Draw points with the laser (use 1-9 to create/select, then position them with arrow keys)
    2. Use the cursor on the camera stream to create an image point for it.
       - Locate a nearby point to select and drag
    3. Use the image coordinate of the point; undistorting and applying the homography gives the world coordinate.
    4. Perform homography on world coordinates + laser coordinates
    """

    def setup(self):
        # self.scenarios: List[DrawnScenario] = []

        self.frame_sock = self.sub(self.config.zmq_frame_addr)
        self.laser_sock = self.pub(self.config.zmq_stage_addr)

        self.camera: Optional[DistortedCamera] = None

        self._selected_point = None
        self._is_dragging = False
        self.laser_points = {}
        self.image_points = {}
        self.mode = Modes.POINTS
        self.H = None

        self.img_size = (1920, 1080)
        self.frame_img_factor = (1, 1)

        if self.config.calibfile.exists():
            with self.config.calibfile.open('r') as fp:
                calibdata = json.load(fp)
                self.laser_points = calibdata['laser_points']
                self.image_points = calibdata['image_points']
                self.H = calibdata['H']

    def run(self):
        cv2.namedWindow("laser_calib", cv2.WINDOW_NORMAL)
        # https://gist.github.com/ronekko/dc3747211543165108b11073f929b85e
        # cv2.moveWindow("laser_calib", 0, -1)
        cv2.setMouseCallback('laser_calib', self.mouse_event)
        cv2.setWindowProperty("laser_calib", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        # arrow up (82), down (84), arrow left (81)

        frame = None
        while self.run_loop_capped_fps(60):
            if self.frame_sock.poll(0):
                frame: Frame = self.frame_sock.recv_pyobj()
                if not self.camera:
                    self.camera = frame.camera

            if frame is None:
                continue

            self.frame_img_factor = frame.img.shape[1] / self.img_size[0], frame.img.shape[0] / self.img_size[1]

            img = frame.img
            img = cv2.resize(img, self.img_size)

            cv2.putText(img, 'press 1-0 to create/edit points', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
            if len(self.laser_points) < 4:
                cv2.putText(img, 'add points to calculate homography', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
            else:
                cv2.putText(img, 'press c to calculate homography', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,0))

            cv2.putText(img, str(self.config.calibfile), (10, self.img_size[1]-30), cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,0))

            if self._selected_point:
                color = (0,255,255)
                cv2.putText(img, f'selected {self._selected_point}', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
                cv2.putText(img, 'press d to delete', (10, 80), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
                cv2.putText(img, 'use arrows to position laser for this point', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, .5, color)
                target = self.camera.points_img_to_world([self.image_points[self._selected_point]])[0].tolist()
                target = round(target[0], 2), round(target[1], 2)
                cv2.putText(img, f'map {self.laser_points[self._selected_point]} to {target} ({self.image_points[self._selected_point]})', (10, 120), cv2.FONT_HERSHEY_SIMPLEX, .5, color)

            for k, coord in self.image_points.items():
                color = (0,0,255) if self._selected_point == k else (255,0,0)
                coord = int(coord[0] / self.frame_img_factor[0]), int(coord[1] / self.frame_img_factor[1])
                cv2.circle(img, coord, 4, color, thickness=2)
                cv2.putText(img, str(k), (coord[0]+10, coord[1]), cv2.FONT_HERSHEY_SIMPLEX, .5, color)

            key = cv2.waitKey(5)  # or for arrows: full_key_code = cv2.waitKeyEx(0)
            self.key_event(key)
            # nr_keys = [ord(i) for i in range(10)]  # select/add point
            # cv2.
            cv2.imshow('laser_calib', img)

            lines = []
            if self.mode == Modes.TEST_LINE:
                lines.append(RenderableLine([
                    RenderablePoint((i, time.time() % 18), SrgbaColor(0,1,0,1)) for i in range(-15, 40)
                ]))
                # render in laser space
                rl = RenderableLines(lines, CoordinateSpace.WORLD)
                self.laser_sock.send_json(rl, cls=DataclassJSONEncoder)
            else:
                if self._selected_point:
                    point = self.laser_points[self._selected_point]
                    lines.extend(cross_points(point[0], point[1], 100, SrgbaColor(0,1,0,1)))

                # render in laser space
                rl = RenderableLines(lines, CoordinateSpace.LASER)
                self.laser_sock.send_json(rl, cls=DataclassJSONEncoder)

            # print(json.dumps(rl, cls=DataclassJSONEncoder))

    def key_event(self, key: int):
        if key < 0:
            return

        if key == ord('q'):
            exit()

        if key == 27:  # esc
            self._selected_point = None

        if key == ord('c'):
            self.calculate_homography()
            self.save()

        if key == ord('d') and self._selected_point:
            self.delete_point(self._selected_point)

        if key == ord('t'):
            self.mode = Modes.TEST_LINE if self.mode == Modes.POINTS else Modes.POINTS
            print(self.mode)

        # arrow up (82), down (84), arrow left (81)
        if self._selected_point and key in [81, 84, 82, 83,
                                            ord('h'), ord('j'), ord('k'), ord('l'),
                                            ord('H'), ord('J'), ord('K'), ord('L'),
                                            ]:
            diff = [0, 0]
            if key in [81, ord('h')]:
                diff[0] -= 1
            if key == ord('H'):
                diff[0] -= 10
            if key in [83, ord('l')]:
                diff[0] += 1
            if key == ord('L'):
                diff[0] += 10

            if key in [82, ord('k')]:
                diff[1] += 1
            if key == ord('K'):
                diff[1] += 10
            if key in [84, ord('j')]:
                diff[1] -= 1
            if key == ord('J'):
                diff[1] -= 10

            self.laser_points[self._selected_point] = (
                self.laser_points[self._selected_point][0] + diff[0],
                self.laser_points[self._selected_point][1] + diff[1],
            )

        nr_keys = [ord(str(i)) for i in range(10)]
        if key in nr_keys:
            select = str(nr_keys.index(key))
            self.create_or_select(select)

    def mouse_event(self, event, x, y, flags, param):
        x *= self.frame_img_factor[0]
        y *= self.frame_img_factor[1]
        if event == cv2.EVENT_MOUSEMOVE:
            if not self._is_dragging or not self._selected_point:
                return
            self.image_points[self._selected_point] = (x, y)

        if event == cv2.EVENT_LBUTTONDOWN:
            # select or create
            self._selected_point = None
            for i, p in self.image_points.items():
                d = (p[0]-x)**2 + (p[1]-y)**2
                if d < 30:
                    self._selected_point = i
                    break
            if self._selected_point is None:
                self._selected_point = self.new_point((x, y), None)
            self._is_dragging = True

        if event == cv2.EVENT_LBUTTONUP:
            self._is_dragging = False
            # ... point stays selected to tweak laser

    def create_or_select(self, nr: str):
        if nr not in self.image_points:
            self.new_point(None, None, nr)
        self._selected_point = nr
        return nr

    def new_point(self, img_coord: Optional[Coordinate], laser_coord: Optional[Coordinate], nr: Optional[str] = None):
        if nr:
            new_nr = nr
        else:
            new_nr = None
            for i in range(100):
                k = str(i)
                if k not in self.image_points:
                    new_nr = k
                    break
            if not new_nr:
                new_nr = 0  # cover unlikely case

        self.image_points[new_nr] = img_coord or (100, 100)
        self.laser_points[new_nr] = laser_coord or (100, 100)
        return new_nr

    def delete_point(self, point: str):
        del self.image_points[point]
        del self.laser_points[point]
        self._selected_point = None

    def calculate_homography(self):
        if len(self.image_points) < 4:
            return

        world_points = self.camera.points_img_to_world(list(self.image_points.values()))
        laser_points = np.array(list(self.laser_points.values()))
        print('from', world_points)
        print('to', laser_points)
        self.H, status = cv2.findHomography(world_points, laser_points)

        print('Found')
        print(self.H)

    def save(self):
        with self.config.calibfile.open('w') as fp:
            json.dump({
                'laser_points': self.laser_points,
                'image_points': self.image_points,
                'H': self.H.tolist()
            }, fp)

    @classmethod
    def arg_parser(cls) -> ArgumentParser:
        argparser = ArgumentParser()
        argparser.add_argument('--zmq-frame-addr',
                            help='Manually specify communication addr for the frame messages',
                            type=str,
                            default="ipc:///tmp/feeds_frame")
        argparser.add_argument('--zmq-stage-addr',
                            help='Manually specify communication addr for the stage messages (the rendered lines)',
                            type=str,
                            default="tcp://0.0.0.0:99174")
        argparser.add_argument('--calibfile',
                            help='specify file to save & load points with',
                            type=Path,
                            default=Path("./laser_calib.json"))

        return argparser
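The homography H computed by `calculate_homography()` maps world coordinates onto laser coordinates; this is also what gets copied into `trap_rust/src/trap/laser.rs`. A sketch of applying such a matrix with plain OpenCV (the H values here are illustrative, not from a real calibration):

```python
import cv2
import numpy as np

# Illustrative world->laser homography; a real one comes from laser_calib.json.
H = np.array([[120.0, 4.0, 1800.0],
              [-3.5, 115.0, 1200.0],
              [0.0, 0.0, 1.0]])

world = np.array([[[2.5, 7.0]]], dtype=np.float32)  # one (x, y) world point
laser = cv2.perspectiveTransform(world, H)
print(laser.reshape(-1, 2))  # coordinates to aim the laser at
```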
trap/lines.py

@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from enum import Enum, IntEnum
+from enum import Enum
 import math
 from typing import List, Tuple
 import numpy as np
@@ -14,12 +14,6 @@ See [notebook](../test_path_transforms.ipynb) for examples
 
 RenderablePosition = Tuple[float, float]
 
-class CoordinateSpace(IntEnum):
-    CAMERA = 1
-    UNDISTORTED_CAMERA = 2
-    WORLD = 3
-    LASER = 4
-
 @dataclass
 class SrgbaColor():
     red: float
@@ -61,12 +55,12 @@ class SimplifyMethod(Enum):
 class RenderableLine():
     points: List[RenderablePoint]
 
-    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP, factor = SIMPLIFY_FACTOR_RDP):
+    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP):
         linestring = [p.position for p in self.points]
         if method == SimplifyMethod.RDP:
-            indexes = simplify_coords_idx(linestring, factor)
+            indexes = simplify_coords_idx(linestring, SIMPLIFY_FACTOR_RDP)
         elif method == SimplifyMethod.VW:
-            indexes = simplify_coords_vw_idx(linestring, factor)
+            indexes = simplify_coords_vw_idx(linestring, SIMPLIFY_FACTOR_VW)
         points = [self.points[i] for i in indexes]
         return RenderableLine(points)
 
@@ -74,12 +68,11 @@ class RenderableLine():
 @dataclass
 class RenderableLines():
     lines: List[RenderableLine]
-    space: CoordinateSpace = CoordinateSpace.WORLD
 
-    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP, factor = SIMPLIFY_FACTOR_RDP):
-        """Wraps RenderableLine simplification, smaller factor is more detailed"""
+    def as_simplified(self, method: SimplifyMethod = SimplifyMethod.RDP):
+        """Wraps RenderableLine simplification"""
         return RenderableLines(
-            [line.as_simplified(method, factor) for line in self.lines]
+            [line.as_simplified(method) for line in self.lines]
         )
 
     def append(self, rl: RenderableLine):
@@ -88,9 +81,6 @@ class RenderableLines():
     def append_lines(self, rls: RenderableLines):
         self.lines.extend(rls.lines)
 
-    def point_count(self):
-        return sum([len(l.points) for l in self.lines])
-
     # def merge(self, rl: RenderableLines):
 
@@ -101,7 +91,7 @@ def circle_arc(cx, cy, r, t, l, c: SrgbaColor):
     for l*2pi, offset by t. Both t and l are 0 <= [t, l] <= 1
     """
 
-    resolution = 30
+    resolution = 40
     steps = int(resolution * l)
     offset = int(resolution * t)
     pointlist: list[RenderablePoint] = []
@@ -113,23 +103,3 @@ def circle_arc(cx, cy, r, t, l, c: SrgbaColor):
 
     return RenderableLine(pointlist)
 
-def cross_points(cx, cy, r, c: SrgbaColor):
-    # r = 100
-    steps = 3
-    pointlist: list[RenderablePoint] = []
-    for i in range(steps):
-        x = int(cx)
-        y = int(cy + r - i * 2 * r/steps)
-        pos = (x, y)
-        pointlist.append(RenderablePoint(pos, c))
-    path = RenderableLine(pointlist)
-    pointlist: list[RenderablePoint] = []
-    for i in range(steps):
-        y = int(cy)
-        x = int(cx + r - i * 2 * r/steps)
-        pos = (x, y)
-        pointlist.append(RenderablePoint(pos, c))
-    path2 = RenderableLine(pointlist)
-
-    return [path, path2]
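The simplification change above pins the tolerance to the module constants instead of a per-call `factor`. For reference, the underlying RDP call from the `simplification` package (already in pyproject.toml) returns indices, which is what lets `as_simplified` keep each surviving point's color; the epsilon here is an arbitrary example value:

```python
from simplification.cutil import simplify_coords_idx

# RDP keeps only the points needed to stay within epsilon of the original
# shape; returning indices lets per-point metadata travel along.
line = [(0.0, 0.0), (1.0, 0.05), (2.0, -0.02), (3.0, 1.0), (4.0, 1.02)]
indexes = simplify_coords_idx(line, 0.1)  # epsilon: smaller keeps more detail
simplified = [line[i] for i in indexes]
print(indexes, simplified)
```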
trap/monitor.py (file removed in this comparison)

@@ -1,65 +0,0 @@
from argparse import ArgumentParser
import time
from trap.counter import CounterListerner
from trap.node import Node


class Monitor(Node):
    """
    Render a stage, on which different TrackScenarios take place to a
    single image of lines. Which can be passed to different renderers
    E.g. the laser or image renderers.
    """

    FPS = 1

    def setup(self):
        # self.scenarios: List[DrawnScenario] = []
        self.counter_listener = CounterListerner()

    def run(self):
        prev_time = time.perf_counter()
        while self.is_running.is_set():
            # self.tick()  # don't pollute it with own data

            self.counter_listener.snapshot()
            stats = self.counter_listener.to_string()
            if len(stats):
                self.logger.info(stats)
            # else:
            #     self.logger.info("no stats")

            # for i, (k, v) in enumerate(self.counter_listener.get_latest().items()):
            #     print(k, v)
            #     cv2.putText(img, f"{k} {v.value()}", (20, img.shape[0]-(40*i)-40), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)

            # 3) calculate latency for desired FPS
            now = time.perf_counter()
            time_diff = (now - prev_time)
            if time_diff < 1/self.FPS:
                # print(f"sleep {1/self.FPS - time_diff}")
                time.sleep(1/self.FPS - time_diff)
                now += 1/self.FPS - time_diff

            prev_time = now

    @classmethod
    def arg_parser(cls) -> ArgumentParser:
        argparser = ArgumentParser()
        # argparser.add_argument('--zmq-trajectory-addr',
        #                     help='Manually specify communication addr for the trajectory messages',
        #                     type=str,
        #                     default="ipc:///tmp/feeds_traj")
        # argparser.add_argument('--zmq-prediction-addr',
        #                     help='Manually specify communication addr for the prediction messages',
        #                     type=str,
        #                     default="ipc:///tmp/feeds_preds")
        # argparser.add_argument('--zmq-stage-addr',
        #                     help='Manually specify communication addr for the stage messages (the rendered lines)',
        #                     type=str,
        #                     default="tcp://0.0.0.0:99174")
        return argparser
trap/node.py (67 lines changed)

@@ -1,9 +1,7 @@
 import logging
 from logging.handlers import QueueHandler, QueueListener, SocketHandler
 import multiprocessing
 from multiprocessing.synchronize import Event as BaseEvent
 from argparse import ArgumentParser, Namespace
-import time
-from typing import Optional
 
 import zmq
@@ -20,8 +18,6 @@ class Node():
         self.zmq_context = zmq.Context()
         self.logger = self._logger()
 
-        self._prev_loop_time = 0
-
         self.setup()
 
     @classmethod
@@ -46,50 +42,10 @@ class Node():
         self.tick()
         return self.is_running.is_set()
 
-    def run_loop_capped_fps(self, max_fps: float):
-        """Use in run(), to check if it should keep looping
-        Takes care of tick()'ing the iterations/second counter
-        """
-        now = time.perf_counter()
-        time_diff = (now - self._prev_loop_time)
-        if time_diff < 1/max_fps:
-            # print(f"sleep {1/max_fps - time_diff}")
-            time.sleep(1/max_fps - time_diff)
-            now += 1/max_fps - time_diff
-        self._prev_loop_time = now
-
-        return self.run_loop()
-
     @classmethod
     def arg_parser(cls) -> ArgumentParser:
         raise RuntimeError("Not implemented arg_parser()")
 
-    @classmethod
-    def _get_arg_parser(cls) -> ArgumentParser:
-        parser = cls.arg_parser()
-        # add some defaults
-        parser.add_argument(
-            '--verbose',
-            '-v',
-            help="Increase verbosity. Add multiple times to increase further.",
-            action='count', default=0
-        )
-        parser.add_argument(
-            '--remote-log-addr',
-            help="Connect to a remote logger like cutelog. Specify the ip",
-            type=str,
-            default="100.72.38.82"
-        )
-        parser.add_argument(
-            '--remote-log-port',
-            help="Connect to a remote logger like cutelog. Specify the port",
-            type=int,
-            default=19996
-        )
-        return parser
-
     def sub(self, addr: str):
         "Default zmq sub configuration"
         sock = self.zmq_context.socket(zmq.SUB)
@@ -113,9 +69,7 @@ class Node():
 
     @classmethod
     def parse_and_start(cls):
         """To start the node from CLI/supervisor"""
-        config = cls._get_arg_parser().parse_args()
-        setup_logging(config)  # running from cli, we need to set up logging
+        config = cls.arg_parser().parse_args()
         is_running = multiprocessing.Event()
         is_running.set()
         statsender = CounterSender()
@@ -123,22 +77,3 @@ class Node():
         # timer_counter = Timer(cls.__name__)
 
         cls.start(config, is_running, counter)
 
-
-def setup_logging(config: Namespace):
-    loglevel = logging.NOTSET if config.verbose > 1 else logging.DEBUG if config.verbose > 0 else logging.INFO
-    stream_handler = logging.StreamHandler()
-    log_handlers = [stream_handler]
-
-    if config.remote_log_addr:
-        logging.captureWarnings(True)
-        # root_logger.setLevel(logging.NOTSET)  # to send all records to cutelog
-        socket_handler = SocketHandler(config.remote_log_addr, config.remote_log_port)
-        print(socket_handler.host, socket_handler.port)
-        socket_handler.setLevel(logging.NOTSET)
-        log_handlers.append(socket_handler)
-
-    logging.basicConfig(
-        level=loglevel,
-        handlers=log_handlers  # [queue_handler]
-    )
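For orientation, the Node lifecycle that the `[project.scripts]` entries rely on: `setup()` opens sockets, `run()` loops on `run_loop()`, and `parse_and_start()` is the CLI entry point. A minimal hedged sketch of a subclass (the class and address are invented for illustration):

```python
from argparse import ArgumentParser

from trap.node import Node


class HeartbeatNode(Node):
    """Toy node: publishes an incrementing counter."""

    def setup(self):
        # pub()/sub() wrap the default zmq socket configuration
        self.beat_sock = self.pub("ipc:///tmp/feeds_heartbeat")  # assumed addr

    def run(self):
        beats = 0
        while self.run_loop():  # also ticks the iterations/second counter
            beats += 1
            self.beat_sock.send_pyobj(beats)

    @classmethod
    def arg_parser(cls) -> ArgumentParser:
        return ArgumentParser()

# wired up like the pyproject entries, e.g.:
# trap_heartbeat = "trap.heartbeat:HeartbeatNode.parse_and_start"
```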
trap/preview_renderer.py

@@ -300,7 +300,7 @@ class FrameWriter:
     """
     def __init__(self, filename: str, fps: float, frame_size: Optional[tuple] = None) -> None:
         self.filename = filename
-        self._fps = fps
+        self.fps = fps
         self.frame_size = frame_size
 
         self.tmp_dir = tempfile.TemporaryDirectory(prefix="trap-output-")
298
trap/stage.py
298
trap/stage.py
|
@ -24,20 +24,20 @@ from trap import shapes
|
|||
from trap.base import Camera, DataclassJSONEncoder, DistortedCamera, Frame, ProjectedTrack, Track
|
||||
from trap.counter import CounterSender
|
||||
from trap.laser_renderer import circle_points, rotateMatrix
|
||||
from trap.lines import RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SimplifyMethod, SrgbaColor, circle_arc
|
||||
from trap.lines import RenderableLine, RenderableLines, RenderablePoint, RenderablePosition, SrgbaColor, circle_arc
|
||||
from trap.node import Node
|
||||
from trap.timer import Timer
|
||||
from trap.utils import exponentialDecay, exponentialDecayRounded, relativePointToPolar, relativePolarToPoint
|
||||
|
||||
from noise import snoise2
|
||||
|
||||
logger = logging.getLogger('trap.stage')
|
||||
|
||||
Coordinate = Tuple[float, float]
|
||||
DeltaT = float # delta_t in seconds
|
||||
|
||||
OPTION_GROW_ANOMALY_CIRCLE = False
|
||||
OPTION_RENDER_DIFF_SEGMENT = True
|
||||
# current_fraction = line_locate_point(new_line_string, Point(old_ls.coords[-1]), normalized=True)
|
||||
# new_fraction = current_fraction + stepsize
|
||||
# grown_string = shapely.ops.substring(new_line_string, 0, new_fraction, normalized=True)
|
||||
|
||||
class LineGenerator(ABC):
|
||||
@abstractmethod
|
||||
|
@ -60,10 +60,6 @@ class AppendableLine(LineGenerator):
|
|||
self.ready = len(self.points) == 0
|
||||
self.draw_decay_speed = draw_decay_speed
|
||||
|
||||
def nr_of_passed_points(self):
|
||||
"""The number of points passed in the animation"""
|
||||
return len(self._drawn_points) - 1
|
||||
|
||||
def update_drawn_positions(self, dt: DeltaT):
|
||||
if len(self.points) == 0:
|
||||
# nothing to draw yet
|
||||
|
@ -94,8 +90,6 @@ class AppendableLine(LineGenerator):
|
|||
self._drawn_points[-1] = (float(x), float(y))
|
||||
|
||||
class ProceduralChain(LineGenerator):
|
||||
"""A line that can be 'dragged' to a target. In which
|
||||
it disappears."""
|
||||
MOVE_DECAY_SPEED = 80 # speed at which the drawing head should approach the next point
|
||||
VELOCITY_DAMPING = 10
|
||||
VELOCITY_FACTOR = 2
|
||||
|
@ -163,15 +157,7 @@ class ProceduralChain(LineGenerator):
|
|||
|
||||
|
||||
class DiffSegment():
|
||||
"""
|
||||
A segment of a prediction track, that can be diffed
|
||||
with a track. The track is continously update.
|
||||
If a new prediction comes in, the diff is marked as
|
||||
finished. After which it is animated and added to the
|
||||
Scenario's anomaly score.
|
||||
"""
|
||||
DRAW_DECAY_SPEED = 25
|
||||
POINT_INTERVAL = 4
|
||||
|
||||
def __init__(self, prediction: ProjectedTrack):
|
||||
self.ptrack = prediction
|
||||
|
@ -186,12 +172,6 @@ class DiffSegment():
|
|||
def finish(self):
|
||||
self.finished = True
|
||||
|
||||
def nr_of_passed_points(self):
|
||||
if isinstance(self.line, AppendableLine):
|
||||
return self.line.nr_of_passed_points() * self.POINT_INTERVAL
|
||||
else:
|
||||
return len(self.points) * self.POINT_INTERVAL
|
||||
|
||||
# run on each track update received
|
||||
def update_track(self, track: ProjectedTrack):
|
||||
self._target_track = track
|
||||
|
@ -218,7 +198,7 @@ class DiffSegment():
|
|||
line = []
|
||||
for i, (p1, p2) in enumerate(zip(trajectory_range, prediction_range)):
|
||||
offset_from_start = (pred_diff_steps_forward + i)
|
||||
if offset_from_start % self.POINT_INTERVAL == 0:
|
||||
if offset_from_start % 4 == 0:
|
||||
self.line.points.extend([p1, p2])
|
||||
self.points.extend([p1, p2])
|
||||
|
||||
|
@ -236,6 +216,33 @@ class DiffSegment():
|
|||
if isinstance(self.line, ProceduralChain):
|
||||
self.line.target = self._target_track.projected_history[-1]
|
||||
|
||||
# if len(self.points) == 0:
|
||||
# # nothing to draw yet
|
||||
# return
|
||||
|
||||
# # self._drawn_points = self.points
|
||||
|
||||
# if len(self._drawn_points) == 0:
|
||||
# # create origin
|
||||
# self._drawn_points.append(self.points[0])
|
||||
# # and drawing head
|
||||
# self._drawn_points.append(self.points[0])
|
||||
|
||||
# idx = len(self._drawn_points) - 1
|
||||
# target = self.points[idx]
|
||||
|
||||
# if np.isclose(self._drawn_points[-1], target, atol=.05).all():
|
||||
# # TODO: might want to migrate to np.isclose()
|
||||
# if len(self._drawn_points) == len(self.points):
|
||||
# self.ready = True
|
||||
# return # done until a new point is added
|
||||
# # add new point as drawing head
|
||||
# self._drawn_points.append(self._drawn_points[-1])
|
||||
# self.ready = False
|
||||
|
||||
# x = exponentialDecayRounded(self._drawn_points[-1][0], target[0], self.DRAW_DECAY_SPEED, dt, .05)
|
||||
# y = exponentialDecayRounded(self._drawn_points[-1][1], target[1], self.DRAW_DECAY_SPEED, dt, .05)
|
||||
# self._drawn_points[-1] = (float(x), float(y))
|
||||
|
||||
# if not self.finished or not self.line.ready:
|
||||
self.line.update_drawn_positions(dt)
|
||||
|
@ -256,69 +263,6 @@ class DiffSegment():
|
|||
return RenderableLines([])
|
||||
|
||||
|
||||
class DiffSegmentScan(DiffSegment):
|
||||
"""
|
||||
Provide alternative diffing, in the form of a sort of scan line
|
||||
Should be faster with the laser
|
||||
TODO: This is work in progress, does not work yet!
|
||||
"""
|
||||
|
||||
def __init__(self, prediction: ProjectedTrack):
|
||||
self.ptrack = prediction
|
||||
self._target_track = prediction
|
||||
self.finished = False
|
||||
self._last_diff_frame_idx = 0
|
||||
|
||||
def finish(self):
|
||||
self.finished = True
|
||||
|
||||
def prediction_offset(self):
|
||||
"""Difference is starting moment between track and prediction"""
|
||||
return self.ptrack.frame_index - self._target_track.frame_index
|
||||
|
||||
def nr_of_passed_points(self):
|
||||
"""Number of points of the given ptrack that have passed"""
|
||||
return len(self._target_track.projected_history) - 1 - self.prediction_offset()
|
||||
# len(self.points) * self.POINT_INTERVAL
|
||||
|
||||
# run on each track update received
|
||||
def update_track(self, track: ProjectedTrack):
|
||||
self._target_track = track
|
||||
|
||||
if self.finished:
|
||||
# don't add new points if finished
|
||||
return
|
||||
|
||||
start_frame_idx = max(self.ptrack.frame_index, self._last_diff_frame_idx)
|
||||
traj_diff_steps_back = track.frame_index - start_frame_idx # positive value
|
||||
pred_diff_steps_forward = start_frame_idx - self.ptrack.frame_index # positive value
|
||||
self._last_diff_frame_idx = track.frame_index
|
||||
|
||||
# run each render tick
|
||||
def update_drawn_positions(self, dt: DeltaT, scenario: DrawnScenario):
|
||||
# if not self.finished or not self.line.ready:
|
||||
# self.line.update_drawn_positions(dt)
|
||||
pass # TODO: use easing
|
||||
|
||||
|
||||
|
||||
def as_renderable(self) -> RenderableLines:
|
||||
if self.finished:
|
||||
return RenderableLines([])
|
||||
color = SrgbaColor(0,0,1,1)
|
||||
# steps_diff = self.nr_of_passed_points()
|
||||
idx = self.nr_of_passed_points()
|
||||
if len(self.ptrack.predictions[0]) < idx+1:
|
||||
self.finish()
|
||||
return RenderableLines([])
|
||||
points = [self._target_track.projected_history[-1], self.ptrack.predictions[0][idx]]
|
||||
|
||||
points = [RenderablePoint(pos, color) for pos in points]
|
||||
line = RenderableLine(points)
|
||||
|
||||
return RenderableLines([line])
|
||||
|
||||
|
||||
class ScenarioScene(Enum):
|
||||
DETECTED = 1
|
||||
FIRST_PREDICTION = 2
|
||||
|
@ -334,8 +278,8 @@ PREDICTION_FADE_SLOPE: float = -10
|
|||
PREDICTION_FADE_AFTER_DURATION: float = 10 # seconds
|
||||
PREDICTION_END_FADE = 2 #frames
|
||||
# TRACK_MAX_POINTS = 100
|
||||
TRACK_FADE_AFTER_DURATION = 8. # seconds
|
||||
TRACK_END_FADE = 30 # points
|
||||
TRACK_FADE_AFTER_DURATION = 10. # seconds
|
||||
TRACK_END_FADE = 50 # points
|
||||
TRACK_FADE_ASSUME_FPS = 12
|
||||
|
||||
# Don't render the first n points of the prediction,
|
||||
|
@ -359,7 +303,6 @@ class TrackScenario(StateMachine):
|
|||
|
||||
receive_prediction = detected.to(first_prediction) | substantial.to(first_prediction) | first_prediction.to(corrected_prediction, cond="prediction_is_stale") | corrected_prediction.to(play, cond="prediction_is_playing")
|
||||
|
||||
|
||||
def __init__(self):
|
||||
self.track: ProjectedTrack = None
|
||||
self.camera: Optional[Camera] = None
|
||||
|
@ -392,7 +335,7 @@ class TrackScenario(StateMachine):
|
|||
return False
|
||||
|
||||
def check_lost(self):
|
||||
if self.current_state is not self.lost and self.track and self.track.updated_at < time.time() - 5:
|
||||
if self.current_state is not self.lost and self.track and self.track.created_at < time.time() - 5:
|
||||
self.mark_lost()
|
||||
|
||||
def set_track(self, track: ProjectedTrack):
|
||||
|
@ -435,17 +378,12 @@ class TrackScenario(StateMachine):
|
|||
# just drop tracks if the predictions come to quick
|
||||
return
|
||||
|
||||
if track._track.predictions is None or not len(track._track.predictions):
|
||||
# don't count to predictions if no prediction is set of given track (e.g. young tracks)
|
||||
return
|
||||
|
||||
|
||||
self.predictions.append(track)
|
||||
if len(self.prediction_diffs):
|
||||
self.prediction_diffs[-1].finish() # existing diffing can end
|
||||
# and create a new one
|
||||
self.prediction_diffs.append(DiffSegment(track))
|
||||
# self.prediction_diffs.append(DiffSegmentScan(track))
|
||||
|
||||
# check to change state
|
||||
try:
|
||||
|
@ -520,8 +458,8 @@ class DrawnScenario(TrackScenario):
|
|||
"""
|
||||
|
||||
ANOMALY_DECAY = .2 # speed with which the cirlce shrinks over time
|
||||
DISTANCE_ANOMALY_FACTOR = .03 # the ammount to which the difference counts to the anomaly score
|
||||
MAX_HISTORY = 100 # points of history of trajectory to display (preventing too long lines)
|
||||
DISTANCE_ANOMALY_FACTOR = .05 # the ammount to which the difference counts to the anomaly score
|
||||
MAX_HISTORY = 80 # points of history of trajectory to display (preventing too long lines)
|
||||
CUT_GAP = 5 # when adding a new prediction, keep the existing prediction until that point + this CUT_GAP margin
|
||||
|
||||
def __init__(self):
|
||||
|
@ -529,7 +467,6 @@ class DrawnScenario(TrackScenario):
|
|||
# self.track_id = track_id
|
||||
self.last_update_t = time.perf_counter()
|
||||
|
||||
self.drawn_position: Optional[Coordinate] = None
|
||||
self.drawn_positions: List[Coordinate] = []
|
||||
self.drawn_pred_history: List[Coordinate] = []
|
||||
self.drawn_predictions: List[List[Coordinate]] = []
|
||||
|
@ -582,39 +519,29 @@ class DrawnScenario(TrackScenario):
|
|||
# 1. track history, direct update
|
||||
|
||||
# positions = self._track.get_projected_history(None, self.camera)[-MAX_HISTORY:]
|
||||
# self.drawn_positions = self.track.projected_history[-self.MAX_HISTORY:]
|
||||
self.drawn_positions = self.track.projected_history
|
||||
if self.drawn_position is None:
|
||||
self.drawn_position = self.drawn_positions[-1]
|
||||
else:
|
||||
self.drawn_position[0] = exponentialDecay(self.drawn_position[0], self.drawn_positions[-1][0], 3, dt)
|
||||
self.drawn_position[1] = exponentialDecay(self.drawn_position[1], self.drawn_positions[-1][1], 3, dt)
|
||||
self.drawn_positions = self.track.projected_history[-self.MAX_HISTORY:]
|
||||
|
||||
# 3. predictions
|
||||
if len(self.drawn_predictions) < len(self.predictions):
|
||||
# first prediction
|
||||
if len(self.drawn_predictions) == 0:
|
||||
last_pred = self.predictions[-1]
|
||||
self.drawn_predictions.append(last_pred.predictions[0])
|
||||
self.drawn_predictions.append(self.predictions[-1].predictions[0])
|
||||
else:
|
||||
# if a new prediction has arised, transition from existing one.
|
||||
# First, cut existing prediction
|
||||
# CUT_GAP indicates that some is lost in the transition, to prevent glitches when velocity of person changes
|
||||
# cut existing prediction
|
||||
end_step = self.predictions[-1].frame_index - self.predictions[-2].frame_index + self.CUT_GAP
|
||||
# print(end_step)
|
||||
keep = self.drawn_predictions[-1][end_step:]
|
||||
last_item: Coordinate = (keep)[-1]
|
||||
self.drawn_predictions[-1] = self.drawn_predictions[-1][:end_step] # cut the old part
|
||||
last_item: Coordinate = keep[-1]
|
||||
self.drawn_predictions[-1] = self.drawn_predictions[-1][:end_step]
|
||||
# print(self.predictions[-1].frame_index, self.predictions[-2].frame_index, end_step, len(keep))
|
||||
# duplicate last item, so the new one has the same nr. of points as the incoming prediction (so it can actually transition)
|
||||
ext = [last_item] * (len(self.predictions[-1].predictions[0]) - len(keep))
|
||||
# print(ext)
|
||||
keep.extend(ext)
|
||||
self.drawn_predictions.append(keep)
|
||||
|
||||
for a, drawn_prediction in enumerate(self.drawn_predictions):
|
||||
# origin = self.predictions[a].predictions[0][0]
|
||||
origin = self.predictions[a].predictions[0][0]
|
||||
# associated_diff = self.prediction_diffs[a]
|
||||
# progress = associated_diff.nr_of_passed_points()
|
||||
for i, pos in enumerate(drawn_prediction):
|
||||
# TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
|
||||
decay = max(3, (18/i) if i else 10) # points further away move with more delay
|
||||
|
@ -691,7 +618,7 @@ class DrawnScenario(TrackScenario):
|
|||
|
||||
def to_renderable_lines(self) -> RenderableLines:
|
||||
t = time.time()
|
||||
track_age = t - self.track.updated_at # Should be beginning
|
||||
track_age = t - self.track.created_at
|
||||
lines = RenderableLines([])
|
||||
|
||||
|
||||
|
@ -700,44 +627,18 @@ class DrawnScenario(TrackScenario):
|
|||
# track_max_points = TRACK_FADE_AFTER_DURATION * TRACK_FADE_ASSUME_FPS - track_age_in_frames
|
||||
|
||||
# 1. Trajectory history
|
||||
# drawable_points, alphas = self.drawn_positions[:self.MAX_HISTORY], [1]*len(self.drawn_positions)
|
||||
|
||||
# perlin/simplex noise
|
||||
# dt: change speed. Divide to make slower
|
||||
# amp: amplitude of noise
|
||||
# frequency: make smaller to make longer waves
|
||||
noisy_points = apply_perlin_noise_to_line_normal(self.drawn_positions, t/5, .3, .02)
|
||||
drawable_points, alphas = points_fade_out_alpha_mask(noisy_points, track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE)
|
||||
color = SrgbaColor(1.,0.,1.,1.-self.lost_factor())
|
||||
|
||||
# TODO: effect configuration
|
||||
|
||||
|
||||
drawable_points, alphas = points_fade_out_alpha_mask(self.drawn_positions, track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE)
|
||||
color = SrgbaColor(1.,0.,0.,1.-self.lost_factor())
|
||||
points = [RenderablePoint(pos, color.as_faded(a)) for pos, a in zip(drawable_points, alphas)]
|
||||
# points = [RenderablePoint(pos, color.as_faded(a)) for pos, a in zip(drawable_points, alphas)]
|
||||
|
||||
lines.append(RenderableLine(points))
|
||||
|
||||
# 2. Position Marker / anomaly score
|
||||
|
||||
anomaly_marker_color = SrgbaColor(0.,0.,1, 1.-self.lost_factor()) # fadeout
|
||||
# lines.append(circle_arc(self.drawn_positions[-1][0], self.drawn_positions[-1][1], 1, t, self.anomaly_score, anomaly_marker_color))
|
||||
# last point, (but this draws line in circle, requiring a 'jump back' for the laser)
|
||||
cx, cy = self.drawn_positions[-1][0], self.drawn_positions[-1][1],
|
||||
|
||||
radius = max(.1, self._drawn_anomaly_score * 1.) if OPTION_GROW_ANOMALY_CIRCLE else .1
|
||||
|
||||
steps=5
|
||||
if len(self.drawn_positions) >= steps:
|
||||
dx, dy = self.drawn_positions[-1][0] - self.drawn_positions[-steps][0], self.drawn_positions[-1][1] - self.drawn_positions[-steps][1],
|
||||
diff = np.array([dx,dy])
|
||||
diff = diff/np.linalg.norm(diff) * radius * 1.1
|
||||
cx += diff[0]
|
||||
cy += diff[1]
|
||||
|
||||
lines.append(circle_arc(
|
||||
cx, cy,
|
||||
radius,
|
||||
self.drawn_positions[-1][0], self.drawn_positions[-1][1],
|
||||
max(.1, self._drawn_anomaly_score * 1.),
|
||||
0, 1,
|
||||
anomaly_marker_color)
|
||||
)
|
||||
|
@ -748,11 +649,7 @@ class DrawnScenario(TrackScenario):
|
|||
prediction_track_age = time.time() - self.predictions[0].created_at
|
||||
t_factor = prediction_track_age / PREDICTION_FADE_IN
|
||||
# positions = [RenderablePosition.from_list(pos) for pos in self.drawn_positions]
|
||||
for a, drawn_prediction in enumerate(self.drawn_predictions):
|
||||
|
||||
|
||||
associated_diff = self.prediction_diffs[a]
|
||||
progress = associated_diff.nr_of_passed_points()
|
||||
for drawn_prediction in self.drawn_predictions:
|
||||
|
||||
# drawn_prediction, alphas1 = points_fade_out_alpha_mask(drawn_prediction, prediction_track_age, TRACK_FADE_AFTER_DURATION, TRACK_END_FADE, no_frame_max=True)
|
||||
|
||||
|
@ -771,7 +668,6 @@ class DrawnScenario(TrackScenario):
|
|||
|
||||
# points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_prediction[PREDICTION_OFFSET:], colors[PREDICTION_OFFSET:])]
|
||||
points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_prediction, colors)]
|
||||
points = points[progress//2:]
|
||||
lines.append(RenderableLine(points))
|
||||
|
||||
# 4. Diffs
|
||||
|
@ -780,8 +676,6 @@ class DrawnScenario(TrackScenario):
|
|||
# colors = [color.as_faded(1) for a2 in range(len(drawn_diff))]
|
||||
# points = [RenderablePoint(pos, pos_color) for pos, pos_color in zip(drawn_diff, colors)]
|
||||
# lines.append(RenderableLine(points))
|
||||
|
||||
if OPTION_RENDER_DIFF_SEGMENT:
|
||||
for diff in self.prediction_diffs:
|
||||
lines.append_lines(diff.as_renderable())
|
||||
|
||||
|
@ -970,12 +864,10 @@ class Stage(Node):
|
|||
# rl = RenderableLines(lines)
|
||||
# with open('/tmp/lines.pcl', 'wb') as fp:
|
||||
# pickle.dump(rl, fp)
|
||||
rl = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
|
||||
rl = lines.as_simplified() # or segmentise (see shapely)
|
||||
self.counter.set("stage.lines", len(lines.lines))
|
||||
self.counter.set("stage.points_orig", lines.point_count())
|
||||
self.counter.set("stage.points", rl.point_count())
|
||||
# print(rl.__dict__)
|
||||
self.stage_sock.send_json(obj=rl, cls=DataclassJSONEncoder)
|
||||
self.stage_sock.send_json(rl, cls=DataclassJSONEncoder)
|
||||
|
||||
# print(json.dumps(rl, cls=DataclassJSONEncoder))
|
||||
|
||||
|
@ -997,89 +889,3 @@ class Stage(Node):
|
|||
return argparser
|
||||
|
||||
|
||||
|
||||
|
||||
# TODO place somewhere else:
# Gemma3:27b prompt: "python. Given a list of coordinates, that describes a line: `drawable_points: List[Tuple[float,float]]` apply perlin noise over the normal of the line, that changes over time `dt`."
def apply_perlin_noise_to_line_normal(drawable_points: np.ndarray, dt: float, amplitude: float = 1.0, frequency: float = 1.0) -> np.ndarray:
    """
    Applies Perlin noise to the normals of a line described by a list of coordinates, changing over time.

    Args:
        drawable_points: A list of (x, y) tuples representing the points of the line.
        dt: The time delta, used to animate the Perlin noise.
        amplitude: The strength of the Perlin noise effect.
        frequency: The frequency of the Perlin noise (how many waves per unit).

    Returns:
        A new list of (x, y) tuples representing the line with Perlin noise applied to the normals. If drawable_points
        has fewer than 2 points, it returns the original list unchanged.

    Raises:
        TypeError: If drawable_points is not a list or dt is not a float.
        ValueError: If the input points are not tuples of length 2.
    """

    # if not isinstance(drawable_points, list):
    #     print(drawable_points, type(drawable_points))
    #     raise TypeError("drawable_points must be a list.")
    if not isinstance(dt, float):
        raise TypeError("dt must be a float.")

    if len(drawable_points) < 2:
        return drawable_points  # Nothing to do with fewer than 2 points

    # for point in drawable_points:
    #     if not isinstance(point, tuple) or len(point) != 2:
    #         raise ValueError("Each point in drawable_points must be a tuple of length 2.")

    # noise = PerlinNoise(octaves=4)  # You can adjust octaves for different noise patterns

    new_points = []
    for i in range(len(drawable_points)):
        x, y = drawable_points[i]

        # Calculate the normal vector. We'll approximate it using the previous and next points.
        if i == 0:
            # For the first point, use the next point to estimate the normal
            next_x, next_y = drawable_points[i + 1]
            normal_x = next_y - y
            normal_y = -(next_x - x)
        elif i == len(drawable_points) - 1:
            # For the last point, use the previous point
            prev_x, prev_y = drawable_points[i - 1]
            normal_x = y - prev_y
            normal_y = -(x - prev_x)
        else:
            prev_x, prev_y = drawable_points[i - 1]
            next_x, next_y = drawable_points[i + 1]
            normal_x = next_y - prev_y
            normal_y = -(next_x - prev_x)

        # Normalize the normal vector
        norm = np.sqrt(normal_x**2 + normal_y**2)
        if norm > 0:
            normal_x /= norm
            normal_y /= norm

        # Apply Perlin noise to the normal
        # noise_x = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_x
        # noise_y = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_y
        noise = snoise2(i * frequency, dt % 1000, octaves=4)

        noise_x = noise * amplitude * normal_x
        noise_y = noise * amplitude * normal_y

        # print(noise_x, noise_y, dt, frequency, i, dt, snoise2(i * frequency, dt % 1000, octaves=4))

        # Add the noise to the point's coordinates
        new_x = x + noise_x
        new_y = y + noise_y

        new_points.append((new_x, new_y))

    # print(drawable_points, new_points)

    return np.array(new_points)
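A quick way to exercise the function above (not from the diff): feed it a straight line and advance dt; points drift along their normals as the simplex-noise field animates. Assumes numpy and snoise2 (from the noise package pinned in uv.lock below) are importable as in the module:

import numpy as np
from noise import snoise2

line = np.array([(float(x), 0.0) for x in range(10)])
for step in range(3):
    # on a horizontal line the normals are vertical, so only y moves
    wobbled = apply_perlin_noise_to_line_normal(line, dt=step * 0.1, amplitude=0.5, frequency=0.2)
    print(wobbled[0], wobbled[-1])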
@@ -16,7 +16,7 @@ from trap.preview_renderer import DrawnTrack
import trap.tracker
from trap.config import parser
from trap.frame_emitter import Camera, Detection, DetectionState, video_src_from_config, Frame
from trap.tracker import DETECTOR_YOLOv8, FinalDisplacementFilter, Smoother, TrackReader, _ultralytics_track, Track, TrainingDataWriter, Tracker, read_tracks_json
from trap.tracker import DETECTOR_YOLOv8, FinalDisplacementFilter, Smoother, TrackReader, _yolov8_track, Track, TrainingDataWriter, Tracker, read_tracks_json
from collections import defaultdict

import logging
@@ -461,12 +461,9 @@ def draw_track_projected(img: cv2.Mat, track: Track, color_index: int, camera: C
    for j in range(len(history)-1):
        # a = history[j]
        b = history[j+1]
        detection = track.history[j+1]

        color = point_color if detection.state == DetectionState.Confirmed else (100,100,100)

        # cv2.line(img, to_point(a), to_point(b), point_color, 1)
        cv2.circle(img, to_point(b), 3, color, 2)
        cv2.circle(img, to_point(b), 3, point_color, 2)


def draw_track(img: cv2.Mat, track: Track, color_index: int):
@@ -28,14 +28,12 @@ from torchvision.models.detection import (FasterRCNN_ResNet50_FPN_V2_Weights,
                                          keypointrcnn_resnet50_fpn,
                                          maskrcnn_resnet50_fpn_v2)
from tsmoothie.smoother import ConvolutionSmoother, KalmanSmoother
from ultralytics import YOLO, RTDETR
from ultralytics.engine.model import Model as UltralyticsModel
from ultralytics.engine.results import Results as UltralyticsResult
from ultralytics import YOLO
from ultralytics.engine.results import Results as YOLOResult

from trap import timer
from trap.frame_emitter import (Camera, DataclassJSONEncoder, Detection,
                                DetectionState, Frame, Track)
from trap.gemma import ImgMovementFilter
from trap.node import Node

# Detection = [int, int, int, int, float, int]
@@ -53,12 +51,11 @@ DETECTOR_RETINANET = 'retinanet'
DETECTOR_MASKRCNN = 'maskrcnn'
DETECTOR_FASTERRCNN = 'fasterrcnn'
DETECTOR_YOLOv8 = 'ultralytics'
DETECTOR_RTDETR = 'rtdetr'

TRACKER_DEEPSORT = 'deepsort'
TRACKER_BYTETRACK = 'bytetrack'

DETECTORS = [DETECTOR_RETINANET, DETECTOR_MASKRCNN, DETECTOR_FASTERRCNN, DETECTOR_YOLOv8, DETECTOR_RTDETR]
DETECTORS = [DETECTOR_RETINANET, DETECTOR_MASKRCNN, DETECTOR_FASTERRCNN, DETECTOR_YOLOv8]
TRACKERS =[TRACKER_DEEPSORT, TRACKER_BYTETRACK]

TRACKER_CONFIDENCE_MINIMUM = .2
@@ -66,9 +63,9 @@ TRACKER_BYTETRACK_MINIMUM = .1 # bytetrack can track items iwth lower thershold
NON_MAXIMUM_SUPRESSION = 1
RCNN_SCALE = .4 # seems to have no impact on detections in the corners

def _ultralytics_track(img: cv2.Mat, frame_idx: int, model: UltralyticsModel, **kwargs) -> List[Detection]:
def _yolov8_track(frame: Frame, model: YOLO, **kwargs) -> List[Detection]:

    results: List[UltralyticsResult] = list(model.track(img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.000001, **kwargs))
    results: List[YOLOResult] = list(model.track(frame.img, persist=True, tracker="custom_bytetrack.yaml", verbose=False, conf=0.00001, **kwargs))

    if results[0].boxes is None or results[0].boxes.id is None:
        # work around https://github.com/ultralytics/ultralytics/issues/5968
@@ -77,7 +74,7 @@ def _ultralytics_track(img: cv2.Mat, frame_idx: int, model: UltralyticsModel, **
    boxes = results[0].boxes.xywh.cpu()
    track_ids = results[0].boxes.id.int().cpu().tolist()
    classes = results[0].boxes.cls.int().cpu().tolist()
    return [Detection(track_id, bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3], bbox[2], bbox[3], 1, DetectionState.Confirmed, frame_idx, class_id) for bbox, track_id, class_id in zip(boxes, track_ids, classes)]
    return [Detection(track_id, bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3], bbox[2], bbox[3], 1, DetectionState.Confirmed, frame.index, class_id) for bbox, track_id, class_id in zip(boxes, track_ids, classes)]

class Multifile():
    def __init__(self, srcs: List[Path]):
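Aside on the Detection construction in both variants above (not from the diff): ultralytics' boxes.xywh is centre-based, so bbox[0]-.5*bbox[2], bbox[1]-.5*bbox[3] converts the centre to the top-left corner that Detection stores alongside width and height. With invented numbers:

import numpy as np

bbox = np.array([320.0, 240.0, 100.0, 50.0])  # cx, cy, w, h as in boxes.xywh
left, top = bbox[0] - .5 * bbox[2], bbox[1] - .5 * bbox[3]
print(left, top, bbox[2], bbox[3])  # 270.0 215.0 100.0 50.0 -> l, t, w, h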
@@ -398,8 +395,6 @@ class Tracker(Node):
        # # TODO: config device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.frame_preprocess = ImgMovementFilter()

        # TODO: support removal
        self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())
@@ -441,15 +436,7 @@ class Tracker(Node):
            self.mot_tracker = TrackerWrapper.init_type(self.config.tracker)
        elif self.config.detector == DETECTOR_YOLOv8:
            # self.model = YOLO('EXPERIMENTS/yolov8x.pt')
            # best from arsen:
            # self.model = YOLO('./tracker/all_yolo11-2-20-15-41/weights')
            # self.model = YOLO('models/yolo11x-pose.pt')
            # self.model = YOLO("models/yolo12l.pt")
            self.model = YOLO("models/yolo12x.pt")
            # NOTE: changing the model, also tweak imgsz in
        elif self.config.detector == DETECTOR_RTDETR:
            # self.model = RTDETR('models/rtdetr-x.pt') # drops frames
            self.model = RTDETR('models/rtdetr-l.pt') # somewhat less good in corners, but less frame dropping == better tracking
            self.model = YOLO('yolo11x.pt')
        else:
            raise RuntimeError(f"{self.config.detector} is not implemented yet. See --help")
@@ -468,22 +455,14 @@ class Tracker(Node):

        self.frame_sock = self.sub(self.config.zmq_frame_addr)
        self.trajectory_socket = self.pub(self.config.zmq_trajectory_addr)
        self.detection_socket = self.pub(self.config.zmq_detection_addr)

        logger.debug("Set up tracker")

    def track_frame(self, frame: Frame):
        det_img = frame.img
        # det_img = self.frame_preprocess.apply(frame.img)

        if self.config.detector in [DETECTOR_YOLOv8, DETECTOR_RTDETR]:
            # both ultralytics
            detections: List[Detection] = _ultralytics_track(det_img, frame.index, self.model, classes=[0, 15, 16], imgsz=self.config.imgsz)
        if self.config.detector == DETECTOR_YOLOv8:
            detections: List[Detection] = _yolov8_track(frame, self.model, classes=[0, 15, 16], imgsz=[1152, 640])
        else :
            detections: List[Detection] = self._resnet_track(det_img, frame.index, scale = RCNN_SCALE)

        # emit raw detections
        self.detection_socket.send_pyobj(detections)
        detections: List[Detection] = self._resnet_track(frame, scale = RCNN_SCALE)

        for detection in detections:
            track = self.tracks[detection.track_id]
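Context, not from the diff: the main-branch side of this hunk publishes the raw detections with send_pyobj before tracking state is updated. A consumer is then a plain zmq SUB socket; a minimal sketch, assuming the default ipc:///tmp/feeds_dets address from the --zmq-detection-addr argument below:

import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect("ipc:///tmp/feeds_dets")
sock.setsockopt(zmq.SUBSCRIBE, b"")  # no topic filtering

while True:
    detections = sock.recv_pyobj()  # the pickled List[Detection] for one frame
    print(len(detections), "detections")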
@@ -497,6 +476,7 @@ class Tracker(Node):

        return detections


    def run(self):
        """
        Live tracking of frames coming in over zmq
@@ -631,12 +611,13 @@ class Tracker(Node):
        logger.info('Stopping')


    def _resnet_track(self, img: cv2.Mat, frame_idx: int, scale: float = 1) -> List[Detection]:
    def _resnet_track(self, frame: Frame, scale: float = 1) -> List[Detection]:
        img = frame.img
        if scale != 1:
            dsize = (int(img.shape[1] * scale), int(img.shape[0] * scale))
            img = cv2.resize(img, dsize)
        detections = self._resnet_detect_persons(img)
        tracks: List[Detection] = self.mot_tracker.track_detections(detections, img, frame_idx)
        tracks: List[Detection] = self.mot_tracker.track_detections(detections, img, frame.index)
        # active_tracks = [t for t in tracks if t.is_confirmed()]
        return [d.get_scaled(1/scale) for d in tracks]
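Reviewer note, not from the diff: detection runs on a frame downscaled by RCNN_SCALE = .4, and get_scaled(1/scale) maps each detection back to full-resolution pixel coordinates. The round trip with plain numbers:

scale = 0.4
x_small = 100.0                 # x as found on the resized image
x_full = x_small * (1 / scale)  # what get_scaled(1/scale) effectively applies
print(x_full)                   # 250.0, back in original-frame pixels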
@@ -699,11 +680,6 @@ class Tracker(Node):
                               type=str,
                               default="ipc:///tmp/feeds_traj")

        argparser.add_argument('--zmq-detection-addr',
                               help='Manually specity communication addr for the detection messages',
                               type=str,
                               default="ipc:///tmp/feeds_dets")

        argparser.add_argument("--save-for-training",
                               help="Specify the path in which to save",
                               type=Path,
@@ -721,10 +697,6 @@ class Tracker(Node):
        argparser.add_argument("--smooth-tracks",
                               help="Smooth the tracker tracks before sending them to the predictor",
                               action='store_true')
        argparser.add_argument("--imgsz",
                               help="Detector imgsz parameter (applicable to ultralytics detectors)",
                               type=int,
                               default=960)
        return argparser
@@ -108,6 +108,8 @@ class GigE(VideoSource):

        return img[tl[1]:br[1],tl[0]:br[0],:]



class SingleCvVideoSource(VideoSource):
    def recv(self):
        while True:
@@ -124,10 +126,7 @@ class SingleCvVideoSource(VideoSource):

class RtspSource(SingleCvVideoSource):
    def __init__(self, video_url: str | Path, camera: Camera = None):
        # keep max 1 frame in app-buffer (0 = unlimited)
        # When using gstreamer 1.28 drop=true is deprecated, use: leaky-type=2 which frame to drop: https://gstreamer.freedesktop.org/documentation/applib/gstappsrc.html?gi-language=c

        gst = f"rtspsrc location={video_url} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
        gst = f"rtspsrc location={video_url} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=0 drop=true"
        logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
        self.video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
        self.frame_idx = 0
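Not from the diff: to compare the two appsink settings above for latency, a bare read loop over the same pipeline is enough. A sketch that assumes OpenCV was built with GStreamer support and that the RTSP URL is reachable:

import cv2

gst = ("rtspsrc location=rtsp://example.local/stream latency=0 buffer-mode=auto "
       "! decodebin ! videoconvert ! appsink max-buffers=1 drop=true")
video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
while True:
    ok, img = video.read()  # with drop=true, stale frames are discarded upstream
    if not ok:
        break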
@@ -212,7 +211,7 @@ class CameraSource(SingleCvVideoSource):
        self.video.set(cv2.CAP_PROP_FPS, self.camera.fps)
        self.frame_idx = 0

def get_video_source(video_sources: List[UrlOrPath], camera: Optional[Camera] = None, frame_offset=0, frame_end:Optional[int]=None, loop=False):
def get_video_source(video_sources: List[UrlOrPath], camera: Camera, frame_offset=0, frame_end:Optional[int]=None, loop=False):

    if str(video_sources[0]).isdigit():
        # numeric input is a CV camera
@@ -233,7 +232,3 @@ def get_video_source(video_sources: List[UrlOrPath], camera: Optional[Camera] =
        return FilelistSource(video_sources, offset = frame_offset, end=frame_end, loop=loop)
    # os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "fflags;nobuffer|flags;low_delay|avioflags;direct|rtsp_transport;udp"


def get_video_source_from_str(video_sources: List[str]):
    paths = [UrlOrPath(s) for s in video_sources]
    return get_video_source(paths)
66
uv.lock
@@ -1300,12 +1300,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2e/5e/cb3dbdf3ae18e281b8b1b4691bb5d3465b383e04bde2c2a782c893f1ee21/nicegui-2.13.0-py3-none-any.whl", hash = "sha256:2343d37885df2c2e388a4f4c3f0ce9b308be02e16b0303108471a1a38fe3508f", size = 16482500 },
]

[[package]]
name = "noise"
version = "1.2.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/18/29/bb830ee6d934311e17a7a4fa1368faf3e73fbb09c0d80fc44e41828df177/noise-1.2.2.tar.gz", hash = "sha256:57a2797436574391ff63a111e852e53a4164ecd81ad23639641743cd1a209b65", size = 125615 }

[[package]]
name = "notebook"
version = "7.3.3"
@@ -1830,11 +1824,12 @@ wheels = [

[[package]]
name = "pywin32"
version = "306"
version = "310"
source = { registry = "https://pypi.org/simple" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/08/dc/28c668097edfaf4eac4617ef7adf081b9cf50d254672fcf399a70f5efc41/pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d", size = 8506422 },
    { url = "https://files.pythonhosted.org/packages/d3/d6/891894edec688e72c2e308b3243fad98b4066e1839fd2fe78f04129a9d31/pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8", size = 9226392 },
    { url = "https://files.pythonhosted.org/packages/95/da/a5f38fffbba2fb99aa4aa905480ac4b8e83ca486659ac8c95bce47fb5276/pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1", size = 8848240 },
    { url = "https://files.pythonhosted.org/packages/aa/fe/d873a773324fa565619ba555a82c9dabd677301720f3660a731a5d07e49a/pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d", size = 9601854 },
    { url = "https://files.pythonhosted.org/packages/3c/84/1a8e3d7a15490d28a5d816efa229ecb4999cdc51a7c30dd8914f669093b8/pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213", size = 8522963 },
]

[[package]]
@@ -2196,20 +2191,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 },
]

[[package]]
name = "superfsmon"
version = "1.2.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "supervisor", marker = "sys_platform != 'win32'" },
    { name = "supervisor-win", marker = "sys_platform == 'win32'" },
    { name = "watchdog" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e9/c2/269264babce3c29f5721cdb7c79ab4930562b67786bb6e5cc838e36e3530/superfsmon-1.2.3.tar.gz", hash = "sha256:fe5918872dc258eacff98cd054b28b73531f9897f72f8583fb2bbd448fc33928", size = 5186 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/bd/c5/d8fbf5c3901db69f7b1e25708fc865570712264026d06f75c5d535ec4ab1/superfsmon-1.2.3-py3-none-any.whl", hash = "sha256:da798e2a2c260fa633213df9f2f26d504fe234f78886e5f62ae4d81f0130bdf7", size = 4738 },
]

[[package]]
name = "supervisor"
version = "4.2.5"
@@ -2222,18 +2203,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/2c/7a/0ad3973941590c040475046fef37a2b08a76691e61aa59540828ee235a6e/supervisor-4.2.5-py2.py3-none-any.whl", hash = "sha256:2ecaede32fc25af814696374b79e42644ecaba5c09494c51016ffda9602d0f08", size = 319561 },
]

[[package]]
name = "supervisor-win"
version = "4.7.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "pywin32", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/98/48/5d6cd1b7492bf2c11452fd638de45519d2c103caed70c5bdb4ecebbac568/supervisor-win-4.7.0.tar.gz", hash = "sha256:c474d92edc7050b55adae2f7c7789d5d69f180dee7868a27673b1d38f8bea484", size = 397342 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/69/4d/3a493f15f5b80608857ef157f382ace494f51d9031e6bee6082437dd1403/supervisor_win-4.7.0-py2.py3-none-any.whl", hash = "sha256:bd98554c2a0878704c3f3fd95e38965d9986eae6a2ad29f34d73d0aee138a481", size = 303996 },
]

[[package]]
name = "tensorboard"
version = "2.19.0"
@@ -2528,7 +2497,6 @@ dependencies = [
    { name = "gdown" },
    { name = "ipywidgets" },
    { name = "jsonlines" },
    { name = "noise" },
    { name = "opencv-python" },
    { name = "pandas-helper-calc" },
    { name = "pyglet" },
@@ -2539,7 +2507,6 @@ dependencies = [
    { name = "setproctitle" },
    { name = "shapely" },
    { name = "simplification" },
    { name = "superfsmon" },
    { name = "supervisor" },
    { name = "tensorboardx" },
    { name = "torch", version = "1.12.1", source = { registry = "https://pypi.org/simple" }, marker = "sys_platform != 'linux'" },
@@ -2563,7 +2530,6 @@ requires-dist = [
    { name = "gdown", specifier = ">=4.7.1,<5" },
    { name = "ipywidgets", specifier = ">=8.1.5,<9" },
    { name = "jsonlines", specifier = ">=4.0.0,<5" },
    { name = "noise", specifier = ">=1.2.2" },
    { name = "opencv-python", path = "opencv_python-4.10.0.84-cp310-cp310-linux_x86_64.whl" },
    { name = "pandas-helper-calc", git = "https://github.com/scls19fr/pandas-helper-calc" },
    { name = "pyglet", specifier = ">=2.0.15,<3" },
@@ -2574,7 +2540,6 @@ requires-dist = [
    { name = "setproctitle", specifier = ">=1.3.3,<2" },
    { name = "shapely", specifier = ">=2.1" },
    { name = "simplification", specifier = ">=0.7.12" },
    { name = "superfsmon", specifier = ">=1.2.3" },
    { name = "supervisor", specifier = ">=4.2.5" },
    { name = "tensorboardx", specifier = ">=2.6.2.2,<3" },
    { name = "torch", marker = "python_full_version < '3.10' or python_full_version >= '4' or sys_platform != 'linux'", specifier = "==1.12.1" },
@@ -2739,29 +2704,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/a6/3d/7b22abbdb059d551507275a2815bc2b1974e3b9f6a13781c1eac9e858965/vbuild-0.8.2-py2.py3-none-any.whl", hash = "sha256:d76bcc976a1c53b6a5776ac947606f9e7786c25df33a587ebe33ed09dd8a1076", size = 9371 },
]

[[package]]
name = "watchdog"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220 }
wheels = [
    { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390 },
    { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389 },
    { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020 },
    { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902 },
    { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380 },
    { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079 },
    { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076 },
    { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077 },
    { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077 },
    { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078 },
    { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065 },
    { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070 },
    { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067 },
]

[[package]]
name = "watchfiles"
version = "1.0.4"