Compare commits: betterprot...main
15 commits

| SHA1 |
|---|
| 8dcf70959b |
| bd7f0cf2f3 |
| 509ad16733 |
| a387cae62c |
| 3b9a7c033f |
| 3a73806a52 |
| 858bb91244 |
| 218419368a |
| 472eebf9a0 |
| c4ef961a78 |
| f925f1e31a |
| fcaa24a860 |
| e09c9d44e8 |
| acdb869430 |
| c6747ac8e6 |
27 changed files with 5271 additions and 519 deletions
@@ -2,12 +2,12 @@
     "batch_size": 512,
     "grad_clip": 1.0,
     "learning_rate_style": "exp",
-    "learning_rate": 0.01,
+    "learning_rate": 0.001,
     "min_learning_rate": 1e-05,
     "learning_decay_rate": 0.9999,
-    "prediction_horizon": 30,
+    "prediction_horizon": 60,
     "minimum_history_length": 5,
-    "maximum_history_length": 50,
+    "maximum_history_length": 150,
     "map_encoder": {
         "PEDESTRIAN": {
             "heading_state_index": [2, 3],
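Read in seconds, the changed horizon and history settings are substantial. A quick sanity check, assuming the 12 fps camera rate used by the `process_data` commands in the README below:

```python
# Frames-to-seconds sanity check for the changed values above,
# assuming the 12 fps camera rate (--camera-fps 12) used elsewhere here.
FPS = 12
print(60 / FPS)   # prediction_horizon: 60 frames -> 5.0 s ahead (was 2.5 s)
print(150 / FPS)  # maximum_history_length: 150 frames -> 12.5 s (was ~4.2 s)
```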
18  README.md
@@ -14,12 +14,24 @@ These are roughly the steps to go from data gathering to training
 1. Make sure to have some recordings with a fixed camera. [UPDATE: not needed anymore, except for calibration & homography footage]
    * Recording can be done with `ffmpeg -rtsp_transport udp -i rtsp://USER:PASS@IP:554/Streaming/Channels/1.mp4 hof2-cam-$(date "+%Y%m%d-%H%M").mp4`
 2. Follow the steps in the auxiliary [traptools](https://git.rubenvandeven.com/security_vision/traptools) repository to obtain (1) camera matrix, lens distortion, image dimensions, and (2+3) homography
-3. Run the tracker, e.g. `uv run tracker --detector ultralytics --homography ../DATASETS/NAME/homography.json --video-src ../DATASETS/NAME/*.mp4 --calibration ../DATASETS/NAME/calibration.json --save-for-training EXPERIMENTS/raw/NAME/`
-   * Note: You can run this right off the camera stream: `uv run tracker --eval_device cuda:0 --detector ultralytics --video-src rtsp://USER:PW@ADDRESS/STREAM --homography ../DATASETS/NAME/homography.json --calibration ../DATASETS/NAME/calibration.json --save-for-training EXPERIMENTS/raw/NAME/`, each recording adding a new file to the `raw` folder.
-4. Parse tracker data to Trajectron format: `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME` Optionally, smooth tracks: `--smooth-tracks`
+3. Track lidar or video data:
+   1. Video: Run the video source & video tracker nodes:
+      * `uv run trap_video_source --homography ../DATASETS/hof4-test-angle/homography.json --video-src gige://../DATASETS/hof4-test-angle/gige_config.json --calibration ../DATASETS/hof4-test-angle/calibration.json` (Optionally, use recorded video with `--video-src videos/render-source-2025-10-19T21\:09.mp4 --video-offset 300`)
+      * `uv run trap_tracker --smooth-tracks --eval_device cuda:0 --detector ultralytics`
+   2. Lidar: `uv run trap_lidar --min-box-area 0 --pi LOCAL_IP --smooth-tracks`
+4. Save the tracks emitted by the video or lidar tracker: `uv run trap_track_writer --output-dir EXPERIMENTS/raw/hof-lidar`
+   * Each recording adds a new txt file to the `raw` folder.
+4. Parse tracker data to Trajectron format: `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME`
+   * Optionally, smooth tracks: `--smooth-tracks`
+   * Optionally, add variations with noise: `--noise-tracks 2` (creates 2 variations)
+   * Optionally, add variations at a random offset: `--offset-tracks 2` (creates 2 variations)
    * Optionally, add a map: ideally an RGB png: 3 layers of 0-255
      * `uv run process_data --src-dir EXPERIMENTS/raw/NAME --dst-dir EXPERIMENTS/trajectron-data/ --name NAME --smooth-tracks --camera-fps 12 --homography ../DATASETS/NAME/homography.json --calibration ../DATASETS/NAME/calibration.json --filter-displacement 2 --map-img-path ../DATASETS/NAME/map.png`
+     * See [[tests/trajectron_maps.ipynb]] for more info on how to do so (e.g. the homography map/scale settings, which are also set in process_data)
 5. Train Trajectron model `uv run trajectron_train --eval_every 10 --vis_every 1 --train_data_dict NAME_train.pkl --eval_data_dict NAME_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir EXPERIMENTS/models --log_tag _NAME --train_epochs 100 --conf EXPERIMENTS/config.json --batch_size 256 --data_dir EXPERIMENTS/trajectron-data `
+   * For faster training, disable edges:
+     `uv run trajectron_train --eval_every 200 --train_data_dict dortmund-nostep-nosmooth-noise2-offsets1-f2.0-map-2025-11-11_train.pkl --eval_data_dict dortmund-nostep-nosmooth-noise2-offsets1-f2.0-map-2025-11-11_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir /home/ruben/suspicion/trap/SETTINGS/2025-11-dortmund/models --log_tag _dortmund-nostep-nosmooth-noise2-offsets1-f2.0-map-2025-11-11 --train_epochs 100 --conf /home/ruben/suspicion/trap/SETTINGS/2025-11-dortmund/trajectron.json --data_dir SETTINGS/2025-11-dortmund/trajectron --map_encoding --no_edge_encoding --dynamic_edges yes --no_edge_encoding --edge_influence_combine_method max --batch_size 512`
-6. The run!
+6. Then run!
    * `uv run supervisord`

 <!-- * On a video file (you can use a wildcard) `DISPLAY=:1 uv run trapserv --remote-log-addr 100.69.123.91 --eval_device cuda:0 --detector ultralytics --homography ../DATASETS/NAME/homography.json --eval_data_dict EXPERIMENTS/trajectron-data/hof2s-m_test.pkl --video-src ../DATASETS/NAME/*.mp4 --model_dir EXPERIMENTS/models/models_DATE_NAME/--smooth-predictions --smooth-tracks --num-samples 3 --render-window --calibration ../DATASETS/NAME/calibration.json` (the DISPLAY environment variable is used here when running over an SSH connection to display on the local monitor)
130  SETTINGS/2025-11-dortmund/trajectron.json (new file)
@@ -0,0 +1,130 @@
+{
+    "batch_size": 512,
+    "grad_clip": 1.0,
+    "learning_rate_style": "exp",
+    "learning_rate": 0.001,
+    "min_learning_rate": 1e-05,
+    "learning_decay_rate": 0.9999,
+    "prediction_horizon": 60,
+    "minimum_history_length": 5,
+    "maximum_history_length": 150,
+    "map_encoder": {
+        "PEDESTRIAN": {
+            "heading_state_index": [2, 3],
+            "patch_size": [
+                50,
+                10,
+                50,
+                90
+            ],
+            "map_channels": 3,
+            "hidden_channels": [
+                10,
+                20,
+                5,
+                1
+            ],
+            "output_size": 32,
+            "masks": [
+                5,
+                5,
+                5,
+                5
+            ],
+            "strides": [
+                1,
+                1,
+                1,
+                1
+            ],
+            "dropout": 0.5
+        }
+    },
+    "k": 1,
+    "k_eval": 1,
+    "kl_min": 0.07,
+    "kl_weight": 100.0,
+    "kl_weight_start": 0,
+    "kl_decay_rate": 0.99995,
+    "kl_crossover": 400,
+    "kl_sigmoid_divisor": 4,
+    "rnn_kwargs": {
+        "dropout_keep_prob": 0.75
+    },
+    "MLP_dropout_keep_prob": 0.9,
+    "enc_rnn_dim_edge": 1,
+    "enc_rnn_dim_edge_influence": 1,
+    "enc_rnn_dim_history": 32,
+    "enc_rnn_dim_future": 32,
+    "dec_rnn_dim": 128,
+    "q_z_xy_MLP_dims": null,
+    "p_z_x_MLP_dims": 32,
+    "GMM_components": 1,
+    "log_p_yt_xz_max": 6,
+    "N": 1,
+    "K": 25,
+    "tau_init": 2.0,
+    "tau_final": 0.05,
+    "tau_decay_rate": 0.997,
+    "use_z_logit_clipping": true,
+    "z_logit_clip_start": 0.05,
+    "z_logit_clip_final": 5.0,
+    "z_logit_clip_crossover": 300,
+    "z_logit_clip_divisor": 5,
+    "dynamic": {
+        "PEDESTRIAN": {
+            "name": "SingleIntegrator",
+            "distribution": true,
+            "limits": {}
+        }
+    },
+    "state": {
+        "PEDESTRIAN": {
+            "position": [
+                "x",
+                "y"
+            ],
+            "velocity": [
+                "x",
+                "y"
+            ],
+            "acceleration": [
+                "x",
+                "y"
+            ]
+        }
+    },
+    "pred_state": {
+        "PEDESTRIAN": {
+            "position": [
+                "x",
+                "y"
+            ]
+        }
+    },
+    "log_histograms": false,
+    "dynamic_edges": "yes",
+    "edge_state_combine_method": "sum",
+    "edge_influence_combine_method": "max",
+    "edge_addition_filter": [
+        0.25,
+        0.5,
+        0.75,
+        1.0
+    ],
+    "edge_removal_filter": [
+        1.0,
+        0.0
+    ],
+    "offline_scene_graph": "yes",
+    "incl_robot_node": false,
+    "node_freq_mult_train": false,
+    "node_freq_mult_eval": false,
+    "scene_freq_mult_train": false,
+    "scene_freq_mult_eval": false,
+    "scene_freq_mult_viz": false,
+    "edge_encoding": false,
+    "use_map_encoding": true,
+    "augment": false,
+    "override_attention_radius": []
+}
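The file is plain JSON, so it can be inspected independently of Trajectron. A minimal check (stdlib only) that it parses and that the edge/map flags are set as intended:

```python
# Minimal sanity check of the new config file above.
import json

with open("SETTINGS/2025-11-dortmund/trajectron.json") as fp:
    conf = json.load(fp)

# Edges disabled, map encoding enabled, matching the trajectron_train flags.
print(conf["edge_encoding"], conf["use_map_encoding"])              # False True
print(conf["prediction_horizon"], conf["maximum_history_length"])  # 60 150
```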
@@ -16,7 +16,7 @@ dependencies = [
     "gdown>=4.7.1,<5",
     "pandas-helper-calc",
     "tsmoothie>=1.0.5,<2",
-    "pyglet>=2.0.15,<3",
+    "pyglet>=2.1.8,<3",
     "pyglet-cornerpin>=0.3.0,<0.4",
     "opencv-python",
     "setproctitle>=1.3.3,<2",
@@ -42,6 +42,7 @@ dependencies = [
     "nptyping>=2.5.0",
     "py-to-proto>=0.6.0",
     "grpcio-tools>=1.76.0",
+    "dearpygui>=2.1.0",
 ]

 [project.scripts]
@@ -58,12 +59,15 @@ model_train = "trap.models.train:train"
 trap_video_source = "trap.frame_emitter:FrameEmitter.parse_and_start"
 trap_video_writer = "trap.frame_writer:FrameWriter.parse_and_start"
 trap_tracker = "trap.tracker:Tracker.parse_and_start"
-trap_lidar = "trap.lidar_tracker:LidarTracker.parse_and_start"
+trap_track_writer = "trap.track_writer:TrackWriter.parse_and_start"
+trap_lidar = "trap.lidar_tracker:Lidar.parse_and_start"
 trap_stage = "trap.stage:Stage.parse_and_start"
+trap_render_stage = "trap.stage_renderer:StageRenderer.parse_and_start"
 trap_prediction = "trap.prediction_server:PredictionServer.parse_and_start"
 trap_render_cv = "trap.cv_renderer:CvRenderer.parse_and_start"
 trap_monitor = "trap.monitor:Monitor.parse_and_start" # migrate timer
 trap_laser_calibration = "trap.laser_calibration:LaserCalibration.parse_and_start" # migrate timer
+trap_settings = "trap.settings:Settings.parse_and_start" # migrate timer

 [tool.uv]
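Each of these entries points at a `parse_and_start` classmethod. Its internals are not part of this diff; as a rough, hypothetical sketch of the pattern (the `run` method and the flag are assumed names, not confirmed here):

```python
# Hypothetical sketch of the parse_and_start pattern behind the
# [project.scripts] entries above; run() and --output-dir are assumptions.
import argparse

class TrackWriter:
    def __init__(self, args: argparse.Namespace):
        self.output_dir = args.output_dir

    def run(self):
        print(f"writing tracks to {self.output_dir}")

    @classmethod
    def parse_and_start(cls):
        # parse CLI arguments, then construct and run the node
        parser = argparse.ArgumentParser()
        parser.add_argument("--output-dir", default="EXPERIMENTS/raw")
        cls(parser.parse_args()).run()

if __name__ == "__main__":
    TrackWriter.parse_and_start()
```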
@@ -23,23 +23,45 @@ directory=%(here)s
 autostart=false

 [program:video]
-command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
+# command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
-# command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
+command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
-directory=%(here)s
 directory=%(here)s

 [program:tracker]
 command=uv run trap_tracker --smooth-tracks
+# command=uv run trap_lidar --min-box-area 0 --viz --smooth-tracks
+# environment=DISPLAY=":0"
 directory=%(here)s
+autostart=false
+
+[program:lidar]
+command=uv run trap_lidar --min-box-area 0.1 --viz
+environment=DISPLAY=":0"
+directory=%(here)s
+autostart=false
+
+[program:track_writer]
+command=uv run trap_track_writer --output-dir EXPERIMENTS/raw/hof-lidar
+# environment=DISPLAY=":0"
+directory=%(here)s
+autostart=false
+stopwaitsecs=60

 [program:stage]
 # command=uv run trap_stage
 command=uv run trap_stage --verbose --camera-fps 12 --homography ../DATASETS/hof3/homography.json --calibration ../DATASETS/hof3/calibration.json --cache-path /tmp/history_cache-hof3.pcl --tracker-output-dir EXPERIMENTS/raw/hof3/
 directory=%(here)s

-[program:predictor]
-command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof3-m2-ud-split-nostep-conv12-f2.0-map-2024-12-29_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
+[program:settings]
+command=uv run trap_settings
+autostart=true
+environment=DISPLAY=":0"
+directory=%(here)s
+
+[program:predictor]
+# command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof3-m2-ud-split-nostep-conv12-f2.0-map-2024-12-29_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
+command=uv run trap_prediction --eval_device cuda:0 --model_dir SETTINGS/2025-11-dortmund/models/models_20251111_19_06_29_dortmund-nostep-nosmooth-noise2-offsets1-f2.0-map-2025-11-11/ --num-samples 1 --map_encoding --eval_data_dict SETTINGS/2025-11-dortmund/trajectron/dortmund-nostep-nosmooth-noise2-offsets1-f2.0-map-2025-11-12_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode --conf SETTINGS/2025-11-dortmund/trajectron.json
+# command=uv run trap_prediction --eval_device cuda:0 --model_dir EXPERIMENTS/models/models_20251106_11_51_00_hof-lidar-m2-ud-nostep-kalsmooth-noise2-offsets2-f2.0-map-2025-11-06/ --num-samples 1 --map_encoding --eval_data_dict EXPERIMENTS/trajectron-data/hof-lidar-m2-ud-nostep-kalsmooth-noise2-offsets2-f2.0-map-2025-11-06_val.pkl --prediction-horizon 120 --gmm-mode True --z-mode
 # uv run trajectron_train --continue_training_from EXPERIMENTS/models/models_20241229_21_35_13_hof3-m2-ud-split-conv12-f2.0-map-2024-12-29/ --eval_every 5 --train_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_train.pkl --eval_data_dict hof3-nostep-conv12-f2.0-map-2024-12-27_val.pkl --offline_scene_graph no --preprocess_workers 8 --log_dir EXPERIMENTS/models --log_tag _hof3-conv12-f2.0-map-2024-12-27 --train_epochs 10 --conf EXPERIMENTS/config.json --data_dir EXPERIMENTS/trajectron-data --map_encoding
 directory=%(here)s

@@ -51,8 +73,25 @@ autostart=false
 ; can be long to quit if rendering to video file
 stopwaitsecs=60

+[program:render_cv]
+command=uv run trap_render_cv
+directory=%(here)s
+environment=DISPLAY=":0"
+autostart=false
+; can be long to quit if rendering to video file
+stopwaitsecs=60
+
+[program:laserspace]
+command=cargo run --release tcp://127.0.0.1:99174 ../trap/SETTINGS/2025-11-dortmund/laserspace.json
+directory=%(here)s/../laserspace
+environment=DISPLAY=":0"
+autostart=false
+; can be long to quit if rendering to video file
+stopwaitsecs=60
+
 # during development auto restart some services when the code changes
 [program:superfsmon]
 command=superfsmon trap/stage.py stage
 directory=%(here)s
+autostart=false
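Since the new `lidar`, `track_writer`, `render_cv` and `laserspace` programs are declared with `autostart=false`, they are started by hand once `uv run supervisord` is up, using the standard supervisor CLI: `supervisorctl status` lists them, and e.g. `supervisorctl start lidar track_writer` launches the lidar pipeline (program names as defined above).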
1028  tests/stage_lines.ipynb (new file; diff suppressed because one or more lines are too long)
776  tests/track_history.ipynb (new file; diff suppressed because one or more lines are too long)
181  trap/anomaly.py (new file)
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import logging
+from typing import List
+
+import numpy as np
+from trap.base import ProjectedTrack
+from trap.lines import AppendableLine, Coordinate, DeltaT, ProceduralChain, RenderableLines, SrgbaColor, StaticLine
+
+logger = logging.getLogger('anomaly')
+
+
+def calc_anomaly(segments: List[DiffSegment], window: int = 3):
+    """Calculate anomaly score based on provided segments,
+    considering a sliding window of the last n items
+    """
+    relevant_segments = segments[-window:]
+    scores = [s.avg_score() for s in relevant_segments]
+    s = list(filter(lambda x: x is not None, scores))
+
+    return np.average(s)
+
+
+class DiffSegment():
+    """
+    A segment of a prediction track, that can be diffed
+    with a track. The track is continuously updated.
+    If a new prediction comes in, the diff is marked as
+    finished. After which it is animated and added to the
+    Scenario's anomaly score.
+    """
+    DRAW_DECAY_SPEED = 25
+    POINT_INTERVAL = 4
+
+    def __init__(self, prediction: ProjectedTrack):
+        self.ptrack = prediction
+        self._last_diff_frame_idx: int = 0
+        self.finished = False
+
+        self.line = StaticLine()
+        self.points: List[Coordinate] = []
+        self._drawn_points = []
+        self._target_track = prediction
+
+        self.score = 0
+
+    def finish(self):
+        self.finished = True
+
+    def nr_of_passed_points(self) -> int:
+        if not self._last_diff_frame_idx:
+            return 0
+        return self._last_diff_frame_idx - self.ptrack.frame_index
+
+        # if isinstance(self.line, AppendableLine):
+        #     return self.line.nr_of_passed_points() * self.POINT_INTERVAL
+        # else:
+        #     return len(self.points) * self.POINT_INTERVAL
+
+    def avg_score(self):
+        frames_passed = self.nr_of_passed_points()
+        if not frames_passed:
+            return None
+        else:
+            return self.score / frames_passed
+
+    # run on each track update received
+    def update_track(self, track: ProjectedTrack):
+        self._target_track = track
+
+        if self.finished:
+            # don't add new points if finished
+            return
+
+        # migrated ScenarioScene function
+        start_frame_idx = max(self.ptrack.frame_index, self._last_diff_frame_idx)
+        traj_diff_steps_back = track.frame_index - start_frame_idx  # positive value
+        pred_diff_steps_forward = start_frame_idx - self.ptrack.frame_index  # positive value
+
+        if traj_diff_steps_back < 0 or len(track.history) < traj_diff_steps_back:
+            logger.warning("Track history doesn't reach prediction start. Should not be possible. Skip")
+        # elif len(ptrack.predictions[0]) < pred_diff_steps_back:
+        #     logger.warning("Prediction does not reach prediction start. Should not be possible. Skip")
+        else:
+            trajectory = track.projected_history
+
+            # from start to as far as it gets
+            trajectory_range = trajectory[-1 * traj_diff_steps_back:]
+            prediction_range = self.ptrack.predictions[0][pred_diff_steps_forward:]  # in world coordinate space
+            line = []
+            for i, (p1, p2) in enumerate(zip(trajectory_range, prediction_range)):
+                diff = (p1[0] - p2[0], p1[1] - p2[1])
+                self.score += np.linalg.norm(diff)
+
+                offset_from_start = (pred_diff_steps_forward + i)
+                if offset_from_start % self.POINT_INTERVAL == 0:
+                    self.line.extend([p1, p2])
+                    self.points.extend([p1, p2])
+
+        self._last_diff_frame_idx = track.frame_index
+
+    # # run each render tick
+    # def update_drawn_positions(self, dt: DeltaT):
+    #     if isinstance(self.line, AppendableLine):
+    #         if self.finished and self.line.ready:
+    #             # convert when fully drawn
+    #             self.line = ProceduralChain.from_appendable_line(self.line)
+
+    #     if isinstance(self.line, ProceduralChain):
+    #         self.line.target = self._target_track.projected_history[-1]
+
+    #     # if not self.finished or not self.line.ready:
+    #     self.line.update_drawn_positions(dt)
+
+    def as_renderable(self) -> RenderableLines:
+        color = SrgbaColor(0, 0, 1, 1)
+        # if not self.finished or not self.line.ready:
+        return self.line.as_renderable(color)
+
+
+def calculate_loitering_scores(track: ProjectedTrack, min_duration_to_linger, linger_factor, velocity_threshold, window = None):
+    """
+    Calculates a loitering score (0-1) for a track.
+
+    Args:
+        track: A track, whose projected_history is a sequence of (x, y) positions.
+        min_duration_to_linger: Minimum number of frames to start considering a segment as lingering.
+        linger_factor: Divide number of lingering frames by 'linger_factor' to get a score 0-1
+        velocity_threshold: Maximum velocity (meters/frame) to consider as lingering.
+
+    Returns:
+        A generator providing loitering scores
+    """
+    total_frames = len(track.projected_history)
+
+    if total_frames < 2:
+        return 0.0  # Not enough data
+
+    offset = window * -1 if window is not None else 0
+
+    x_coords = [t[0] for t in track.projected_history[offset:]]
+    y_coords = [t[1] for t in track.projected_history[offset:]]
+
+    # Calculate velocities
+    velocities = np.sqrt(np.diff(x_coords)**2 + np.diff(y_coords)**2)
+
+    # Calculate distances
+    # distances = np.diff(x_coords)
+    # distances_y = np.diff(y_coords)
+    # distances_total = np.sqrt(distances**2 + distances_y**2)
+
+    linger_duration = 0
+    linger_frames = 0
+
+    for i in range(len(velocities)):
+        if velocities[i] < velocity_threshold:
+            linger_duration += 1
+            if linger_duration >= min_duration_to_linger:
+                linger_frames += 1
+        else:
+            # decay if moving faster
+            linger_duration = max(linger_duration - 1.5, 0)
+            linger_frames = max(linger_frames - 1.5, 0)
+
+    # Calculate loitering score
+    if total_frames > 0:
+        loitering_score = min(1, max(0, linger_frames / linger_factor))
+    else:
+        loitering_score = 0.0
+
+    yield loitering_score
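A rough usage sketch for the scoring helpers above (not part of the commit). Constructing a real `ProjectedTrack` is elided, so a minimal stand-in with only a `projected_history` is used:

```python
# Rough usage sketch; SimpleNamespace stands in for a ProjectedTrack.
from types import SimpleNamespace

track = SimpleNamespace(projected_history=[(0.0, 0.0), (0.01, 0.0), (0.01, 0.01), (0.02, 0.01)])

# calculate_loitering_scores is a generator; drain it for the score.
scores = list(calculate_loitering_scores(
    track,
    min_duration_to_linger=2,   # frames below the velocity threshold
    linger_factor=10,           # maps the lingering-frame count to 0..1
    velocity_threshold=0.1))    # meters per frame
print(scores)  # [0.2]: 2 of the 3 steps counted as lingering

# calc_anomaly averages DiffSegment.avg_score() over the last `window`
# segments; segments without passed frames return None and are skipped.
# anomaly = calc_anomaly(segments, window=3)
```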
10  trap/base.py
@@ -25,6 +25,7 @@ from bytetracker.basetrack import TrackState as ByteTrackTrackState
 import pandas as pd
 from shapely import Point
+
 from trap.utils import get_bins, inv_lerp, lerp
 from trajectron.environment import Environment, Node, Scene
 from urllib.parse import urlparse
@@ -73,6 +74,7 @@ class DetectionState(IntFlag):
     Confirmed = 2 # after tentative
     Lost = 4 # lost when DeepsortTrack.time_since_update > 0 but not Deleted
     Interpolated = 8 # A position estimated through interpolation of adjacent detections
+    # Interpolated = 8 # A position estimated through interpolation of adjacent detections

     @classmethod
     def from_deepsort_track(cls, track: DeepsortTrack):
@@ -88,11 +90,13 @@ class DetectionState(IntFlag):
     def from_bytetrack_track(cls, track: ByteTrackTrack):
         if track.state == ByteTrackTrackState.New:
             return cls.Tentative
-        if track.state == ByteTrackTrackState.Lost:
+        if track.state == ByteTrackTrackState.Removed:
             return cls.Lost
         # if track.time_since_update > 0:
         if track.state == ByteTrackTrackState.Tracked:
             return cls.Confirmed
+        if track.state == ByteTrackTrackState.Lost:
+            return cls.Tentative
         raise RuntimeError("Should not run into Deleted entries here")
@@ -170,6 +174,8 @@ class DistortedCamera(ABC):
             calibdata = json.load(fp)
         if 'type' in calibdata and calibdata['type'] == 'fisheye':
             camera = FisheyeCamera.from_calibdata(calibdata, H, fps)
+        elif 'type' in calibdata and calibdata['type'] == 'undistorted':
+            camera = UndistortedCamera(calibdata['fps'])
         else:
             camera = Camera.from_calibdata(calibdata, H, fps)
@@ -759,6 +765,8 @@ class CameraAction(argparse.Action):
             data = json.load(fp)
         if 'type' in data and data['type'] == 'fisheye':
             camera = FisheyeCamera.from_calibfile(Path(values), namespace.H, namespace.camera_fps)
+        elif 'type' in data and data['type'] == 'undistorted':
+            camera = UndistortedCamera(namespace.camera_fps)
         else:
             camera = Camera.from_calibfile(Path(values), namespace.H, namespace.camera_fps)
         # # print(data)
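For illustration, a minimal calibration file that would select the new `UndistortedCamera` branch. Only the two fields read by the code above are shown; a real calibration file likely carries more:

```python
# Hypothetical minimal calibdata that triggers the new branch above;
# only 'type' and 'fps' are read there, other fields are omitted.
import json

calibdata = json.loads('{"type": "undistorted", "fps": 12}')
assert calibdata['type'] == 'undistorted'
print(calibdata['fps'])  # 12, passed on as UndistortedCamera(calibdata['fps'])
```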
@@ -18,7 +18,7 @@ import pyglet
 import zmq
 from pyglet import shapes

-from trap.base import Detection
+from trap.base import Detection, UndistortedCamera
 from trap.counter import CounterListerner
 from trap.frame_emitter import Frame, Track
 from trap.lines import load_lines_from_svg
@@ -151,10 +151,12 @@ class CvRenderer(Node):
             # logger.debug(f'new video frame {frame.index}')

-            if self.frame is None:
+            if self.frame is None and i < 100:
                 # might need to wait a few iterations before first frame comes available
                 time.sleep(.1)
                 continue
+            elif self.frame is None:
+                # no video source running: fall back to a blank canvas
+                self.frame = Frame(i, np.zeros((1920,1080,3)), camera=UndistortedCamera(12))

             try:
                 prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
@@ -274,7 +276,7 @@ class CvRenderer(Node):
         render_parser.add_argument('--debug-map',
                             help='specify a map (svg-file) from which to load lines which will be overlaid',
                             type=str,
-                            default="../DATASETS/hof3/map_hof.svg")
+                            default="../DATASETS/hof-lidar/map_hof.svg")
         return render_parser

     def click_print_position(self, event, x, y, flags, param):
@@ -410,8 +412,9 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
         inv_H = np.linalg.pinv(prediction_frame.H)
         # For debugging:
         # draw_trackjectron_history(img, track, int(track.track_id), conversion)
-        anim_position = get_animation_position(track, frame)
-        draw_track_predictions(img, track, int(track.track_id)+1, frame.camera, conversion, anim_position=anim_position, as_clusters=as_clusters)
+        # anim_position = get_animation_position(track, frame)
+        anim_position = 1
+        draw_track_predictions(img, track, int(track.track_id)+1, prediction_frame.camera, conversion, anim_position=anim_position, as_clusters=as_clusters)
         cv2.putText(img, f"{len(track.predictor_history) if track.predictor_history else 'none'}", to_point(track.history[0].get_foot_coords()), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)
         if prediction_frame.maps:
             for i, m in enumerate(prediction_frame.maps):
@@ -37,7 +37,6 @@ class FrameEmitter(node.Node):
         video_gen = enumerate(source, start = offset)

         # writer = FrameWriter(self.config.record, None, None) if self.config.record else nullcontext
-        print(self.config.record)
         writer = FrameWriter(str(self.config.record), None, None) if self.config.record else None
         try:
             processor = ImgMovementFilter()
File diff suppressed because it is too large
632  trap/lines.py
@@ -5,6 +5,7 @@ import copy
 from dataclasses import dataclass
 from enum import Enum, IntEnum
 from functools import partial
+import logging
 import math
 from pathlib import Path
 import time
@@ -21,13 +22,16 @@ import svgpathtools
 from noise import snoise2

 from trap import renderable_pb2
-from trap.utils import exponentialDecayRounded, inv_lerp
+from trap.utils import easeInOutQuad, exponentialDecay, exponentialDecayRounded, inv_lerp, lerp, relativePointToPolar, relativePolarToPoint


 """
 See [notebook](../test_path_transforms.ipynb) for examples
 """

+logger = logging.getLogger('lines')
+
 RenderablePosition = Tuple[float,float]
 Coordinate = Tuple[float, float]
 DeltaT = float # delta_t in seconds
@@ -54,6 +58,9 @@ class SrgbaColor():
     def __eq__(self, other):
         return math.isclose(self.red, other.red) and math.isclose(self.green, other.green) and math.isclose(self.blue, other.blue) and math.isclose(self.alpha, other.alpha)

+    def as_array(self):
+        return np.array([self.red, self.green, self.blue, self.alpha])
+
 @dataclass
 class RenderablePoint():
@@ -307,12 +314,31 @@ class AppendableLine(LineGenerator):
             # create origin
             self.drawn_points.append(self.target_points[0])
             # and drawing head
-            self.drawn_points.append(self.target_points[0])
+            # self.drawn_points.append(self.target_points[0])
+
+        target_l = shapely.geometry.LineString(self.target_points).length
+        current_l = shapely.geometry.LineString(self.drawn_points).length if len(self.drawn_points) > 1 else 0
+        req_l = exponentialDecayRounded(current_l, target_l, self.draw_decay_speed, dt, .05)
+
+        if np.isclose(req_l, target_l):
+            self.drawn_points = self.target_points
+        else:
+            distance_to_do = req_l - current_l
             idx = len(self.drawn_points) - 1
+            while distance_to_do:
                 target = self.target_points[idx]
+                # distance from the drawing head to the next target point
+                this_distance = np.linalg.norm(np.array(self.drawn_points[-1]) - np.array(target))
+                if this_distance > distance_to_do:
+                    break
+                distance_to_do -= this_distance
-        if np.isclose(self.drawn_points[-1], target, atol=.05).all():
+                idx += 1
+                self.drawn_points.append(target)
+
+        if np.isclose(self.drawn_points[-1], self.target_points[idx], atol=.05).all():
             # TODO: might want to migrate to np.isclose()
             if len(self.drawn_points) == len(self.target_points):
                 self.ready = True
@@ -456,7 +482,7 @@ class LineAnimator(StaticLine):
         # print(self, target_line, bool(target_line), target_line is not None)
         self.target = target_line if target_line is not None else StaticLine()
         self.ready = len(self.target) == 0
-        self.start_t = time.time()
+        self.start_t = time.perf_counter()
         self.skip = False

     def extend(self, coords):
@@ -478,15 +504,23 @@ class LineAnimator(StaticLine):
     def is_ready(self):
         return (self.ready or self.skip) and self.target.is_ready()

     def start(self):
         self.target.start()
-        self.start_t = time.time()
+
+        self.start_t = time.perf_counter()
         return True

+    def is_started(self):
+        return bool(self.start_t)
+
+    def is_running(self):
+        # when ready, consider not running
+        return bool(self.start_t) and not self.is_ready()
+
     def running_for(self):
         if self.start_t:
-            return time.time() - self.start_t
+            return time.perf_counter() - self.start_t
         return 0.
@@ -505,9 +539,10 @@ class AppendableLineAnimator(LineAnimator):

     def apply(self, target_line, dt: DeltaT) -> RenderableLine:
-        if len(target_line) == 0:
+        if len(target_line) < 2:
+            return target_line
             # nothing to draw yet
-            return RenderableLine([])
+            # return RenderableLine([])
@@ -521,6 +556,12 @@ class AppendableLineAnimator(LineAnimator):
             self.drawn_points.append(copy.deepcopy(self.drawn_points[-1]))

         idx = len(self.drawn_points) - 1
+        if idx > len(target_line.points) - 1:
+            logger.warning("Target line shorter than appendable line, shortening")
+            self.drawn_points = self.drawn_points[:len(target_line)]
+            idx = len(self.drawn_points) - 1
+
         target = target_line.points[idx]

         if np.isclose(self.drawn_points[-1].position, target.position, atol=.05).all():
@@ -574,6 +615,20 @@ class FadeOutLine(LineAnimator):

         return target_line

+
+class SimplifyLine(LineAnimator):
+    """
+    Simplify the line
+    """
+    def __init__(self, target_line = None, factor: float = .003):
+        super().__init__(target_line)
+        self.factor = factor
+        self.ready = True # filter holds no state, so always ready
+
+    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        return target_line.as_simplified(SimplifyMethod.RDP, self.factor)
+
+
 class FadeOutJitterLine(LineAnimator):
     """
     Fade the line providing an alpha, 1 by default
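`SimplifyLine` delegates to `as_simplified` with `SimplifyMethod.RDP`, i.e. Ramer-Douglas-Peucker point reduction (both defined elsewhere in `trap/lines.py`). The same idea in isolation, shown with shapely's `simplify`, which implements that algorithm:

```python
# Ramer-Douglas-Peucker in isolation, via shapely; points that deviate
# from the simplified shape by less than the tolerance are dropped.
import shapely.geometry

ls = shapely.geometry.LineString([(0, 0), (0.001, 0.5), (0, 1), (1, 1)])
print(list(ls.simplify(0.003, preserve_topology=False).coords))
# -> [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0)]: the near-collinear point is gone
```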
@@ -611,15 +666,94 @@ class CropLine(LineAnimator):
     Crop the line at a max nr of points (thus not actual length!)
     Keeps the tail, removes the start
     """
-    def __init__(self, target_line = None, max_points = 200):
+    def __init__(self, target_line = None, max_points: Optional[int] = None, start_offset: Optional[int] = None):
         super().__init__(target_line)
+        self.start_offset = start_offset
         self.max_points = max_points
         self.ready = True # static filter, always ready

     def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        if self.start_offset:
+            if len(target_line) <= self.start_offset:
+                return RenderableLine([])
+            target_line.points = target_line.points[self.start_offset:]
+
+        if self.max_points:
             target_line.points = target_line.points[-1 * self.max_points:]
+
         return target_line


+class CropAnimationLine(LineAnimator):
+    """
+    Similar to SegmentLine, but cropping on points instead of length, and as an animation
+    """
+    def __init__(self, target_line = None, max_points = 200, assume_fps=12):
+        super().__init__(target_line)
+        self.max_points = max_points
+        self.assume_fps = assume_fps
+        # self.ready = True # static filter, always ready
+
+    # def set_frame_offset(self, frame_offset: int):
+    #     self.frame_offset = frame_offset
+
+    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        dt = self.running_for()
+        frame_offset = int(dt * self.assume_fps)
+
+        max_points = int(self.max_points(dt)) if callable(self.max_points) else self.max_points
+
+        head = frame_offset + 1
+        tail = max(0, frame_offset + 1 - max_points)
+
+        target_line.points = target_line.points[tail:head]
+        self.ready = len(target_line.points) < 1
+
+        return target_line
+
+
+class FadedEndsLine(LineAnimator):
+    """
+    Static filter; fade both ends of the line. Always applied (not only when cropped)
+    """
+    def __init__(self, target_line = None, in_fade_steps: int = 30, out_fade_steps: int = 30):
+        super().__init__(target_line)
+        self.ready = True
+        self.fade_in_steps = in_fade_steps
+        self.fade_out_steps = out_fade_steps
+
+    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
+        l = len(target_line.points)
+        points = []
+
+        # TODO: fractional divide if fade_in and out are not equal
+        half_points = l // 2
+        fade_in = min(self.fade_in_steps, half_points)
+        fade_out = min(self.fade_out_steps, half_points)
+
+        for i, point in enumerate(target_line.points):
+            if i < fade_in:
+                t = i / self.fade_in_steps
+            elif i > (l - fade_out):
+                t = 1 - (i - (l - fade_out)) / self.fade_out_steps
+            else:
+                t = 1
+
+            alpha = min(1, max(0, t))
+            point.color = point.color.as_faded(alpha)
+            points.append(point)
+
+        return RenderableLine(points)
+
+
 class FadedTailLine(LineAnimator):
     """
     Fade the tail of the line, providing a max length
@@ -658,6 +792,7 @@ class FadedTailLine(LineAnimator):
         return RenderableLine(points)

+
 class NoiseLine(LineAnimator):
     """
     Apply animated noise to line normals
@@ -733,7 +868,7 @@ class NoiseLine(LineAnimator):
             # noise_y = noise([x * frequency, (y + dt) * frequency]) * amplitude * normal_y
             noise = snoise2(i * frequency, t % 1000, octaves=4)

-            use_amp = amplitude
+            use_amp = amplitude[i] if hasattr(amplitude, "__getitem__") else amplitude
             if fade_over_n_points > 0:
                 rev_step = len(drawable_points) - i
                 amp_factor = rev_step / fade_over_n_points
@@ -805,6 +940,19 @@ class SegmentLine(LineAnimator):
         else:
             return (0, ls.length * t)

+    @classmethod
+    def anim_follow_in_front(cls, t: float, ls: shapely.geometry.LineString):
+        keypoints = [
+            (0.0, 0, 0),
+            (0.07, 1.0, 2.0),
+            (0.3, 1.5, 2.0),
+            (0.6, 1, 1.5),
+            (0.7, 1.2, 1.5),
+            (0.7, 1, 1.8),
+            (1.0, .5, ls.length),  # at t=1, run up to the full length
+        ]
+        return KeyframeAnimator(keypoints, easeInOutQuad).get_value(t)
+
     def apply(self, target_line: RenderableLine, dt: DeltaT):
         if len(target_line) < 2:
@@ -826,6 +974,57 @@ class SegmentLine(LineAnimator):


+class KeyframeAnimator:
+    def __init__(
+        self,
+        keypoints: List[Tuple[float, float, float]],
+        easing_func: Callable[[float], float] = None,
+    ):
+        """
+        Initialize the animator with keypoints and an optional easing function.
+
+        Args:
+            keypoints: List of (t_value, value_a, value_b) triples, where t_value is in [0, 1].
+            easing_func: Optional function to apply easing to `t` (e.g., easeInOutQuad).
+        """
+        self.keypoints = sorted(keypoints, key=lambda x: x[0])
+        self.easing_func = easing_func
+
+    def get_value(self, t: float) -> Tuple[float, float]:
+        """
+        Get the interpolated value pair at time `t` (0 <= t <= 1).
+
+        Args:
+            t: Normalized time (0 to 1).
+
+        Returns:
+            Interpolated values at time `t`.
+        """
+        if self.easing_func:
+            t = self.easing_func(t)
+
+        # Handle edge cases
+        if t <= self.keypoints[0][0]:
+            return self.keypoints[0][1], self.keypoints[0][2]
+        if t >= self.keypoints[-1][0]:
+            return self.keypoints[-1][1], self.keypoints[-1][2]
+
+        # Find the two keypoints surrounding `t`
+        for i in range(len(self.keypoints) - 1):
+            t0, val0, valb0 = self.keypoints[i]
+            t1, val1, valb1 = self.keypoints[i + 1]
+
+            if t0 <= t <= t1:
+                # Normalize `t` between t0 and t1
+                local_t = inv_lerp(t0, t1, t)
+                # Interpolate between the two keypoints
+                return lerp(val0, val1, local_t), lerp(valb0, valb1, local_t)
+
+        return self.keypoints[-1][1], self.keypoints[-1][2]  # fallback
+
+
 class DashedLine(LineAnimator):
     """
     Dashed line
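As an illustration of the triple format (not part of the commit): each keypoint is `(t, value_a, value_b)`, and `get_value` returns the interpolated `(a, b)` pair.

```python
# Illustration only: interpolating (t, value_a, value_b) keypoints.
anim = KeyframeAnimator([(0.0, 0.0, 0.0), (0.5, 1.0, 2.0), (1.0, 0.5, 3.0)])
print(anim.get_value(0.25))  # (0.5, 1.0): halfway between the first two keys
print(anim.get_value(1.0))   # (0.5, 3.0): clamped to the last keypoint
```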
@@ -867,6 +1066,7 @@ class DashedLine(LineAnimator):
             segments.append(dash)
             pos += dash_len + gap_len

+        segments = [segment for segment in segments if isinstance(segment, shapely.geometry.LineString)]
         # TODO: return all color together with the points
         return shapely.geometry.MultiLineString(segments)
@@ -880,8 +1080,11 @@ class DashedLine(LineAnimator):
             self.ready = True
             return target_line

+        gap_len = self.gap_len(dt, self.running_for()) if callable(self.gap_len) else self.gap_len
+        dash_len = self.dash_len(dt, self.running_for()) if callable(self.dash_len) else self.dash_len
+
         ls = target_line.as_linestring()
-        multilinestring = self.dashed_line(ls, self.dash_len, self.gap_len, self.t_factor * self.running_for(), self.loop_offset)
+        multilinestring = self.dashed_line(ls, dash_len, gap_len, self.t_factor * self.running_for(), self.loop_offset)

         # when looping, it is always ready, otherwise, only if totally gone
@ -891,6 +1094,377 @@ class DashedLine(LineAnimator):
|
||||||
|
|
||||||
return RenderableLine.from_multilinestring(multilinestring, color)
|
return RenderableLine.from_multilinestring(multilinestring, color)
|
||||||
|
|
||||||
|
class StartFromClosestPoint(LineAnimator):
|
||||||
|
"""
|
||||||
|
Dashed line
|
||||||
|
"""
|
||||||
|
def __init__(self, target_line = None, from_point: Optional[Tuple[float,float]] = None):
|
||||||
|
super().__init__(target_line)
|
||||||
|
self.from_point = from_point
|
||||||
|
self.ready = True # static filter, always ready
|
||||||
|
|
||||||
|
def disable(self):
|
||||||
|
self.from_point = None
|
||||||
|
|
||||||
|
def set_point(self, point: Tuple[float,float]):
|
||||||
|
self.from_point = point
|
||||||
|
|
||||||
|
def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
|
||||||
|
"""
|
||||||
|
warning, dashing (for now) removes all color
|
||||||
|
"""
|
||||||
|
|
||||||
|
if len(target_line) < 1 or self.from_point is None:
|
||||||
|
return target_line
|
||||||
|
|
||||||
|
|
||||||
|
distance = math.inf
|
||||||
|
idx = 0
|
||||||
|
from_point = np.array(self.from_point)
|
||||||
|
# print(from_point)
|
||||||
|
|
||||||
|
for i, point in enumerate(target_line.points):
|
||||||
|
if i < 2:
|
||||||
|
continue # skip the first to avoid jitter
|
||||||
|
|
||||||
|
to_point = np.array(point.position)
|
||||||
|
new_distance = np.linalg.norm(from_point-to_point)
|
||||||
|
if new_distance < distance:
|
||||||
|
distance = new_distance
|
||||||
|
idx = i+1
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
if idx >= len(target_line.points):
|
||||||
|
logger.warning("Empty line")
|
||||||
|
return RenderableLine([])
|
||||||
|
points = []
|
||||||
|
if idx > 0:
|
||||||
|
p = target_line.points[idx]
|
||||||
|
p.position = self.from_point
|
||||||
|
|
||||||
|
points.append(p)
|
||||||
|
points.extend(target_line.points[idx:])
|
||||||
|
# print(from_point, idx, [p.position for p in target_line.points[max(0,idx-5):idx+5]])
|
||||||
|
return RenderableLine(points)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def find_closest_to_point(cls, point: Tuple[Coordinate], points: List[Coordinate]):
|
||||||
|
from_point = np.array(point)
|
||||||
|
# idx = 0
|
||||||
|
# distance = math.inf
|
||||||
|
if len(points) == 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# print(points)
|
||||||
|
# print(points- from_point)
|
||||||
|
# print(np.array(points) - from_point)
|
||||||
|
idx = np.argmin(np.linalg.norm(np.array(points) - from_point, axis=1))
|
||||||
|
# for i, to_point in enumerate(points):
|
||||||
|
# to_point = np.array(to_point)
|
||||||
|
# new_distance = np.linalg.norm(from_point-to_point)
|
||||||
|
# if new_distance < distance:
|
||||||
|
# distance = new_distance
|
||||||
|
# idx = i+1
|
||||||
|
# else:
|
||||||
|
# break
|
||||||
|
return idx

class RotatingLine(LineAnimator):
    """
    Rotate the line around its starting point towards the new shape
    """
    def __init__(self, target_line = None, decay_speed=16):
        super().__init__(target_line)
        self.decay_speed = decay_speed
        self.drawn_points: List[RenderablePoint] = []

    def apply(self, target_line: RenderableLine, dt: DeltaT) -> RenderableLine:
        """
        Exponentially decay each drawn point towards its target, in polar coordinates around the line's origin.
        """
        if len(target_line) < 2:
            self.ready = True
            return target_line

        if len(self.drawn_points) < 1:
            self.drawn_points = target_line.points

        # find closest point to start from:
        origin = target_line.points[0]
        # closest_idx = StartFromClosestPoint.find_closest_to_point(origin.position, [p.position for p in self.drawn_points])
        # if closest_idx:
        #     self.drawn_points = self.drawn_points[closest_idx:] # hard cutoff

        # pad or trim drawn_points so both lines have the same length
        diff_length = len(target_line) - len(self.drawn_points)
        if diff_length < 0: # drawn points is larger
            self.drawn_points = self.drawn_points[:len(target_line)]
        if diff_length > 0: # target line is larger
            self.drawn_points += [self.drawn_points[-1]] * diff_length

        is_ready: List[bool] = []

        for i, (target_point, drawn_point) in enumerate(zip(target_line.points, list(self.drawn_points))):
            # TODO: this should be done in polar space starting from origin (i.e. self.drawn_points[-1])
            # decay = max(3, (18/i) if i else 10) # points further away move with more delay
            decay = self.decay_speed

            drawn_r, drawn_angle = relativePointToPolar(origin.position, drawn_point.position)
            pred_r, pred_angle = relativePointToPolar(origin.position, target_point.position)
            r = exponentialDecay(drawn_r, pred_r, decay, dt)

            # make circular coordinates transition through the smaller arc
            # TODO 20251108 bring this back, but calculated for the whole line:
            if abs(drawn_angle - pred_angle) > math.pi:
                pred_angle -= math.pi * 2
            angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
            x, y = relativePolarToPoint(origin.position, r, angle)

            red = exponentialDecay(drawn_point.color.red, target_point.color.red, decay, dt)
            green = exponentialDecay(drawn_point.color.green, target_point.color.green, decay, dt)
            blue = exponentialDecay(drawn_point.color.blue, target_point.color.blue, decay, dt)
            alpha = exponentialDecay(drawn_point.color.alpha, target_point.color.alpha, decay, dt)

            self.drawn_points[i].position = (x, y)
            self.drawn_points[i].color = SrgbaColor(red, green, blue, alpha)
            is_ready.append(np.isclose(drawn_point.position, target_point.position, atol=.05).all())

        self.ready = all(is_ready)

        return RenderableLine(self.drawn_points)
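
The smaller-arc correction in `apply()` only shifts `pred_angle` downwards, which covers the case where the target angle runs ahead of the drawn one. A generalized sketch (not the in-tree code) that handles both directions:

```python
import math

def shortest_arc_target(current: float, target: float) -> float:
    """Shift `target` by a full turn so interpolating from `current`
    travels along the smaller arc."""
    if abs(current - target) > math.pi:
        target += -math.pi * 2 if target > current else math.pi * 2
    return target

print(round(shortest_arc_target(0.1, 6.2), 3))  # -0.083: a small negative turn instead of almost a full circle
```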

from scipy.ndimage import gaussian_filter1d
from scipy.signal import find_peaks


class NoodleWiggler(LineAnimator):
    """When a line is 'noodling', don't draw it as a whole, but sway it animatedly.

    Work in progress
    """

    def __init__(self, target_line = None):
        super().__init__(target_line)
        self.range = 10
        self.threshold = 1.8
        self.smoothing_sigma = 2
        self.t = 0
        self.t_factor = 4

    def apply(self, target_line, dt):
        if len(target_line) < 2:
            return target_line

        self.t += dt

        scores = []
        distances = [np.linalg.norm(np.array(a.position) - np.array(b.position)) for a, b in zip(target_line.points, target_line.points[1:])]

        # 1) find points with a high distance traveled relative to their net displacement. This is noodling
        for i, point in enumerate(target_line.points):
            if i < self.range or len(target_line.points) <= i + self.range:
                scores.append(0)
                continue

            a = target_line.points[i - self.range]
            b = target_line.points[i + self.range]

            net_distance = np.linalg.norm(np.array(a.position) - np.array(b.position))
            gross_distance = sum(distances[i - self.range:i + self.range])
            if net_distance == 0:
                scores.append(0)
                continue
            scores.append(max(0, gross_distance / net_distance - self.threshold))

        # 2) smooth the curve
        smoothed_scores = gaussian_filter1d(scores, sigma=self.smoothing_sigma)

        # 3) find the peaks; the most intense noodling points
        peak_idxs, _ = find_peaks(smoothed_scores, height=0.5)

        # 4) for peaks, find start-peak-end indexes
        segments = self.connect_peaks_to_segments(smoothed_scores, peak_idxs)

        points, scores = self.replace_noodling_segments(scores, target_line, segments, 4)

        # 5) apply noise according to score
        new_positions = NoiseLine.apply_perlin_noise_to_line_normal(
            drawable_points=[p.position for p in points],
            amplitude=scores,
            t=self.t * self.t_factor,
            frequency=2
        )

        # 6) rebuild as line
        new_points = []
        for point, pos in zip(points, new_positions):
            p = copy.deepcopy(point)
            p.position = pos
            new_points.append(p)

        return RenderableLine(new_points)
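
The noodling score in step 1 is a sinuosity measure: path length travelled within a window divided by the net displacement across it, minus a threshold. A self-contained sketch of the underlying ratio:

```python
import numpy as np

def sinuosity(points: np.ndarray) -> float:
    """Gross path length over net displacement; 1.0 means perfectly straight."""
    gross = np.linalg.norm(np.diff(points, axis=0), axis=1).sum()
    net = np.linalg.norm(points[-1] - points[0])
    return gross / net if net > 0 else np.inf

zigzag = np.array([[0, 0], [1, 1], [2, 0], [3, 1], [4, 0]], dtype=float)
print(round(sinuosity(zigzag), 2))  # 1.41: the path is 41% longer than a straight line
```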

    @classmethod
    def connect_peaks_to_segments(cls, scores, peaks, threshold=0.3, min_segment_length=1) -> List[Tuple[int, int, int]]:
        """
        Returns a list of tuples: (start_idx, peak_idx, end_idx)
        """
        segments = []
        i = 0
        n = len(scores)

        while i < len(peaks):
            peak = peaks[i]
            start = peak
            end = peak

            # Expand left
            while start > 0 and scores[start - 1] > threshold:
                start -= 1

            # Expand right
            while end < n - 1 and scores[end + 1] > threshold:
                end += 1

            # Merge with next peak if close
            if i < len(peaks) - 1 and peaks[i + 1] - end < min_segment_length:
                i += 1
                continue

            segments.append((start, peak, end))
            i += 1

        return segments

    @classmethod
    def replace_noodling_segments(cls, scores: List[float], target_line: RenderableLine, segments, num_interpolated_points = 4):
        """
        Replace noodling segments in target_line with a fixed number of interpolated points,
        while preserving all non-noodling sections.

        Args:
            target_line: Object with a `points` attribute (list of point objects).
            segments: List of tuples (start_index, peak_index, end_index) for noodling segments.
            num_interpolated_points: Number of intermediate points to insert between start and end.

        Returns:
            A new list of points with noodling segments replaced, and the matching list of scores.
        """
        new_points = []
        new_scores = []
        i = 0
        n = len(target_line.points)

        for start, peak, end in segments:
            # Add all points up to the start of the noodling segment
            while i < start:
                new_points.append(target_line.points[i])
                new_scores.append(scores[i])
                i += 1

            # Skip the noodling segment and add interpolated points
            start_point = target_line.points[start]
            peak_point = target_line.points[peak]
            end_point = target_line.points[end]

            start_score = scores[start]
            peak_score = scores[peak]
            end_score = scores[end]

            # Interpolate between start and peak
            for j in range(num_interpolated_points + 2): # +2 to include start and peak
                t = inv_lerp(0, num_interpolated_points + 1, j)
                new_x = lerp(start_point.position[0], peak_point.position[0], t)
                new_y = lerp(start_point.position[1], peak_point.position[1], t)
                new_score = lerp(start_score, peak_score, t)
                new_point = RenderablePoint(position=(new_x, new_y), color=start_point.color)
                new_points.append(new_point)
                new_scores.append(new_score)

            # Interpolate between peak and end (skip peak to avoid duplication)
            for j in range(1, num_interpolated_points + 2): # +2 to include peak and end
                t = inv_lerp(0, num_interpolated_points + 1, j)
                new_x = lerp(peak_point.position[0], end_point.position[0], t)
                new_y = lerp(peak_point.position[1], end_point.position[1], t)
                new_score = lerp(peak_score, end_score, t)
                new_point = RenderablePoint(position=(new_x, new_y), color=start_point.color)
                new_points.append(new_point)
                new_scores.append(new_score)

            i = end + 1 # Skip to the end of the noodling segment

        # Add remaining points after the last noodling segment
        while i < n:
            new_points.append(target_line.points[i])
            new_scores.append(scores[i])
            i += 1

        return new_points, new_scores

    # @classmethod
    # def detect_noodling_sections(cls, line, segment_length=0.1, ratio_threshold=2.0):
    #     """
    #     Detects noodling sections in a LineString using the ratio of actual length to straight-line distance.
    #
    #     Args:
    #         line (LineString): Input LineString.
    #         segment_length (float): Length of each segment as a fraction of the total line length.
    #         ratio_threshold (float): Threshold for the ratio (actual_length / straight_line_distance).
    #
    #     Returns:
    #         list: List of noodling segments (as LineStrings).
    #     """
    #     noodling_sections = []
    #     total_length = line.length
    #     segment_start = 0.0
    #
    #     while segment_start < 1.0:
    #         segment_end = min(segment_start + segment_length, 1.0)
    #         segment = substring(line, segment_start, segment_end, normalized=True)
    #
    #         # Calculate straight-line distance between start and end points
    #         start_point = Point(segment.coords[0])
    #         end_point = Point(segment.coords[-1])
    #         straight_line_distance = start_point.distance(end_point)
    #
    #         # Calculate actual length of the segment
    #         actual_length = segment.length
    #
    #         # Check if the ratio exceeds the threshold
    #         if straight_line_distance > 0 and (actual_length / straight_line_distance) > ratio_threshold:
    #             noodling_sections.append(segment)
    #
    #         segment_start = segment_end
    #
    #     return noodling_sections

    # # Example usage:
    # line = LineString([(0, 0), (0.1, 0.1), (0.2, 0.2), (0.3, 0.1), (0.4, 0.2), (1, 1)])
    # noodling_segments = detect_noodling_sections(line, segment_length=0.2, area_threshold=0.02)
    # for i, segment in enumerate(noodling_segments):
    #     print(f"Noodling segment {i+1}: {segment}")
IndexAndOffset = Tuple[int, float]
@ -984,6 +1558,9 @@ class LineAnimationStack():
    def is_ready(self):
        return self.tail.is_ready()

    def is_running(self):
        return self.tail.is_running()


class LineAnimationSequenceStep(NamedTuple):
    line: LineAnimator
@ -1013,7 +1590,7 @@ class LineAnimationSequence():
    def start(self):
        self.start_at = time.perf_counter()
        self.idx = 0

    def as_renderable_line(self, dt: DeltaT):
@ -1059,3 +1636,34 @@ def layers_to_message(layers: RenderableLayers):
    # print( t2-t1,t3-t2)
    return s


def message_to_layers(s: str) -> RenderableLayers:
    """Decode the protobuf"""
    pb_layers = renderable_pb2.RenderableLayers()
    pb_layers.ParseFromString(s)

    layers = {}
    for n, pb_lines in pb_layers.layers.items():
        lines = []
        for pb_line in pb_lines.lines:
            points = []
            for pb_point in pb_line.points:
                color = SrgbaColor(
                    pb_point.color.red,
                    pb_point.color.green,
                    pb_point.color.blue,
                    pb_point.color.alpha,
                )
                point = RenderablePoint(
                    (pb_point.position.x, pb_point.position.y),
                    color
                )
                points.append(point)
            lines.append(RenderableLine(points))
        layers[n] = RenderableLines(lines, CoordinateSpace.WORLD)

    return layers
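
`message_to_layers` is the inverse of `layers_to_message` above, so a round trip should be lossless (up to float32 precision in the proto). A minimal sketch, assuming the `RenderablePoint`/`RenderableLine` constructors take the (position, color) shapes used in the decoder, with hypothetical point data:

```python
line = RenderableLine([
    RenderablePoint((0.0, 0.0), SrgbaColor(1, 0, 0, 1)),
    RenderablePoint((1.0, 2.0), SrgbaColor(1, 0, 0, 1)),
])
layers = {1: RenderableLines([line], CoordinateSpace.WORLD)}

msg = layers_to_message(layers)    # serialized protobuf bytes
restored = message_to_layers(msg)  # back to RenderableLayers
assert restored[1].lines[0].points[0].position == (0.0, 0.0)
```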

74 trap/node.py
@ -1,10 +1,11 @@
from collections import defaultdict
import logging
from logging.handlers import QueueHandler, QueueListener, SocketHandler
import multiprocessing
from multiprocessing.synchronize import Event as BaseEvent
from argparse import ArgumentParser, Namespace
import time
from typing import Any, Optional

import zmq
@ -14,6 +15,8 @@ from trap.timer import Timer
class Node():
    def __init__(self, config: Namespace, is_running: BaseEvent, fps_counter: CounterFpsSender):
        self.node_id = self.__class__.__name__.lower()

        self.config = config
        self.is_running = is_running
        self.fps_counter = fps_counter
@ -24,6 +27,11 @@ class Node():
        self.dt_since_last_tick = 0

        self.config_sock = self.sub(self.config.zmq_config_addr)
        self.config_init_sock = self.push(self.config.zmq_config_init_addr) # push socket, to announce this node to the Settings process
        self.settings = defaultdict(None)
        self.refresh_settings()

        self.setup()

    @classmethod
@ -41,13 +49,45 @@ class Node():
    def run(self):
        raise RuntimeError("Not implemented run()")

    def stop(self):
        """
        Called when the runloop is stopped. Override to clean up what was initiated in the start() and run() methods.
        """
        pass

    def refresh_settings(self):
        try:
            self.config_init_sock.send_string(self.node_id, zmq.NOBLOCK)
        except Exception as e:
            self.logger.warning('No settings socket available')
            self.logger.exception(e)

    def run_loop(self):
        """Use in run(), to check if it should keep looping
        Takes care of tick()'ing the iterations/second counter
        """
        self.tick()
        self.check_config()
        return self.is_running.is_set()

    def check_config(self):
        while True:
            try:
                config = self.config_sock.recv_json(zmq.NOBLOCK)
                for field, value in config.items():
                    self.settings[field] = value
            except zmq.ZMQError:
                # no more messages queued
                break

    def get_setting(self, name: str, default: Any):
        if name in self.settings:
            return self.settings[name]
        return default

    def run_loop_capped_fps(self, max_fps: float, warn_below_fps: float = 0.):
        """Use in run(), to check if it should keep looping
        Takes care of tick()'ing the iterations/second counter
@ -92,6 +132,14 @@ class Node():
                            type=int,
                            default=19996
                            )
        parser.add_argument('--zmq-config-addr',
                            help='Manually specify communication addr for the config messages',
                            type=str,
                            default="ipc:///tmp/feeds_config")
        parser.add_argument('--zmq-config-init-addr',
                            help='Manually specify communication addr for req-rep config messages',
                            type=str,
                            default="ipc:///tmp/feeds_config_rr")
        return parser
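
With these two addresses every node subscribes to live setting updates and announces itself on startup via `refresh_settings()`. A minimal sketch of the consumer side (the `mynode.threshold` key is hypothetical):

```python
from trap.node import Node

class MyNode(Node):
    def run(self):
        while self.run_loop():  # run_loop() also drains pending config messages via check_config()
            threshold = self.get_setting('mynode.threshold', 0.5)
            # ... use the current value; it updates whenever the Settings node publishes
```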
@ -110,10 +158,27 @@ class Node():
        sock.bind(addr)
        return sock

    def push(self, addr: str):
        "Push-pull pair"
        sock = self.zmq_context.socket(zmq.PUSH)
        # sock.setsockopt(zmq.LINGER, 0)
        sock.connect(addr)
        return sock

    def pull(self, addr: str):
        "Push-pull pair"
        sock = self.zmq_context.socket(zmq.PULL)
        sock.bind(addr)
        return sock

    @classmethod
    def start(cls, config: Namespace, is_running: BaseEvent, timer_counter: Optional[Timer]):
        instance = cls(config, is_running, timer_counter)
        try:
            instance.run()
        except Exception as e:
            instance.logger.exception(f"{e}")
        instance.stop()
        instance.logger.info("Stopping")

    @classmethod
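
The asymmetry is deliberate: `pull()` binds while `push()` connects, so a single Settings process can collect announcements from any number of nodes. A minimal pyzmq sketch of that fan-in (the address is hypothetical):

```python
import zmq

ctx = zmq.Context()
collector = ctx.socket(zmq.PULL)   # one receiver binds...
collector.bind("ipc:///tmp/demo_init")

sender = ctx.socket(zmq.PUSH)      # ...many senders connect
sender.connect("ipc:///tmp/demo_init")
sender.send_string("lidar")

print(collector.recv_string())  # "lidar"
```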
@ -139,11 +204,14 @@ def setup_logging(config: Namespace):
    logging.captureWarnings(True)
    # root_logger.setLevel(logging.NOTSET) # to send all records to cutelog
    socket_handler = SocketHandler(config.remote_log_addr, config.remote_log_port)
    # print(socket_handler.host, socket_handler.port)
    socket_handler.setLevel(logging.NOTSET)
    log_handlers.append(socket_handler)

    logging.basicConfig(
        level=loglevel,
        handlers=log_handlers, # [queue_handler]
        format="%(asctime)s %(levelname)s:%(name)s:%(message)s",
        datefmt="%H:%M:%S"
    )
@ -6,22 +6,26 @@ import pathlib
import pickle
import random
import time
from typing import List
import warnings
from argparse import ArgumentParser, Namespace
from multiprocessing import Event

import dill
import numpy as np
import shapely
import torch
import zmq
from trajectron.environment import Environment, Scene, GeometricMap
from trajectron.model.model_registrar import ModelRegistrar
from trajectron.model.online.online_trajectron import OnlineTrajectron
from trajectron.utils import prediction_output_to_trajectories

from trap.frame_emitter import DataclassJSONEncoder, Frame
from trap.lines import load_lines_from_svg
from trap.node import Node
from trap.tracker import Smoother
from trap.utils import ImageMap

logger = logging.getLogger("trap.prediction")
@ -50,19 +54,21 @@ def create_online_env(env, hyperparams, scene_idx, init_timestep):
                                       init_timestep + 1),
                           state=hyperparams['state'])
    online_scene.robot = test_scene.robot
    radius = {k: 0 for k, v in env.attention_radius.items()}

    online_scene.calculate_scene_graph(attention_radius=radius,
                                       edge_addition_filter=hyperparams['edge_addition_filter'],
                                       edge_removal_filter=hyperparams['edge_removal_filter'])

    return Environment(node_type_list=env.node_type_list,
                       standardization=env.standardization,
                       scenes=[online_scene],
                       attention_radius=radius,
                       robot_type=env.robot_type)


def get_maps_for_input(input_dict, scene: Scene, hyperparams, device):
    scene_maps: List[ImageMap] = list()
    scene_pts = list()
    heading_angles = list()
    patch_sizes = list()
@ -84,10 +90,11 @@ def get_maps_for_input(input_dict, scene, hyperparams, device):
        else:
            heading_angle = None

        scene_map: ImageMap = scene.map[node.type]
        scene_map.set_bounds() # update old pickled maps
        # map_point = x[-1, :2]
        map_point = x[:2]
        # map_point = x[:2].clip(0) # prevent crash for out-of-map points

        patch_size = hyperparams['map_encoder'][node.type]['patch_size']
@ -104,11 +111,16 @@ def get_maps_for_input(input_dict, scene, hyperparams, device):
    # print(scene_maps, patch_sizes, heading_angles)
    # print(scene_pts)
    try:
        maps = scene_maps[0].get_cropped_maps_from_scene_map_batch(scene_maps,
                                                                   scene_pts=torch.Tensor(scene_pts),
                                                                   patch_size=patch_sizes[0],
                                                                   rotation=heading_angles,
                                                                   device='cpu')
    except Exception as e:
        # print(scene_maps)
        logger.warning(f"Crash on getting maps for points: {scene_pts=} {heading_angles=} {patch_size=}")
        raise e

    maps_dict = {node: maps[[i]].to(device) for i, node in enumerate(nodes_with_maps)}
    return maps_dict
@ -154,6 +166,15 @@ class PredictionServer(Node):
        self.prediction_socket = self.pub(self.config.zmq_prediction_addr)
        self.external_predictions = not self.config.zmq_prediction_addr.startswith("ipc://")

        self.cutoff_shape = None
        if self.config.cutoff_map:
            self.cutoff_line = load_lines_from_svg(self.config.cutoff_map, 100, '')[0]
            self.cutoff_shape = shapely.Polygon([p.position for p in self.cutoff_line.points])
            logger.info(f"{self.cutoff_shape}")

    def send_frame(self, frame: Frame):
        if self.external_predictions:
@ -176,7 +197,8 @@ class PredictionServer(Node):
        # model_dir = 'models/models_04_Oct_2023_21_04_48_eth_vel_ar3'

        # Load hyperparameters from json
        # config_file = os.path.join(self.config.model_dir, self.config.conf)
        config_file = self.config.conf
        if not os.path.exists(config_file):
            raise ValueError('Config json not found!')
        with open(config_file, 'r') as conf_json:
@ -216,6 +238,9 @@ class PredictionServer(Node):
        logger.info(f"Basing online env on {eval_scene=} -- loaded from {self.config.eval_data_dict}")
        online_env = create_online_env(eval_env, hyperparams, scene_idx, init_timestep)

        print("overriding attention radius")
        online_env.attention_radius = {(online_env.NodeType.PEDESTRIAN, online_env.NodeType.PEDESTRIAN): 0.1}

        # auto-find highest iteration
        model_registrar = ModelRegistrar(self.config.model_dir, self.config.eval_device)
        model_iterations = pathlib.Path(self.config.model_dir).glob('model_registrar-*.pt')
@ -289,6 +314,7 @@ class PredictionServer(Node):
            input_dict = {}
            for identifier, track in frame.tracks.items():
                # if len(trajectory['history']) < 7:
                #     # TODO: these trajectories should still be in the output, but without predictions
                #     continue
@ -305,7 +331,16 @@ class PredictionServer(Node):
                if len(track.history) < 2:
                    continue

                node = track.to_trajectron_node(frame.camera, online_env)

                if self.cutoff_shape:
                    position = shapely.Point(node.data.data[-1][:2])
                    if not shapely.contains(self.cutoff_shape, position):
                        # logger.debug(f"Skip position {position}")
                        continue

                # print(node.data.data[-1])
                input_dict[node] = np.array(object=node.data.data[-1])
                # print("history", node.data.data[-10:])
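
The cutoff test uses shapely 2.x's top-level predicates. A self-contained sketch of the same containment check, with a hypothetical square boundary:

```python
import shapely

boundary = shapely.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])

print(shapely.contains(boundary, shapely.Point(4.2, 5.0)))   # True: keep for prediction
print(shapely.contains(boundary, shapely.Point(12.0, 5.0)))  # False: skipped
```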
@ -344,6 +379,7 @@ class PredictionServer(Node):
                # )

                # input_dict[node] = np.array(object=[x[-1],y[-1],vx[-1],vy[-1],ax[-1],ay[-1]])
                # break # only on

            # print(input_dict)
@ -360,9 +396,11 @@ class PredictionServer(Node):
                continue

            maps = None
            start_maps = time.time()
            if hyperparams['use_map_encoding']:
                maps = get_maps_for_input(input_dict, eval_scene, hyperparams, device=self.config.eval_device)

            # print(maps)

            # robot_present_and_future = None
@ -390,7 +428,8 @@ class PredictionServer(Node):
                gmm_mode=self.config.gmm_mode, # "If True: The mode of the Gaussian Mixture Model (GMM) is sampled (see trajectron.model.mgcvae.py)"
                z_mode=self.config.z_mode # "Predictions from the model's most-likely high-level latent behavior mode" (see trajectron.models.components.discrete_latent:sample_p(most_likely_z=z_mode))
            )
            print(len(dists), len(preds))
            intermediate = time.time()
            # unsure what this bit from online_prediction.py does:
            # detailed_preds_dict = dict()
            # for node in eval_scene.nodes:
@ -410,8 +449,8 @@ class PredictionServer(Node):
            end = time.time()
            logger.debug("took %.2f s (= %.2f Hz), maps: %.2f, forward: %.2f w/ %d nodes and %d edges -- init: %.2f s" % (end - start,
                         1. / (end - start), (start - start_maps) / (end - start), (intermediate - start) / (end - start), len(trajectron.nodes),
                         trajectron.scene_graph.get_num_edges(), start - t_init))

            # if self.config.center_data:
@ -433,7 +472,7 @@ class PredictionServer(Node):
            futures_dict = futures_dict[ts_key]

            response = {}
            # logger.debug(f"{histories_dict=}")
            for node in histories_dict:
                history = histories_dict[node]
                # future = futures_dict[node] # ground truth dict
@ -441,7 +480,9 @@ class PredictionServer(Node):
                # print('preds', len(predictions[0][0]))

                if not len(history) or np.isnan(history[-1]).any():
                    logger.warning(f'skip for no history for {node} @ {ts_key} [{len(prediction_dict)=}, {len(histories_dict)=}, {len(futures_dict)=}]')
                    # logger.info(f"{preds=}")
                    continue

                # response[node.id] = {
@ -499,9 +540,9 @@ class PredictionServer(Node):
        # default='../Trajectron-plus-plus/experiments/pedestrians/models/models_04_Oct_2023_21_04_48_eth_vel_ar3')

        inference_parser.add_argument("--conf",
                            help="path to json config file for hyperparameters",
                            type=pathlib.Path,
                            default='EXPERIMENTS/config.json')

        # Model Parameters (hyperparameters)
        inference_parser.add_argument("--offline_scene_graph",
@ -556,12 +597,12 @@ class PredictionServer(Node):
        inference_parser.add_argument('--batch_size',
                            help='training batch size',
                            type=int,
                            default=512)

        inference_parser.add_argument('--k_eval',
                            help='how many samples to take during evaluation',
                            type=int,
                            default=1)

        # Data Parameters
        inference_parser.add_argument("--eval_data_dict",
@ -583,7 +624,7 @@ class PredictionServer(Node):
        inference_parser.add_argument("--eval_device",
                            help="what device to use during inference",
                            type=str,
                            default="cuda:0")

        inference_parser.add_argument('--seed',
@ -624,6 +665,11 @@ class PredictionServer(Node):
                            help="Center data around cx and cy. Should also be used when processing data",
                            action='store_true')

        inference_parser.add_argument('--cutoff-map',
                            help='specify a map (svg-file) with the projection boundaries; track positions outside it are not selected for prediction',
                            type=str,
                            default="../DATASETS/hof-lidar/map_hof.svg")

        return inference_parser
@ -8,16 +8,18 @@ import time
from xml.dom.pulldom import default_bufsize
from attr import dataclass
import cv2
import noise
import numpy as np
import pandas as pd
import dill
import tqdm
import argparse
from typing import Dict, List, Optional

from trap.base import Track
from trap.config import CameraAction, HomographyAction
from trap.frame_emitter import Camera
from trap.tracker import FinalDisplacementFilter, Noiser, RandomOffset, Smoother, TrackReader

#sys.path.append("../../")
from trajectron.environment import Environment, Scene, Node
@ -72,22 +74,29 @@ class TrackIteration:
    smooth: bool
    step_size: int
    step_offset: int
    noisy: bool = False
    offset: bool = False

    @classmethod
    def iteration_variations(cls, smooth = True, toggle_smooth=True, sample_step_size=1, noisy_variations=0, offset_variations=0):
        iterations: List[TrackIteration] = []
        for i in range(sample_step_size):
            for n in range(noisy_variations + 1):
                for f in range(offset_variations + 1):
                    iterations.append(TrackIteration(smooth, sample_step_size, i, noisy=bool(n), offset=bool(f)))
                    if smooth and toggle_smooth:
                        iterations.append(TrackIteration(not smooth, sample_step_size, i, noisy=bool(n), offset=bool(f)))
        return iterations
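
Each track therefore expands into step_size × (noisy_variations+1) × (offset_variations+1) iterations, doubled again when smoothing is toggled. For example:

```python
variations = TrackIteration.iteration_variations(
    smooth=True, toggle_smooth=True,
    sample_step_size=2, noisy_variations=1, offset_variations=1,
)
# 2 step offsets x 2 noise x 2 offset x 2 (smoothed + unsmoothed) = 16
print(len(variations))  # 16
```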
# maybe_makedirs('trajectron-data')
|
# maybe_makedirs('trajectron-data')
|
||||||
# for desired_source in [ 'hof2', ]:# ,'hof-maskrcnn', 'hof-yolov8', 'VIRAT-0102-parsed', 'virat-resnet-keypoints-full']:
|
# for desired_source in [ 'hof2', ]:# ,'hof-maskrcnn', 'hof-yolov8', 'VIRAT-0102-parsed', 'virat-resnet-keypoints-full']:
|
||||||
|
|
||||||
def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, cm_to_m: bool, center_data: bool, bin_positions: bool, camera: Camera, step_size: int, filter_displacement:float, map_img_path: Optional[Path]):
|
def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, noise_tracks: int, offset_tracks: int, center_data: bool, bin_positions: bool, camera: Camera, step_size: int, filter_displacement:float, map_img_path: Optional[Path]):
|
||||||
name += f"-nostep" if step_size == 1 else f"-step{step_size}"
|
name += f"-nostep" if step_size == 1 else f"-step{step_size}"
|
||||||
name += f"-conv{smooth_window}" if smooth_tracks else f"-nosmooth"
|
# name += f"-conv{smooth_window}" if smooth_tracks else f"-nosmooth"
|
||||||
|
name += f"-kalsmooth" if smooth_tracks else f"-nosmooth"
|
||||||
|
name += f"-noise{noise_tracks}" if noise_tracks else f""
|
||||||
|
name += f"-offsets{offset_tracks}" if offset_tracks else f""
|
||||||
name += f"-f{filter_displacement}" if filter_displacement > 0 else ""
|
name += f"-f{filter_displacement}" if filter_displacement > 0 else ""
|
||||||
name += "-map" if map_img_path else "-nomap"
|
name += "-map" if map_img_path else "-nomap"
|
||||||
name += f"-{datetime.date.today()}"
|
name += f"-{datetime.date.today()}"
|
||||||
|
|
@ -98,9 +107,15 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
        if not map_img_path.exists():
            raise RuntimeError(f"Map image does not exist {map_img_path}")

        print(f"Using map {map_img_path}")

        type_map = {}
        # TODO)) For now, assume the map is a 100x scale of the world coordinates (i.e. 100px per meter)
        # thus when we do a homography of 5px per meter, scale down by 20
        map_H_path = map_img_path.with_suffix('.json')
        if map_H_path.exists():
            homography_matrix = np.loadtxt(map_H_path)
        else:
            homography_matrix = np.array([
                [5, 0, 0],
                [0, 5, 0],
@ -123,21 +138,37 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
    skipped_for_error = 0
    created = 0

    # smoother = Smoother(window_len=smooth_window, convolution=True) if smooth_tracks else None
    smoother = Smoother(convolution=False) if smooth_tracks else None
    noiser = Noiser(amplitude=.1) if noise_tracks else None

    reader = TrackReader(src_dir, camera.fps)
    tracks = [t for t in reader]
    print(f"Unfiltered total: {len(tracks)} tracks")
    if filter_displacement > 0:
        filter = FinalDisplacementFilter(filter_displacement)
        tracks = filter.apply(tracks, camera)
        print(f"Filtered: {len(tracks)} tracks")

    skip_idxs = []
    for idx, track in enumerate(tracks):
        track_history = track.get_projected_history(camera=camera)
        distances = np.sqrt(np.sum(np.diff(track_history, axis=0)**2, axis=1))
        if any(distances > 3):
            skip_idxs.append(idx)
    for idx in reversed(skip_idxs): # pop back-to-front so the remaining indexes stay valid
        tracks.pop(idx)
    print(f"Filtered {len(skip_idxs)} tracks which contained leaps")

    total = len(tracks)
    bar = tqdm.tqdm(total=total)

    destinations = {
        'train': int(total * .91),
        'val': int(total * .08),
        'test': int(total * .01), # I don't really care about this
    }

    max_track = reader.get(str(max([int(k) for k in reader._tracks.keys()])))
@ -153,7 +184,7 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
    dt3 = RollingAverage()
    dt4 = RollingAverage()

    sets: Dict[str, List[Track]] = {}
    offset = 0
    for data_class, nr in destinations.items():
        # TODO)) think of a way to shuffle while keeping scenes
@ -163,7 +194,8 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
    print(f"Camera FPS: {camera.fps}, actual fps: {camera.fps/step_size} (or {(1/camera.fps)*step_size})")

    names: Dict[str, Path] = {}
    max_pos = 0

    for data_class, nr_of_items in destinations.items():
        env = Environment(node_type_list=['PEDESTRIAN'], standardization=standardization)
@ -182,7 +214,9 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
        # scene = None

        scene_nodes = defaultdict(lambda: [])
        variations = TrackIteration.iteration_variations(smooth_tracks, True, step_size, noise_tracks, offset_tracks)

        print(f"Create {len(variations)} variations")

        for i, track in enumerate(sets[data_class]):
            bar.update()
@ -210,13 +244,20 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
            interpolated_track = track.get_with_interpolated_history()
            b = time.time()

            for variation_nr, iteration_settings in enumerate(variations):
                track = interpolated_track

                if iteration_settings.noisy:
                    track = noiser.apply_track(track)
                if iteration_settings.offset:
                    offset = RandomOffset(amplitude=.1)
                    track = offset.apply_track(track)
                if iteration_settings.smooth:
                    track = smoother.smooth_track(track)
                    # track = Smoother(smooth_window, False).smooth_track(track)
                c = time.time()

                if iteration_settings.step_size > 1:
@ -227,6 +268,7 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
                # track.get_projected_history(H=None, camera=self.config.camera)
                node = track.to_trajectron_node(camera, env)
                max_pos = max(node.data.data[0][0], max_pos)

                data_class = time.time()
@ -288,7 +330,8 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
            # print(scene.nodes[0].first_timestep)

        print(f'Processed {len(scenes)} scenes with {sum([len(s.nodes) for s in scenes])} nodes for data class {data_class}')
        # print("MAXIMUM!!", max_pos)

        env.scenes = scenes
@ -298,9 +341,29 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
        with open(data_dict_path, 'wb') as f:
            dill.dump(env, f, protocol=dill.HIGHEST_PROTOCOL)

    bar.close()

    # print(f"Linear: {l}")
    # print(f"Non-Linear: {nl}")
    print(f"error: {skipped_for_error}, used: {created}")
    print("Run with")
    target_model_dir = (dst_dir / "../models/").resolve()
    target_config = (dst_dir / "../trajectron.json").resolve()
    # set eval_every very high, because we're not interested in theoretical evaluations, and we don't mind overfitting
    print(f"""
    uv run trajectron_train --eval_every 200 \\
        --train_data_dict {names['train'].name} \\
        --eval_data_dict {names['val'].name} \\
        --offline_scene_graph no --preprocess_workers 8 \\
        --log_dir {target_model_dir} \\
        --log_tag _{name} \\
        --train_epochs 100 \\
        --conf {target_config} \\
        --data_dir {dst_dir} \\
        {"--map_encoding" if map_img_path else ""} \\
        --no_edge_encoding
    """)

    return names


def main():
@ -309,6 +372,8 @@ def main():
    parser.add_argument("--dst-dir", "-d", type=Path, required=True, help="Destination directory to store parsed .pkl files (typically 'trajectron-data')")
    parser.add_argument("--name", "-n", type=str, required=True, help="Identifier to prefix the output .pkl files with (result is NAME-train.pkl, NAME-test.pkl)")
    parser.add_argument("--smooth-tracks", action='store_true', help=f"Enable smoother. Set to {smooth_window} frames")
    parser.add_argument("--noise-tracks", type=int, default=0, help=f"Enable Noiser. Provide a number for how many noisy variations to generate")
    parser.add_argument("--offset-tracks", type=int, default=0, help=f"Enable Offset. Provide a number for how many random offset variations to generate")
    parser.add_argument("--cm-to-m", action='store_true', help=f"If homography is in cm, convert tracked points to meter for better results")
    parser.add_argument("--center-data", action='store_true', help=f"Normalise around center")
    parser.add_argument("--bin-positions", action='store_true', help=f"Experiment to put round positions to a grid")
@ -346,7 +411,8 @@ def main():
        args.dst_dir,
        args.name,
        args.smooth_tracks,
        args.noise_tracks,
        args.offset_tracks,
        args.center_data,
        args.bin_positions,
        args.camera,
50 trap/renderable.proto Normal file
@ -0,0 +1,50 @@
syntax = "proto3";

package renderable;

// Enum for coordinate spaces
enum CoordinateSpace {
  UNDEFINED = 0;
  CAMERA = 1;
  UNDISTORTED_CAMERA = 2;
  WORLD = 3;
  LASER = 4;
  RAW_LASER = 8;
}

// Message for RenderablePosition (Tuple[float, float])
message RenderablePosition {
  float x = 1;
  float y = 2;
}

// Message for SrgbaColor
message SrgbaColor {
  float red = 1;
  float green = 2;
  float blue = 3;
  float alpha = 4;
}

// Message for RenderablePoint
message RenderablePoint {
  RenderablePosition position = 1;
  SrgbaColor color = 2;
}

// Message for RenderableLine
message RenderableLine {
  repeated RenderablePoint points = 1;
}

// Message for RenderableLines
message RenderableLines {
  repeated RenderableLine lines = 1;
  CoordinateSpace space = 2;
}

// Message to represent RenderableLayers (Dict[int, RenderableLines])
message RenderableLayers {
  map<int32, RenderableLines> layers = 1;
}
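
The `renderable_pb2` module imported by `layers_to_message`/`message_to_layers` is generated from this file; with the protobuf compiler on PATH that is presumably a plain `protoc --python_out=. trap/renderable.proto`, rerun whenever the schema changes.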
175 trap/settings.py Normal file
@ -0,0 +1,175 @@
|
||||||
|
from argparse import ArgumentParser
|
||||||
|
import json
|
||||||
|
import math
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
import zmq
|
||||||
|
from trap.node import Node
|
||||||
|
|
```python
import dearpygui.dearpygui as dpg


class Settings(Node):
    """
    Quickndirty gui to change some settings ad-hoc
    no storage of values, no defaults. No detection of lost nodes, or sending config on them starting
    """

    def setup(self):
        self.config_sock.close()  # setup by default for all nodes, but we want to publish
        self.config_sock = self.pub(self.config.zmq_config_addr)

        self.config_init_sock.close()  # setup by default for all nodes, but we want to publish
        self.config_init_sock = self.pull(self.config.zmq_config_init_addr)

        self.settings_fields = {}
        self.settings: Dict[str, Any] = {}

        self.load()

        dpg.create_context()
        dpg.create_viewport(title='Trap settings', width=600, height=1200)
        dpg.setup_dearpygui()

        with dpg.window(label="General", pos=(0, 0)):
            dpg.add_text(f"Settings from {self.config.settings_file}")
            dpg.add_button(label="Save", callback=self.save)

        with dpg.window(label="Renderer", pos=(0, 600)):
            for i in range(8):
                self.register_setting(f'stagerenderer.layer.{i}', dpg.add_checkbox(label=f"layer {i}", default_value=self.get_setting(f'stagerenderer.layer.{i}', True), callback=self.on_change))
            self.register_setting(f'stagerenderer.scale', dpg.add_slider_float(label="scale", default_value=self.get_setting(f'stagerenderer.scale', 1), max_value=3, callback=self.on_change))
            self.register_setting(f'stagerenderer.dx', dpg.add_slider_int(label="dx", default_value=self.get_setting(f'stagerenderer.dx', 0), min_value=-300, max_value=300, callback=self.on_change))
            self.register_setting(f'stagerenderer.dy', dpg.add_slider_int(label="dy", default_value=self.get_setting(f'stagerenderer.dy', 0), min_value=-300, max_value=300, callback=self.on_change))
            self.register_setting(f'stagerenderer.fade', dpg.add_slider_float(label="fade factor", default_value=self.get_setting(f'stagerenderer.fade', 0.27), max_value=1, callback=self.on_change))

        with dpg.window(label="Stage", pos=(150, 0)):
            self.register_setting(f'stage.fps', dpg.add_slider_int(label="FPS cap", default_value=self.get_setting(f'stage.fps', 30), callback=self.on_change))
            self.register_setting(f'stage.prediction_interval', dpg.add_slider_int(label="prediction interval", default_value=self.get_setting('stage.prediction_interval', 18), callback=self.on_change))
            self.register_setting(f'stage.loitering_animation', dpg.add_checkbox(label="loitering_animation", default_value=self.get_setting('stage.loitering_animation', True), callback=self.on_change))

        with dpg.window(label="Lidar", pos=(0, 100), autosize=True):
            self.register_setting(f'lidar.crop_map_boundaries', dpg.add_checkbox(label="crop_map_boundaries", default_value=self.get_setting(f'lidar.crop_map_boundaries', True), callback=self.on_change))
            self.register_setting(f'lidar.viz_cropping', dpg.add_checkbox(label="viz_cropping", default_value=self.get_setting(f'lidar.viz_cropping', True), callback=self.on_change))
            # self.register_setting(f'lidar.voxel_downsample', dpg.add_checkbox(label="voxel_downsample", default_value=self.get_setting(f'lidar.voxel_downsample', True), callback=self.on_change))
            self.register_setting(f'lidar.tracking_enabled', dpg.add_checkbox(label="tracking_enabled", default_value=self.get_setting(f'lidar.tracking_enabled', True), callback=self.on_change))
            self.register_setting(f'lidar.kalman_factor', dpg.add_slider_float(label="kalman_factor", default_value=self.get_setting(f'lidar.kalman_factor', 1.3), max_value=3, callback=self.on_change))

            dpg.add_separator(label="Clustering")
            cluster_methods = ("birch", "optics", "dbscan")
            self.register_setting('lidar.cluster.method', dpg.add_combo(label="Method", items=cluster_methods, default_value=self.get_setting('lidar.cluster.method', default='dbscan'), callback=self.on_change))
            self.register_setting(f'lidar.eps', dpg.add_slider_float(label="DBSCAN epsilon", default_value=self.get_setting(f'lidar.eps', 0.3), max_value=1, callback=self.on_change))
            self.register_setting(f'lidar.min_samples', dpg.add_slider_int(label="DBSCAN min_samples", default_value=self.get_setting(f'lidar.min_samples', 8), max_value=30, callback=self.on_change))
            dpg.add_text("When using BIRCH, the resulting subclusters can be postprocessed by DBSCAN:")
            self.register_setting('lidar.birch_process_subclusters', dpg.add_checkbox(label="Process subclusters", default_value=self.get_setting('lidar.birch_process_subclusters', True), callback=self.on_change))
            self.register_setting('lidar.birch_threshold', dpg.add_slider_float(label="Threshold", default_value=self.get_setting('lidar.birch_threshold', 1), max_value=2.5, callback=self.on_change))
            self.register_setting('lidar.birch_branching_factor', dpg.add_slider_int(label="Branching factor", default_value=self.get_setting('lidar.birch_branching_factor', 50), max_value=100, callback=self.on_change))

            dpg.add_separator(label="Cluster filter")
            self.register_setting(f'lidar.min_box_area', dpg.add_slider_float(label="min_box_area", default_value=self.get_setting(f'lidar.min_box_area', .1), min_value=0, max_value=1, callback=self.on_change))
            self.register_setting(f'lidar.max_box_area', dpg.add_slider_float(label="max_box_area", default_value=self.get_setting(f'lidar.max_box_area', 5), min_value=.5, max_value=10, callback=self.on_change))

        for i, lidar in enumerate(["192.168.1.16", "192.168.0.10"]):
            name = lidar.replace(".", "_")
            with dpg.window(label=f"Lidar {lidar}", pos=(i * 300, 450), autosize=True):
                # dpg.add_text("test")
                # dpg.add_input_text(label="string", default_value="Quick brown fox")
                self.register_setting(f'lidar.{name}.enabled', dpg.add_checkbox(label="enabled", default_value=self.get_setting(f'lidar.{name}.enabled', True), callback=self.on_change))
                self.register_setting(f'lidar.{name}.rot_x', dpg.add_slider_float(label="rot_x", default_value=self.get_setting(f'lidar.{name}.rot_x', 0), max_value=math.pi * 2, callback=self.on_change))
                self.register_setting(f'lidar.{name}.rot_y', dpg.add_slider_float(label="rot_y", default_value=self.get_setting(f'lidar.{name}.rot_y', 0), max_value=math.pi * 2, callback=self.on_change))
                self.register_setting(f'lidar.{name}.rot_z', dpg.add_slider_float(label="rot_z", default_value=self.get_setting(f'lidar.{name}.rot_z', 0), max_value=math.pi * 2, callback=self.on_change))
                self.register_setting(f'lidar.{name}.trans_x', dpg.add_slider_float(label="trans_x", default_value=self.get_setting(f'lidar.{name}.trans_x', 0), min_value=-15, max_value=15, callback=self.on_change))
                self.register_setting(f'lidar.{name}.trans_y', dpg.add_slider_float(label="trans_y", default_value=self.get_setting(f'lidar.{name}.trans_y', 0), min_value=-15, max_value=15, callback=self.on_change))
                self.register_setting(f'lidar.{name}.trans_z', dpg.add_slider_float(label="trans_z", default_value=self.get_setting(f'lidar.{name}.trans_z', 0), min_value=-15, max_value=15, callback=self.on_change))

        self.send_for_prefix("")  # spread the defaults

        dpg.show_viewport()

    def stop(self):
        dpg.destroy_context()

    def check_config(self):
        # override node function to disable it
        pass

    def refresh_settings(self):
        # override node function to disable it
        pass

    def get_setting(self, name: str, default: Any):
        """
        Automatically configure the value with the default when requesting it
        """
        r = super().get_setting(name, default)
        self.settings[name] = r
        return r

    def register_setting(self, name: str, field: int):
        self.settings_fields[field] = name

    def on_change(self, sender, value, user_data=None):
        # print(sender, app_data, user_data)
        setting = self.settings_fields[sender]
        print(setting, value)
        self.settings[setting] = value
        self.config_sock.send_json({setting: value})

    def send_for_prefix(self, prefix: str):
        self.config_sock.send_json(self.get_by_prefix(prefix))

    def save(self):
        with self.config.settings_file.open('w') as fp:
            self.logger.info(f"Save to {self.config.settings_file}")
            json.dump(self.settings, fp)

    def get_by_prefix(self, prefix: str) -> Dict[str, Any]:
        return {key: value for key, value in self.settings.items() if key.startswith(prefix)}

    def load(self) -> Dict[str, Any]:
        if not self.config.settings_file.exists():
            self.logger.info(f"No config at {self.config.settings_file}")
            return {}

        self.logger.info(f"Loading from {self.config.settings_file}")
        with self.config.settings_file.open('r') as fp:
            self.settings = json.load(fp)

    def run(self):
        # below replaces start_dearpygui()
        while self.run_loop() and dpg.is_dearpygui_running():
            # 1) receive init requests
            try:
                init_msg = self.config_init_sock.recv_string(zmq.NOBLOCK)
                self.logger.info(f"Send init for {init_msg}")
                print('init', init_msg)
                self.send_for_prefix(init_msg)
            except zmq.ZMQError as e:
                # no msgs
                pass

            dpg.render_dearpygui_frame()

    @classmethod
    def arg_parser(cls):
        argparser = ArgumentParser()
        argparser.add_argument('--settings-file',
                            help='Where to store settings',
                            type=Path,
                            default=Path("./settings.json"))
        return argparser
```
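Taken together, `setup()` and `run()` define a small config protocol: every change is published as a one-entry JSON dict on the PUB socket, and a prefix string received on the PULL socket is answered by re-publishing all matching settings via `send_for_prefix()`. A client node would therefore subscribe on one address and push its prefix to the other. The sketch below illustrates that handshake with plain pyzmq; the two addresses are placeholders, since the actual defaults for `zmq_config_addr` and `zmq_config_init_addr` are defined elsewhere in the repository.

```python
import zmq

# Placeholder addresses: the real defaults of the node's zmq_config_addr and
# zmq_config_init_addr options are defined elsewhere in the repository.
CONFIG_ADDR = "ipc:///tmp/feeds_config"
CONFIG_INIT_ADDR = "ipc:///tmp/feeds_config_init"

ctx = zmq.Context.instance()

sub = ctx.socket(zmq.SUB)
sub.connect(CONFIG_ADDR)
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # receive every settings dict

init = ctx.socket(zmq.PUSH)
init.connect(CONFIG_INIT_ADDR)
init.send_string("lidar.")  # ask Settings to (re)send everything under this prefix

while True:
    update = sub.recv_json()  # e.g. {"lidar.eps": 0.3, "lidar.min_samples": 8}
    for name, value in update.items():
        print(name, value)
```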
trap/stage.py (521 lines changed)
```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations

+from abc import abstractmethod
 from argparse import ArgumentParser
 from collections import defaultdict
 from dataclasses import dataclass
@@ -8,7 +9,9 @@ from functools import partial
 import json
 import logging
 from math import inf
+import math
 from pathlib import Path
+import random
 import time
 import threading
 from typing import Dict, Generator, List, Optional, Type, TypeVar
@@ -19,9 +22,10 @@ import zmq
 from trap.anomaly import DiffSegment, calc_anomaly, calculate_loitering_scores
 from trap.base import CameraAction, DataclassJSONEncoder, Frame, HomographyAction, ProjectedTrack, Track
 from trap.counter import CounterSender
-from trap.lines import AppendableLine, AppendableLineAnimator, Coordinate, CropLine, DashedLine, DeltaT, FadeOutJitterLine, FadeOutLine, FadedTailLine, LineAnimationStack, LineAnimator, NoiseLine, RenderableLayers, RenderableLine, RenderableLines, SegmentLine, SimplifyMethod, SrgbaColor, StaticLine, layers_to_message, load_lines_from_svg
+from trap.lines import AppendableLine, AppendableLineAnimator, Coordinate, CoordinateSpace, CropAnimationLine, CropLine, DashedLine, DeltaT, FadeOutJitterLine, FadeOutLine, FadedEndsLine, FadedTailLine, LineAnimationStack, LineAnimator, NoiseLine, RenderableLayers, RenderableLine, RenderableLines, RotatingLine, SegmentLine, SimplifyLine, SimplifyMethod, SrgbaColor, StartFromClosestPoint, StaticLine, layers_to_message, load_lines_from_svg
 from trap.node import Node
 from trap.track_history import TrackHistory
+from trap.utils import lerp

 logger = logging.getLogger('trap.stage')
@@ -34,15 +38,15 @@ OPTION_TRACK_NOISE = False
 TRACK_ASSUMED_FPS = 12

-TAKEOVER_FADEOUT = 3
 LOST_FADEOUT = 2 # seconds
-PREDICTION_INTERVAL: float|None = 20 # frames
+PREDICTION_INTERVAL: int|None = int(TRACK_ASSUMED_FPS * 1.2) # frames
 PREDICTION_FADE_IN: float = 3
 PREDICTION_FADE_SLOPE: float = -10
 PREDICTION_FADE_AFTER_DURATION: float = 8 # seconds
 PREDICTION_END_FADE = 2 #frames
 # TRACK_MAX_POINTS = 100
-TRACK_FADE_AFTER_DURATION = 15. # seconds
+TRACK_FADE_AFTER_DURATION = 9. # seconds
 TRACK_END_FADE = 30 # points
 TRACK_FADE_ASSUME_FPS = TRACK_ASSUMED_FPS
@@ -67,25 +71,79 @@ class SceneInfo:
     priority: int
     description: str = ""
     takeover_possible: bool = False # whether to allow for other scenarios to steal the stage
+    takeover_possible_after: float = -1

 class ScenarioScene(Enum):
     DETECTED = SceneInfo(4, "First detection")
-    SUBSTANTIAL = SceneInfo(6, "Multiple detections")
-    FIRST_PREDICTION = SceneInfo(10, "Prediction is ready")
-    CORRECTED_PREDICTION = SceneInfo(11, "Multiple predictions")
-    LOITERING = SceneInfo(7, "Foundto be loitering", takeover_possible=True)
-    PLAY = SceneInfo(7, description="After many predictions; just fooling around", takeover_possible=True)
-    LOST = SceneInfo(-1, description="Track lost", takeover_possible=True)
+    TRACKED = SceneInfo(6, "Multiple detections")
+    PREDICTION_AVAILABLE = SceneInfo(10, "Prediction is ready")
+    UPDATED_PREDICTION = SceneInfo(11, "Multiple predictions")
+    LOITERING = SceneInfo(7, "Foundto be loitering", takeover_possible=True, takeover_possible_after=10) # TODO: create "possible after"
+    PLAY = SceneInfo(7, description="After many predictions; just fooling around", takeover_possible=True, takeover_possible_after=10)
+    LOST = SceneInfo(-1, description="Track lost", takeover_possible=True, takeover_possible_after=0)

 Time = float

-class Scenario:
-    def __init__(self, track_id):
+class PrioritySlotItem():
+    TAKEOVER_FADEOUT = 3
+
+    def __init__(self, identifier):
+        self.identifier = identifier
+        self.start_time = 0.
+        self.take_over_at: Optional[Time] = None
+
+    def take_over(self):
+        if self.take_over_at:
+            return
+
+        self.take_over_at = time.perf_counter()
+
+    def taken_over(self):
+        self.is_running = False
+        self.take_over_at = None
+
+    def takenover_for(self):
+        if self.take_over_at:
+            return time.perf_counter() - self.take_over_at
+        return None
+
+    def takeover_factor(self):
+        l = self.takenover_for()
+        if not l:
+            return 0
+        return l/self.TAKEOVER_FADEOUT
+
+    def start(self):
+        # change when visible
+        logger.info(f"Start {self.identifier}: {self.get_state_name()}")
+        self.start_time = time.perf_counter()
+        self.is_running = True
+
+    def running_for(self):
+        return time.perf_counter() - self.start_time
+
+    @abstractmethod
+    def get_priority(self) -> int:
+        raise RuntimeError("Not implemented")
+
+    @abstractmethod
+    def get_state_name(self) -> str:
+        raise RuntimeError("Not implemented")
+
+    @abstractmethod
+    def can_be_taken_over(self):
+        raise RuntimeError("Not implemented")
+
+
+class Scenario(PrioritySlotItem):
+    def __init__(self, track_id, stage: Stage):
+        super().__init__(track_id)
+        self.stage = stage
         self.track_id = track_id
         self.scene: ScenarioScene = ScenarioScene.DETECTED
-        self.start_time = 0.
         self.current_time = 0
-        self.take_over_at: Optional[Time] = None

         self.track: Optional[ProjectedTrack] = None
         self.prediction_tracks: List[ProjectedTrack] = []
@@ -98,12 +156,26 @@ class Scenario:

         self.is_running = False

+        self.loitering_factor = 0
+
         logger.info(f"Found {self.track_id}: {self.scene.name}")

-    def start(self):
-        # change when visible
-        logger.info(f"Start {self.track_id}: {self.scene.name}")
-        self.is_running = True
+    def get_state_name(self):
+        return self.scene.name
+
+    def get_priority(self) -> int:
+        # newer higher prio
+        distance = 0
+        # todo: check if last point is within bounds
+        if self.track and len(self.track.projected_history) > 5:
+            distance = np.linalg.norm(self.track.projected_history[-1] - self.track.projected_history[0])
+        return (self.scene.value.priority, distance)
+
+    def can_be_taken_over(self):
+        if self.scene.value.takeover_possible:
+            if time.perf_counter() - self.state_change_at > self.scene.value.takeover_possible_after:
+                return True
+        return False

     def track_age(self):
         if not self.track:
@@ -114,7 +186,7 @@ class Scenario:
         if self.take_over_at:
             return

-        self.take_over_at = time.time()
+        self.take_over_at = time.perf_counter()

     def taken_over(self):
         self.is_running = False
@@ -122,19 +194,19 @@ class Scenario:
     def takenover_for(self):
         if self.take_over_at:
-            return time.time() - self.take_over_at
+            return time.perf_counter() - self.take_over_at
         return None

     def takeover_factor(self):
         l = self.takenover_for()
         if not l:
             return 0
-        return l/TAKEOVER_FADEOUT
+        return l/self.TAKEOVER_FADEOUT

     def lost_for(self):
         if self.scene is ScenarioScene.LOST:
-            return time.time() - self.state_change_at
+            return time.perf_counter() - self.state_change_at
         return None

     def lost_factor(self):
@@ -144,7 +216,7 @@ class Scenario:
         return l/LOST_FADEOUT

     def anomaly_factor(self):
-        return calc_anomaly(self.prediction_diffs, 10)
+        return calc_anomaly(self.prediction_diffs)

     def deactivate(self):
         self.take_over_at = None
@@ -157,11 +229,12 @@ class Scenario:
     def set_scene(self, scene: ScenarioScene):
         if self.scene is scene:
-            return
+            return False

         logger.info(f"Changing scene for {self.track_id}: {self.scene.name} -> {scene.name}")
         self.scene = scene
-        self.state_change_at = time.time()
+        self.state_change_at = time.perf_counter()
+        return True

     def update_state(self):
         self.check_lost() or self.check_loitering() or self.check_track()
@@ -175,25 +248,31 @@ class Scenario:
     def check_loitering(self):
         scores = [s for s in calculate_loitering_scores(self.track, LOITERING_DURATION_TO_LINGER, LOITERING_LINGER_FACTOR, LOITERING_VELOCITY_TRESHOLD/TRACK_ASSUMED_FPS, 150)]
-        if scores[-1] > .99:
+
+        if not len(scores):
+            logger.warning(f"No loitering score for {self.track_id}")
+            return False
+
+        self.loitering_factor = scores[-1]
+        if self.loitering_factor > .99:
             self.set_scene(ScenarioScene.LOITERING)
             return True
         return False

     def check_track(self):
         predictions = len(self.prediction_tracks)
-        if predictions == 1:
-            self.set_scene(ScenarioScene.FIRST_PREDICTION)
+        if predictions and self.running_for() < 20:
+            self.set_scene(ScenarioScene.PREDICTION_AVAILABLE)
             return True
-        if predictions > 10:
+        if predictions and self.running_for() > 60 * 5:
             self.set_scene(ScenarioScene.PLAY)
             return True
         if predictions:
-            self.set_scene(ScenarioScene.CORRECTED_PREDICTION)
+            self.set_scene(ScenarioScene.UPDATED_PREDICTION)
             return True
         if self.track:
-            if len(self.track.projected_history) > TRACK_ASSUMED_FPS * 3:
-                self.set_scene(ScenarioScene.SUBSTANTIAL)
+            if len(self.track.projected_history) > TRACK_ASSUMED_FPS * 2:
+                self.set_scene(ScenarioScene.TRACKED)
             else:
                 self.set_scene(ScenarioScene.DETECTED)
             return True
@@ -227,8 +306,8 @@ class Scenario:
         # in case of the unlikely event that prediction was received sooner
         self.recv_track(track)

-        if PREDICTION_INTERVAL is not None and len(self.prediction_tracks) and (track.frame_index - self.prediction_tracks[-1].frame_index) < PREDICTION_INTERVAL:
+        interval = self.stage.get_setting('stage.prediction_interval', PREDICTION_INTERVAL)
+        if interval is not None and len(self.prediction_tracks) and (track.frame_index - self.prediction_tracks[-1].frame_index) < interval:
             # just drop tracks if the predictions come to quick
             return
@@ -248,6 +327,19 @@ class Scenario:

+def build_line_others():
+    others_color = SrgbaColor(1,1,0,1)
+    line_others = LineAnimationStack(StaticLine([], others_color))
+    # line_others.add(SegmentLine(line_others.tail, duration=3, anim_f=partial(SegmentLine.anim_grow, in_and_out=True, max_len=5)))
+    line_others.add(SimplifyLine(line_others.tail, 0.001)) # Simplify before effects, so they don't distort
+    line_others.add(CropAnimationLine(line_others.tail, 70, assume_fps=TRACK_ASSUMED_FPS*2)) # speed up
+    line_others.add(NoiseLine(line_others.tail, amplitude=0, t_factor=.3))
+    # line_others.add(DashedLine(line_others.tail, t_factor=4, loop_offset=True))
+    # line_others.get(DashedLine).skip = True
+    line_others.add(FadedEndsLine(line_others.tail, 30, 30))
+    line_others.add(FadeOutLine(line_others.tail))
+    line_others.get(FadeOutLine).set_alpha(0)
+    return line_others
+
 class DrawnScenario(Scenario):
     """
@@ -256,39 +348,49 @@ class DrawnScenario(Scenario):
     This distinction is only for ordering the code
     """

-    MAX_HISTORY = 300 # points of history of trajectory to display (preventing too long lines)
+    MAX_HISTORY = 130 # points of history of trajectory to display (preventing too long lines)
     CUT_GAP = 5 # when adding a new prediction, keep the existing prediction until that point + this CUT_GAP margin

-    def __init__(self, track_id):
-        super().__init__(track_id)
+    def __init__(self, track_id, stage: Stage):
+        super().__init__(track_id, stage)
         self.last_update_t = time.perf_counter()
         self.active_ptrack: Optional[ProjectedTrack] = None

         history_color = SrgbaColor(1.,0.,1.,1.)
         history = StaticLine([], history_color)
         self.line_history = LineAnimationStack(history)
-        self.line_history.add(AppendableLineAnimator(self.line_history.tail, draw_decay_speed=25))
+        self.line_history.add(AppendableLineAnimator(self.line_history.tail, draw_decay_speed=120, transition_in_on_init=False))
         self.line_history.add(CropLine(self.line_history.tail, self.MAX_HISTORY))
+        self.line_history.add(SimplifyLine(self.line_history.tail, 0.002)) # Simplify before effects, so they don't distort
         self.line_history.add(FadedTailLine(self.line_history.tail, TRACK_FADE_AFTER_DURATION * TRACK_ASSUMED_FPS, TRACK_END_FADE))
         self.line_history.add(NoiseLine(self.line_history.tail, amplitude=0, t_factor=.3))
         self.line_history.add(FadeOutJitterLine(self.line_history.tail, frequency=5, t_factor=.5))

         self.prediction_color = SrgbaColor(0,1,0,1)
         self.line_prediction = LineAnimationStack(StaticLine([], self.prediction_color))
-        self.line_prediction.add(SegmentLine(self.line_prediction.tail, duration=.5))
-        self.line_prediction.add(DashedLine(self.line_prediction.tail, t_factor=4, loop_offset=True))
+        self.line_prediction.add(CropLine(self.line_prediction.tail, start_offset=0))
+        self.line_prediction.add(StartFromClosestPoint(self.line_prediction.tail))
+        self.line_prediction.get(StartFromClosestPoint).skip=True
+        self.line_prediction.add(RotatingLine(self.line_prediction.tail, decay_speed=16))
+        self.line_prediction.get(RotatingLine).skip = False
+        self.line_prediction.add(SegmentLine(self.line_prediction.tail, duration=7 / 3, anim_f=SegmentLine.anim_follow_in_front))
+        self.line_prediction.get(SegmentLine).skip = False
+        self.line_prediction.add(SimplifyLine(self.line_prediction.tail, 0.002)) # Simplify before effects, so they don't distort
+        GAP_DURATION = 5
+        def dash_len(dt, t):
+            t=min(1, t/GAP_DURATION)
+            return lerp(.99, .6, t)
+        def gap_len(dt, t):
+            t=min(1, t/GAP_DURATION)
+            return lerp(0.01, .9, t)
+
+        self.line_prediction.add(DashedLine(self.line_prediction.tail, dash_len=dash_len, gap_len=gap_len, t_factor=2, loop_offset=True))
         self.line_prediction.get(DashedLine).skip = True
         self.line_prediction.add(FadeOutLine(self.line_prediction.tail))

         # when rendering tracks from others similar/close to the current one
-        self.others_color = SrgbaColor(1,1,0,1)
-        self.line_others = LineAnimationStack(StaticLine([], self.others_color))
-        self.line_others.add(SegmentLine(self.line_others.tail, duration=3, anim_f=partial(SegmentLine.anim_grow, in_and_out=True, max_len=5)))
-        # self.line_others.add(DashedLine(self.line_others.tail, t_factor=4, loop_offset=True))
-        # self.line_others.get(DashedLine).skip = True
-        self.line_others.add(FadeOutLine(self.line_others.tail))
-        self.line_others.get(FadeOutLine).set_alpha(0)
+        self.line_others = build_line_others()

         self.tracks_to_self: Optional[Generator] = None
         self.tracks_to_self_pos = None
@@ -296,53 +398,83 @@ class DrawnScenario(Scenario):
         # self.line_prediction_drawn = self.line_prediction_faded

-    def update(self, stage: Stage):
+    def update(self):
         super().update()
         if self.track:
             self.line_history.root.points = self.track.projected_history

+            lost_factor = self.lost_factor() # fade out when lost
+            start_factor = 0 #1 - min(1, self.running_for()) # fade in when starting
+            # print(start_factor)
+            self.line_history.get(FadeOutJitterLine).set_alpha(1- lost_factor - start_factor)
+            self.line_prediction.get(FadeOutLine).set_alpha(1-lost_factor)
+            self.line_history.get(NoiseLine).amplitude = lost_factor * 1.8
+
         if len(self.prediction_tracks):
+            # now_p = np.array(self.line_history.root.points[-1])
+            # prev_p = np.array(self.line_history.root.points[-1 * min(4, len(self.line_history.root.points))])
+            # diff = now_p - prev_p
+            self.line_prediction.get(StartFromClosestPoint).set_point(self.line_history.root.points[-1])
+            # print("set origin", self.line_history.root.points[-1])

             # TODO: only when animation is ready for it? or collect lines
+        if self.is_running:
             if not self.active_ptrack:
+                # draw the first prediction
                 self.active_ptrack = self.prediction_tracks[-1]
+                self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
                 self.line_prediction.start() # reset positions

             elif self.active_ptrack._track.updated_at < self.prediction_tracks[-1]._track.updated_at:
+                # stale prediction
                 # switch only if drawing animation is ready
-                if self.line_prediction.is_ready():
-                    self.active_ptrack = self.prediction_tracks[-1]
-                    self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_arrive, length=.3)
-                    self.line_prediction.get(SegmentLine).duration = .5
-                    self.line_prediction.get(DashedLine).skip = True
-                    # print('restart')
-                    self.line_prediction.start() # reset positions
-                    # print(self.line_prediction.get(SegmentLine).running_for())
-            else:
-                if self.line_prediction.is_ready():
-                    # little hack: check is dashedline skips, to only run this once per animation:
-                    if self.line_prediction.get(DashedLine).skip:
-                        # no new yet, but ready with anim, start stage 2
-                        self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow)
-                        self.line_prediction.get(SegmentLine).duration = 1
-                        # self.line_prediction.get(SegmentLine).skip = True
-                        self.line_prediction.get(DashedLine).skip = False
-                        self.line_prediction.start()
-                    elif self.line_prediction.get(SegmentLine).duration != 2: # hack to only play once
-                        self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow, reverse=True)
-                        self.line_prediction.get(SegmentLine).duration = 2
-                        self.line_prediction.get(SegmentLine).start()
+                # if self.line_prediction.is_ready():
+                self.active_ptrack = self.prediction_tracks[-1]
+                self.line_prediction.root.points = self.active_ptrack._track.predictions[0]
+                if self.line_prediction.is_ready() and self.line_prediction.get(DashedLine).skip == True:
+                    self.line_prediction.get(SegmentLine).skip = True
+                    self.line_prediction.get(DashedLine).skip = False
+                    self.line_prediction.start() # reset positions
+
+                # self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_arrive, length=.3)
+                # self.line_prediction.get(SegmentLine).duration = .5
+                # self.line_prediction.get(DashedLine).skip = True
+                # # print('restart')
+                # self.line_prediction.start() # reset positions
+                # # print(self.line_prediction.get(SegmentLine).running_for())
+            # else:
+            #     if self.line_prediction.is_ready():
+            #         # little hack: check is dashedline skips, to only run this once per animation:
+            #         if self.line_prediction.get(DashedLine).skip:
+            #             # no new yet, but ready with anim, start stage 2
+            #             self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow)
+            #             self.line_prediction.get(SegmentLine).duration = 1
+            #             # self.line_prediction.get(SegmentLine).skip = True
+            #             self.line_prediction.get(DashedLine).skip = False
+            #             self.line_prediction.start()
+            #         elif self.line_prediction.get(SegmentLine).duration != 2: # hack to only play once
+            #             self.line_prediction.get(SegmentLine).anim_f = partial(SegmentLine.anim_grow, reverse=True)
+            #             self.line_prediction.get(SegmentLine).duration = 2
+            #             self.line_prediction.get(SegmentLine).start()
+
+        if self.active_ptrack:
+            # TODO: this should crop by distance/lenght
+            self.line_prediction.get(CropLine).start_offset = self.track._track.frame_index - self.active_ptrack._track.frame_index

             # self.line_prediction_dashed.set_offset_t(self.active_ptrack._track.track_update_dt() * 4)
-            self.line_prediction.root.points = self.active_ptrack._track.predictions[0]

         # special case: LOITERING
-        if self.scene is ScenarioScene.LOITERING or self.state_change_at:
+        if self.stage.get_setting('stage.loitering_animation', True) and self.scene is ScenarioScene.LOITERING: # or self.state_change_at:
             # logger.info('loitering')
-            transition = min(1, (time.time() - self.state_change_at)/1.4)
+            transition = min(1, (time.perf_counter() - self.state_change_at)/1.4)
+            # print('loitering factor', transition)

             # TODO: transition fade, using to_alpha(), so it can fade back in again:
@@ -356,21 +488,27 @@ class DrawnScenario(Scenario):
             # print(transition > .999, self.is_running, current_position_rounded, time_diff)

             if transition > .999 and self.is_running and not all(self.tracks_to_self_pos == current_position_rounded) and time_diff > 5: # only do these expensive calls when running
-                logger.info(f"Fetch similar tracks for {self.track_id}")
-                t = time.perf_counter()
                 self.tracks_to_self_pos = current_position_rounded
                 self.tracks_to_self_fetched_at = time.perf_counter()

                 # fetch lines nearby
-                track_ids = stage.history.get_nearest_tracks(current_position, 15)
+                track_ids = self.stage.history.get_nearest_tracks(current_position, 30)
                 self.track_ids_to_self = iter(track_ids)
-                self.tracks_to_self = stage.history.ids_as_trajectory(track_ids)
+                self.tracks_to_self = self.stage.history.ids_as_trajectory(track_ids)

-                print(time.perf_counter() - t, "fetch delya")
+                self.stage.logger.info(f"Fetched similar tracks for {self.track_id}. (Took {time.perf_counter() - self.tracks_to_self_fetched_at}s)")

-            if self.tracks_to_self and self.line_others.is_ready():
-                current_history_id = next(self.track_ids_to_self)
-                current_history = next(self.tracks_to_self)
+            # if self.tracks_to_self and not len(self.line_others.root.points):
+            if self.tracks_to_self and not self.line_others.is_running():
+                try:
+                    current_history = next(self.tracks_to_self)
+                    current_history_id = next(self.track_ids_to_self)
+                    self.line_others.get(CropAnimationLine).assume_fps = min(
+                        self.line_others.get(CropAnimationLine).assume_fps + TRACK_ASSUMED_FPS*1.5 , # faster each time
+                        TRACK_ASSUMED_FPS * 6 # capped at 6x
+                    )
+
+                    self.line_others.get(NoiseLine).amplitude = .05

                     logger.info(f"play history item: {current_history_id}")
                     self.line_others.get(FadeOutLine).set_alpha(1)
@@ -378,10 +516,17 @@ class DrawnScenario(Scenario):
                     self.line_others.root.points = current_history
                     # print(self.line_others.root.points)
                     self.line_others.start()
+                except StopIteration as e:
+                    pass
+                    # logger.info("Exhausted similar tracks?")
+
+        else:
+            # reset loitering values
+            self.line_others.get(CropAnimationLine).assume_fps = TRACK_ASSUMED_FPS*2
+            self.line_others.get(NoiseLine).amplitude = 0

         # special case: PLAY
-        elif self.scene is ScenarioScene.PLAY:
+        if self.scene is ScenarioScene.PLAY:
             pass
         # if self.scene is ScenarioScene.CORRECTED_PREDICTION:
         #     self.line_prediction.get(DashedLine).skip = False
@@ -391,48 +536,174 @@
     def to_renderable_lines(self, dt: DeltaT) -> RenderableLines:
         # each scene is handled differently:

+        t1 = time.perf_counter()
+
         # 1) history, fade out when lost
-        self.line_history.get(FadeOutJitterLine).set_alpha(1-self.lost_factor())
-        self.line_prediction.get(FadeOutLine).set_alpha(1-self.lost_factor())
-        self.line_history.get(NoiseLine).amplitude = self.lost_factor()
+        # self.line_history.get(StaticLine).color = SrgbaColor(1, 0, 1-self.anomaly_factor(), 1)

         # fade out history after max duration, given in frames
         track_age_in_frames = self.track_age() * TRACK_ASSUMED_FPS
         self.line_history.get(FadedTailLine).set_frame_offset(track_age_in_frames)

+        t2 = time.perf_counter()
+
-        # 2) also fade-out when moving into loitering mode.
-        # when fading out is done, start drawing historical data
         history_line = self.line_history.as_renderable_line(dt)
+        t3 = time.perf_counter()
         prediction_line = self.line_prediction.as_renderable_line(dt)
+        t4 = time.perf_counter()
         others_line = self.line_others.as_renderable_line(dt)
+        t5 = time.perf_counter()

         # print(history_line)
         # print(self.track_id, len(self.line_history.points), len(history_line))

+        timings = (t5-t4, t4-t3, t3-t2, t2-t1)
+
         return RenderableLines([
             history_line,
             prediction_line,
             others_line
-        ])
+        ]), timings
+
+    def set_scene(self, scene):
+        """Create log message for the auxilary interface
+        """
+        original = self.scene.name
+        changed = super().set_scene(scene)
+        if changed:
+            try:
+                self.stage.log_sock.send_string(f"Visitor {self.track_id}: {original} -> {self.scene.name}", zmq.NOBLOCK)
+            except Exception as e:
+                logger.warning("Not sent the scene change message, broken socket?")
+        return changed
+
+class NoTracksScenario(PrioritySlotItem):
+    TAKEOVER_FADEOUT = 1 # override default to be faster
+
+    def __init__(self, stage: Stage, i: int):
+        super().__init__(f"screensaver_{i}")
+        self.stage = stage
+        self.line = build_line_others()
+
+    def get_priority(self):
+        # super low priority
+        return (-1, -1)
+
+    def can_be_taken_over(self):
+        return True
+
+    def get_state_name(self):
+        return "previewing"
+
+    def update(self, stage: Stage):
+        pass
+
+    def to_renderable_lines(self, dt: DeltaT):
+        timings = []
+        lines = RenderableLines([], CoordinateSpace.WORLD)
+        if not self.line.is_running():
+            track_id = random.choice(list(self.stage.history.state.tracks.keys()))
+            # print('track_id', track_id)
+            positions = self.stage.history.state.track_histories[track_id]
+            self.line.root.points = positions
+            self.line.start()
+
+        alpha = 1 - self.takeover_factor()
+        self.line.get(FadeOutLine).set_alpha(alpha)
+
+        lines.lines.append(
+            self.line.as_renderable_line(dt)
+        )
+
+        return lines, timings
+
+
+class DebugDrawer():
+    def __init__(self, stage: Stage):
+        self.stage = stage
+
+    def positions_to_renderable_lines(self, dt: DeltaT):
+        lines = RenderableLines([], CoordinateSpace.WORLD)
+        past_color = SrgbaColor(1,0,1,1)
+        current_color = SrgbaColor(1,0,0,.6)
+        for scenario in self.stage.scenarios.values():
+            # lines.append(StaticLine(scenario.track.projected_history, past_color).as_renderable_line(dt).as_simplified(factor=.005))
+            center = scenario.track.projected_history[-1]
+
+            lines.append(StaticLine([[center[0], center[1]-.2], [center[0], center[1]+.2]], current_color).as_renderable_line(dt))
+            lines.append(StaticLine([[center[0]-.2, center[1]], [center[0]+.2, center[1]]], current_color).as_renderable_line(dt))
+        return lines
+
+    def predictions_to_renderable_lines(self, dt: DeltaT):
+        lines = RenderableLines([], CoordinateSpace.WORLD)
+        future_color = SrgbaColor(0,1,0,.6)
+        for scenario in self.stage.scenarios.values():
+            # lines.append(StaticLine(scenario.track.projected_history, past_color).as_renderable_line(dt).as_simplified(factor=.005))
+            if scenario.active_ptrack:
+                lines.append(StaticLine(scenario.active_ptrack._track.predictions[0], future_color).as_renderable_line(dt))
+        return lines
+
+
+class DatasetDrawer():
+    def __init__(self, stage: Stage):
+        self.stage = stage
+
+        line_color = SrgbaColor(0,1,1,1)
+        self.track_line = LineAnimationStack(StaticLine([], line_color))
+        # self.track_line.add(SimplifyLine(self.track_line.tail, 0.004)) # Simplify before cropping, to get less noodling
+        self.track_line.add(SimplifyLine(self.track_line.tail, 0.002)) # no laser in dortmund
+        self.track_line.add(CropAnimationLine(self.track_line.tail, 50, assume_fps=TRACK_ASSUMED_FPS*20)) # speed up
+
+        # self.track_line.add(DashedLine(self.track_line.tail, t_factor=4, loop_offset=True))
+        # self.track_line.get(DashedLine).skip = True
+        # self.track_line.add(FadedEndsLine(self.track_line.tail, 10, 10))
+        self.track_line.add(FadeOutJitterLine(self.track_line.tail, t_factor=3))
+        # self.track_line.add(FadeOutLine(self.track_line.tail))
+        self.track_line.get(FadeOutJitterLine).set_alpha(np.random.random()*.3+.7)
+
+    def to_renderable_lines(self, dt: DeltaT):
+        lines = RenderableLines([], CoordinateSpace.WORLD)
+        if not self.track_line.is_running():
+            # print('update')
+            track_id = random.choice(list(self.stage.history.state.tracks.keys()))
+            # print('track_id', track_id)
+            positions = self.stage.history.state.track_histories[track_id]
+            self.track_line.root.points = positions
+            self.track_line.start()
+        # else:
+        #     print('-')
+
+        lines.lines.append(
+            self.track_line.as_renderable_line(dt)
+        )
+        # print(lines)
+
+        return lines
+

 class Stage(Node):

-    FPS = 60
+    FALLBACK_FPS = 30 # we render to lasers, no need to go faster!

     def setup(self):
         self.active_scenarios: List[DrawnScenario] = [] # List of currently running Scenario instances

-        self.scenarios: Dict[str, DrawnScenario] = DefaultDictKeyed(lambda key: DrawnScenario(key))
+        self.scenarios: Dict[str, DrawnScenario] = DefaultDictKeyed(lambda key: DrawnScenario(key, self))
         self.frame_noimg_sock = self.sub(self.config.zmq_frame_noimg_addr)
         self.trajectory_sock = self.sub(self.config.zmq_trajectory_addr)
         self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
+        self.detection_sock = self.sub(self.config.zmq_detection_addr)
         self.stage_sock = self.pub(self.config.zmq_stage_addr)
+        self.log_sock = self.push(self.config.zmq_log_addr)
+        # self.stage_py_sock = self.pub(self.config.zmq_stage_py_addr)
         self.counter = CounterSender()
@@ -442,11 +713,17 @@ class Stage(Node):

         self.history = TrackHistory(self.config.tracker_output_dir, self.config.camera, self.config.cache_path)

+        self.auxilary = DatasetDrawer(self)
+        self.debug_drawer = DebugDrawer(self)
+
+        # 'screensavers'
+        self.notrack_scenarios = [] #[NoTracksScenario(self, i) for i in range(self.config.max_active_scenarios)]
+
     def run(self):
-        while self.run_loop_capped_fps(self.FPS, warn_below_fps=10):
-            dt = max(1/ self.FPS, self.dt_since_last_tick) # never dt of 0
+        while self.run_loop_capped_fps(self.get_setting('stage.fps', self.FALLBACK_FPS), warn_below_fps=10):
+            dt = max(1/ self.get_setting('stage.fps', self.FALLBACK_FPS), self.dt_since_last_tick) # never dt of 0

             # t1 = time.perf_counter()
             self.loop_receive()
@@ -484,7 +761,7 @@ class Stage(Node):
         """Update active scenarios and handle pauses/completions."""
         # 1) process timestep for all scenarios
         for s in self.scenarios.values():
-            s.update(self)
+            s.update()

         # 2) Remove stale tracks and take-overs
@@ -502,9 +779,9 @@ class Stage(Node):

         # 3) determine set of pending scenarios (all except running)
-        pending_scenarios = [s for s in self.scenarios.values() if s not in self.active_scenarios]
+        pending_scenarios = [s for s in list(self.scenarios.values()) + self.notrack_scenarios if s not in self.active_scenarios]
         # ... highest priority first
-        pending_scenarios.sort(key=lambda s: s.scene.value.priority, reverse=True)
+        pending_scenarios.sort(key=lambda s: s.get_priority(), reverse=True)

         # 4) check if there's a slot free:
         while len(self.active_scenarios) < self.config.max_active_scenarios and len(pending_scenarios):
@@ -515,15 +792,15 @@ class Stage(Node):
         # 5) Takeover Logic: If no space, try to replace a lower-priority active scenario
         # which is in a scene in which takeover is possible
         eligible_active_scenarios = [
-            s for s in self.active_scenarios if s.scene.value.takeover_possible
+            s for s in self.active_scenarios if s.can_be_taken_over()
         ]
-        eligible_active_scenarios.sort(key=lambda s: s.scene.value.priority)
+        eligible_active_scenarios.sort(key=lambda s: s.get_priority())

         if eligible_active_scenarios and pending_scenarios:
             lowest_priority_active = eligible_active_scenarios[0]
             highest_priority_waiting = pending_scenarios[0]

-            if highest_priority_waiting.scene.value.priority > lowest_priority_active.scene.value.priority:
+            if highest_priority_waiting.get_priority() > lowest_priority_active.get_priority():
                 # Takeover! Stop the active scenario
                 # will be cleaned up in update() loop after animation finishes
                 # automatically triggering the start of the highest priority scene
@@ -536,20 +813,36 @@ class Stage(Node):

         # TODO: sometimes very slow!
         t1 = time.perf_counter()
-        for scenario in self.active_scenarios:
-            lines.append_lines(scenario.to_renderable_lines(dt))
+        training_lines = self.auxilary.to_renderable_lines(dt)

         t2 = time.perf_counter()
-        rl = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
+        active_positions = self.debug_drawer.positions_to_renderable_lines(dt)
+        all_predictions = self.debug_drawer.predictions_to_renderable_lines(dt)
+
+        t2b = time.perf_counter()
+
+        timings = []
+        for scenario in self.active_scenarios:
+            scenario_lines, timing = scenario.to_renderable_lines(dt)
+            lines.append_lines(scenario_lines)
+            timings.append(timing)
+        if not len(self.active_scenarios):
+            lines = training_lines
+
+        t2c = time.perf_counter()
+        # rl_scenario = lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
+        # rl_training = training_lines.as_simplified(SimplifyMethod.RDP, .003) # or segmentise (see shapely)
         self.counter.set("stage.lines", len(lines.lines))
-        self.counter.set("stage.points_orig", lines.point_count())
-        self.counter.set("stage.points", rl.point_count())
+        # self.counter.set("stage.points_orig", lines.point_count())
+        self.counter.set("stage.points", lines.point_count())
         t3 = time.perf_counter()

         layers: RenderableLayers = {
             1: lines,
             2: self.debug_lines,
+            3: training_lines,
+            4: active_positions,
+            5: all_predictions,
         }

         t4 = time.perf_counter()
@@ -559,17 +852,19 @@ class Stage(Node):

         t5 = time.perf_counter()
         self.stage_sock.send(msg)
+        # self.stage_sock.send_pyobj(layers)

         # self.stage_sock.send_json(obj=layers, cls=DataclassJSONEncoder)

         t6 = time.perf_counter()

-        t = (t2-t1, t3-t2, t4-t3, t5-t4, t6-t5)
+        t = (t2-t1, t2b-t2, t2c-t2b, t3-t2c, t2b-t2, t4-t3, t5-t4, t6-t5)
         if sum(t) > .1:
             print(t)
             print(len(lines.lines))
             print(lines.point_count())
             print(len(msg))
+            print('scenario timings:', timings)
             # print(msg)
             # exit()
@@ -590,14 +885,30 @@ class Stage(Node):
                             help='Manually specity communication addr for the prediction messages',
                             type=str,
                             default="ipc:///tmp/feeds_preds")
+        argparser.add_argument('--zmq-detection-addr',
+                            help='Manually specity communication addr for the detection messages',
+                            type=str,
+                            default="ipc:///tmp/feeds_dets")
         argparser.add_argument('--zmq-stage-addr',
                             help='Manually specity communication addr for the stage messages (the rendered lines)',
                             type=str,
                             default="tcp://0.0.0.0:99174")
+        argparser.add_argument('--zmq-log-addr',
+                            help='Manually specity communication addr for the log messages',
+                            type=str,
+                            default="tcp://0.0.0.0:99188")
+        argparser.add_argument('--zmq-stage-py-addr',
+                            help='Sometimes there is no need for protobuf',
+                            type=str,
+                            default="ipc:///tmp/feeds_stage")
         argparser.add_argument('--debug-map',
                             help='specify a map (svg-file) from which to load lines which will be overlayed',
                             type=str,
-                            default="../DATASETS/hof3/map_hof.svg")
+                            default="../DATASETS/hof-lidar/map_hof.svg")
+        argparser.add_argument('--cutoff-map',
+                            help='specify a map (svg-file) that specifies projection boundaries. In here, degrade chance to be selectede',
+                            type=str,
+                            default="../DATASETS/hof-lidar/map_hof.svg")
         argparser.add_argument('--max-active-scenarios',
                             help='Maximum number of active scenarios that can be drawn at once (to not overlod the laser)',
                             type=int,
```
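One detail worth noting in the diff above: `get_priority()` now returns a tuple of `(scene priority, distance travelled)` rather than a bare scene priority, and the stage sorts and compares these tuples directly. Python compares tuples element by element, so the scene priority still dominates and the distance only breaks ties between scenarios in equally important scenes. A minimal illustration (the numbers are made up):

```python
# Priorities as (scene_priority, distance); tuples compare lexicographically.
candidates = {
    "track_a": (11, 2.5),   # UPDATED_PREDICTION, moved 2.5 m
    "track_b": (11, 7.0),   # UPDATED_PREDICTION, moved 7.0 m; wins the tie
    "track_c": (4, 12.0),   # DETECTED; distance is irrelevant at lower priority
}

# Highest priority first, mirroring pending_scenarios.sort(..., reverse=True)
ranked = sorted(candidates, key=candidates.get, reverse=True)
print(ranked)  # ['track_b', 'track_a', 'track_c']
```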
trap/stage_renderer.py (new file, 276 lines)
```python
from argparse import ArgumentParser
from collections import deque
import math
import re
from typing import List
import numpy as np
import pyglet
from torch import mul
import zmq
from trap.lines import RenderableLayers, message_to_layers
from trap.node import Node

BG_COLOR = (0,0,255)

class StageRenderer(Node):
    def setup(self):
        # self.prediction_sock = self.sub(self.config.zmq_prediction_addr)
        # self.tracker_sock = self.sub(self.config.zmq_trajectory_addr)
        # self.detector_sock = self.sub(self.config.zmq_detection_addr)
        # self.frame_sock = self.sub(self.config.zmq_frame_addr)
        self.stage_sock = self.sub(self.config.zmq_stage_addr)
        self.log_sock = self.pull(self.config.zmq_log_addr)

        # setup pyglet:
        display = pyglet.display.get_display()
        screens = display.get_screens()

        # use configured monitor, fall back to whatever is available
        self.screen = sorted(screens, reverse=True, key=lambda s: s.get_monitor_name() == self.config.monitor)[0]

        if self.screen.get_monitor_name() != self.config.monitor:
            self.logger.warning(f"Not displaying on configured monitor. {self.screen.get_monitor_name()} instead of {self.config.monitor}")

        # print(self.screen.get_modes())

        config = pyglet.gl.Config(sample_buffers=1, samples=4)

        # when screen is in portrait, window mode here expects still (larger x smaller) number.
        # self.window.get_size() will be reported properly
        wh = sorted((self.screen.width, self.screen.height), reverse=self.config.fullscreen)

        self.window = pyglet.window.Window(width=wh[0], height=wh[1], config=config, fullscreen=self.config.fullscreen, screen=self.screen)
        self.window.set_exclusive_keyboard(True)
        self.window.set_exclusive_keyboard(False)
        self.window.set_exclusive_mouse(True)
        self.window.set_exclusive_mouse(False)

        # self.window.set_size(1080, 1920)

        window_size = self.window.get_size()

        padding = 40

        print(window_size)
        self.window.set_handler('on_draw', self.on_draw)
        # self.window.set_handler('on_close', self.on_close)

        # pyglet.gl.glClearColor(81./255, 20/255, 46./255, 0)
        pyglet.gl.glClearColor(0/255, 0/255, 255/255, 0)
        self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=(255,255,255,255))
        self.fps_display.label.x = self.window.width - 50
        self.fps_display.label.y = self.window.height - 17
        self.fps_display.label.bold = False
        self.fps_display.label.font_size = 10

        self.current_layers: RenderableLayers = {}

        self.lines: List[pyglet.shapes.Line] = []
        self.lines_batch = pyglet.graphics.Batch()
        self.text = pyglet.text.document.FormattedDocument("")
        self.text_batch = pyglet.graphics.Batch()
        self.text_layout = pyglet.text.layout.TextLayout(
            self.text, padding, (self.window.get_size()[0]-padding*2) // 2 - 100,
            width=self.window.get_size()[1] - 2*padding,
            height=(self.window.get_size()[0] - padding) // 2,
            multiline=True, wrap_lines=False, batch=self.text_batch)

        max_len = 31
        self.log_msgs = deque([], maxlen=max_len)
        self.log_msgs.extend(["-"] * max_len)
```
|
|
||||||
|
|
||||||
|
translate = (10,-400)
|
||||||
|
# scale = 5
|
||||||
|
|
||||||
|
smallest_dimension = min(self.window.get_size())
|
||||||
|
max_x = 16.3
|
||||||
|
max_y = 14.3
|
||||||
|
scale = min(smallest_dimension / max_x, smallest_dimension/max_y)
|
||||||
|
|
||||||
|
|
||||||
|
self.logger.info(f"Use {scale=}")
|
||||||
|
|
||||||
|
|
||||||
|
self.transform = np.array([
|
||||||
|
[scale, 0,translate[0]],
|
||||||
|
[0,-scale,window_size[1]],
|
||||||
|
[0,0,1]
|
||||||
|
])
|
||||||
|
|
||||||
|
self.bg_image = pyglet.image.load(self.config.floorplan)
|
||||||
|
scale = (window_size[0] - padding*2) / (self.bg_image.width)
|
||||||
|
print('image_scale', scale, self.bg_image.width, self.bg_image.height)
|
||||||
|
# self.bg_image.height = int(self.bg_image.height / 3)
|
||||||
|
# self.bg_image.width = int(self.bg_image.width / 3)
|
||||||
|
img_y = window_size[1]-int(self.bg_image.height*scale)-padding*2
|
||||||
|
self.bg_sprite = pyglet.sprite.Sprite(img=self.bg_image, x=padding, y=img_y)
|
||||||
|
self.bg_sprite.scale = scale
|
||||||
|
|
||||||
|
|
||||||
|
clear_area = img_y
|
||||||
|
self.clear_transparent = pyglet.shapes.Rectangle(0, window_size[1]-clear_area, window_size[0], clear_area, color=(*BG_COLOR,255//70))
|
||||||
|
self.clear_fully= pyglet.shapes.Rectangle(0, 0, window_size[0], window_size[1]-clear_area, color=(*BG_COLOR,255))
|
||||||
|
|
||||||
|
self.window.clear()
|
||||||
|
|
||||||
|
|
||||||
|
def check_running(self, dt):
|
||||||
|
if not self.run_loop():
|
||||||
|
self.window.close()
|
||||||
|
self.event_loop.exit()
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.event_loop = pyglet.app.EventLoop()
|
||||||
|
pyglet.clock.schedule_interval(self.check_running, 0.1)
|
||||||
|
# pyglet.clock.schedule(self.receive)
|
||||||
|
self.event_loop.run()
|
||||||
|
|
||||||
|
|
||||||
|
def receive(self, dt):
|
||||||
|
try:
|
||||||
|
msg = self.stage_sock.recv(zmq.NOBLOCK)
|
||||||
|
self.current_layers = message_to_layers(msg)
|
||||||
|
self.update_lines()
|
||||||
|
except zmq.ZMQError as e:
|
||||||
|
# idx = frame.index if frame else "NONE"
|
||||||
|
# logger.debug(f"reuse video frame {idx}")
|
||||||
|
pass
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
log_msg = self.log_sock.recv_string(zmq.NOBLOCK)
|
||||||
|
self.log_msgs.append(log_msg)
|
||||||
|
except zmq.ZMQError as e:
|
||||||
|
# idx = frame.index if frame else "NONE"
|
||||||
|
# logger.debug(f"reuse video frame {idx}")
|
||||||
|
break
|
||||||
|
self.update_msgs()
|
||||||
|
|
||||||
|
|
||||||
|
def update_lines(self):
|
||||||
|
"""
|
||||||
|
Render the renderable lines of selected layers
|
||||||
|
"""
|
||||||
|
|
||||||
|
additional_scale = self.get_setting('stagerenderer.scale', 1)
|
||||||
|
dx = self.get_setting('stagerenderer.dx', 0)
|
||||||
|
dy = self.get_setting('stagerenderer.dy', 0)
|
||||||
|
transform = self.transform.copy()
|
||||||
|
transform[0][0] *= additional_scale
|
||||||
|
transform[1][1] *= additional_scale
|
||||||
|
transform[0][2] += dx
|
||||||
|
transform[1][2] += dy
|
||||||
|
|
||||||
|
i = -1
|
||||||
|
for nr, lines in self.current_layers.items():
|
||||||
|
|
||||||
|
if not self.get_setting(f'stagerenderer.layer.{nr}', True):
|
||||||
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
for line in lines.lines:
|
||||||
|
for p1, p2 in zip(line.points, line.points[1:]):
|
||||||
|
i += 1
|
||||||
|
pp1 = np.array([p1.position[0], p1.position[1], 1])
|
||||||
|
pp2 = np.array([p2.position[0], p2.position[1], 1])
|
||||||
|
|
||||||
|
pos1 = (transform@pp1)[:2].astype(int)
|
||||||
|
pos2 = (transform@pp2)[:2].astype(int)
|
||||||
|
|
||||||
|
color = (p2.color.as_array()*255).astype(int)
|
||||||
|
|
||||||
|
if i < len(self.lines):
|
||||||
|
shape = self.lines[i]
|
||||||
|
shape.x = pos1[0]
|
||||||
|
shape.y = pos1[1]
|
||||||
|
shape.x2 = pos2[0]
|
||||||
|
shape.y2 = pos2[1]
|
||||||
|
shape.color = color
|
||||||
|
else:
|
||||||
|
self.lines.append(pyglet.shapes.Line(pos1[0], pos1[1],
|
||||||
|
pos2[0],
|
||||||
|
pos2[1],
|
||||||
|
3,
|
||||||
|
color,
|
||||||
|
batch=self.lines_batch))
|
||||||
|
|
||||||
|
|
||||||
|
too_many = len(self.lines) - 1 - i
|
||||||
|
if too_many > 0:
|
||||||
|
for j in reversed(range(i, i+too_many)):
|
||||||
|
self.lines[i].delete()
|
||||||
|
del self.lines[i]
|
||||||
|
|
||||||
|
|
||||||
|
def update_msgs(self):
|
||||||
|
text = "\n".join(self.log_msgs)
|
||||||
|
self.text.text = text
|
||||||
|
self.text.set_style(0, len(self.text.text), dict(
|
||||||
|
font_name='Arial', # change to a font installed on your system
|
||||||
|
font_size=18,
|
||||||
|
color=(255, 255, 255, 255),
|
||||||
|
))
|
||||||
|
|
||||||
|
|
||||||
|
colorsmap = {
|
||||||
|
'ANOMALOUS': (255, 0, 0, 255),
|
||||||
|
'LOITERING': (255, 255, 0, 255),
|
||||||
|
'DETECTED': (255, 0, 255, 255),
|
||||||
|
'SUBSTANTIAL': (255, 0, 255, 255),
|
||||||
|
'LOST': (0, 0, 0, 255),
|
||||||
|
}
|
||||||
|
|
||||||
|
matchtext = "".join(self.log_msgs) # find no newlines
|
||||||
|
for state,color in colorsmap.items():
|
||||||
|
for match in re.finditer(state, matchtext):
|
||||||
|
self.text.set_style(match.start(), match.end(), dict(
|
||||||
|
color=color
|
||||||
|
))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def on_draw(self):
|
||||||
|
self.receive(.1)
|
||||||
|
# self.window.clear()
|
||||||
|
self.clear_transparent.color = (*BG_COLOR, int(3))
|
||||||
|
self.clear_transparent.draw()
|
||||||
|
self.clear_fully.draw()
|
||||||
|
self.fps_display.draw()
|
||||||
|
|
||||||
|
self.bg_sprite.draw()
|
||||||
|
|
||||||
|
self.lines_batch.draw()
|
||||||
|
self.text_batch.draw()
|
||||||
|
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def arg_parser(cls):
|
||||||
|
render_parser = ArgumentParser()
|
||||||
|
|
||||||
|
render_parser.add_argument('--zmq-stage-addr',
|
||||||
|
help='Manually specity communication addr for the stage messages (the rendered lines)',
|
||||||
|
type=str,
|
||||||
|
default="tcp://0.0.0.0:99174")
|
||||||
|
render_parser.add_argument('--zmq-log-addr',
|
||||||
|
help='Manually specity communication addr for the log messages',
|
||||||
|
type=str,
|
||||||
|
default="tcp://0.0.0.0:99188")
|
||||||
|
|
||||||
|
render_parser.add_argument("--fullscreen",
|
||||||
|
help="Set Window full screen",
|
||||||
|
action='store_true')
|
||||||
|
|
||||||
|
render_parser.add_argument('--floorplan',
|
||||||
|
help='specify a map (png-file) onto which overlayed',
|
||||||
|
type=str,
|
||||||
|
default="SETTINGS/2025-11-dortmund/space/floorplan.png")
|
||||||
|
render_parser.add_argument('--monitor',
|
||||||
|
help='Specify a screen on which to output (eg. HDMI-0)',
|
||||||
|
type=str,
|
||||||
|
default="HDMI-0")
|
||||||
|
return render_parser
|
||||||
|
|
||||||
|
|
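A note on the `self.transform` matrix built in `StageRenderer.setup()`: it is an ordinary homogeneous 2D transform that scales world metres to pixels, flips the y axis (pyglet puts the origin bottom-left), and translates. A standalone sketch with made-up numbers (`scale=50`, a 1080 px window height):

```python
import numpy as np

scale, translate_x, window_h = 50.0, 10, 1080   # made-up values
transform = np.array([
    [scale,  0,     translate_x],  # world x (m) -> screen x (px)
    [0,     -scale, window_h],     # world y (m) -> screen y (px), flipped
    [0,      0,     1],
])

world_point = np.array([2.0, 3.0, 1.0])          # homogeneous (x, y, 1)
screen = (transform @ world_point)[:2].astype(int)
print(screen)  # [110 930]: 2 m right, 3 m "up" in world -> lower on screen
```

`update_lines()` copies this matrix per frame and folds the runtime `stagerenderer.scale`/`dx`/`dy` settings into it, so the calibration can be nudged while the renderer runs; it also reuses its pool of `pyglet.shapes.Line` objects instead of recreating them every frame.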
@@ -330,6 +330,7 @@ def track_predictions_to_lines(track: Track, camera:Camera, anim_position=.8):
         return
+
     current_point = track.get_projected_history(camera=camera)[-1]

     slide_t = min(1, max(0, inv_lerp(0, 0.8, anim_position))) # slide_position

     lines = []
@@ -372,6 +373,7 @@ def draw_track_predictions(img: cv2.Mat, track: Track, color_index: int, camera:

     lines = track_predictions_to_lines(track, camera, anim_position)
+
     if not lines:
         return
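In `track_predictions_to_lines`, `slide_t` is just `inv_lerp` clamped to [0, 1]: the slide animation completes at `anim_position = 0.8` and holds from there. A quick check of the values:

```python
def inv_lerp(a: float, b: float, v: float) -> float:
    return (v - a) / (b - a)

for v in (0.0, 0.4, 0.8, 1.0):
    slide_t = min(1, max(0, inv_lerp(0, 0.8, v)))
    print(v, slide_t)   # 0.0 -> 0.0, 0.4 -> 0.5, 0.8 -> 1.0, 1.0 -> 1.0
```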
||||||
186
trap/track_history.py
Normal file
186
trap/track_history.py
Normal file
|
|
@ -0,0 +1,186 @@
|
||||||
|
from dataclasses import dataclass
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
import pickle
|
||||||
|
from threading import Lock
|
||||||
|
import time
|
||||||
|
from typing import Dict, Iterable, List, Optional, Set
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from trap.base import Camera, Track
|
||||||
|
from trap.lines import Coordinate
|
||||||
|
from trap.tracker import FinalDisplacementFilter, Smoother, TrackReader
|
||||||
|
|
||||||
|
from scipy.spatial import KDTree
|
||||||
|
|
||||||
|
logger = logging.getLogger('history')
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TrackHistoryState():
|
||||||
|
"""
|
||||||
|
The lock of TrackHistory is not pickle-able so separate it into a separate state
|
||||||
|
"""
|
||||||
|
tracks: List[Track]
|
||||||
|
track_histories: Dict[str, np.ndarray]
|
||||||
|
indexed_track_ids: List[str]
|
||||||
|
tree: KDTree
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class TrackHistory():
|
||||||
|
def __init__(self, path: Path, camera: Camera, cache_path: Optional[Path]):
|
||||||
|
self.path = path
|
||||||
|
self.camera = camera
|
||||||
|
self.cache_path = cache_path
|
||||||
|
self.lock = Lock()
|
||||||
|
self.load_from_cache() or self.reload()
|
||||||
|
|
||||||
|
|
||||||
|
def load_from_cache(self):
|
||||||
|
if self.cache_path is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if self.cache_path.exists():
|
||||||
|
logger.debug("Load history state from cache")
|
||||||
|
with self.cache_path.open('rb') as fp:
|
||||||
|
try:
|
||||||
|
state = pickle.load(fp)
|
||||||
|
if not isinstance(state, TrackHistoryState):
|
||||||
|
raise RuntimeError("Pickled data is not a trackhistorystate")
|
||||||
|
self.state = state
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Cannot read cache {self.cache_path}: {e}")
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
def build_tree(self):
|
||||||
|
reader = TrackReader(self.path, self.camera.fps)
|
||||||
|
logger.debug(f'loaded {len(reader)} tracks')
|
||||||
|
|
||||||
|
track_filter = FinalDisplacementFilter(2)
|
||||||
|
tracks = track_filter.apply(reader, self.camera)
|
||||||
|
logger.debug(f'after filtering left with {len(tracks)} tracks')
|
||||||
|
|
||||||
|
|
||||||
|
tracks: List[Track] = [t.get_with_interpolated_history() for t in tracks]
|
||||||
|
logger.debug(f'interpolated {len(tracks)} tracks')
|
||||||
|
|
||||||
|
# use convolution here, because precision does not matter and it is _way_ faster
|
||||||
|
smoother = Smoother(convolution=True)
|
||||||
|
tracks = [smoother.smooth_track(t) for t in tracks]
|
||||||
|
logger.debug(f'smoothed')
|
||||||
|
|
||||||
|
tracks = {track.track_id: track for track in tracks}
|
||||||
|
|
||||||
|
|
||||||
|
track_histories = {t.track_id: t.get_projected_history(camera=self.camera) for t in tracks.values()}
|
||||||
|
downsampled_histories = {t_id: self.downsample_history(h) for t_id, h in track_histories.items()}
|
||||||
|
logger.debug(f'projected to world space')
|
||||||
|
|
||||||
|
|
||||||
|
# Sample data (coordinates and metadata)
|
||||||
|
# coordinates = [(1, 2, 'Point A'), (3, 4, 'Point B'), (5, 6, 'Point C'), (7, 8, 'Point D')]
|
||||||
|
all_points = []
|
||||||
|
indexed_track_ids: List[str] = []
|
||||||
|
for track_id, history in downsampled_histories.items():
|
||||||
|
all_points.extend([
|
||||||
|
[point[0], point[1]] for point in history
|
||||||
|
])
|
||||||
|
indexed_track_ids.extend([track_id] * len(history))
|
||||||
|
|
||||||
|
# self.flat_idx = self.flat_histories[:,2]
|
||||||
|
|
||||||
|
# Create the KD-Tree
|
||||||
|
tree = KDTree(all_points)
|
||||||
|
|
||||||
|
logger.debug('built tree')
|
||||||
|
return TrackHistoryState(
|
||||||
|
tracks, track_histories, indexed_track_ids, tree
|
||||||
|
)
|
||||||
|
|
||||||
|
def reload(self):
|
||||||
|
state = self.build_tree()
|
||||||
|
|
||||||
|
# aquire lock as brief as possible
|
||||||
|
with self.lock:
|
||||||
|
self.state = state
|
||||||
|
|
||||||
|
|
||||||
|
if self.cache_path:
|
||||||
|
with self.cache_path.open('wb') as fp:
|
||||||
|
logger.debug("Writing history to cache")
|
||||||
|
pickle.dump(self.state, fp)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def get_nearest_tracks(self, point: Coordinate, k:int, max_r: Optional[float] = np.inf):
|
||||||
|
with self.lock:
|
||||||
|
distances, indexes = self.state.tree.query(point, k, distance_upper_bound=max_r)
|
||||||
|
# filter out when there's no
|
||||||
|
indexes = indexes[distances != np.inf]
|
||||||
|
track_ids: Set[str] = {self.state.indexed_track_ids[idx] for idx in indexes}
|
||||||
|
|
||||||
|
# nearby_indexes = self.tree.query_ball_point(point, r)
|
||||||
|
# track_ids = set([self.flat_idx[idx] for idx in nearby_indexes])
|
||||||
|
|
||||||
|
return track_ids
|
||||||
|
|
||||||
|
def ids_as_trajectory(self, track_ids: Iterable[str]):
|
||||||
|
for track_id in track_ids:
|
||||||
|
yield self.state.tracks[track_id].get_projected_history(camera=self.camera)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def downsample_history(cls, history, cell_size=.3):
|
||||||
|
|
||||||
|
|
||||||
|
if not len(history):
|
||||||
|
return []
|
||||||
|
|
||||||
|
positions = np.unique(np.round(history / cell_size), axis=0) * cell_size
|
||||||
|
|
||||||
|
return positions
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
path = Path("EXPERIMENTS/raw/hof3/")
|
||||||
|
logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
calibration_path = Path("../DATASETS/hof3/calibration.json")
|
||||||
|
homography_path = Path("../DATASETS/hof3/homography.json")
|
||||||
|
camera = Camera.from_paths(calibration_path, homography_path, 12)
|
||||||
|
# device = device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
s = time.time()
|
||||||
|
history = TrackHistory(path, camera, Path("/tmp/historystate_hof3.pcl"))
|
||||||
|
dt = time.time() - s
|
||||||
|
print(f'loaded {len(history.state.tracks)} tracks in {dt}s')
|
||||||
|
|
||||||
|
|
||||||
|
track = list(history.state.tracks.values())[25]
|
||||||
|
trajectory_crop = TrackHistory.downsample_history(history.state.track_histories[track.track_id])
|
||||||
|
trajectory_org = track.get_projected_history(camera=camera)
|
||||||
|
target_point = trajectory_org[len(trajectory_org)//2+90]
|
||||||
|
|
||||||
|
import matplotlib.pyplot as plt # Visualization
|
||||||
|
|
||||||
|
track_set = history.get_nearest_tracks(target_point, 10, max_r=np.inf)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
plt.gca().set_aspect('equal')
|
||||||
|
plt.scatter(trajectory_crop[:,0], trajectory_crop[:,1], c='orange')
|
||||||
|
plt.plot(trajectory_org[:,0], trajectory_org[:,1], c='blue', alpha=1)
|
||||||
|
plt.scatter(target_point[0], target_point[1], c='red', alpha=1)
|
||||||
|
for track_id in track_set:
|
||||||
|
closeby = history.state.tracks[track_id].get_projected_history(camera=camera)
|
||||||
|
plt.plot(closeby[:,0], closeby[:,1], c='green', alpha=.1)
|
||||||
|
|
||||||
|
plt.show()
|
||||||
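Two details of `TrackHistory` worth noting. `downsample_history` snaps each trajectory to a `cell_size` grid (`np.round(h / 0.3) * 0.3`) and deduplicates the cells, which keeps the KD-tree small. And `KDTree.query` with a `distance_upper_bound` pads missing neighbours with an `inf` distance and an out-of-range index, which is why `get_nearest_tracks` masks on `distances != np.inf`. A minimal demonstration:

```python
import numpy as np
from scipy.spatial import KDTree

points = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 5.0]])
tree = KDTree(points)

# k=2 neighbours of (0.1, 0), but only within a 0.5 radius:
distances, indexes = tree.query([0.1, 0.0], k=2, distance_upper_bound=0.5)
print(distances, indexes)            # [0.1 inf] [0 3] -- 3 pads a "missing" hit
print(indexes[distances != np.inf])  # [0], the only real neighbour
```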
74  trap/track_writer.py  Normal file

@@ -0,0 +1,74 @@
# used for "Forward Referencing of type annotations"
from __future__ import annotations

from argparse import ArgumentParser
from pathlib import Path

import zmq

from trap.base import Track
from trap.frame_emitter import Frame
from trap.node import Node
from trap.tracker import TrainingDataWriter, TrainingTrackWriter


class TrackWriter(Node):
    def setup(self):
        self.track_sock = self.sub(self.config.zmq_lost_addr)
        self.log_sock = self.push(self.config.zmq_log_addr)

    def run(self):
        with TrainingTrackWriter(self.config.output_dir) as writer:
            try:
                while self.run_loop():
                    zmq_ev = self.track_sock.poll(timeout=1000)
                    if not zmq_ev:
                        # when no data comes in, loop so that is_running is checked
                        continue

                    try:
                        track: Track = self.track_sock.recv_pyobj()

                        if len(track.history) < 20:
                            self.logger.debug(f"ignore short track {len(track.history)}")
                            continue

                        writer.add(track)

                        self.logger.info(f"Added track {track.track_id}")

                        try:
                            self.log_sock.send_string(f"Added track {track.track_id} to dataset, {len(track.history)} datapoints", zmq.NOBLOCK)
                        except Exception as e:
                            self.logger.warning("Not sent the message, broken socket?")

                    except zmq.ZMQError as e:
                        pass
            except KeyboardInterrupt as e:
                print('stopping on interrupt')

        self.logger.info('Stopping')

    @classmethod
    def arg_parser(cls):
        argparser = ArgumentParser()
        argparser.add_argument('--zmq-log-addr',
                            help='Manually specity communication addr for the log messages',
                            type=str,
                            default="tcp://0.0.0.0:99188")
        argparser.add_argument('--zmq-lost-addr',
                            help='Manually specity communication addr for the trajectory messages',
                            type=str,
                            default="ipc:///tmp/feeds_lost")
        argparser.add_argument("--output-dir",
                            help="Directory to save the video in",
                            required=True,
                            default=Path("EXPERIMENTS/raw/hof-lidar"),
                            type=Path)
        return argparser
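`TrainingTrackWriter.add` appends one pickle per track to a single file, and `TrackReader` (in the tracker.py diff that follows) reads them back with repeated `pickle.load` calls until `EOFError`. The round trip in miniature:

```python
import io
import pickle

buf = io.BytesIO()                   # stands in for a tracks-<date>.pcl file
pickle.dump({"1": "track-a"}, buf)   # writer.add(track), once per track
pickle.dump({"2": "track-b"}, buf)

buf.seek(0)
tracksets = []
while True:                          # multiple tracks can be pickled separately
    try:
        tracksets.append(pickle.load(buf))
    except EOFError:
        break
print(tracksets)  # [{'1': 'track-a'}, {'2': 'track-b'}]
```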
182  trap/tracker.py

@@ -1,3 +1,4 @@
+from abc import ABC, abstractmethod
 import argparse
 import csv
 import json
@@ -118,6 +119,7 @@ class FinalDisplacementFilter(TrackFilter):
     def filter(self, track: Track, camera: Camera):
         history = track.get_projected_history(H=None, camera=camera)
+
         displacement = np.linalg.norm(history[0]-history[-1])
         return displacement > self.min_displacement
@@ -125,13 +127,36 @@ class TrackReader:
     def __init__(self, path: Path, fps: int, include_blacklisted = False, exclude_whitelisted = False):
         self.blacklist_file = path / "blacklist.jsonl"
         self.whitelist_file = path / "whitelist.jsonl" # for skipping
-        self.tracks_file = path / "tracks.pkl"
+        # self.tracks_file = path / "tracks.pkl"
+        self.tracks_files = path.glob('tracks*.pkl')

         # with self.tracks_file.open('r') as fp:
         #     tracks_dict: dict = json.load(fp)
-        with self.tracks_file.open('rb') as fp:
-            tracks: dict = pickle.load(fp)
+        tracks: Dict[str, Track] = {}
+        for tracks_file in self.tracks_files:
+            logger.info(f"Read {tracks_file}")
+            with tracks_file.open('rb') as fp:
+                while True:
+                    # multiple tracks can be pickled separately
+                    try:
+                        trackset: Dict[str, Track] = pickle.load(fp)
+                        for track_id, track in trackset.items():
+                            if len(tracks) < 1:
+                                max_item = 0
+                            else:
+                                max_item = max([int(t) for t in tracks.keys()])
+
+                            if int(track.track_id) < max_item:
+                                track_id = str(max_item+1)
+                            else:
+                                track_id = track.track_id
+
+                            track.track_id = track_id
+                            tracks[track.track_id] = track
+                    except EOFError:
+                        break

         if self.blacklist_file.exists():
@@ -255,6 +280,48 @@ class TrainingDataWriter:
         rewrite_raw_track_files(self.path)

+
+class TrainingTrackWriter:
+    """
+    Supersedes TrainingDataWriter, by writing full tracks"""
+    def __init__(self, training_path: Optional[Path]):
+        if training_path is None:
+            self.path = None
+            return
+
+        if not isinstance(training_path, Path):
+            raise ValueError("save-for-training should be a path")
+        if not training_path.exists():
+            logger.info(f"Making path for training data: {training_path}")
+            training_path.mkdir(parents=True, exist_ok=False)
+        else:
+            logger.warning(f"Path for training-data exists: {training_path}. Continuing assuming that's ok.")
+
+        self.path = training_path
+
+    def __enter__(self):
+        if self.path:
+            d = datetime.now().isoformat(timespec="minutes")
+            self.training_fp = open(self.path / f'tracks-{d}.pcl', 'wb')
+            logger.debug(f"Writing tracker data to {self.training_fp.name}")
+            # following https://github.com/StanfordASL/Trajectron-plus-plus/blob/master/experiments/pedestrians/process_data.py
+            # self.csv = csv.DictWriter(self.training_fp, fieldnames=FIELDNAMES, delimiter='\t', quoting=csv.QUOTE_NONE)
+            self.count = 0
+        return self
+
+    def add(self, track: Track):
+        self.count += 1;
+        pickle.dump(track, self.training_fp)
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        # ... ignore exception (type, value, traceback)
+        if not self.path:
+            return
+
+        self.training_fp.close()
+        # rewrite_raw_track_files(self.path)
+
+
 def rewrite_raw_track_files(path: Path):
     source_files = list(sorted(path.glob("*.txt"))) # we loop twice, so need a list instead of generator
@@ -295,7 +362,7 @@ def rewrite_raw_track_files(path: Path):
         with file.open('w') as target_fp:
             for i in range(line_nrs):
-                line = sources.readline()
+                line = sources.readline().rstrip()
                 current_file = sources.current_file
                 if prev_file != current_file:
                     offset: int = max_track_id
@@ -775,49 +842,33 @@ def run():
     is_running.clear()


-class Smoother:
+class TrackPointFilter(ABC):

-    def __init__(self, window_len=6, convolution=False):
-        # for some reason this smoother messes the predictions. Probably skews the points too much??
-        if convolution:
-            self.smoother = ConvolutionSmoother(window_len=window_len, window_type='hanning', copy=None)
-        else:
-            # "Unlike Kalman filtering, which focuses on predicting and updating the current state using historical measurements, Kalman smoothing enhances the accuracy of past state values"
-            # see https://medium.com/@shahalkp1/kalman-smoothing-using-tsmoothie-0175260464e5
-            self.smoother = KalmanSmoother(component='level_trend', component_noise={'level':0.02, 'season': .01, 'trend':0.02},n_seasons = 2, copy=None)
+    @abstractmethod
+    def apply(self, points: List[float]):
+        pass

-    def smooth(self, points: List[float]):
-        self.smoother.smooth(points)
-        return self.smoother.smooth_data[0]

-    def smooth_track(self, track: Track) -> Track:
+    def apply_track(self, track: Track) -> Track:
         ls = [d.l for d in track.history]
         ts = [d.t for d in track.history]
         ws = [d.w for d in track.history]
         hs = [d.h for d in track.history]
-        self.smoother.smooth(ls)
-        ls = self.smoother.smooth_data[0]
-        self.smoother.smooth(ts)
-        ts = self.smoother.smooth_data[0]
-        self.smoother.smooth(ws)
-        ws = self.smoother.smooth_data[0]
-        self.smoother.smooth(hs)
-        hs = self.smoother.smooth_data[0]
+        ls = self.apply(ls)
+        ts = self.apply(ts)
+        ws = self.apply(ws)
+        hs = self.apply(hs)
         new_history = [Detection(d.track_id, l, t, w, h, d.conf, d.state, d.frame_nr, d.det_class) for l, t, w, h, d in zip(ls,ts,ws,hs, track.history)]
         return track.get_with_new_history(new_history)
-        # return Track(track.track_id, new_history, track.predictor_history, track.predictions, track.fps)

-    def smooth_frame_tracks(self, frame: Frame) -> Frame:
+    def apply_to_frame_tracks(self, frame: Frame) -> Frame:
         new_tracks = []
         for track in frame.tracks.values():
-            new_track = self.smooth_track(track)
+            new_track = self.apply_track(track)
             new_tracks.append(new_track)
         frame.tracks = {t.track_id: t for t in new_tracks}
         return frame

-    def smooth_frame_predictions(self, frame: Frame) -> Frame:
+    def apply_to_frame_predictions(self, frame: Frame) -> Frame:

         for track in frame.tracks.values():
             new_predictions = []
@@ -828,14 +879,69 @@ class Smoother:
             xs = [d[0] for d in prediction]
             ys = [d[1] for d in prediction]

-            self.smoother.smooth(xs)
-            xs = self.smoother.smooth_data[0]
-            self.smoother.smooth(ys)
-            ys = self.smoother.smooth_data[0]
+            xs = self.apply(xs)
+            ys = self.apply(ys)

-            smooth_prediction = [[x,y] for x, y in zip(xs, ys)]
+            filtered_prediction = [[x,y] for x, y in zip(xs, ys)]

-            new_predictions.append(smooth_prediction)
+            new_predictions.append(filtered_prediction)
         track.predictions = new_predictions

         return frame

+
+class Smoother(TrackPointFilter):
+
+    def __init__(self, window_len=6, convolution=False):
+        # for some reason this smoother messes the predictions. Probably skews the points too much??
+        if convolution:
+            self.smoother = ConvolutionSmoother(window_len=window_len, window_type='hanning', copy=None)
+        else:
+            # "Unlike Kalman filtering, which focuses on predicting and updating the current state using historical measurements, Kalman smoothing enhances the accuracy of past state values"
+            # see https://medium.com/@shahalkp1/kalman-smoothing-using-tsmoothie-0175260464e5
+            # self.smoother = KalmanSmoother(component='level_trend', component_noise={'level':0.02, 'season': .01, 'trend':0.02},n_seasons = 2, copy=False)
+            self.smoother = KalmanSmoother(component='level', component_noise={'level':0.01},observation_noise=.3, n_seasons = 0, copy=False)
+
+    def apply(self, points: List[float]):
+        self.smoother.smooth(points)
+        return self.smoother.smooth_data[0]
+
+    # aliases, for historic reasons
+    def smooth(self, points: List[float]):
+        return self.apply(points)
+
+    def smooth_track(self, track: Track) -> Track:
+        return self.apply_track(track)
+
+    def smooth_frame_tracks(self, frame: Frame) -> Frame:
+        return self.apply_to_frame_tracks(frame)
+
+    def smooth_frame_predictions(self, frame: Frame) -> Frame:
+        return self.apply_to_frame_predictions(frame)
+
+
+class Noiser(TrackPointFilter):
+
+    def __init__(self, amplitude=.1):
+        self.amplitude = amplitude
+
+    def apply(self, points: List[float]):
+        return np.random.normal(points, scale=self.amplitude).tolist()
+
+
+class RandomOffset(TrackPointFilter):
+    """
+    A bit hacky way to offset the whole track. Does x & y & w & h with the same value
+    """
+    def __init__(self, amplitude=.1):
+        self.amplitude = np.random.normal(scale=amplitude)
+
+    def apply(self, points: List[float]):
+        return [p + self.amplitude for p in points]
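The refactor above pulls the track/frame plumbing out of `Smoother` into an abstract `TrackPointFilter`, so a new filter only implements `apply()` on a list of coordinates; `Noiser` and `RandomOffset` then come almost for free, and `Smoother` keeps its old method names as thin aliases so existing call sites keep working. A self-contained sketch of the shape of that hierarchy (toy data, no trap imports):

```python
from abc import ABC, abstractmethod
from typing import List
import numpy as np

class TrackPointFilter(ABC):
    @abstractmethod
    def apply(self, points: List[float]): ...

class Noiser(TrackPointFilter):
    def __init__(self, amplitude=.1):
        self.amplitude = amplitude
    def apply(self, points: List[float]):
        return np.random.normal(points, scale=self.amplitude).tolist()

class RandomOffset(TrackPointFilter):
    def __init__(self, amplitude=.1):
        self.amplitude = np.random.normal(scale=amplitude)  # drawn once
    def apply(self, points: List[float]):
        return [p + self.amplitude for p in points]

xs = [0.0, 1.0, 2.0]
print(Noiser().apply(xs))        # independent jitter per point
print(RandomOffset().apply(xs))  # the whole series shifted by one draw
```

Because `RandomOffset` samples its offset in `__init__`, all four coordinate series (l, t, w, h) of a track shift by the same amount, as its docstring says; a fresh instance is needed per track to get a new offset.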
@@ -1,4 +1,5 @@
 # lerp & inverse lerp from https://gist.github.com/laundmo/b224b1f4c8ef6ca5fe47e132c8deab56
+from collections import namedtuple
 import linecache
 import math
 import os
@@ -7,6 +8,7 @@ import tracemalloc
 from typing import Iterable
 import cv2
 import numpy as np
+import torch
 from trajectron.environment.map import GeometricMap

 def lerp(a: float, b: float, t: float) -> float:
@@ -28,6 +30,12 @@ def inv_lerp(a: float, b: float, v: float) -> float:
     """
     return (v - a) / (b - a)

+def easeInOutQuad(t: float) -> float:
+    """Quadratic easing in/out - smoothing the transition."""
+    if t < 0.5:
+        return 2 * t * t
+    else:
+        return 1 - np.power(-2 * t + 2, 2) / 2

@@ -128,6 +136,7 @@ def display_top(snapshot: tracemalloc.Snapshot, key_type='lineno', limit=5):
     print("Total allocated size: %.1f KiB" % (total / 1024))


+ImageMapBounds = namedtuple('ImageMapBounds', ['min_x', 'max_x', 'min_y', 'max_y'])
 class ImageMap(GeometricMap): # TODO Implement for image maps -> watch flipped coordinate system
     def __init__(self, img: cv2.Mat, H_world_to_map: cv2.Mat, description=None):
         # homography_matrix = np.loadtxt('H.txt')
@@ -144,12 +153,57 @@ class ImageMap(GeometricMap): # TODO Implement for image maps -> watch flipped
         layers = layers.copy() # copy to apply negative stride
         # layers =

-        #scale 255

         #alternatively: morph image to world space with a scale, as in trajectron/experiments/nuscenes/process_data.py

         super().__init__(layers, homography_matrix, description)

+        self.set_bounds()
+
+    def set_bounds(self):
+        """
+        Use homography and image to calculate the limits of positions in world coordinates
+        """
+        # print(self.data.shape)
+
+        max_x = self.data.shape[1]
+        max_y = self.data.shape[2]
+
+        # this assumes a map that is only scaled and translated, not skewed
+        points_in_map = np.array([
+            [0, 0],
+            [max_x, max_y],
+        ])
+
+        # calculate bounds:
+        H_map_to_world = np.linalg.inv(self.homography)
+
+        # Convert points to homogeneous coordinates and Apply the transformation
+        homogeneous_points = np.hstack((points_in_map, np.ones((points_in_map.shape[0], 1))))
+        transformed_points = np.dot(homogeneous_points, H_map_to_world.T)
+        # Convert back to Cartesian coordinates
+        transformed_points = transformed_points[:, :2]
+
+        self.bounds = ImageMapBounds(
+            transformed_points[0][0],
+            transformed_points[1][0],
+            transformed_points[0][1],
+            transformed_points[1][1]
+        )
+
+    @classmethod
+    def get_cropped_maps_from_scene_map_batch(cls, maps, scene_pts, patch_size, rotation=None, device='cpu'):
+        min_bounds = [maps[0].bounds.min_x, maps[0].bounds.min_y]
+        max_bounds = [maps[0].bounds.max_x, maps[0].bounds.max_y]
+
+        if torch.is_tensor(scene_pts):
+            min_bounds = torch.Tensor(min_bounds)
+            max_bounds = torch.Tensor(max_bounds)
+
+        scene_pts = scene_pts.clip(min=min_bounds, max=max_bounds)
+
+        return super().get_cropped_maps_from_scene_map_batch(maps, scene_pts, patch_size, rotation, device)
+
     def to_map_points(self, scene_pts):
         org_shape = None
         if len(scene_pts.shape) > 2:
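`set_bounds` maps two image corners through the inverse homography to get world-coordinate limits; since the map is assumed to be only scaled and translated, two corners are enough. Worked through with a toy homography (0.05 m per pixel, origin shifted by (1, 2) m — made-up numbers):

```python
import numpy as np

H_world_to_map = np.array([[20., 0., -20.],    # world (m) -> map (px)
                           [0., 20., -40.],
                           [0., 0., 1.]])
H_map_to_world = np.linalg.inv(H_world_to_map)

points_in_map = np.array([[0, 0], [800, 600]])  # image corners (px)
homogeneous = np.hstack((points_in_map, np.ones((2, 1))))
world = (homogeneous @ H_map_to_world.T)[:, :2]
print(world)  # [[ 1.  2.] [41. 32.]] -> min/max x and y in metres
```

The new `easeInOutQuad` in the same file is the standard quadratic ease: 0.25 maps to 0.125, 0.5 to 0.5, and 0.75 to 0.875, symmetric around (0.5, 0.5).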
21  uv.lock

@@ -417,6 +417,17 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d3/36/e0010483ca49b9bf6f389631ccea07b3ff6b678d14d8c7a0a4357860c36a/dash-3.2.0-py3-none-any.whl", hash = "sha256:4c1819588d83bed2cbcf5807daa5c2380c8c85789a6935a733f018f04ad8a6a2", size = 7900661 },
 ]

+[[package]]
+name = "dearpygui"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2e/26/93db234a69d01ae84ed61abb991a3da5555410abdf38d5932fb7ed594e12/dearpygui-2.1.0-cp310-cp310-macosx_10_6_x86_64.whl", hash = "sha256:374d4c605affdf8a49eef4cf434f76e17df374590e51745b62c67025d1d41ec3", size = 2101011 },
+    { url = "https://files.pythonhosted.org/packages/ef/13/0301fd7fd3ac01ed23003873681c835f18d14267953c54ff6889cb1d0212/dearpygui-2.1.0-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:074985fa9d1622edda89c7f113d7a25ae5543f2e3f684bba3601e688c873936f", size = 1874384 },
+    { url = "https://files.pythonhosted.org/packages/5d/54/5e53d99a1d352f387bd18250d8bcfe9e60594eefc062f246f61810c1bd80/dearpygui-2.1.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:49808389f1d1acfb4f5cbd9f1b1ec188fbcd2d828414094864f7035e27158db2", size = 2636636 },
+    { url = "https://files.pythonhosted.org/packages/34/44/2508c4ba08b28cc9e827a04ae00fc73dbe6820531241eb43ba28f370372b/dearpygui-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:792c1cd34dd0d03bf15838cc1e66ad01282c672ef0d2b9981368b6dcd37e67d3", size = 1810184 },
+]
+
 [[package]]
 name = "debugpy"
 version = "1.8.13"
@@ -1878,11 +1889,11 @@ wheels = [

 [[package]]
 name = "pyglet"
-version = "2.1.3"
+version = "2.1.11"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/08/90/7f8a8d939dbf8f6875b957540cc3091e936e41c4ac8f190a9517589678f8/pyglet-2.1.3.tar.gz", hash = "sha256:9a2c3c84228402ea7103443ac8a52060cc1c91419951ec1105845ce30fed2ce8", size = 6515859 }
+sdist = { url = "https://files.pythonhosted.org/packages/e3/6b/84c397a74cd33eb377168c682e9e3d6b90c1c10c661e11ea5b397ac8497c/pyglet-2.1.11.tar.gz", hash = "sha256:8285d0af7d0ab443232a81df4d941e0d5c48c18a23ec770b3e5c59a222f5d56e", size = 6594448 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/83/d6/41208b6741e732a7faf160e89346a17e81b14899bd7ae90058da858083d6/pyglet-2.1.3-py3-none-any.whl", hash = "sha256:5a7eaf35869ecf7451fb49cc064c4c0e9a118eaa5e771667c607125b13f85e33", size = 962091 },
+    { url = "https://files.pythonhosted.org/packages/77/a2/2b09fbff0eedbe44fbf164b321439a38f7c5568d8b754aa197ee45886431/pyglet-2.1.11-py3-none-any.whl", hash = "sha256:fa0f4fdf366cfc5040aeb462416910b0db2fa374b7d620b7a432178ca3fa8af1", size = 1032213 },
 ]

 [[package]]
@@ -2757,6 +2768,7 @@ source = { editable = "." }
 dependencies = [
     { name = "baumer-neoapi" },
     { name = "bytetracker" },
+    { name = "dearpygui" },
     { name = "deep-sort-realtime" },
     { name = "facenet-pytorch" },
     { name = "ffmpeg-python" },
@@ -2798,6 +2810,7 @@ dependencies = [
 requires-dist = [
     { name = "baumer-neoapi", path = "../../Downloads/Baumer_neoAPI_1.5.0_lin_x86_64_python/wheel/baumer_neoapi-1.5.0-cp34.cp35.cp36.cp37.cp38.cp39.cp310.cp311.cp312-none-linux_x86_64.whl" },
     { name = "bytetracker", git = "https://github.com/rubenvandeven/bytetrack-pip" },
+    { name = "dearpygui", specifier = ">=2.1.0" },
     { name = "deep-sort-realtime", specifier = ">=1.3.2,<2" },
     { name = "facenet-pytorch", specifier = ">=2.5.3" },
     { name = "ffmpeg-python", specifier = ">=0.2.0,<0.3" },
@@ -2812,7 +2825,7 @@ requires-dist = [
     { name = "opencv-python", path = "opencv_python-4.10.0.84-cp310-cp310-linux_x86_64.whl" },
     { name = "pandas-helper-calc", git = "https://github.com/scls19fr/pandas-helper-calc" },
     { name = "py-to-proto", specifier = ">=0.6.0" },
-    { name = "pyglet", specifier = ">=2.0.15,<3" },
+    { name = "pyglet", specifier = ">=2.1.8,<3" },
     { name = "pyglet-cornerpin", specifier = ">=0.3.0,<0.4" },
     { name = "python-statemachine", specifier = ">=2.5.0" },
     { name = "pyusb", specifier = ">=1.3.1,<2" },