# trap/trap/config.py
import argparse
import json
import types
from pathlib import Path
from typing import Optional

import numpy as np

from trap.frame_emitter import Camera, UrlOrPath
from trap.tracker import DETECTORS, TRACKER_BYTETRACK, TRACKERS


class LambdaParser(argparse.ArgumentParser):
    """Execute lambda functions given as argument defaults.

    A default provided as a lambda is only evaluated at parse time, so
    expensive defaults (e.g. globbing a dataset directory) run only when
    the option is not given on the command line.
    """
    def parse_args(self, args=None, namespace=None):
        args = super().parse_args(args, namespace)
        for key in vars(args):
            f = args.__dict__[key]
            if isinstance(f, types.LambdaType):
                print(f'Getting default value for {key}')
                args.__dict__[key] = f()
        return args
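
# Example (sketch): with LambdaParser, an expensive default such as a glob
# only runs when the option is absent from the command line:
#
#     p = LambdaParser()
#     p.add_argument('--paths', default=lambda: list(Path('.').glob('*.mp4')))
#     args = p.parse_args([])  # args.paths now holds the evaluated list
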
parser = LambdaParser()
parser.add_argument(
    '--verbose',
    '-v',
    help="Increase verbosity. Add multiple times to increase further.",
    action='count', default=0
)
parser.add_argument(
    '--remote-log-addr',
    help="Connect to a remote logger such as cutelog. Specify the IP address.",
    type=str,
)
parser.add_argument(
    '--remote-log-port',
    help="Connect to a remote logger such as cutelog. Specify the port.",
    type=int,
    default=19996
)
inference_parser = parser.add_argument_group('Inference')
connection_parser = parser.add_argument_group('Connection')
frame_emitter_parser = parser.add_argument_group('Frame emitter')
tracker_parser = parser.add_argument_group('Tracker')
render_parser = parser.add_argument_group('Renderer')

class HomographyAction(argparse.Action):
    """Load a homography matrix from a .json or comma-separated .txt file
    and store it on the namespace as H, alongside the original path."""
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values: Path, option_string=None):
        if values.suffix == '.json':
            with values.open('r') as fp:
                H = np.array(json.load(fp))
        else:
            H = np.loadtxt(values, delimiter=',')
        setattr(namespace, self.dest, values)
        setattr(namespace, 'H', H)
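
# Example (sketch): both accepted homography formats encode the same 3x3
# matrix, e.g. an identity homography:
#
#     H.json:  [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
#     H.txt:   1,0,0
#              0,1,0
#              0,0,1
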
class CameraAction(argparse.Action):
    """Load camera intrinsics and lens distortion from a calibration.json
    file into a Camera instance.

    Note: reads namespace.H and namespace.camera_fps, so --homography (and
    an explicit --camera-fps) should precede --calibration on the command
    line.
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        if values is None:
            setattr(namespace, self.dest, None)
        else:
            # Camera.from_calibfile reads camera_matrix, dist_coeff and the
            # frame dimensions from the calibration JSON.
            camera = Camera.from_calibfile(Path(values), namespace.H, namespace.camera_fps)
            setattr(namespace, 'camera', camera)
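
# Example (sketch, hypothetical invocation): pass --homography before
# --calibration so that namespace.H exists when CameraAction runs:
#
#     <entrypoint> --homography H.txt --calibration calibration.json
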
inference_parser.add_argument("--step-size",
# TODO)) Make dataset/model metadata
help="sample step size (should be the same as for data processing and augmentation)",
type=int,
default=1,
)
inference_parser.add_argument("--model_dir",
help="directory with the model to use for inference",
type=str, # TODO: make into Path
default='../Trajectron-plus-plus/experiments/trap/models/models_18_Oct_2023_19_56_22_virat_vel_ar3/')
# default='../Trajectron-plus-plus/experiments/pedestrians/models/models_04_Oct_2023_21_04_48_eth_vel_ar3')
inference_parser.add_argument("--conf",
help="path to json config file for hyperparameters, relative to model_dir",
type=str,
default='config.json')

# Model Parameters (hyperparameters)
inference_parser.add_argument("--offline_scene_graph",
    help="whether to precompute the scene graphs offline, options are 'no' and 'yes'",
    type=str,
    default='yes')
inference_parser.add_argument("--dynamic_edges",
    help="whether to use dynamic edges or not, options are 'no' and 'yes'",
    type=str,
    default='yes')
inference_parser.add_argument("--edge_state_combine_method",
    help="the method to use for combining edges of the same type",
    type=str,
    default='sum')
inference_parser.add_argument("--edge_influence_combine_method",
    help="the method to use for combining edge influences",
    type=str,
    default='attention')
inference_parser.add_argument('--edge_addition_filter',
    nargs='+',
    help="what scaling to use for edges as they're created",
    type=float,
    # We don't automatically pad left with 0.0; if you want a sharp and short
    # edge addition, you need a 0.0 at the beginning, e.g. [0.0, 1.0].
    default=[0.25, 0.5, 0.75, 1.0])
inference_parser.add_argument('--edge_removal_filter',
    nargs='+',
    help="what scaling to use for edges as they're removed",
    type=float,
    # We don't automatically pad right with 0.0; for a sharp drop-off like
    # the default, you need a 0.0 at the end.
    default=[1.0, 0.0])
inference_parser.add_argument('--incl_robot_node',
    help="whether to include a robot node in the graph or simply model all agents",
    action='store_true')
inference_parser.add_argument('--map_encoding',
    help="Whether to use map encoding or not",
    action='store_true')
inference_parser.add_argument('--no_edge_encoding',
    help="Whether to use neighbors edge encoding",
    action='store_true')
inference_parser.add_argument('--batch_size',
    help='training batch size',
    type=int,
    default=256)
inference_parser.add_argument('--k_eval',
    help='how many samples to take during evaluation',
    type=int,
    default=25)

# Data Parameters
inference_parser.add_argument("--eval_data_dict",
    help="what file to load for evaluation data (WHEN NOT USING LIVE DATA)",
    type=str,
    default='../Trajectron-plus-plus/experiments/processed/eth_test.pkl')
inference_parser.add_argument("--output_dir",
    help="what dir to save output (i.e., saved models, logs, etc.) (WHEN NOT USING LIVE OUTPUT)",
    type=Path,
    default='./OUT/test_inference')
# inference_parser.add_argument('--device',
#     help='what device to perform training on',
#     type=str,
#     default='cuda:0')
inference_parser.add_argument("--eval_device",
    help="what device to use during inference",
    type=str,
    default="cpu")
inference_parser.add_argument('--seed',
    help='manual seed to use, default is 123',
    type=int,
    default=123)
inference_parser.add_argument('--predict_training_data',
    help='Ignore tracker and predict data from the training dataset',
    action='store_true')
inference_parser.add_argument("--smooth-predictions",
    help="Smooth the predicted tracks",
    action='store_true')
inference_parser.add_argument('--prediction-horizon',
    help='Trajectron.incremental_forward parameter',
    type=int,
    default=30)
inference_parser.add_argument('--num-samples',
    help='Trajectron.incremental_forward parameter',
    type=int,
    default=5)
inference_parser.add_argument("--full-dist",
    help="Trajectron.incremental_forward parameter",
    action='store_true')
inference_parser.add_argument("--gmm-mode",
help="Trajectron.incremental_forward parameter",
type=bool,
default=True)
inference_parser.add_argument("--z-mode",
help="Trajectron.incremental_forward parameter",
action='store_true')
inference_parser.add_argument('--cm-to-m',
help="Correct for homography that is in cm (i.e. {x,y}/100). Should also be used when processing data",
action='store_true')
inference_parser.add_argument('--center-data',
help="Center data around cx and cy. Should also be used when processing data",
action='store_true')

# Internal connections.
connection_parser.add_argument('--zmq-trajectory-addr',
    help='Manually specify the communication address for the trajectory messages',
    type=str,
    default="ipc:///tmp/feeds_traj")
connection_parser.add_argument('--zmq-camera-stream-addr',
    help='Manually specify the communication address for the camera stream messages',
    type=str,
    default="ipc:///tmp/feeds_img")
connection_parser.add_argument('--zmq-prediction-addr',
    help='Manually specify the communication address for the prediction messages',
    type=str,
    default="ipc:///tmp/feeds_preds")
connection_parser.add_argument('--zmq-frame-addr',
    help='Manually specify the communication address for the frame messages',
    type=str,
    default="ipc:///tmp/feeds_frame")
connection_parser.add_argument('--ws-port',
    help='Port to listen on for incoming websocket connections. Also serves the testing HTML page.',
    type=int,
    default=8888)
connection_parser.add_argument('--bypass-prediction',
    help='For debugging purposes: forward websocket input immediately to output',
    action='store_true')

# Frame emitter
frame_emitter_parser.add_argument("--video-src",
    help="Source video to track from. Can be a relative or absolute path, or a URL, such as an RTSP resource",
    type=UrlOrPath,
    nargs='+',
    default=lambda: [UrlOrPath(p) for p in Path('../DATASETS/VIRAT_subset_0102x/').glob('*.mp4')])
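# Note: the lambda default above is resolved by LambdaParser.parse_args, so
# the dataset directory is only globbed when --video-src is not given.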
frame_emitter_parser.add_argument("--video-offset",
help="Start playback from given frame. Note that when src is an array, this applies to all videos individually.",
default=None,
type=int)
#TODO: camera as source
frame_emitter_parser.add_argument("--video-loop",
help="By default it emitter will run only once. This allows it to loop the video file to keep testing.",
action='store_true')
#TODO: camera as source

# Tracker
tracker_parser.add_argument("--camera-fps",
    help="Camera FPS",
    type=int,
    default=12)
tracker_parser.add_argument("--homography",
    help="File with homography params",
    type=Path,
    default='../DATASETS/VIRAT_subset_0102x/VIRAT_0102_homography_img2world.txt',
    action=HomographyAction)
tracker_parser.add_argument("--calibration",
    help="File with camera intrinsics and lens distortion params (calibration.json)",
    # type=Path,
    default=None,
    action=CameraAction)
tracker_parser.add_argument("--save-for-training",
    help="Specify the path in which to save",
    type=Path,
    default=None)
tracker_parser.add_argument("--detector",
    help="Specify the detector to use",
    type=str,
    choices=DETECTORS)
tracker_parser.add_argument("--tracker",
    help="Specify the tracker to use",
    type=str,
    default=TRACKER_BYTETRACK,
    choices=TRACKERS)
tracker_parser.add_argument("--smooth-tracks",
    help="Smooth the tracker tracks before sending them to the predictor",
    action='store_true')

# now in calibration.json
# tracker_parser.add_argument("--frame-width",
#     help="width of the frames",
#     type=int,
#     default=1280)
# tracker_parser.add_argument("--frame-height",
#     help="height of the frames",
#     type=int,
#     default=720)

# Renderer
# render_parser.add_argument("--disable-renderer",
#     help="Disable the renderer altogether. Useful when using an external renderer",
#     action="store_true")
render_parser.add_argument("--render-file",
    help="Render a video file previewing the prediction, and its delay compared to the current frame",
    action='store_true')
render_parser.add_argument("--render-window",
    help="Render a preview to a window",
    action='store_true')
render_parser.add_argument("--render-animation",
    help="Render animation (pyglet)",
    action='store_true')
render_parser.add_argument("--render-debug-shapes",
    help="Lines and points for debugging/mapping",
    action='store_true')
render_parser.add_argument("--render-hide-stats",
    help="Hide the stats overlay by default (toggle with 'h')",
    action='store_true')
render_parser.add_argument("--full-screen",
    help="Set the window to full screen",
    action='store_true')
render_parser.add_argument("--render-url",
    help="""Stream the renderer to the given URL. Two easy approaches:
    - using the zmq wrapper, specify the LISTENING IP. To listen for any incoming connection: zmq:tcp://0.0.0.0:5556
    - alternatively, using e.g. UDP, specify the IP of the client, e.g. udp://100.69.123.91:5556/stream
    Note that with ZMQ multiple clients can connect simultaneously, e.g. using `ffplay zmq:tcp://100.109.175.82:5556`.
    When using UDP, connect with `ffplay udp://100.109.175.82:5556/stream`.
    """,
    type=str,
    default=None)
render_parser.add_argument("--debug-points-file",
    help="A json file with points to test projection/homography etc.",
    type=Path,
    required=False,
)
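
if __name__ == "__main__":
    # Minimal debugging sketch: parse the CLI and print the resolved
    # configuration. LambdaParser evaluates lambda defaults here, e.g.
    # the --video-src glob.
    args = parser.parse_args()
    for key, value in vars(args).items():
        print(f"{key}: {value}")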