# trap/trap/config.py

import argparse
from pathlib import Path
import types
from trap.tracker import DETECTORS
from typing import Optional  # was mistakenly imported from pyparsing
class LambdaParser(argparse.ArgumentParser):
    """ArgumentParser that calls any lambda default after parsing, so
    expensive defaults (e.g. globbing a dataset directory) are evaluated
    lazily, and only when the argument was not given on the command line.
    """
    def parse_args(self, args=None, namespace=None):
        args = super().parse_args(args, namespace)
        for key in vars(args):
            f = args.__dict__[key]
            if isinstance(f, types.LambdaType):
                print(f'Getting default value for {key}')
                args.__dict__[key] = f()
        return args
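
# str2bool is an addition, not in the original file: argparse's `type=bool`
# converts via bool(str), so any non-empty string -- including "False" --
# parses as True. This converter is a common workaround; the boolean-valued
# flags below use it instead.
def str2bool(v: str) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {v!r}')
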
parser = LambdaParser()
parser.add_argument(
    '--verbose',
    '-v',
    help="Increase verbosity. Add multiple times to increase further.",
    action='count', default=0
)
parser.add_argument(
    '--remote-log-addr',
    help="Connect to a remote logger like cutelog. Specify the IP",
    type=str,
)
parser.add_argument(
    '--remote-log-port',
    help="Connect to a remote logger like cutelog. Specify the port",
    type=int,
    default=19996
)
inference_parser = parser.add_argument_group('Inference')
connection_parser = parser.add_argument_group('Connection')
frame_emitter_parser = parser.add_argument_group('Frame emitter')
tracker_parser = parser.add_argument_group('Tracker')
render_parser = parser.add_argument_group('Renderer')
inference_parser.add_argument("--model_dir",
help="directory with the model to use for inference",
type=str, # TODO: make into Path
default='../Trajectron-plus-plus/experiments/trap/models/models_18_Oct_2023_19_56_22_virat_vel_ar3/')
# default='../Trajectron-plus-plus/experiments/pedestrians/models/models_04_Oct_2023_21_04_48_eth_vel_ar3')
inference_parser.add_argument("--conf",
help="path to json config file for hyperparameters, relative to model_dir",
type=str,
default='config.json')
# Model Parameters (hyperparameters)
inference_parser.add_argument("--offline_scene_graph",
help="whether to precompute the scene graphs offline, options are 'no' and 'yes'",
type=str,
default='yes')
inference_parser.add_argument("--dynamic_edges",
help="whether to use dynamic edges or not, options are 'no' and 'yes'",
type=str,
default='yes')
inference_parser.add_argument("--edge_state_combine_method",
help="the method to use for combining edges of the same type",
type=str,
default='sum')
inference_parser.add_argument("--edge_influence_combine_method",
help="the method to use for combining edge influences",
type=str,
default='attention')
inference_parser.add_argument('--edge_addition_filter',
nargs='+',
help="what scaling to use for edges as they're created",
type=float,
default=[0.25, 0.5, 0.75, 1.0]) # We don't automatically pad left with 0.0, if you want a sharp
# and short edge addition, then you need to have a 0.0 at the
# beginning, e.g. [0.0, 1.0].
inference_parser.add_argument('--edge_removal_filter',
nargs='+',
help="what scaling to use for edges as they're removed",
type=float,
default=[1.0, 0.0]) # We don't automatically pad right with 0.0, if you want a sharp drop off like
# the default, then you need to have a 0.0 at the end.
inference_parser.add_argument('--incl_robot_node',
help="whether to include a robot node in the graph or simply model all agents",
action='store_true')
inference_parser.add_argument('--map_encoding',
help="Whether to use map encoding or not",
action='store_true')
inference_parser.add_argument('--no_edge_encoding',
    help="disable the use of neighbour edge encoding",
    action='store_true')
inference_parser.add_argument('--batch_size',
    help='training batch size',
    type=int,
    default=256)
inference_parser.add_argument('--k_eval',
    help='how many samples to take during evaluation',
    type=int,
    default=25)
# Data Parameters
inference_parser.add_argument("--eval_data_dict",
help="what file to load for evaluation data (WHEN NOT USING LIVE DATA)",
type=str,
default='../Trajectron-plus-plus/experiments/processed/eth_test.pkl')
inference_parser.add_argument("--output_dir",
help="what dir to save output (i.e., saved models, logs, etc) (WHEN NOT USING LIVE OUTPUT)",
type=Path,
default='./OUT/test_inference')
# inference_parser.add_argument('--device',
# help='what device to perform training on',
# type=str,
# default='cuda:0')
inference_parser.add_argument("--eval_device",
help="what device to use during inference",
type=str,
default="cpu")
inference_parser.add_argument('--seed',
help='manual seed to use, default is 123',
type=int,
default=123)
inference_parser.add_argument('--predict_training_data',
help='Ignore tracker and predict data from the training dataset',
action='store_true')
inference_parser.add_argument("--smooth-predictions",
help="Smooth the predicted tracks",
action='store_true')
inference_parser.add_argument('--prediction-horizon',
help='Trajectron.incremental_forward parameter',
type=int,
default=30)
inference_parser.add_argument('--num-samples',
help='Trajectron.incremental_forward parameter',
type=int,
default=5)
inference_parser.add_argument("--full-dist",
help="Trajectron.incremental_forward parameter",
type=bool,
default=False)
inference_parser.add_argument("--gmm-mode",
help="Trajectron.incremental_forward parameter",
type=bool,
default=True)
inference_parser.add_argument("--z-mode",
help="Trajectron.incremental_forward parameter",
type=bool,
default=False)
# Internal connections.
connection_parser.add_argument('--zmq-trajectory-addr',
    help='Manually specify communication addr for the trajectory messages',
    type=str,
    default="ipc:///tmp/feeds/traj")
connection_parser.add_argument('--zmq-camera-stream-addr',
    help='Manually specify communication addr for the camera stream messages',
    type=str,
    default="ipc:///tmp/feeds/img")
connection_parser.add_argument('--zmq-prediction-addr',
    help='Manually specify communication addr for the prediction messages',
    type=str,
    default="ipc:///tmp/feeds/preds")
connection_parser.add_argument('--zmq-frame-addr',
    help='Manually specify communication addr for the frame messages',
    type=str,
    default="ipc:///tmp/feeds/frame")
connection_parser.add_argument('--ws-port',
    help='Port to listen on for incoming websocket connections. Also serves the testing HTML page.',
    type=int,
    default=8888)
connection_parser.add_argument('--bypass-prediction',
    help='For debugging purposes: pass websocket input immediately to the output',
    action='store_true')
# Frame emitter
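# Note (added comment): the default below is a lambda, which LambdaParser only
# evaluates (globbing the dataset directory) when --video-src is not given.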
frame_emitter_parser.add_argument("--video-src",
help="source video to track from",
type=Path,
nargs='+',
default=lambda: list(Path('../DATASETS/VIRAT_subset_0102x/').glob('*.mp4')))
frame_emitter_parser.add_argument("--video-offset",
help="Start playback from given frame. Note that when src is an array, this applies to all videos individually.",
default=None,
type=int)
#TODO: camera as source
frame_emitter_parser.add_argument("--video-loop",
help="By default it emitter will run only once. This allows it to loop the video file to keep testing.",
action='store_true')
#TODO: camera as source
# Tracker
tracker_parser.add_argument("--homography",
help="File with homography params",
type=Path,
default='../DATASETS/VIRAT_subset_0102x/VIRAT_0102_homography_img2world.txt')
tracker_parser.add_argument("--save-for-training",
help="Specify the path in which to save",
type=Path,
default=None)
tracker_parser.add_argument("--detector",
help="Specify the detector to use",
type=str,
choices=DETECTORS)
tracker_parser.add_argument("--smooth-tracks",
help="Smooth the tracker tracks before sending them to the predictor",
action='store_true')
# Renderer
render_parser.add_argument("--render-file",
help="Render a video file previewing the prediction, and its delay compared to the current frame",
action='store_true')
render_parser.add_argument("--render-url",
help="""Stream renderer on given URL. Two easy approaches:
- using zmq wrapper one can specify the LISTENING ip. To listen to any incoming connection: zmq:tcp://0.0.0.0:5556
- alternatively, using e.g. UDP one needs to specify the IP of the client. E.g. udp://100.69.123.91:5556/stream
Note that with ZMQ you can have multiple clients connecting simultaneously. E.g. using `ffplay zmq:tcp://100.109.175.82:5556`
When using udp, connecting can be done using `ffplay udp://100.109.175.82:5556/stream`
""",
type=str,
default=None)
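
if __name__ == "__main__":
    # Minimal sketch, not part of the normal entry points (assumption: this
    # module is usually only imported for `parser`). It demonstrates that
    # LambdaParser resolves callable defaults after parsing.
    demo = LambdaParser()
    demo.add_argument('--n', type=int, default=lambda: 40 + 2)
    print(demo.parse_args([]).n)  # prints "Getting default value for n", then 42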