# adapted from Trajectron++ online_server.py
import json
import logging
import os
import pathlib
import random
import time

import dill
import numpy as np
import torch
import zmq

from trajectron.environment import Environment, Scene
from trajectron.model.model_registrar import ModelRegistrar
from trajectron.model.online.online_trajectron import OnlineTrajectron
from trajectron.utils import prediction_output_to_trajectories

logger = logging.getLogger("trajpred.inference")


# Device selection from the original online_server.py, kept for reference
# (it expects a `device` field on the config object):
#
# if not torch.cuda.is_available() or self.config.device == 'cpu':
#     self.config.device = torch.device('cpu')
# else:
#     if torch.cuda.device_count() == 1:
#         # If you have CUDA_VISIBLE_DEVICES set, which you should,
#         # then this will prevent leftover flag arguments from
#         # messing with the device allocation.
#         self.config.device = 'cuda:0'
# self.config.device = torch.device(self.config.device)
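

# A minimal live version of the selection above (a sketch of ours, not part of
# the original script); callers would pass e.g. config.eval_device.
def resolve_device(requested: str) -> torch.device:
    # Fall back to CPU when CUDA is unavailable or explicitly requested.
    if not torch.cuda.is_available() or requested == 'cpu':
        return torch.device('cpu')
    if torch.cuda.device_count() == 1:
        # With CUDA_VISIBLE_DEVICES set, the single visible device is
        # always addressed as cuda:0.
        return torch.device('cuda:0')
    return torch.device(requested)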


def create_online_env(env, hyperparams, scene_idx, init_timestep):
    """Build a single-scene Environment seeded with the first
    `init_timestep + 1` timesteps of the chosen scene, for incremental
    inference."""
    test_scene = env.scenes[scene_idx]

    online_scene = Scene(timesteps=init_timestep + 1,
                         map=test_scene.map,
                         dt=test_scene.dt)
    online_scene.nodes = test_scene.get_nodes_clipped_at_time(
        timesteps=np.arange(init_timestep - hyperparams['maximum_history_length'],
                            init_timestep + 1),
        state=hyperparams['state'])
    online_scene.robot = test_scene.robot
    online_scene.calculate_scene_graph(attention_radius=env.attention_radius,
                                       edge_addition_filter=hyperparams['edge_addition_filter'],
                                       edge_removal_filter=hyperparams['edge_removal_filter'])

    return Environment(node_type_list=env.node_type_list,
                       standardization=env.standardization,
                       scenes=[online_scene],
                       attention_radius=env.attention_radius,
                       robot_type=env.robot_type)


def get_maps_for_input(input_dict, scene, hyperparams):
    """Crop a rotated local map patch around every node that has a map encoder."""
    scene_maps = list()
    scene_pts = list()
    heading_angles = list()
    patch_sizes = list()
    nodes_with_maps = list()
    for node in input_dict:
        if node.type in hyperparams['map_encoder']:
            x = input_dict[node]
            me_hyp = hyperparams['map_encoder'][node.type]
            if 'heading_state_index' in me_hyp:
                heading_state_index = me_hyp['heading_state_index']
                # Rotate the map in the direction opposite to the agent's
                # heading so that map patch and trajectory frames line up.
                if isinstance(heading_state_index, list):  # infer from velocity or heading vector
                    heading_angle = -np.arctan2(x[-1, heading_state_index[1]],
                                                x[-1, heading_state_index[0]]) * 180 / np.pi
                else:
                    heading_angle = -x[-1, heading_state_index] * 180 / np.pi
            else:
                heading_angle = None

            scene_map = scene.map[node.type]
            map_point = x[-1, :2]

            patch_size = hyperparams['map_encoder'][node.type]['patch_size']

            scene_maps.append(scene_map)
            scene_pts.append(map_point)
            heading_angles.append(heading_angle)
            patch_sizes.append(patch_size)
            nodes_with_maps.append(node)

    if not scene_maps:
        # No node in this batch uses a map encoder; avoid indexing into an
        # empty list below.
        return {}

    if heading_angles[0] is None:
        heading_angles = None
    else:
        heading_angles = torch.Tensor(heading_angles)

    maps = scene_maps[0].get_cropped_maps_from_scene_map_batch(scene_maps,
                                                               scene_pts=torch.Tensor(scene_pts),
                                                               patch_size=patch_sizes[0],
                                                               rotation=heading_angles)

    maps_dict = {node: maps[[i]] for i, node in enumerate(nodes_with_maps)}
    return maps_dict


class InferenceServer:
    """Subscribes to tracked trajectories over ZMQ and publishes predictions."""

    def __init__(self, config):
        self.config = config

        context = zmq.Context()
        self.trajectory_socket: zmq.Socket = context.socket(zmq.SUB)
        self.trajectory_socket.connect(config.zmq_trajectory_addr)
        self.trajectory_socket.setsockopt(zmq.SUBSCRIBE, b'')

        self.prediction_socket: zmq.Socket = context.socket(zmq.PUB)
        self.prediction_socket.bind(config.zmq_prediction_addr)
        logger.debug(self.prediction_socket)
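
    # The SUB socket above subscribes to every topic (empty prefix). A
    # downstream consumer of the predictions would mirror that pattern,
    # e.g. (a sketch of ours, not part of the original script):
    #
    #   ctx = zmq.Context()
    #   sock = ctx.socket(zmq.SUB)
    #   sock.connect(config.zmq_prediction_addr)
    #   sock.setsockopt(zmq.SUBSCRIBE, b'')
    #   msg = json.loads(sock.recv_string())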

    def run(self):
        if self.config.seed is not None:
            random.seed(self.config.seed)
            np.random.seed(self.config.seed)
            torch.manual_seed(self.config.seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(self.config.seed)

        # Choose one of the model directory names under the experiment/*/models folders.
        # Possibilities are 'vel_ee', 'int_ee', 'int_ee_me', or 'robot'.
        # model_dir = os.path.join(self.config.log_dir, 'int_ee')
        # model_dir = 'models/models_04_Oct_2023_21_04_48_eth_vel_ar3'

        # Load hyperparameters from json
        config_file = os.path.join(self.config.model_dir, self.config.conf)
        if not os.path.exists(config_file):
            raise ValueError(f"Config json not found: {config_file}")
        with open(config_file, 'r') as conf_json:
            hyperparams = json.load(conf_json)

        # Add hyperparams from arguments
        hyperparams['dynamic_edges'] = self.config.dynamic_edges
        hyperparams['edge_state_combine_method'] = self.config.edge_state_combine_method
        hyperparams['edge_influence_combine_method'] = self.config.edge_influence_combine_method
        hyperparams['edge_addition_filter'] = self.config.edge_addition_filter
        hyperparams['edge_removal_filter'] = self.config.edge_removal_filter
        hyperparams['batch_size'] = self.config.batch_size
        hyperparams['k_eval'] = self.config.k_eval
        hyperparams['offline_scene_graph'] = self.config.offline_scene_graph
        hyperparams['incl_robot_node'] = self.config.incl_robot_node
        hyperparams['edge_encoding'] = not self.config.no_edge_encoding
        hyperparams['use_map_encoding'] = self.config.map_encoding

        output_save_dir = os.path.join(self.config.output_dir, 'pred_figs')
        pathlib.Path(output_save_dir).mkdir(parents=True, exist_ok=True)

        with open(self.config.eval_data_dict, 'rb') as f:
            eval_env = dill.load(f, encoding='latin1')

        if eval_env.robot_type is None and hyperparams['incl_robot_node']:
            eval_env.robot_type = eval_env.NodeType[0]  # TODO: Make more general, allow the user to specify?
            for scene in eval_env.scenes:
                scene.add_robot_from_nodes(eval_env.robot_type)

        logger.info('Loaded data from %s', self.config.eval_data_dict)

        # Create a dummy environment with a single scene that contains information about the world.
        # When using this code, feel free to use whichever scene index or initial timestep you wish.
        scene_idx = 0

        # The model needs at least acceleration, so you want 2 timesteps of prior data, e.g. [0, 1],
        # so that you can immediately start incremental inference from the 3rd timestep onwards.
        init_timestep = 1

        eval_scene = eval_env.scenes[scene_idx]
        online_env = create_online_env(eval_env, hyperparams, scene_idx, init_timestep)

        model_registrar = ModelRegistrar(self.config.model_dir, self.config.eval_device)
        # Load the model weights saved at training iteration 100.
        model_registrar.load_models(iter_num=100)

        trajectron = OnlineTrajectron(model_registrar,
                                      hyperparams,
                                      self.config.eval_device)

        # If you want to see what different robot futures do to the predictions, uncomment this line
        # as well as the related "... += adjustment" lines below.
        # adjustment = np.stack([np.arange(13)/float(i*2.0) for i in range(6, 12)], axis=1)

        # Here's how you'd incrementally run the model, e.g. with streaming data.
        trajectron.set_environment(online_env, init_timestep)

        for timestep in range(init_timestep + 1, eval_scene.timesteps):
            input_dict = eval_scene.get_clipped_input_dict(timestep, hyperparams['state'])

            maps = None
            if hyperparams['use_map_encoding']:
                maps = get_maps_for_input(input_dict, eval_scene, hyperparams)

            robot_present_and_future = None
            if eval_scene.robot is not None and hyperparams['incl_robot_node']:
                robot_present_and_future = eval_scene.robot.get(np.array([timestep,
                                                                          timestep + hyperparams['prediction_horizon']]),
                                                                hyperparams['state'][eval_scene.robot.type],
                                                                padding=0.0)
                robot_present_and_future = np.stack([robot_present_and_future, robot_present_and_future], axis=0)
                # robot_present_and_future += adjustment

            start = time.time()
            dists, preds = trajectron.incremental_forward(input_dict,
                                                          maps,
                                                          prediction_horizon=6,
                                                          num_samples=51,
                                                          robot_present_and_future=robot_present_and_future,
                                                          full_dist=True)
            end = time.time()
            logger.info("t=%d: took %.2f s (= %.2f Hz) w/ %d nodes and %d edges",
                        timestep, end - start, 1. / (end - start),
                        len(trajectron.nodes), trajectron.scene_graph.get_num_edges())
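
            # `dists` holds the predicted output distributions and `preds` maps
            # each node to its sampled future trajectories; only `preds` is used
            # below (per Trajectron++'s OnlineTrajectron.incremental_forward).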

            # unsure what this bit from online_prediction.py does:
            # detailed_preds_dict = dict()
            # for node in eval_scene.nodes:
            #     if node in preds:
            #         detailed_preds_dict[node] = preds[node]

            # adapted from trajectron.visualization:
            # prediction_dict provides the actual predictions,
            # histories_dict provides the trajectories used for prediction,
            # futures_dict is the ground truth, which is unavailable in an online setting.
            prediction_dict, histories_dict, futures_dict = prediction_output_to_trajectories(
                {timestep: preds},
                eval_scene.dt,
                hyperparams['maximum_history_length'],
                hyperparams['prediction_horizon'])

            # prediction_dict contains at most the current timestep's entry.
            assert len(prediction_dict.keys()) <= 1
            if len(prediction_dict.keys()) == 0:
                # No predictions for this timestep; skip it instead of ending
                # the whole run loop.
                continue
            ts_key = list(prediction_dict.keys())[0]

            prediction_dict = prediction_dict[ts_key]
            histories_dict = histories_dict[ts_key]
            futures_dict = futures_dict[ts_key]

            response = {}

            for node in histories_dict:
                history = histories_dict[node]
                # future = futures_dict[node]
                predictions = prediction_dict[node]

                # Skip nodes whose most recent position is unknown.
                if np.isnan(history[-1]).any():
                    continue

                response[node.id] = {
                    'id': node.id,
                    'history': history.tolist(),
                    'predictions': predictions[0].tolist()  # use batch 0
                }

            data = json.dumps(response)
            self.prediction_socket.send_string(data)
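
            # Illustrative shape of the emitted JSON (values made up):
            #   {"2": {"id": "2",
            #          "history": [[x, y], ...],
            #          "predictions": [[x, y], ...]}}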

            # time.sleep(1)
            # print(prediction_dict)
            # print(histories_dict)
            # print(futures_dict)


def run_inference_server(config):
    s = InferenceServer(config)
    s.run()
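

# Minimal usage sketch (ours, not part of the original script): build a config
# namespace with the fields read by InferenceServer and run() and start the
# server. The defaults below mirror Trajectron++'s argument_parser and the ZMQ
# addresses are placeholders; adjust everything for your setup.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', required=True)
    parser.add_argument('--conf', default='config.json')
    parser.add_argument('--eval_data_dict', required=True)
    parser.add_argument('--output_dir', default='out')
    parser.add_argument('--eval_device', default='cpu')
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--zmq_trajectory_addr', default='tcp://localhost:5554')
    parser.add_argument('--zmq_prediction_addr', default='tcp://*:5556')
    parser.add_argument('--dynamic_edges', default='yes')
    parser.add_argument('--edge_state_combine_method', default='sum')
    parser.add_argument('--edge_influence_combine_method', default='attention')
    parser.add_argument('--edge_addition_filter', nargs='+', type=float, default=[0.25, 0.5, 0.75, 1.0])
    parser.add_argument('--edge_removal_filter', nargs='+', type=float, default=[1.0, 0.0])
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--k_eval', type=int, default=25)
    parser.add_argument('--offline_scene_graph', default='yes')
    parser.add_argument('--incl_robot_node', action='store_true')
    parser.add_argument('--no_edge_encoding', action='store_true')
    parser.add_argument('--map_encoding', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    run_inference_server(args)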