Compare commits


No commits in common. "95da3d61361f02b62b85e93d3cf67300762546b2" and "4e883511d31c96f0a894afc714fed05e322cea5b" have entirely different histories.

3 changed files with 4 additions and 19 deletions


@@ -31,15 +31,6 @@ class Node(object):
         self.forward_in_time_on_next_override = False
 
-    @property
-    def first_timestep(self):
-        return self._first_timestep
-
-    @first_timestep.setter
-    def first_timestep(self, value):
-        self._first_timestep = value
-        self._last_timestep = None  # reset
-
     def __eq__(self, other):
         return ((isinstance(other, self.__class__)
                  or isinstance(self, other.__class__))
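
Aside: the block removed above follows a common Python pattern, a settable property that invalidates a cached, derived value whenever the underlying attribute changes. A minimal standalone sketch of that pattern (the Track class and its attributes are illustrative, not part of this repository):

# Minimal sketch: a settable property that resets a dependent cached value.
class Track:
    def __init__(self, first_timestep=0):
        self._first_timestep = first_timestep
        self._last_timestep = None  # recomputed lazily elsewhere

    @property
    def first_timestep(self):
        return self._first_timestep

    @first_timestep.setter
    def first_timestep(self, value):
        self._first_timestep = value
        self._last_timestep = None  # invalidate the cached last timestep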


@@ -209,7 +209,7 @@ class OnlineMultimodalGenerativeCVAE(MultimodalGenerativeCVAE):
         if self.node not in maps:
             # This means the node was removed (it is only being kept around because of the edge removal filter).
             me_params = self.hyperparams['map_encoder'][self.node_type]
-            self.TD['encoded_map'] = torch.zeros((1, me_params['output_size'])).to(self.TD['node_history_encoded'].device)
+            self.TD['encoded_map'] = torch.zeros((1, me_params['output_size']))
         else:
             encoded_map = self.node_modules[self.node_type + '/map_encoder'](maps[self.node] * 2. - 1.,
                                                                              (mode == ModeKeys.TRAIN))
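
Aside: the dropped `.to(...)` call placed the freshly created zero tensor on the same device as an existing encoding. A hedged illustration of why that matters when the model runs on a GPU (the tensor names below are stand-ins, not the project's actual tensors):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
node_history_encoded = torch.randn(1, 8, device=device)  # stand-in for TD['node_history_encoded']

# torch.zeros defaults to the CPU; without .to(...), combining it with a CUDA
# tensor later raises a device-mismatch error.
encoded_map = torch.zeros((1, 32)).to(node_history_encoded.device)
assert encoded_map.device == node_history_encoded.device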


@@ -1,5 +1,3 @@
-import logging
-from typing import List
 import torch
 from torch import nn, optim, utils
 import numpy as np
@@ -19,8 +17,6 @@ from trajectron.model.trajectron import Trajectron
 from trajectron.model.model_registrar import ModelRegistrar
 from trajectron.model.model_utils import cyclical_lr
 from trajectron.model.dataset import EnvironmentDataset, collate
-from trajectron.environment import Environment, Scene, Node
 from tensorboardX import SummaryWriter
 # torch.autograd.set_detect_anomaly(True)
@@ -138,7 +134,7 @@ def main():
                                        min_future_timesteps=hyperparams['prediction_horizon'],
                                        return_robot=not args.incl_robot_node)
     train_data_loader = dict()
-    logging.debug(f"{train_scenes=}")
+    print(train_scenes)
     for node_type_data_set in train_dataset:
         if len(node_type_data_set) == 0:
             continue
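
Aside: the replaced line used logging.debug with the f-string "=" specifier (Python 3.8+), which records both the expression and its value and is suppressed unless DEBUG logging is enabled; print always writes to stdout. A small self-contained comparison (the sample data is illustrative):

import logging

logging.basicConfig(level=logging.DEBUG)
train_scenes = ['scene_0', 'scene_1']  # placeholder for the loaded Scene objects

logging.debug(f"{train_scenes=}")  # emits "train_scenes=['scene_0', 'scene_1']" at DEBUG level
print(train_scenes)                # always prints, regardless of log configuration
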
@@ -169,7 +165,7 @@ def main():
         for scene in eval_env.scenes:
             scene.add_robot_from_nodes(eval_env.robot_type)
 
-    eval_scenes: List[Scene] = eval_env.scenes
+    eval_scenes = eval_env.scenes
     eval_scenes_sample_probs = eval_env.scenes_freq_mult_prop if args.scene_freq_mult_eval else None
 
     eval_dataset = EnvironmentDataset(eval_env,
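
Aside: the annotation being dropped only documents the element type for readers and type checkers; it does not change runtime behaviour. A minimal sketch with a stand-in Scene class (not the project's class):

from typing import List

class Scene:  # stand-in for trajectron.environment.Scene
    pass

eval_scenes: List[Scene] = [Scene(), Scene()]  # annotated: tools know the element type
eval_scenes_plain = [Scene(), Scene()]         # unannotated: identical at runtime
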
@@ -182,7 +178,6 @@ def main():
                                       min_future_timesteps=hyperparams['prediction_horizon'],
                                       return_robot=not args.incl_robot_node)
     eval_data_loader = dict()
-    logging.debug(f"{eval_scenes=}")
     for node_type_data_set in eval_dataset:
         if len(node_type_data_set) == 0:
             continue
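
Aside: the surrounding code builds one DataLoader per node type and skips node types with no data. A hedged sketch of that pattern with toy datasets (the names, shapes, and batch size are illustrative):

import torch
from torch.utils import data

datasets = {
    'PEDESTRIAN': data.TensorDataset(torch.randn(16, 3)),
    'VEHICLE': data.TensorDataset(torch.empty(0, 3)),  # empty: will be skipped
}

eval_data_loader = dict()
for node_type, node_type_data_set in datasets.items():
    if len(node_type_data_set) == 0:
        continue
    eval_data_loader[node_type] = data.DataLoader(node_type_data_set,
                                                  batch_size=4,
                                                  shuffle=True)
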
@@ -392,7 +387,6 @@ def main():
                 # Predict batch timesteps for evaluation dataset evaluation
                 eval_batch_errors = []
                 for scene in tqdm(eval_scenes, desc='Sample Evaluation', ncols=80):
-                    logging.debug(f"{scene}, {scene.timesteps=}, {len(scene.nodes)}")
                     timesteps = scene.sample_timesteps(args.eval_batch_size)
                     predictions = eval_trajectron.predict(scene,
@@ -419,7 +413,6 @@ def main():
                 # Predict maximum likelihood batch timesteps for evaluation dataset evaluation
                 eval_batch_errors_ml = []
                 for scene in tqdm(eval_scenes, desc='MM Evaluation', ncols=80):
-                    logging.debug(f"{scene}, {scene.timesteps=}, {len(scene.nodes)}")
                     timesteps = scene.sample_timesteps(scene.timesteps)
                     predictions = eval_trajectron.predict(scene,
@@ -430,6 +423,7 @@ def main():
                                                           z_mode=True,
                                                           gmm_mode=True,
                                                           full_dist=False)
+
                     eval_batch_errors_ml.append(evaluation.compute_batch_statistics(predictions,
                                                                                     scene.dt,
                                                                                     max_hl=max_hl,
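
Aside: the loop above appends one dictionary of error statistics per scene and aggregates them afterwards. A self-contained sketch of that accumulation pattern (the keys and random values are illustrative, not the output of evaluation.compute_batch_statistics):

import numpy as np

eval_batch_errors_ml = []
for _ in range(3):  # stand-in for the loop over eval_scenes
    eval_batch_errors_ml.append({'ade': np.random.rand(5), 'fde': np.random.rand(5)})

mean_ade = np.mean(np.concatenate([batch['ade'] for batch in eval_batch_errors_ml]))
mean_fde = np.mean(np.concatenate([batch['fde'] for batch in eval_batch_errors_ml]))
print(f"{mean_ade=:.3f}, {mean_fde=:.3f}")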