Trajectron seems to support providing a map for a scene. This might be a way to get better predictions that actually stay on the pathways instead of going through buildings. However, by default it only supports maps from NuScenes, not plain images (even though some traces of image support remain in the code). More info is in issue #14 on their GitHub.
This notebook tests my implementation of image map support for Trajectron.
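The actual implementation lives in `trap.utils.ImageMap`, imported below. As a rough idea of the approach, here is a minimal sketch, assuming Trajectron's `GeometricMap` base class and a channel-first data layout (suggested by the `(2, 1, 0)` transpose in its `as_image()`); the names and import path are illustrative, not the actual code:
import numpy as np
from trajectron.environment.map import GeometricMap  # import path depends on the install

class ImageMap(GeometricMap):
    """Sketch: a GeometricMap backed by a plain image instead of NuScenes layers."""
    def __init__(self, image: np.ndarray, homography: np.ndarray, description: str = None):
        # cv2 gives (H, W, C); GeometricMap appears to expect channels
        # first, with image columns (x) as the second axis.
        data = np.transpose(image, (2, 1, 0))
        super().__init__(data, homography, description)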
CHANGELOG:
- 2024-12-27: Created
    - Draw the map image
    - Training sometimes (randomly?) gives NaN matrices since enabling map encoding.
    - Call ImageMap and test whether the converted points of all tracks fall within realistic image bounds (e.g. no negative points)
- 2024-12-28: Cells to double-check heading
    - Found bug in `config.json`: the heading state index was derived from position instead of velocity (changed `[0, 1]` into `[2, 3]`; sketched below).
    - Found bug in
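For reference, a hypothetical sketch of the corrected config fragment. Only the `[0, 1]` → `[2, 3]` change is from the actual fix; the surrounding key layout is assumed from Trajectron's config format:
# Hypothetical excerpt of config.json, written as a Python dict (key names assumed).
# With the state ordered [x, y, vx, vy, ...], the heading must be derived from the
# velocity components, i.e. indices [2, 3], not the position's [0, 1].
map_encoder_config = {
    "PEDESTRIAN": {
        "heading_state_index": [2, 3],  # was [0, 1] (position) -- the bug
    }
}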
from pathlib import Path
from trap.frame_emitter import Camera
from trap.utils import ImageMap
import cv2
import matplotlib.pyplot as plt
import numpy as np
path = Path("EXPERIMENTS/raw/hof3/")
calibration_path = Path("../DATASETS/hof3/calibration.json")
homography_path = Path("../DATASETS/hof3/homography.json")
camera = Camera.from_paths(calibration_path, homography_path, 12)  # 12 = frame rate, exposed as camera.fps below
image_path = Path("../DATASETS/hof3/map-undistorted-H-2.png")
print(image_path, image_path.exists())
homography_matrix = np.array([
    [5, 0, 0],
    [0, 5, 0],
    [0, 0, 1],
])  # 5 px/m: the original map is 100 px/m, but it is downscaled 20x below
img = cv2.imread(str(image_path))  # cv2.imread() wants a str, not a Path
print(img.shape)
img = cv2.resize(img, (img.shape[1]//20, img.shape[0]//20))  # downscale 20x
print(img.shape)
imgmap = ImageMap(img, homography_matrix, "hof3-undistorted-H-2")
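A quick sanity check of the scale, assuming to_map_points() simply applies the homography to homogeneous world coordinates: a point at (2 m, 3 m) should land at pixel (10, 15) at 5 px/m.
# Hand-check of the homography scale: world metres -> map pixels.
world_pt = np.array([[2.0, 3.0, 1.0]])            # homogeneous (x, y, 1)
print((homography_matrix @ world_pt.T).T[:, :2])  # expected: [[10. 15.]]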
# img = cv2.imread(image_path)
plt.imshow(img)
img = imgmap.as_image()
# img = np.flipud(img)
plt.imshow(img)
# plt.gca().invert_yaxis()
from trap.tracker import TrackReader
reader = TrackReader(path, camera.fps, exclude_whitelisted = False, include_blacklisted=False)
from typing import List
from trap.frame_emitter import Track
from trap.tracker import FinalDisplacementFilter
tracks: List[Track] = [t for t in reader]
track_filter = FinalDisplacementFilter(2)  # keep only tracks with a final displacement of at least 2 (world units)
tracks = track_filter.apply(tracks, camera)
# track = tracks[0]
for track in tracks:
    history = track.get_projected_history(None, camera)
    points = imgmap.to_map_points(history)
    print(history, points)
    # hard-coded image bounds; see the sketch below for a derived variant
    if not ((points[:, 0] > 0) & (points[:, 0] < 2440) & (points[:, 1] > 0) & (points[:, 1] < 1440)).all():
        print("not all points between limits")
        print(points)
        break
# track.to_trajectron_node(camera, env)
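The 2440 × 1440 limits above are hard-coded; a less brittle check could derive them from the map itself (a sketch, assuming as_image() returns an (H, W, C) array):
# Derive the pixel bounds from the map image instead of magic numbers.
h, w = imgmap.as_image().shape[:2]
in_bounds = (points[:, 0] >= 0) & (points[:, 0] < w) & (points[:, 1] >= 0) & (points[:, 1] < h)
print(in_bounds.all())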
track = tracks[20]
len(track.history)
fig = plt.figure(figsize=(20, 16))
ax1, ax2 = fig.subplots(2)
im = cv2.imread("../DATASETS/hof3/output.png")
ax2.imshow(im)
ax1.set_aspect(1)
ax2.set_aspect(1)
t = track.get_with_interpolated_history()
points = t.get_projected_history(None, camera)
x, y = points[:, 0], points[:, 1]
ax1.plot(x, y, alpha=.2)
ax1.scatter(x, y, marker='x', alpha=.5)
# foot coordinates in image space (renamed to avoid reusing `t` from above)
foot_x = [d.get_foot_coords()[0] for d in track.history]
foot_y = [d.get_foot_coords()[1] for d in track.history]
ax2.plot(foot_x, foot_y, alpha=.2)
ax2.scatter(foot_x, foot_y, marker='x', alpha=.2)
Test cropping of maps
Similar to `get_cropped_maps_from_scene_map_batch()` as used in `prediction_server.py`.
v = np.diff(points, axis=0)
# this angle formula comes from
angles = [-np.arctan2(v[i, 0], v[i, 1]) * 180 / np.pi for i in range(v.shape[0])]
angles
points[1:].shape, len(angles)
import torch
# torch.tensor(imgmap.data)
input_points = points[0:10]
input_angles = angles[0:10]
input_maps = [imgmap] * input_points.shape[0]  # one map reference per input point
print(input_points.shape, len(input_angles), len(input_maps))
cropped_maps = ImageMap.get_cropped_maps_from_scene_map_batch(input_maps, input_points, [50, 10, 50, 90], input_angles)
# cropped_maps.all()
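With patch_size = [50, 10, 50, 90] the crop spans 50 + 50 = 100 px along one axis and 10 + 90 = 100 px along the other (assuming Trajectron's before/after-the-point convention), so the batch should come out channels-first at 100 × 100:
# Sanity check: 10 input points, so expect torch.Size([10, C, 100, 100]).
print(cropped_maps.shape)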
I still cheat a little by drawing the reverse angle, since Trajectron rotates the map in the opposite direction of the heading angle. TODO: make sure these maths line up with the prediction server and process data.
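To make the convention concrete, a few hand evaluations of the formula above; it turns out to equal the standard heading arctan2(dy, dx) shifted by −90°:
# What -arctan2(dx, dy) evaluates to for axis-aligned steps:
for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
    print((dx, dy), -np.degrees(np.arctan2(dx, dy)))
# (1, 0)  -> -90.0   (standard heading would be 0)
# (0, 1)  -> -0.0    (standard heading would be 90)
# (-1, 0) -> 90.0    (standard heading would be 180)
# (0, -1) -> -180.0  (standard heading would be -90)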
def print_maps(cropped_maps, angles, input_points, previous_points=None):
    if previous_points is None:
        previous_points = [None] * len(cropped_maps)
    for i, (m, angle, input_point, previous_point) in enumerate(zip(cropped_maps, angles, input_points, previous_points)):
        fig, (ax1, ax2) = plt.subplots(1, 2)
        # left: the cropped patch (channels-first tensor -> H, W, C uint8 for display;
        # np.uint8, not np.uint, so imshow accepts the array)
        ax1.imshow(np.transpose(m.cpu().numpy(), (2, 1, 0)).astype(np.uint8) * 255)
        ax1.arrow(10, 50, 15, 0, color='r')
        ax1.scatter([10], [50], marker='o')
        ax1.invert_yaxis()
        # right: the full map with the (reversed) heading drawn at the point
        ax2.imshow(imgmap.as_image())
        point = imgmap.to_map_points(np.array([input_point]))
        y = np.sin(np.deg2rad(-angle))
        x = np.cos(np.deg2rad(-angle))
        d = np.array([x, y]) * 15
        # dxy = (input_points[i+1] - input_points[i])
        # d = (dxy / np.linalg.norm(dxy)) * 15
        ax2.arrow(point[0][0], point[0][1], d[0], d[1], color='r')
        ax2.scatter(point[0][0], point[0][1], marker='x')
        if previous_point is not None:
            prevp = imgmap.to_map_points(np.array([previous_point]))
            ax2.arrow(prevp[0][0], prevp[0][1], d[0], d[1], color='r')
            ax2.scatter(prevp[0][0], prevp[0][1], marker='x')
        ax2.invert_yaxis()
        plt.show()
print_maps(cropped_maps, input_angles, input_points)
Test with only rotation, from a fixed point
input_points = np.array([[14, 7]] * 8)  # the same point repeated; only the heading varies
previous_points_offset = np.array([
    [-1, 0],
    [-1, -1],
    [0, -1],
    [1, -1],
    [1, 0],
    [1, 1],
    [0, 1],
    [-1, 1],
])  # eight offsets, one per 45 deg step around the compass
previous_points = input_points + previous_points_offset
input_angles = [
    -np.arctan2(i[1] - p[1], i[0] - p[0]) * 180 / np.pi
    for i, p in zip(input_points, previous_points)
]
print(input_angles)
# input_angles = list(range(0, 360, 45))
input_maps = [imgmap] * 8
cropped_maps = ImageMap.get_cropped_maps_from_scene_map_batch(input_maps, input_points, [50, 10, 50, 90], input_angles)
print_maps(cropped_maps, input_angles, input_points, previous_points)
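Note that the argument order here (−arctan2(dy, dx)) differs from the earlier cell's −arctan2(dx, dy); reconciling the two is part of the TODO above. If the conventions line up, the eight crops should show the same map patch rotated in 45° steps, with the fixed red arrow in the crop view matching the motion direction drawn on the full map.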