Data and viz options

Ruben van de Ven 2024-11-14 17:01:32 +01:00
parent 627b320ec7
commit cc952424e0
7 changed files with 103 additions and 44 deletions

File diff suppressed because one or more lines are too long

View file

@@ -95,7 +95,7 @@ class AnimationRenderer:
corners=[[471, 304], [1797, 376], [467, 387], [1792, 484]])
self.window.push_handlers(self.pins)
pyglet.gl.glClearColor(0,0,0, 0)
pyglet.gl.glClearColor(255,255,255,255)
self.fps_display = pyglet.window.FPSDisplay(window=self.window, color=(255,255,255,255))
self.fps_display.label.x = self.window.width - 50
self.fps_display.label.y = self.window.height - 17
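Note that pyglet.gl.glClearColor takes normalized floats in the range 0.0 to 1.0 (larger values are clamped), so the new call above effectively selects a white background; the normalized equivalent would be:

pyglet.gl.glClearColor(1.0, 1.0, 1.0, 1.0)  # white, fully opaque clear color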
@@ -115,6 +115,7 @@ class AnimationRenderer:
self.batch_overlay = pyglet.graphics.Batch()
self.batch_anim = pyglet.graphics.Batch()
if self.config.render_debug_shapes:
self.debug_lines = [
pyglet.shapes.Line(1370, self.config.camera.h-360, 1380, 670-360, 2, (255,255,255,255), batch=self.batch_overlay),#v
pyglet.shapes.Line(0, 660-360, 1380, 670-360, 2, (255,255,255,255), batch=self.batch_overlay), #h
@@ -277,7 +278,7 @@ class AnimationRenderer:
self.video_sprite = pyglet.sprite.Sprite(img=img, batch=self.batch_bg)
# transform to flipped coordinate system for pyglet
self.video_sprite.y = self.window.height - self.video_sprite.height
self.video_sprite.opacity = 90
self.video_sprite.opacity = 10
except zmq.ZMQError as e:
# idx = frame.index if frame else "NONE"
# logger.debug(f"reuse video frame {idx}")
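Pyglet uses a bottom-left window origin while the video frame (and the tracking data) use a top-left origin, hence the flip above when positioning the sprite. A minimal sketch of that conversion, with made-up sizes:

def flip_y(top_left_y: float, item_height: float, window_height: float) -> float:
    # convert a top-left-origin y coordinate to pyglet's bottom-left origin
    return window_height - top_left_y - item_height

# a frame of height 360 px anchored at the top of a 1080 px window:
flip_y(0, 360, 1080)  # == 720, i.e. window.height - sprite.height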
@@ -306,12 +307,19 @@ class AnimationRenderer:
# # TODO fade out
# del self.drawn_tracks[track_id]
if self.tracker_frame:
for track_id, track in self.tracker_frame.tracks.items():
if track_id not in self.drawn_tracks:
self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.tracker_frame.H, PROJECTION_MAP, self.config.camera)
else:
self.drawn_tracks[track_id].set_track(track)
if self.prediction_frame:
for track_id, track in self.prediction_frame.tracks.items():
if track_id not in self.drawn_tracks:
self.drawn_tracks[track_id] = DrawnTrack(track_id, track, self, self.prediction_frame.H, PROJECTION_MAP, self.config.camera)
else:
self.drawn_tracks[track_id].set_track(track)
self.drawn_tracks[track_id].set_predictions(track)
# clean up
for track_id in list(self.drawn_tracks.keys()):
@@ -364,6 +372,8 @@ class AnimationRenderer:
shape.draw()
# self.batch_anim.draw()
self.batch_overlay.draw()
if self.config.render_debug_shapes:
self.pins.draw()
# pyglet.graphics.draw(3, pyglet.gl.GL_LINE, ("v2i", (100,200, 600,800)), ('c3B', (255,255,255, 255,255,255)))

View file

@@ -319,6 +319,9 @@ render_parser.add_argument("--render-window",
render_parser.add_argument("--render-no-preview",
help="No preview, but only animation",
action='store_true')
render_parser.add_argument("--render-debug-shapes",
help="Lines and points for debugging/mapping",
action='store_true')
render_parser.add_argument("--full-screen",
help="Set Window full screen",
action='store_true')
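A minimal sketch (not the project's actual entry point) of how these store_true flags behave: they default to False, and the dashes become underscores on the parsed namespace, which is how self.config.render_debug_shapes is read in the renderer:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--render-debug-shapes",
                    help="Lines and points for debugging/mapping",
                    action='store_true')

config = parser.parse_args(["--render-debug-shapes"])
assert config.render_debug_shapes is True  # omitting the flag would leave it False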

View file

@@ -303,16 +303,16 @@ class PredictionServer:
history = history_cm_to_m(history)
history = np.array(history)
x = history[:, 0] #- cx
y = history[:, 1] #- cy
x = history[:, 0] #- cx # we can create bigger steps by doing history[::5,0]
y = history[:, 1] #- cy # history[::5,1]
if self.config.center_data:
x -= cx
y -= cy
# TODO: calculate dt based on input
vx = derivative_of(x, 0.1) #eval_scene.dt
vy = derivative_of(y, 0.1)
ax = derivative_of(vx, 0.1)
ay = derivative_of(vy, 0.1)
vx = derivative_of(x, .1) #eval_scene.dt
vy = derivative_of(y, .1)
ax = derivative_of(vx, .1)
ay = derivative_of(vy, .1)
data_dict = {('position', 'x'): x[:], # [-10:-1]
('position', 'y'): y[:], # [-10:-1]
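A minimal sketch of the derivative chain above, assuming derivative_of behaves like a finite-difference gradient with the hard-coded dt of 0.1 s that the TODO wants to replace (the history values here are made up):

import numpy as np

dt = 0.1
history = np.array([[0.0, 0.0], [0.1, 0.05], [0.25, 0.15], [0.45, 0.30]])  # x, y in meters

x, y = history[:, 0], history[:, 1]
vx, vy = np.gradient(x, dt), np.gradient(y, dt)    # velocity
ax, ay = np.gradient(vx, dt), np.gradient(vy, dt)  # acceleration

data_dict = {('position', 'x'): x, ('position', 'y'): y,
             ('velocity', 'x'): vx, ('velocity', 'y'): vy,
             ('acceleration', 'x'): ax, ('acceleration', 'y'): ay}
# velocity/acceleration keys are assumed to follow the same pattern as the position keys in the diff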
@@ -445,6 +445,7 @@ class PredictionServer:
frame = self.smoother.smooth_frame_predictions(frame)
self.prediction_socket.send_pyobj(frame)
time.sleep(.5)
logger.info('Stopping')

View file

@@ -72,6 +72,7 @@ class DrawnTrack:
self.camera = camera
self.H = H # TODO)) Move H to Camera object
self.set_track(track, H)
self.set_predictions(track, H)
self.drawn_positions = []
self.drawn_predictions = []
self.shapes: list[pyglet.shapes.Line] = []
@@ -87,6 +88,8 @@ class DrawnTrack:
# perhaps only do in constructor:
self.inv_H = np.linalg.pinv(self.H)
def set_predictions(self, track: Track, H = None):
pred_coords = []
if track.predictions:
if self.draw_projection == PROJECTION_IMG:
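The H / inv_H pair computed above is the usual homography round trip between image pixels and the projected ground plane; a minimal sketch with a made-up 3x3 matrix:

import numpy as np

H = np.array([[1.2, 0.0, 10.0],
              [0.1, 0.9, 5.0],
              [0.0, 0.001, 1.0]])  # hypothetical image-to-ground homography
inv_H = np.linalg.pinv(H)

def apply_h(M, point):
    # multiply in homogeneous coordinates, then divide by w
    x, y, w = M @ np.array([point[0], point[1], 1.0])
    return (x / w, y / w)

ground = apply_h(H, (640, 360))
pixel = apply_h(inv_H, ground)  # round-trips back to roughly (640, 360)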
@@ -116,7 +119,7 @@ class DrawnTrack:
for i, pos in enumerate(drawn_prediction):
# TODO: this should be done in polar space starting from origin (i.e. self.drawn_positions[-1])
decay = max(3, (18/i) if i else 10) # points further away move with more delay
decay = 6
decay = 16
origin = self.drawn_positions[-1]
drawn_r, drawn_angle = relativePointToPolar( origin, drawn_prediction[i])
pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
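A minimal sketch of the polar interpolation used here, with hypothetical stand-ins for relativePointToPolar, relativePolarToPoint and exponentialDecay (the project's own helpers may be defined differently):

import math

def relative_point_to_polar(origin, point):
    dx, dy = point[0] - origin[0], point[1] - origin[1]
    return math.hypot(dx, dy), math.atan2(dy, dx)

def relative_polar_to_point(origin, r, angle):
    return origin[0] + r * math.cos(angle), origin[1] + r * math.sin(angle)

def exponential_decay(current, target, decay, dt):
    # ease toward target; a larger decay value means more delay
    return target + (current - target) * math.exp(-dt / decay)

origin = (100.0, 100.0)      # last drawn position of the track
drawn = (110.0, 100.0)       # where this prediction point is currently drawn
predicted = (130.0, 120.0)   # where the new prediction places it

drawn_r, drawn_angle = relative_point_to_polar(origin, drawn)
pred_r, pred_angle = relative_point_to_polar(origin, predicted)
r = exponential_decay(drawn_r, pred_r, decay=16, dt=1 / 60)
angle = exponential_decay(drawn_angle, pred_angle, decay=16, dt=1 / 60)
new_drawn = relative_polar_to_point(origin, r, angle)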
@@ -179,7 +182,8 @@ class DrawnTrack:
# for i, pos in drawn_predictions.enumerate():
for ci in range(0, len(drawn_predictions)):
if ci == 0:
x, y = [int(p) for p in self.drawn_positions[-1]]
continue
# x, y = [int(p) for p in self.drawn_positions[-1]]
else:
x, y = [int(p) for p in drawn_predictions[ci-1]]
@@ -191,7 +195,9 @@ class DrawnTrack:
if ci >= len(self.pred_shapes[a]):
# TODO: add color2
line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
# line = self.renderer.gradientLine(x, y, x2, y2, 3, color, color, batch=self.renderer.batch_anim)
line = pyglet.shapes.Line(x, y, x2, y2, 1.5, color, batch=self.renderer.batch_anim)
# line = pyglet.shapes.Arc(x, y, 1.5, thickness=1.5, color=color, batch=self.renderer.batch_anim)
line.opacity = 5
self.pred_shapes[a].append(line)
@@ -203,9 +209,9 @@ class DrawnTrack:
decay = (16/ci) if ci else 16
half = len(drawn_predictions) / 2
if ci < half:
target_opacity = 180
target_opacity = 60
else:
target_opacity = (1 - ((ci - half) / half)) * 180
target_opacity = (1 - ((ci - half) / half)) * 60
line.opacity = int(exponentialDecay(line.opacity, target_opacity, decay, dt))
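As a worked example of the new opacity ramp with a hypothetical 12-point prediction (half = 6): the first half of the points aim for opacity 60, the second half fades linearly from 60 down toward 0:

half = 12 / 2
targets = [60 if ci < half else (1 - ((ci - half) / half)) * 60 for ci in range(12)]
# [60, 60, 60, 60, 60, 60, 60.0, 50.0, 40.0, 30.0, 20.0, 10.0]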
@@ -688,10 +694,13 @@ class PreviewRenderer:
# (255, 0, 255),
# (255, 255, 0)
# ]
# colorset = [
# (255,255,100),
# (255,100,255),
# (100,255,255),
# ]
colorset = [
(255,255,100),
(255,100,255),
(100,255,255),
(0,0,0),
]
# Deprecated

View file

@@ -90,7 +90,7 @@ def augment(scene):
# maybe_makedirs('trajectron-data')
# for desired_source in [ 'hof2', ]:# ,'hof-maskrcnn', 'hof-yolov8', 'VIRAT-0102-parsed', 'virat-resnet-keypoints-full']:
def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, cm_to_m: bool, center_data: bool):
def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, cm_to_m: bool, center_data: bool, bin_positions: bool):
print(f"Process data in {src_dir}, to {dst_dir}, identified by {name}")
nl = 0
@@ -117,12 +117,18 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
all_data['pos_x'] /= 100
all_data['pos_y'] /= 100
mean_x, mean_y = all_data['pos_x'].mean(), all_data['pos_y'].mean()
cx = .5 * all_data['pos_x'].min() + .5 * all_data['pos_x'].max()
cy = .5 * all_data['pos_y'].min() + .5 * all_data['pos_y'].max()
print(f"Dataset means: {mean_x=} {mean_y=}")
print(f"Dataset centers: {cx=} {cy=}")
# bins of .5 meter
# print(np.ceil(all_data['pos_x'].max())*2)
if bin_positions:
space_x = np.linspace(0, np.ceil(all_data['pos_x'].max()), int(np.ceil(all_data['pos_x'].max())*2)+1)
space_y = np.linspace(0, np.ceil(all_data['pos_y'].max()), int(np.ceil(all_data['pos_y'].max())*2)+1)
print(f"Dataset means: {mean_x=} {mean_y=}, (min: ({all_data['pos_x'].min()}, {all_data['pos_y'].min()}), max: ({all_data['pos_x'].max()}, {all_data['pos_y'].max()}))")
print(f"Dataset centers: {cx=} {cy=}")
for data_class in ['train', 'val', 'test']:
env = Environment(node_type_list=['PEDESTRIAN'], standardization=standardization)
@@ -174,6 +180,12 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
data['pos_x'] -= cx
data['pos_y'] -= cy
if bin_positions:
data['pos_x'] = np.digitize(data['pos_x'], bins=space_x)
data['pos_y'] = np.digitize(data['pos_y'], bins=space_y)
print(data['pos_x'])
# Mean Position
print("Means: x:", data['pos_x'].mean(), "y:", data['pos_y'].mean())
@@ -199,9 +211,22 @@ def process_data(src_dir: Path, dst_dir: Path, name: str, smooth_tracks: bool, c
skipped_for_error += 1
continue
# without repeats, there will most likely only be straight movements
# better to filter by time
# only_diff = node_df[['pos_x', 'pos_y']].diff().fillna(1).any(axis=1)
# # print(node_df[['pos_x', 'pos_y']], )
# # exit()
# # mask positions
# node_values = node_df[only_diff][['pos_x', 'pos_y']].values
# print(node_values)
if bin_positions:
node_values = node_df.iloc[::5, :][['pos_x', 'pos_y']].values
else:
node_values = node_df[['pos_x', 'pos_y']].values
# print(node_values)
if node_values.shape[0] < min_track_length:
continue
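The iloc[::5] in the binned branch keeps only every fifth detection, creating bigger steps between consecutive samples (echoing the history[::5] note in the prediction server). A small sketch of that slicing with dummy data:

import pandas as pd

node_df = pd.DataFrame({'pos_x': range(10), 'pos_y': range(10)})
node_values = node_df.iloc[::5, :][['pos_x', 'pos_y']].values
print(node_values)  # rows 0 and 5 only: [[0 0] [5 5]]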
@@ -262,6 +287,7 @@ def main():
parser.add_argument("--smooth-tracks", action='store_true', help=f"Enable smoother. Set to {smooth_window} frames")
parser.add_argument("--cm-to-m", action='store_true', help=f"If homography is in cm, convert tracked points to meter for beter results")
parser.add_argument("--center-data", action='store_true', help=f"Normalise around center")
parser.add_argument("--bin-positions", action='store_true', help=f"Experiment to put round positions to a grid")
args = parser.parse_args()
process_data(**args.__dict__)

View file

@@ -443,7 +443,8 @@ class Smoother:
else:
# "Unlike Kalman filtering, which focuses on predicting and updating the current state using historical measurements, Kalman smoothing enhances the accuracy of past state values"
# see https://medium.com/@shahalkp1/kalman-smoothing-using-tsmoothie-0175260464e5
self.smoother = KalmanSmoother(component='level_trend_season', component_noise={'level':0.03, 'season': .02, 'trend':0.04},n_seasons = 2, copy=None)
self.smoother = KalmanSmoother(component='level_trend', component_noise={'level':0.03, 'season': .02, 'trend':0.04},n_seasons = 2, copy=None)
def smooth(self, points: List[float]):
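For reference, a minimal sketch of running tsmoothie's KalmanSmoother with the level/trend components used above over a made-up 1-D series (smooth_data holds the smoothed output):

import numpy as np
from tsmoothie.smoother import KalmanSmoother

data = np.cumsum(np.random.randn(50))  # hypothetical noisy track coordinate
smoother = KalmanSmoother(component='level_trend',
                          component_noise={'level': 0.03, 'trend': 0.04})
smoother.smooth(data)
smoothed = smoother.smooth_data[0]     # same length as the input series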