display debug points from homography
This commit is contained in:
parent
0af5030845
commit
e6187964d3
4 changed files with 42 additions and 15 deletions
|
@@ -22,6 +22,7 @@ import math
|
|||
from pyglet import shapes
|
||||
|
||||
from PIL import Image
|
||||
import json
|
||||
|
||||
from trap.frame_emitter import DetectionState, Frame, Track
|
||||
from trap.preview_renderer import DrawnTrack, PROJECTION_IMG, PROJECTION_MAP
|
||||
|
@@ -115,6 +116,26 @@ class AnimationRenderer:
|
|||
|
||||
]
|
||||
|
||||
self.debug_points = []
|
||||
# print(self.config.debug_points_file)
|
||||
if self.config.debug_points_file:
|
||||
with self.config.debug_points_file.open('r') as fp:
|
||||
img_points = np.array(json.load(fp))
|
||||
# to place points accurate I used a 2160p image, but during calibration and
|
||||
# prediction I use(d) a 1440p image, so convert points to different space:
|
||||
img_points = np.array(img_points)
|
||||
# first undistort the points so that lines are actually straight
|
||||
undistorted_img_points = cv2.undistortPoints(np.array([img_points]).astype('float32'), self.config.camera.mtx, self.config.camera.dist, None, self.config.camera.newcameramtx)
|
||||
dst_img_points = cv2.perspectiveTransform(np.array(undistorted_img_points), self.config.camera.H)
|
||||
if dst_img_points.shape[1:] == (1,2):
|
||||
dst_img_points = np.reshape(dst_img_points, (dst_img_points.shape[0], 2))
|
||||
|
||||
self.debug_points = [
|
||||
pyglet.shapes.Circle(p[0], self.window.height - p[1], 3, color=(255,0,0,255), batch=self.batch_overlay) for p in dst_img_points
|
||||
]
|
||||
|
||||
|
||||
|
||||
|
||||
self.init_shapes()
|
||||
|
||||
|
|
|
@@ -326,3 +326,9 @@ render_parser.add_argument("--render-url",
|
|||
type=str,
|
||||
default=None)
|
||||
|
||||
|
||||
render_parser.add_argument("--debug-points-file",
|
||||
help="A json file with points to test projection/homography etc.",
|
||||
type=Path,
|
||||
required=False,
|
||||
)
|
|
@@ -215,7 +215,6 @@ class FrameEmitter:
|
|||
i = self.config.video_offset
|
||||
|
||||
|
||||
|
||||
# if '-' in video_path.path().stem:
|
||||
# path_stem = video_path.stem[:video_path.stem.rfind('-')]
|
||||
# else:
|
||||
|
|
|
@@ -111,20 +111,21 @@ class DrawnTrack:
|
|||
if len(self.coords) > len(self.drawn_positions):
|
||||
self.drawn_positions.extend(self.coords[len(self.drawn_positions):])
|
||||
|
||||
for a, drawn_prediction in enumerate(self.drawn_predictions):
|
||||
for i, pos in enumerate(drawn_prediction):
|
||||
# TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
|
||||
decay = max(3, (18/i) if i else 10) # points further away move with more delay
|
||||
decay = 6
|
||||
origin = self.drawn_positions[-1]
|
||||
drawn_r, drawn_angle = relativePointToPolar( origin, drawn_prediction[i])
|
||||
pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
|
||||
r = exponentialDecay(drawn_r, pred_r, decay, dt)
|
||||
angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
|
||||
x, y = relativePolarToPoint(origin, r, angle)
|
||||
self.drawn_predictions[a][i] = int(x), int(y)
|
||||
# self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
|
||||
# self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
|
||||
if len(self.pred_coords):
|
||||
for a, drawn_prediction in enumerate(self.drawn_predictions):
|
||||
for i, pos in enumerate(drawn_prediction):
|
||||
# TODO: this should be done in polar space starting from origin (i.e. self.drawn_posision[-1])
|
||||
decay = max(3, (18/i) if i else 10) # points further away move with more delay
|
||||
decay = 6
|
||||
origin = self.drawn_positions[-1]
|
||||
drawn_r, drawn_angle = relativePointToPolar( origin, drawn_prediction[i])
|
||||
pred_r, pred_angle = relativePointToPolar(origin, self.pred_coords[a][i])
|
||||
r = exponentialDecay(drawn_r, pred_r, decay, dt)
|
||||
angle = exponentialDecay(drawn_angle, pred_angle, decay, dt)
|
||||
x, y = relativePolarToPoint(origin, r, angle)
|
||||
self.drawn_predictions[a][i] = int(x), int(y)
|
||||
# self.drawn_predictions[i][0] = int(exponentialDecay(self.drawn_predictions[i][0], self.pred_coords[i][0], decay, dt))
|
||||
# self.drawn_predictions[i][1] = int(exponentialDecay(self.drawn_predictions[i][1], self.pred_coords[i][1], decay, dt))
|
||||
|
||||
if len(self.pred_coords) > len(self.drawn_predictions):
|
||||
self.drawn_predictions.extend(self.pred_coords[len(self.drawn_predictions):])
|
||||
|
|
Loading…
Reference in a new issue