93 lines
3.2 KiB
Python
93 lines
3.2 KiB
Python
|
"""
|
||
|
After calibrating the camera, this scripts helps with setting
|
||
|
the homography to map all points to a top-down space (so that
|
||
|
distances remain equal.)
|
||
|
|
||
|
1. Set dataset variable and snapshot img as in `02-....py`.
|
||
|
2. Make sure to have a irl_points.json file which translates all
|
||
|
points in img_points.json to their respective real world coordinates.
|
||
|
A useful way to obtain these: draw them (e.g. with chalk marker) on the
|
||
|
ground, measure distances, place distances in SolveSpace, export SolveSpace
|
||
|
to SVG, get the point coordinates from that file (in cm).
|
||
|
3. Run the script to save the homography.json file.
|
||
|
"""
|
||
|
|
||
|
import cv2
import json
import numpy as np

# Which dataset directory to use and which snapshot image to preview on.
dataset = 'hof2'
dataset_sample_img = "snapshot3.png"

# Pixel coordinates of the reference points, picked on the snapshot image.
with open(dataset + '/img_points.json', 'r') as fp:
    img_points = np.array(json.load(fp))
# NOTE(review): points were originally placed on a 2160p image while
# calibration/prediction used 1440p, and a comment claimed a conversion
# between the two spaces -- but no rescaling was actually performed here.
# Confirm that img_points.json is already in the calibration resolution.
# (A redundant `img_points = np.array(img_points)` re-wrap was removed;
# json.load output is converted exactly once above.)

# Real-world coordinates for the same points, in the same order.
with open(dataset + '/irl_points.json', 'r') as fp:
    irl_points = json.load(fp)
irl_points = np.array(irl_points)

# IRL points were measured in cm; scale to meters.
irl_points /= 100
|
||
|
|
||
|
def points_on_img(in_img, points) -> cv2.Mat:
    """Return a copy of *in_img* with every point drawn and labelled by its index.

    Accepts points shaped (N, 2) or (N, 1, 2) -- the latter is the layout
    cv2 point APIs such as undistortPoints produce; both are treated alike.
    """
    annotated = in_img.copy()
    # Collapse the (N, 1, 2) layout down to plain (N, 2) rows.
    if points.shape[1:] == (1, 2):
        points = np.reshape(points, (points.shape[0], 2))
    for idx, (px, py) in enumerate(points):
        px, py = int(px), int(py)
        cv2.circle(annotated, (px, py), radius=2, color=(0, 0, 255), thickness=-1)
        cv2.putText(annotated, f"{idx}", (px, py - 5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
    return annotated
|
||
|
|
||
|
|
||
|
|
||
|
# Load the intrinsic calibration produced by the earlier calibration step.
with open(dataset + '/calibration.json', 'r') as fp:
    calibdata = json.load(fp)
mtx = np.array(calibdata['camera_matrix'])   # camera matrix (intrinsics)
dist = np.array(calibdata['dist_coeff'])     # lens distortion coefficients
w, h = calibdata['dim']['width'], calibdata['dim']['height']

# Resize the snapshot to the calibration resolution so the points and
# matrices refer to the same pixel space.
img = cv2.resize(cv2.imread(dataset + "/" + dataset_sample_img), (w, h))

# alpha=1 keeps all source pixels (with black borders) in the undistorted view.
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))

# first undistort the points so that lines are actually straight
undistorted_img_points = cv2.undistortPoints(np.array([img_points]).astype('float32'), mtx, dist, None, newcameramtx)

undistorted_img = cv2.undistort(img, mtx, dist, None, newcameramtx)

# Preview: original vs. undistorted image, reference points overlaid.
cv2.imshow('original', points_on_img(img, img_points))
cv2.imshow('undistorted', points_on_img(undistorted_img, undistorted_img_points))
|
||
|
|
||
|
# Compute the homography that maps undistorted pixel coordinates to
# real-world (meter) coordinates on the ground plane.
H, status = cv2.findHomography(undistorted_img_points, irl_points)

# The homography converts to meters, which makes the warped picture
# miniscule. Scale up for the on-screen preview only.
# BUG FIX: this used `view_H = H`, which aliases the same ndarray, so the
# in-place preview scaling below also mutated H -- and the x100-scaled
# matrix ended up being saved to homography.json. Copy so H stays in meters.
view_H = H.copy()
view_H[:2] = H[:2] * 100

dst_img = cv2.warpPerspective(undistorted_img, view_H, (w, h))
dst_img_points = cv2.perspectiveTransform(np.array(undistorted_img_points), view_H)
# when converting from mapped space back to image space:
# inv_H = np.linalg.pinv(H)
print(dst_img_points)

dst = points_on_img(dst_img, dst_img_points)
# print(dst.shape)
cv2.imshow('sampl', dst)
|
||
|
|
||
|
# Print the full mapping chain per point for manual sanity checking:
# raw pixel -> undistorted pixel -> measured IRL (meters) -> preview pixel.
for a,b, c, d in zip(img_points, undistorted_img_points, irl_points, dst_img_points):
    print(f"{a} -> {b} -> {c} -> {d}")

# NOTE(review): the preview scaling above multiplies rows of a matrix
# derived from H by 100; verify H itself has not been mutated by that step
# before saving, so the persisted homography really maps to meters. The
# commented-out rescale below suggests this was a known concern.
# H[:2] = H[:2]/ 100
# print(H)
# Persist the pixel->meters homography for downstream scripts.
with open(dataset + "/homography.json", 'w') as fp:
    json.dump(H.tolist(), fp)
|
||
|
|
||
|
# Keep the preview windows open until 'q' is pressed.
while cv2.waitKey(33) != ord('q'):
    pass
|