traptools/02-testcalibration-and-draw-points.py
"""
After obtaining the calibration.json (camera P and distortion matrixes)
using 01-calibrate.py, this script previews the calibration. Using the cursor
on the original (not-yet-undistorted) image you can add points, which can be
used for the homography later.
1. Set dataset variable to point to the folder with calibration.json and a preview image
2. Set a snapshot image. Note that this image _can_ be higher resolution than the video that is used
this allows for more precise point placement, but might need some conversion in the next step
3. Points are read and saved from points.json in the dataset folder
"""
import cv2
import json
import os
import numpy as np
dataset = 'hof2'
snapshot_img = 'snapshot3.png'
with open(dataset + '/calibration.json', 'r') as fp:
    calibdata = json.load(fp)
mtx = np.array(calibdata['camera_matrix'])
dist = np.array(calibdata['dist_coeff'])
w, h = calibdata['dim']['width'], calibdata['dim']['height']
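# w, h: the resolution the calibration was made at; points.json is stored in these coordinates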
# Refine the camera matrix using the parameters obtained by calibration.
# With alpha=1 the undistorted image is not cropped to the minimum valid size,
# so no cropping occurs.
# w, h = 2560, 1440
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
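# roi is the rectangle of valid pixels inside the undistorted image; it could be used
# to crop away the black border that alpha=1 introduces (unused here)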
if os.path.exists(dataset + '/points.json'):
    with open(dataset + '/points.json', 'r') as fp:
        points = json.load(fp)
else:
    # default starting points when no points.json exists yet
    points = [[500, 500], [1000, 1000]]
def add_point(event, x, y, flags, param):
    # Mouse callback: a left-click adds a point; clicking on (or very near)
    # an existing point removes it instead.
    global points
    if event == cv2.EVENT_LBUTTONUP:
        selected = None
        for i, p in enumerate(points):
            d = (p[0] - x) ** 2 + (p[1] - y) ** 2
            if d < 14:  # squared pixel distance: clicks within a few pixels of a point select it
                selected = i
                break
        print('click', selected)
        if selected is None:
            points.append([x, y])
        else:
            points.pop(selected)
        # cv2.circle(img, (x, y), 100, (255, 0, 0), -1)
        # mouseX, mouseY = x, y
cv2.namedWindow('original image')
cv2.setMouseCallback('original image',add_point)
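# clicks on the 'original image' window are recorded in (distorted) snapshot coordinates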
# cap = cv2.VideoCapture("./hof2-hikvision.mp4")
cap = cv2.VideoCapture(dataset + "/" + snapshot_img)
img_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
img_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
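# cv2.VideoCapture also opens a single image file: read() returns the snapshot once and
# fails on subsequent calls, which is why the loop below falls back to imgOld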
# scale saved points (stored at the calibration/video resolution) to the snapshot resolution
points = [
    [p[0] * (img_w / w), p[1] * (img_h / h)] for p in points
]
imgOld = None
while True:
    ret, img = cap.read()  # capture frame-by-frame
    if not ret:
        # the snapshot yields only a single frame; keep re-using the last good one
        img = imgOld
    else:
        imgOld = img

    # Method 1 to undistort the image
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    # map the clicked points from the distorted image into the undistorted one,
    # using the same new camera matrix as for the image
    dstPoints = cv2.undistortPoints(np.array(points).astype('float32'), mtx, dist, None, P=newcameramtx)
    # dst = cv2.undistort(img, mtx, dist, None)

    # # Method 2 to undistort the image
    # mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
    # dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # draw the points on copies of the original and the undistorted image
    drawImg = img.copy()
    drawDst = dst.copy()
    for p in dstPoints:
        x = int(p[0][0])
        y = int(p[0][1])
        cv2.circle(drawDst, (x, y), radius=2, color=(0, 0, 255), thickness=-1)
    for i, p in enumerate(points):
        x = int(p[0])
        y = int(p[1])
        cv2.circle(drawImg, (x, y), radius=2, color=(0, 0, 255), thickness=-1)
        cv2.putText(drawImg, f"{i}", (x, y - 5), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 0, 255))
    cv2.imshow("undistorted image", drawDst)
    cv2.imshow("original image", drawImg)
    if cv2.waitKey(5) == ord('q'):
        break
print("write points.json")
with open(dataset + '/points.json', 'w') as fp:
    # scale points back from the snapshot resolution to the calibration/video resolution
    points = [
        [p[0] * (w / img_w), p[1] * (h / img_h)] for p in points
    ]
    json.dump(points, fp)
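
# A minimal sketch of how the saved points could feed into the homography step
# mentioned in the docstring. The destination coordinates (dst_points, e.g. positions
# on a map or floor plan) are hypothetical and not produced by this script:
#
#   with open(dataset + '/points.json', 'r') as fp:
#       src_points = np.array(json.load(fp), dtype=np.float32)
#   dst_points = np.array([...], dtype=np.float32)  # hypothetical matching coordinates
#   H, _ = cv2.findHomography(src_points, dst_points)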