# traptools/01-calibrate.py
'''
Find camera intrinsics:
camera matrix and distortion coefficients.
Largely a copy from https://longervision.github.io/2017/03/16/ComputerVision/OpenCV/opencv-internal-calibration-chessboard/
Usage:
1. Set the `dataset` variable to point to a directory containing the chessboard video (see `videofile` below) and the precomputed `chessboard-points.npz`.
2. Make sure CHECKERBOARD holds the number of inner corners of the printed board used. Use (6,9) for https://github.com/opencv/opencv/blob/4.x/doc/pattern.png
3. The script writes a `calibration.json` into the dataset folder.
'''
from pathlib import Path
import time
import numpy as np
import cv2
import json
import tqdm
import math
CALIB_FISHEYE = True
dataset = Path('hof3-cam-baumer')
# Number of detections needed. Use math.inf to scan the whole video.
needed_detections = math.inf  # 20
# Dimensions of the checkerboard: the number of inner corners per row and column.
CHECKERBOARD = (6,9)
# Height (in pixels) to crop the undistorted output to (see dim3 below).
CROPPED_HEIGHT = 1520
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (5,8,0)
if not CALIB_FISHEYE:
    objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
else:
    # cv2.fisheye.calibrate expects object points shaped (1, N, 3)
    objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
videofile = dataset / "chessboard7.mp4"
cap = cv2.VideoCapture(str(videofile))
ret, img = cap.read()
assert ret, f"Could not read a frame from {videofile}"
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_img_shape = img.shape[:2]
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
dim = {
    'width': frame_width,
    'height': frame_height,
}
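# Added sketch: the `chessboard-points.npz` loaded below is produced in a separate step.
# This is a minimal, hedged sketch of how such a file could be generated with
# cv2.findChessboardCorners / cv2.cornerSubPix. The function name and its exact flags are
# assumptions, not part of the original pipeline; it is defined here but never called.
def collect_chessboard_points(video_path, out_path, checkerboard=CHECKERBOARD, max_detections=needed_detections):
    cap_ = cv2.VideoCapture(str(video_path))
    objpoints_, imgpoints_ = [], []
    # termination criteria for sub-pixel corner refinement
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    while len(imgpoints_) < max_detections:
        ok, frame = cap_.read()
        if not ok:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(
            gray_frame, checkerboard,
            flags=cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if not found:
            continue
        corners = cv2.cornerSubPix(gray_frame, corners, (3, 3), (-1, -1), criteria)
        objpoints_.append(objp)
        imgpoints_.append(corners)
    cap_.release()
    np.savez(out_path, objpoints=np.array(objpoints_), imgpoints=np.array(imgpoints_))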
loaded = np.load(dataset / "chessboard-points.npz")
objpoints, imgpoints = loaded['objpoints'], loaded['imgpoints']
print(f"Calculating matrices with {len(objpoints)} detections")
if not CALIB_FISHEYE:
    # Note: cv2.calibrateCamera expects imageSize as (width, height)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (frame_width, frame_height), None, None)
    # Convert the arrays to lists so they can be serialised to JSON.
    data = {'dim': dim, 'camera_matrix': np.asarray(mtx).tolist(), 'dist_coeff': np.asarray(dist).tolist()}
    fn = dataset / "calibration.json"
    print(f"write to {fn}")
    with open(fn, "w") as f:
        json.dump(data, f)
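    # Added readback sketch (an assumption, not part of the original script): apply the
    # saved pinhole model to the first video frame with cv2.undistort as a sanity check.
    with open(fn) as f:
        calib = json.load(f)
    undistorted_check = cv2.undistort(img, np.array(calib['camera_matrix']), np.array(calib['dist_coeff']))
    print("undistorted check frame:", undistorted_check.shape)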
else:
    # see also: https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
    calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW
    # cv2.fisheye.calibrate expects one (1, N, 3) object-point array per detected board
    objpoints = [objp for i in range(len(imgpoints))]
    N_OK = len(objpoints)
    K = np.zeros((3, 3))
    D = np.zeros((4, 1))
    rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
    tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
    rms, _, _, _, _ = cv2.fisheye.calibrate(
        objpoints,
        imgpoints,
        gray.shape[::-1],
        K,
        D,
        rvecs,
        tvecs,
        calibration_flags,
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
    )
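    # Added: `rms` is the overall RMS reprojection error returned by cv2.fisheye.calibrate;
    # printing it gives a quick sanity check of the calibration quality.
    print(f"RMS reprojection error: {rms}")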
    DIM = _img_shape[::-1]
    print("Found " + str(N_OK) + " valid images for calibration")
    print("DIM=" + str(DIM))
    print("K=np.array(" + str(K.tolist()) + ")")
    print("D=np.array(" + str(D.tolist()) + ")")
    balance = 0
    dim2 = None
    dim3 = None
    # img = cv2.imread(dataset / "snapshot.png")  # just use video frame
    dim1 = img.shape[:2][::-1]  # dim1 is the dimension of the input image to un-distort
    assert dim1[0] / dim1[1] == DIM[0] / DIM[1], "Image to undistort needs to have the same aspect ratio as the ones used in calibration"
    if not dim2:
        dim2 = dim1
    if not dim3:
        dim3 = dim1
    # Crop the undistorted output to CROPPED_HEIGHT
    dim3 = (dim1[0], CROPPED_HEIGHT)
    scaled_K = K * dim1[0] / DIM[0]  # The values of K scale with the image dimension,
    scaled_K[2][2] = 1.0  # except that K[2][2] is always 1.0
    # This is how scaled_K, dim2 and balance are used to determine the final K used to un-distort the image. The OpenCV documentation fails to make this clear!
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
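    # Added note: map1/map2 are per-pixel lookup tables (CV_16SC2 fixed-point format).
    # They depend only on the calibration and the output size, so they can be computed
    # once and then reused with cv2.remap for every frame of a video.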
    t = time.perf_counter()
    undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    print(f"Remap took {time.perf_counter()-t}s")
    data = {
        'type': 'fisheye',
        'dim1': dim1,
        'dim2': dim2,
        'dim3': dim3,
        'K': np.asarray(K).tolist(),
        'D': np.asarray(D).tolist(),
        'new_K': np.asarray(new_K).tolist(),
        'scaled_K': np.asarray(scaled_K).tolist(),
        'balance': balance}
    print(data)
    with open(dataset / "calibration.json", "w") as f:
        json.dump(data, f)
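    # Added readback sketch (an assumption about how a consumer of calibration.json might
    # rebuild the remap maps; the function name is hypothetical and it is not called here):
    def load_fisheye_maps(calibration_file):
        with open(calibration_file) as f:
            calib = json.load(f)
        return cv2.fisheye.initUndistortRectifyMap(
            np.array(calib['scaled_K']), np.array(calib['D']), np.eye(3),
            np.array(calib['new_K']), tuple(calib['dim3']), cv2.CV_16SC2)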
    # map some points to test:
    distorted_img_points = [
        [1000, 700],
        [500, 700],
        [250, 700],
        [125, 700],
    ]
    new_points = cv2.fisheye.undistortPoints(np.array([distorted_img_points]).astype(np.float32), K=scaled_K, D=D, R=np.eye(3), P=new_K)
    print('new', new_points)
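    # Added note: with K=scaled_K, D and P=new_K, cv2.fisheye.undistortPoints maps pixel
    # coordinates in the distorted frame to pixel coordinates in the undistorted image,
    # so the circles drawn below should mark the same features in both views.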
    print(undistorted_img.shape)
    for i, point in enumerate(distorted_img_points):
        cv2.circle(img, tuple(point), 5, (0, 255, 0), 2)
        cv2.circle(undistorted_img, tuple(int(v) for v in new_points[0][i]), 5, (0, 255, 0), 2)
    cv2.imshow("original", img)
    cv2.imshow("undistorted", undistorted_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()