Calibration and homography tools
commit 3f0e358327
6 changed files with 459 additions and 0 deletions
94
01-calibrate.py
Normal file
@@ -0,0 +1,94 @@
'''
Find camera intrinsics:
camera matrix and distortion coefficients

Largely a copy from https://longervision.github.io/2017/03/16/ComputerVision/OpenCV/opencv-internal-calibration-chessboard/

Usage:

1. Set the dataset variable to point to a directory containing chessboard.mp4
2. Make sure CHECKERBOARD has the number of inner corners of the printed board used. Use (6,9) for https://github.com/opencv/opencv/blob/4.x/doc/pattern.png
3. The script creates a `calibration.json` in the dataset folder
'''

import numpy as np
import cv2
import json
import tqdm
import math

dataset = 'hof2'

# set the needed number of detections. Use math.inf to scan the whole video
needed_detections = math.inf # 20

# Defining the dimensions of the checkerboard (inner corners)
CHECKERBOARD = (6,9)

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)


# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (5,8,0)
objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1],3), np.float32)
objp[:,:2] = np.mgrid[0:CHECKERBOARD[0],0:CHECKERBOARD[1]].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane

cap = cv2.VideoCapture(dataset + "/chessboard.mp4")
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
dim = {
    'width': frame_width,
    'height': frame_height,
}
found = 0

p = tqdm.tqdm()
p2 = tqdm.tqdm(total=needed_detections)

while ((found < needed_detections) if math.isfinite(needed_detections) else True):
    ret, img = cap.read() # Capture frame-by-frame
    if not ret:
        break

    p.update()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, None)

    # If found, add object points and image points (after refining them)
    if ret == True:
        objpoints.append(objp) # every detection uses the same 3D board points
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        p2.update()

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
        found += 1

    cv2.imshow('img', img)
    cv2.waitKey(1)

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()

print(f"Calculating matrices with {found} detections")
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

# Convert the matrices to lists so they can be serialised to JSON.
data = {'dim': dim, 'camera_matrix': np.asarray(mtx).tolist(), 'dist_coeff': np.asarray(dist).tolist()}

fn = dataset + "/calibration.json"
print(f"write to {fn}")
with open(fn, "w") as f:
    json.dump(data, f)
116
02-testcalibration-and-draw-points.py
Normal file
@@ -0,0 +1,116 @@
"""
After obtaining the calibration.json (camera matrix and distortion coefficients)
using 01-calibrate.py, this script previews the calibration. Using the cursor
on the original (not-yet-undistorted) image you can add points, which can be
used for the homography later.

1. Set the dataset variable to point to the folder with calibration.json and a preview image
2. Set a snapshot image. Note that this image _can_ be higher resolution than the video that is used.
   This allows for more precise point placement, but might need some conversion in the next step.
3. Points are read from and saved to points.json in the dataset folder
"""
import cv2
import json
import os
import numpy as np

dataset = 'hof2'
snapshot_img = 'snapshot3.png'

with open(dataset + '/calibration.json', 'r') as fp:
    calibdata = json.load(fp)
mtx = np.array(calibdata['camera_matrix'])
dist = np.array(calibdata['dist_coeff'])
w, h = calibdata['dim']['width'], calibdata['dim']['height']


# Refining the camera matrix using parameters obtained by calibration:
# if we don't set this, the new image will be cropped to the minimum size;
# this way, no cropping occurs
# w, h = 2560, 1440
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))

if os.path.exists(dataset + '/points.json'):
    with open(dataset + '/points.json', 'r') as fp:
        points = json.load(fp)
else:
    points = [[500,500],[1000,1000]]

def add_point(event, x, y, flags, param):
    global points
    if event == cv2.EVENT_LBUTTONUP:
        # remove an existing point when clicking near it, otherwise add a new one
        selected = None
        for i, p in enumerate(points):
            d = (p[0]-x)**2 + (p[1]-y)**2
            if d < 14:  # squared pixel distance
                selected = i
                break
        print('click', selected)
        if selected is None:
            points.append([x,y])
        else:
            points.pop(selected)

        # cv2.circle(img,(x,y),100,(255,0,0),-1)
        # mouseX,mouseY = x,y

cv2.namedWindow('original image')
cv2.setMouseCallback('original image', add_point)

# cap = cv2.VideoCapture("./hof2-hikvision.mp4")
cap = cv2.VideoCapture(dataset + "/" + snapshot_img)
img_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
img_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

# scale saved points to the snapshot resolution
points = [
    [p[0]*(img_w/w), p[1]*(img_h/h)] for p in points
]

imgOld = None
while True:
    ret, img = cap.read() # Capture frame-by-frame

    if not ret:
        img = imgOld
    else:
        imgOld = img

    # Method 1 to undistort the image
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    dstPoints = cv2.undistortPoints(np.array(points).astype('float32'), mtx, dist, None, P=newcameramtx)

    # dst = cv2.undistort(img, mtx, dist, None)

    # # Method 2 to undistort the image
    # mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
    # dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # Displaying the undistorted image
    drawImg = img.copy()
    drawDst = dst.copy()
    for p in dstPoints:
        x = int(p[0][0])
        y = int(p[0][1])
        cv2.circle(drawDst, (x,y), radius=2, color=(0, 0, 255), thickness=-1)
    for i, p in enumerate(points):
        x = int(p[0])
        y = int(p[1])
        cv2.circle(drawImg, (x,y), radius=2, color=(0, 0, 255), thickness=-1)
        cv2.putText(drawImg, f"{i}", (x,y-5), cv2.FONT_HERSHEY_COMPLEX, 4, (0,0,255))

    cv2.imshow("undistorted image", drawDst)
    cv2.imshow("original image", drawImg)
    if cv2.waitKey(5) == ord('q'):
        break


print("write points.json")
with open(dataset + '/points.json', 'w') as fp:
    # scale points back to the calibration/video resolution before saving
    points = [
        [p[0]*(w/img_w), p[1]*(h/img_h)] for p in points
    ]
    json.dump(points, fp)
92
03-homography.py
Normal file
@@ -0,0 +1,92 @@
"""
After calibrating the camera, this script helps with setting
the homography to map all points to a top-down space (so that
distances correspond to real-world distances).

1. Set the dataset variable and snapshot img as in `02-....py`.
2. Make sure to have an irl_points.json file which translates all
   points in img_points.json to their respective real world coordinates.
   A useful way to obtain these: draw them (e.g. with chalk marker) on the
   ground, measure distances, place distances in SolveSpace, export SolveSpace
   to SVG, get the point coordinates from that file (in cm).
3. Run the script to save the homography.json file.
"""

import cv2
import json
import numpy as np

dataset = 'hof2'
dataset_sample_img = "snapshot3.png"

with open(dataset + '/img_points.json', 'r') as fp:
    img_points = np.array(json.load(fp))
# To place points accurately I used a 2160p image, but during calibration and
# prediction I use(d) a 1440p image, so the points may need converting to that space:
img_points = np.array(img_points)
with open(dataset + '/irl_points.json', 'r') as fp:
    irl_points = json.load(fp)
# irl_points = np.array([[p[0]/10+100, p[1]/10+100] for p in irl_points])
irl_points = np.array(irl_points)

# I measured IRL points in cm. Scale to meters
irl_points /= 100

def points_on_img(in_img, points) -> cv2.Mat:
    img = in_img.copy()
    if points.shape[1:] == (1,2):
        points = np.reshape(points, (points.shape[0], 2))
    for i, p in enumerate(points):
        x = int(p[0])
        y = int(p[1])
        cv2.circle(img, (x,y), radius=2, color=(0, 0, 255), thickness=-1)
        cv2.putText(img, f"{i}", (x,y-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255))
    return img


with open(dataset + '/calibration.json', 'r') as fp:
    calibdata = json.load(fp)
mtx = np.array(calibdata['camera_matrix'])
dist = np.array(calibdata['dist_coeff'])
w, h = calibdata['dim']['width'], calibdata['dim']['height']

img = cv2.resize(cv2.imread(dataset + "/" + dataset_sample_img), (w, h))

newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))

# first undistort the points so that lines are actually straight
undistorted_img_points = cv2.undistortPoints(np.array([img_points]).astype('float32'), mtx, dist, None, P=newcameramtx)

undistorted_img = cv2.undistort(img, mtx, dist, None, newcameramtx)

cv2.imshow('original', points_on_img(img, img_points))
cv2.imshow('undistorted', points_on_img(undistorted_img, undistorted_img_points))

H, status = cv2.findHomography(undistorted_img_points, irl_points)

# The homography converts to meters, which makes the picture minuscule.
# Scale up a copy for the preview (so the saved H stays in meters).
view_H = H.copy()
view_H[:2] = H[:2]*100
dst_img = cv2.warpPerspective(undistorted_img, view_H, (w,h))
dst_img_points = cv2.perspectiveTransform(np.array(undistorted_img_points), view_H)
# when converting from mapped space back to image space:
# inv_H = np.linalg.pinv(H)
print(dst_img_points)

dst = points_on_img(dst_img, dst_img_points)
# print(dst.shape)
cv2.imshow('sample', dst)

for a, b, c, d in zip(img_points, undistorted_img_points, irl_points, dst_img_points):
    print(f"{a} -> {b} -> {c} -> {d}")

with open(dataset + "/homography.json", 'w') as fp:
    json.dump(H.tolist(), fp)

while True:
    if cv2.waitKey(33) == ord('q'):
        break
23
README.md
Normal file
@@ -0,0 +1,23 @@
# Some tools to facilitate trajectory prediction

_See also [trap](https://git.rubenvandeven.com/security_vision/trap)_

## 1. Camera calibration

Find the camera intrinsics and lens distortion matrices. This helps to remove curvature from the image, so that points map to a linear space.
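A minimal sketch (not one of the scripts in this repo; the `hof2` folder, `snapshot3.png` frame and output filename are assumptions taken from the scripts) of how the resulting `calibration.json` can be loaded and applied:

```python
import json
import cv2
import numpy as np

dataset = 'hof2'  # assumed dataset folder
with open(dataset + '/calibration.json') as fp:
    calib = json.load(fp)

mtx = np.array(calib['camera_matrix'])
dist = np.array(calib['dist_coeff'])
w, h = calib['dim']['width'], calib['dim']['height']

# alpha=1 keeps all source pixels instead of cropping to the valid region
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

img = cv2.imread(dataset + '/snapshot3.png')  # any frame from the same camera
img = cv2.resize(img, (w, h))                 # match the calibration resolution
undistorted = cv2.undistort(img, mtx, dist, None, newcameramtx)
cv2.imwrite(dataset + '/undistorted.png', undistorted)  # assumed output name
```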
## 2. Test calibration and draw points

Apply the now-obtained camera matrix to undistort a snapshot and check whether it looks good.

Now we can obtain coordinates to map for the homography. Draw points on the floor (I used chalk) and measure their distances. I then used SolveSpace to go from these distances to positions in a plane.

Then, with a camera snapshot of these points, click with the cursor in the source image to mark these points in the image.

The points are saved to `points.json`. If they look right, rename the file to `img_points.json` for the homography.
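The homography step (below) pairs these image points with real-world coordinates; both files are plain JSON lists of `[x, y]` pairs that must correspond one-to-one and in the same order. A small, hypothetical sanity check (assuming the `hof2` dataset folder and the `irl_points.json` described in the next section):

```python
import json

dataset = 'hof2'  # assumed dataset folder
with open(dataset + '/img_points.json') as fp:
    img_points = json.load(fp)  # pixel coordinates, list of [x, y]
with open(dataset + '/irl_points.json') as fp:
    irl_points = json.load(fp)  # real-world coordinates in cm, list of [x, y]

# cv2.findHomography pairs the i-th image point with the i-th IRL point,
# so both lists need the same length and ordering
assert len(img_points) == len(irl_points)
```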
## 3. Homography

With the camera intrinsics known, the perspective of the camera can be undone by mapping points to a 'top-down' space. This way, the distances between points correspond to their distances IRL.

This script reads the camera intrinsics & distortion matrices, `img_points.json` (obtained in step 2) and the corresponding `irl_points.json`, which I created from coordinates obtained with SolveSpace.
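For downstream use (e.g. projecting tracked positions), here is a minimal sketch of applying the saved files. It assumes `homography.json` holds the matrix mapping undistorted pixel coordinates to meters, as written by `03-homography.py`, and reuses the `hof2` folder from the scripts; the example pixel coordinate is arbitrary:

```python
import json
import cv2
import numpy as np

dataset = 'hof2'  # assumed dataset folder
with open(dataset + '/calibration.json') as fp:
    calib = json.load(fp)
with open(dataset + '/homography.json') as fp:
    H = np.array(json.load(fp))

mtx = np.array(calib['camera_matrix'])
dist = np.array(calib['dist_coeff'])
w, h = calib['dim']['width'], calib['dim']['height']
newcameramtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

# an example pixel coordinate in the (distorted) camera image
point = np.array([[[960.0, 540.0]]], dtype=np.float32)

# undistort the point first, then apply the homography to get meters
undistorted = cv2.undistortPoints(point, mtx, dist, None, P=newcameramtx)
irl = cv2.perspectiveTransform(undistorted, H)
print(irl)  # position in meters in the top-down plane

# the inverse homography maps top-down coordinates back to the image
back = cv2.perspectiveTransform(irl, np.linalg.inv(H))
```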
118
poetry.lock
generated
Normal file
@@ -0,0 +1,118 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
    {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]

[[package]]
name = "numpy"
version = "2.1.2"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.10"
files = [
    {file = "numpy-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee"},
    {file = "numpy-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884"},
    {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"},
    {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d"},
    {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86"},
    {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7"},
    {file = "numpy-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03"},
    {file = "numpy-2.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466"},
    {file = "numpy-2.1.2-cp310-cp310-win32.whl", hash = "sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb"},
    {file = "numpy-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2"},
    {file = "numpy-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe"},
    {file = "numpy-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1"},
    {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f"},
    {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4"},
    {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a"},
    {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1"},
    {file = "numpy-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2"},
    {file = "numpy-2.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146"},
    {file = "numpy-2.1.2-cp311-cp311-win32.whl", hash = "sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c"},
    {file = "numpy-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9"},
    {file = "numpy-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b"},
    {file = "numpy-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db"},
    {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1"},
    {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426"},
    {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0"},
    {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df"},
    {file = "numpy-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366"},
    {file = "numpy-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142"},
    {file = "numpy-2.1.2-cp312-cp312-win32.whl", hash = "sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550"},
    {file = "numpy-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e"},
    {file = "numpy-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d"},
    {file = "numpy-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf"},
    {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e"},
    {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3"},
    {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8"},
    {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a"},
    {file = "numpy-2.1.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98"},
    {file = "numpy-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe"},
    {file = "numpy-2.1.2-cp313-cp313-win32.whl", hash = "sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a"},
    {file = "numpy-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445"},
    {file = "numpy-2.1.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5"},
    {file = "numpy-2.1.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0"},
    {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17"},
    {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6"},
    {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8"},
    {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35"},
    {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62"},
    {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a"},
    {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952"},
    {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5"},
    {file = "numpy-2.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7"},
    {file = "numpy-2.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e"},
    {file = "numpy-2.1.2.tar.gz", hash = "sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c"},
]

[[package]]
name = "opencv-python"
version = "4.10.0.84"
description = "Wrapper package for OpenCV python bindings."
optional = false
python-versions = ">=3.6"
files = [
    {file = "opencv-python-4.10.0.84.tar.gz", hash = "sha256:72d234e4582e9658ffea8e9cae5b63d488ad06994ef12d81dc303b17472f3526"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc182f8f4cda51b45f01c64e4cbedfc2f00aff799debebc305d8d0210c43f251"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:71e575744f1d23f79741450254660442785f45a0797212852ee5199ef12eed98"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a332b50488e2dda866a6c5573ee192fe3583239fb26ff2f7f9ceb0bc119ea6"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ace140fc6d647fbe1c692bcb2abce768973491222c067c131d80957c595b71f"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2db02bb7e50b703f0a2d50c50ced72e95c574e1e5a0bb35a8a86d0b35c98c236"},
    {file = "opencv_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:32dbbd94c26f611dc5cc6979e6b7aa1f55a64d6b463cc1dcd3c95505a63e48fe"},
]

[package.dependencies]
numpy = {version = ">=1.26.0", markers = "python_version >= \"3.12\""}

[[package]]
name = "tqdm"
version = "4.66.6"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
    {file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"},
    {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"},
]

[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}

[package.extras]
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]

[metadata]
lock-version = "2.0"
python-versions = "^3.12"
content-hash = "f93605d68b70d310dc7383c5e54252f753003732137baad292ec11a523042d66"
16
pyproject.toml
Normal file
@@ -0,0 +1,16 @@
[tool.poetry]
name = "traptools"
version = "0.1.0"
description = ""
authors = ["Ruben van de Ven <git@rubenvandeven.com>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.12"
opencv-python = "^4.10.0.84"
tqdm = "^4.66.6"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"