Add arguments & enable saving of frame
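
Adds command-line arguments (--camera, --verbose, --hide-graph, --hide-preview, --output-dir, --save-interval) and, when an output directory is given, saves the rendered heatmap frame to that directory at the configured interval.

A minimal usage sketch, assuming the script is launched directly (the file name heatmap.py is illustrative; the actual filename is not shown in this diff):

    python heatmap.py --camera 1 --hide-preview --output-dir output --save-interval 30 --verbose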

Author: Ruben van de Ven
Date:   2019-02-04 22:35:40 +01:00
Parent: a40478f282
Commit: 35c53870da
2 changed files with 168 additions and 89 deletions

@@ -1,5 +1,5 @@
 #!/usr/bin/env python
 import cv2
 import dlib
 import numpy as np
@@ -10,7 +10,7 @@ from scipy.ndimage.filters import gaussian_filter
 from PIL import Image, ImageDraw,ImageTk
 import pandas as pd
 import seaborn as sns
 from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
 from matplotlib.figure import Figure
 from matplotlib import cm
@@ -20,12 +20,57 @@ if sys.version_info[0] < 3:
 else:
     import tkinter as Tk
 import time
+import datetime
+import coloredlogs
+import argparse
+argParser = argparse.ArgumentParser(description='Draw a heatmap')
+argParser.add_argument(
+    '--camera',
+    '-c',
+    default=0,
+    type=int,
+    help='The id of the camera'
+)
+argParser.add_argument(
+    '--verbose',
+    '-v',
+    action="store_true",
+)
+argParser.add_argument(
+    '--hide-graph',
+    action="store_true",
+)
+argParser.add_argument(
+    '--hide-preview',
+    action="store_true",
+)
+argParser.add_argument(
+    '--output-dir',
+    '-o',
+    help="directory in which to store every x files",
+)
+argParser.add_argument(
+    '--save-interval',
+    type=int,
+    default=15,
+    help="Interval at which to save heatmap frames (in seconds)"
+)
+args = argParser.parse_args()
+coloredlogs.install(
+    level=logging.DEBUG if args.verbose else logging.INFO,
+    # format='%(asctime)-15s %(name)s %(levelname)s: %(message)s'
+)
+logging.basicConfig( format='%(asctime)-15s %(name)s %(levelname)s: %(message)s' )
 logger = logging.getLogger(__name__)
 # Read Image
-c = cv2.VideoCapture(0)
+c = cv2.VideoCapture(args.camera)
 # im = cv2.imread("headPose.jpg");
@@ -63,7 +108,7 @@ def create_perspective_transform_matrix(src, dst):
     B = np.array(dst).reshape(8)
     af = np.dot(np.linalg.inv(A.T * A) * A.T, B)
     m = np.append(np.array(af).reshape(8), 1).reshape((3, 3))
-    logger.info("Created transformmatrix: src %s dst %s m %s", src, dst, m)
+    logger.info("Created transformmatrix: src {} dst {} m {}".format( src, dst, m))
     return m
 # got this amazing thing from here: https://stackoverflow.com/a/24088499
@@ -158,25 +203,34 @@ else:
 coordinates = {'tl': None, 'tr': None, 'bl': None, 'br': None}
 transform = None
-windowRoot = Tk.Toplevel()
-windowSize = (1000,1000)
-windowRoot.geometry('%dx%d+%d+%d' % (windowSize[0],windowSize[1],0,0))
-figure = Figure(figsize=(16, 9), dpi=100)
-axes = figure.add_subplot(111)
-axes.set_title('Tk embedding')
-axes.set_xlabel('X axis label')
-axes.set_ylabel('Y label')
-# canvas = Tk.Canvas(windowRoot,width=1000,height=1000)
-canvas = FigureCanvasTkAgg(figure,master=windowRoot)
-canvas.show()
-canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
+if not args.hide_graph:
+    windowRoot = Tk.Toplevel()
+    windowSize = (1000,1000)
+    windowRoot.geometry('%dx%d+%d+%d' % (windowSize[0],windowSize[1],0,0))
+    figure = Figure(figsize=(16, 9), dpi=100)
+    axes = figure.add_subplot(111)
+    axes.set_title('Tk embedding')
+    axes.set_xlabel('X axis label')
+    axes.set_ylabel('Y label')
+    # canvas = Tk.Canvas(windowRoot,width=1000,height=1000)
+    canvas = FigureCanvasTkAgg(figure,master=windowRoot)
+    canvas.draw()
+    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
 imageWindowRoot = Tk.Toplevel()
 imageWindowSize = (1000,1000)
 imageWindowRoot.geometry('%dx%d+%d+%d' % (imageWindowSize[0],imageWindowSize[1],0,0))
+imageWindowRoot.attributes("-fullscreen", True)
+# imageCanvas is where the heatmap image is drawn
 imageCanvas = Tk.Canvas(imageWindowRoot,width=1000,height=1000)
 imageCanvas.pack()
+imageWindowRoot.lift()
+if args.output_dir:
+    startTime = time.time()
+    lastSaveTime = startTime
 while True:
     t1 = time.time()
@@ -202,7 +256,7 @@ while True:
 shape = predictor(im, d)
 td2 = time.time()
 logger.debug("Found face points in %fs", td2-td1)
 #2D image points. If you change the image, you need to change vector
 image_points = np.array([
     (shape.part(30).x,shape.part(30).y),    # Nose tip
@@ -212,7 +266,7 @@ while True:
     (shape.part(48).x,shape.part(48).y),    # Left Mouth corner
     (shape.part(54).x,shape.part(54).y)     # Right mouth corner
 ], dtype="double")
 # 3D model points.
 model_points = np.array([
     (0.0, 0.0, 0.0),             # Nose tip
@@ -221,9 +275,9 @@ while True:
     (225.0, 170.0, -135.0),      # Right eye right corner
     (-150.0, -150.0, -125.0),    # Left Mouth corner
     (150.0, -150.0, -125.0)      # Right mouth corner
 ])
 # Camera internals
 focal_length = size[1]
 center = (size[1]/2, size[0]/2)
@@ -232,26 +286,26 @@ while True:
     [0, focal_length, center[1]],
     [0, 0, 1]], dtype = "double"
 )
-# print ("Camera Matrix :\n {0}".format(camera_matrix))
+# logger.info ("Camera Matrix :\n {0}".format(camera_matrix))
 dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
 (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
 if not success:
-    print("Error determining PnP", success)
+    logger.info("Error determining PnP {}".format(success) )
     continue
 logger.debug ("Rotation Vector:\n %s", rotation_vector)
-print ("Translation Vector:\n {0}".format(translation_vector))
+logger.info ("Translation Vector:\n {0}".format(translation_vector))
 # Project a 3D point (0, 0, 1000.0) onto the image plane.
 # We use this to draw a line sticking out of the nose
 (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
 for p in image_points:
     cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
 p1 = ( int(image_points[0][0]), int(image_points[0][1]))
 p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
 cv2.line(im, p1, p2, (255,0,0), 2)
@@ -260,33 +314,35 @@ while True:
 cv2.Rodrigues(rotation_vector, rotMatrix, jacobian=0)
 # Find rotation: https://stackoverflow.com/a/15029416
-rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
-ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
-rz = np.arctan2(rotMatrix[1,0],rotMatrix[0,0])
-print("rotation", rx, ry, rz)
-ry = - np.arcsin(rotMatrix[0,2])
-rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
-rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
-print("rotation ml", rx, ry, rz) # seems better?
+# not used anymore :-)
+# rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
+# ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
+# rz = np.arctan2(rotMatrix[1,0],rotMatrix[0,0])
+# logger.info("rotation {} {} {}".format(rx, ry, rz) )
+# ry = - np.arcsin(rotMatrix[0,2])
+# rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
+# rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
+# logger.info("rotation ml {} {} {}".format(rx, ry, rz) )# seems better?
-# draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
-mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
-mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
-mapPosZ = int((translation_vector[2] + 0 ) / 10000 * 40)
-cv2.circle(im, (mapPosZ + 10, mapPosX + 10), 2, (0,0,255), -1)
-cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
-# make it an _amazing_ stick figurine for the side view
-cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60, mapPosY + 20), (0,0,255), 1)
-cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 55, mapPosY + 25), (0,0,255), 1)
-cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 65, mapPosY + 25), (0,0,255), 1)
-cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 55, mapPosY + 10), (0,0,255), 1)
-cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 65, mapPosY + 10), (0,0,255), 1)
-# draw rotation vector
-cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
 viewDirectionVector = np.dot(np.array([0.0, 0.0, 1000.0]), rotMatrix)
-cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(viewDirectionVector[2] * 100), mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255,255,0), 1)
-cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(viewDirectionVector[2] * 100), mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255,0,255), 1)
+if not args.hide_preview:
+    # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
+    mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
+    mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
+    mapPosZ = int((translation_vector[2] + 0 ) / 10000 * 40)
+    cv2.circle(im, (mapPosZ + 10, mapPosX + 10), 2, (0,0,255), -1)
+    cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
+    # make it an _amazing_ stick figurine for the side view
+    cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60, mapPosY + 20), (0,0,255), 1)
+    cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 55, mapPosY + 25), (0,0,255), 1)
+    cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 65, mapPosY + 25), (0,0,255), 1)
+    cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 55, mapPosY + 10), (0,0,255), 1)
+    cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 65, mapPosY + 10), (0,0,255), 1)
+    # draw rotation vector
+    cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
+    cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(viewDirectionVector[2] * 100), mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255,255,0), 1)
+    cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(viewDirectionVector[2] * 100), mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255,0,255), 1)
@@ -298,7 +354,7 @@ while True:
 # z = t3 * r3 * a = 0
 # => a = -t3 / r3
 # substitute found a in x,y
 a = - translation_vector[2] / rotation_vector[2]
 x = translation_vector[0] + rotation_vector[0] * a
 y = translation_vector[1] + rotation_vector[1] * a
@@ -315,23 +371,26 @@ while True:
 logger.debug("Timer: All other face drawing stuff in %fs", td3-td2)
 # TODO only draw nose line now, so we can change color depending whether on screen or not
 # processed all faces, now draw on screen:
 te1 = time.time()
-# draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
-cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
-cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
-cv2.line(im, (10,10), (10,50), (200,200,200), 2)
-cv2.line(im, (60,10), (60,50), (200,200,200), 2)
-# screen is 16:10
-cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)
+if not args.hide_preview:
+    # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
+    cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
+    cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
+    cv2.line(im, (10,10), (10,50), (200,200,200), 2)
+    cv2.line(im, (60,10), (60,50), (200,200,200), 2)
+    # screen is 16:10
+    cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)
 if transform is None:
-    cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
-    cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
-    cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
-    cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
+    if not args.hide_preview:
+        cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
+        cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
+        cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
+        cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
 tm1 = 0
 tm2 = 0
 tm3 = 0
@@ -346,9 +405,9 @@ while True:
 # dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
 # pointIn3 = [point[0], point[1], 0]
 # targetPoint = np.dot(pointIn3, transformationMatrix)
-# print("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
+# logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
 targetPoint = transform(point)
-print("Looking at", point, targetPoint)
+logger.info("Looking at {} {}".format(point, targetPoint) )
 # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
 # from 1920x1080 to 80x50
 miniTargetPoint = (int(targetPoint[0] / 1920 * 80 + 10), int(targetPoint[1] / 1080 * 50 + 60))
@@ -356,9 +415,11 @@ while True:
 targetInt = (int(targetPoint[0]), int(targetPoint[1]))
 # check if point fits on screen:
 # if so, measure it
-if targetInt[0] >= 0 and targetInt[1] >= 0 and targetInt[0] < metricsSize[1] and targetInt[1] < metricsSize[0]:
+if targetInt[0] >= 0 and targetInt[1] >= 0 and targetInt[0] < metricsSize[0] and targetInt[1] < metricsSize[1]:
     dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
+    logger.debug("Put metric {},{} in matrix of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
     newMetrics[targetInt[1],targetInt[0]] += 1
 # after we collected all new metrics, blur them for smoothness
 # and add to all metrics collected
 tm3 = time.time()
@@ -369,7 +430,8 @@ while True:
 # Display webcam image with overlays
 te2 = time.time()
 logger.debug("Drew on screen in %fs", te2-te1)
-cv2.imshow("Output", im)
+if not args.hide_preview:
+    cv2.imshow("Output", im)
 te3 = time.time()
 logger.debug("showed webcam image in %fs", te3-te2)
@@ -383,7 +445,7 @@ while True:
 normalisedMetrics = np.uint8(cm.plasma(normalisedMetrics)*255)
 tm22 = time.time()
 logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
-# print(normalisedMetrics)
+# logger.info(normalisedMetrics)
 tm23 = time.time()
 image = Image.fromarray(normalisedMetrics)
 wpercent = (imageWindowSize[0] / float(image.size[0]))
@@ -394,17 +456,32 @@ while True:
 imagesprite = imageCanvas.create_image(500,500,image=tkpi, tags="IMG")
 imageWindowRoot.update()
 tm24 = time.time()
-logger.debug("PIL iamge generated in %fs", tm24 - tm23)
+logger.debug("PIL image generated in %fs", tm24 - tm23)
 logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)
-te4 = time.time()
-axes.clear()
-if(len(dataframe) > 2):
-    g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
-canvas.draw()
-windowRoot.update()
-te5 = time.time()
-logger.debug("Drew graph & updated window in %fs", te5-te4)
+if not args.hide_graph:
+    te4 = time.time()
+    axes.clear()
+    if(len(dataframe) > 2):
+        g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
+    canvas.draw()
+    windowRoot.update()
+    te5 = time.time()
+    logger.debug("Drew graph & updated window in %fs", te5-te4)
+if args.output_dir:
+    # save output to dir
+    now = tm24 # time.time()
+    if now - lastSaveTime > args.save_interval:
+        filename = os.path.join(
+            args.output_dir,
+            "frame{}.png".format(
+                datetime.datetime.now().replace(microsecond=0).isoformat()
+            )
+        )
+        image.save(filename)
+        lastSaveTime = now
+    pass
 # (optionally) very slowly fade out previous metrics:
 # metrics = metrics * .999
@@ -430,11 +507,11 @@ while True:
     coordinates['br'] = currentPoint
     recalculate = True
 elif keyPress == ord('t') and transform is not None:
-    print("Coordinates", coordinates)
-    print("Drawing area", screenDrawCorners)
-    print("Test point %s", currentPoint )
-    print("Transformed point %s", transform(currentPoint))
+    logger.info("Coordinates {}".format(coordinates) )
+    logger.info("Drawing area {}".format(screenDrawCorners))
+    logger.info("Test point {}".format(currentPoint ))
+    logger.info("Transformed point {}".format(transform(currentPoint)))
 if recalculate is True and not any (x is None for x in coordinates.values()):
     logger.debug(coordinates.values())
@@ -446,4 +523,3 @@ while True:
 cv2.destroyAllWindows()

output/.gitignore (new file, 3 additions)

@@ -0,0 +1,3 @@
+*
+!.gitignore