Changes, hopefully fixes + debug tools

Ruben van de Ven 2019-02-07 11:05:41 +01:00
parent 5212e3aab2
commit 61d0418edf
5 changed files with 502 additions and 231 deletions

calibrate-capture.py (new file, +20)

@@ -0,0 +1,20 @@
import cv2

c = cv2.VideoCapture(2)
# if not doing this we only have jittery 10fps
c.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
# set camera resolution
c.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
c.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

# grab 15 frames, one per second, for the calibration set
for i in range(15):
    _, im = c.read()
    cv2.imwrite('calibrate/left-{:06d}.png'.format(i), im)
    cv2.imshow('left', im)
    if cv2.waitKey(1000) & 0xFF == ord('q'):
        break

c.release()
cv2.destroyAllWindows()
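A possible refinement (a sketch, not part of this commit): only save frames in which the checkerboard is actually detected, so calibrate.py gets usable input. This assumes the same 7x6 inner-corner pattern that calibrate.py searches for:

import cv2

c = cv2.VideoCapture(2)
c.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
c.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
c.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

saved = 0
while saved < 15:
    _, im = c.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # CALIB_CB_FAST_CHECK quickly rejects frames without a visible pattern
    found, _ = cv2.findChessboardCorners(gray, (7, 6), flags=cv2.CALIB_CB_FAST_CHECK)
    if found:
        cv2.imwrite('calibrate/left-{:06d}.png'.format(saved), im)
        saved += 1
    cv2.imshow('left', im)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

c.release()
cv2.destroyAllWindows()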

calibrate.py (new file, +44)

@@ -0,0 +1,44 @@
import numpy as np
import cv2
import glob

# termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 0.001)
# criteria=cv2.CALIB_CB_FAST_CHECK

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane

images = glob.glob('calibrate/*.png')
for fname in images:
    print(fname)
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # gray = cv2.resize(gray, (640, 360))

    # Find the chessboard corners; the pattern size must match the 7x6
    # grid prepared in objp above, otherwise calibrateCamera will fail
    ret, corners = cv2.findChessboardCorners(gray, (7,6), None)
    print(ret)

    # If found, add object points and image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (7,6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(5000)

cv2.destroyAllWindows()

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(ret, mtx, dist, rvecs, tvecs)
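The script stops at printing the camera matrix mtx and distortion coefficients dist. A minimal follow-up sketch, assuming one of the frames written by calibrate-capture.py exists, that applies the result with OpenCV's standard undistortion calls:

# undistort one of the calibration frames with the estimated intrinsics
img = cv2.imread('calibrate/left-000000.png')
h, w = img.shape[:2]
# alpha=1 keeps all source pixels; the returned ROI crops the black border away
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistorted = cv2.undistort(img, mtx, dist, None, newcameramtx)
rx, ry, rw, rh = roi
undistorted = undistorted[ry:ry+rh, rx:rx+rw]
cv2.imwrite('calibrate/undistorted.png', undistorted)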

get_coordinates.py (new file, +131)

@@ -0,0 +1,131 @@
import pickle
import numpy as np

metricsSize = [960, 600]
screenDrawCorners = np.array([[0,0], [metricsSize[0]-1,0], [0, metricsSize[1]-1], [metricsSize[0]-1,metricsSize[1]-1]])


def create_perspective_transform_matrix(src, dst):
    """ Creates a perspective transformation matrix which transforms points
        in quadrilateral ``src`` to the corresponding points on quadrilateral
        ``dst``.

        Will raise a ``np.linalg.LinAlgError`` on invalid input.
        """
    # See:
    # * http://xenia.media.mit.edu/~cwren/interpolator/
    # * http://stackoverflow.com/a/14178717/71522
    in_matrix = []
    for (x, y), (X, Y) in zip(src, dst):
        in_matrix.extend([
            [x, y, 1, 0, 0, 0, -X * x, -X * y],
            [0, 0, 0, x, y, 1, -Y * x, -Y * y],
        ])

    A = np.matrix(in_matrix, dtype=float)
    B = np.array(dst).reshape(8)
    af = np.dot(np.linalg.inv(A.T * A) * A.T, B)
    m = np.append(np.array(af).reshape(8), 1).reshape((3, 3))
    return m


# got this amazing thing from here: https://stackoverflow.com/a/24088499
def create_perspective_transform(src, dst, round=False, splat_args=False):
    """ Returns a function which will transform points in quadrilateral
        ``src`` to the corresponding points on quadrilateral ``dst``::

            >>> transform = create_perspective_transform(
            ...     [(0, 0), (10, 0), (10, 10), (0, 10)],
            ...     [(50, 50), (100, 50), (100, 100), (50, 100)],
            ... )
            >>> transform((5, 5))
            (74.99999999999639, 74.999999999999957)

        If ``round`` is ``True`` then points will be rounded to the nearest
        integer and integer values will be returned.

            >>> transform = create_perspective_transform(
            ...     [(0, 0), (10, 0), (10, 10), (0, 10)],
            ...     [(50, 50), (100, 50), (100, 100), (50, 100)],
            ...     round=True,
            ... )
            >>> transform((5, 5))
            (75, 75)

        If ``splat_args`` is ``True`` the function will accept two arguments
        instead of a tuple.

            >>> transform = create_perspective_transform(
            ...     [(0, 0), (10, 0), (10, 10), (0, 10)],
            ...     [(50, 50), (100, 50), (100, 100), (50, 100)],
            ...     splat_args=True,
            ... )
            >>> transform(5, 5)
            (74.99999999999639, 74.999999999999957)

        If the input values yield an invalid transformation matrix an identity
        function will be returned and the ``error`` attribute will be set to a
        description of the error::

            >>> transform = create_perspective_transform(
            ...     np.zeros((4, 2)),
            ...     np.zeros((4, 2)),
            ... )
            >>> transform((5, 5))
            (5.0, 5.0)
            >>> transform.error
            'invalid input quads (...): Singular matrix'
        """
    try:
        transform_matrix = create_perspective_transform_matrix(src, dst)
        error = None
    except np.linalg.LinAlgError as e:
        transform_matrix = np.identity(3, dtype=float)
        error = "invalid input quads (%s and %s): %s" % (src, dst, e)
        error = error.replace("\n", "")

    to_eval = "def perspective_transform(%s):\n" % (
        splat_args and "*pt" or "pt",
    )
    to_eval += " res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n"
    to_eval += " res = res / res[2]\n"
    if round:
        to_eval += " return (int(round(res[0][0])), int(round(res[1][0])))\n"
    else:
        to_eval += " return (res[0][0], res[1][0])\n"
    locals = {
        "transform_matrix": transform_matrix,
    }
    locals.update(globals())
    # Python 3 call syntax; the original `exec to_eval in locals, locals`
    # is Python 2 and a SyntaxError here
    exec(to_eval, locals, locals)
    res = locals["perspective_transform"]
    res.matrix = transform_matrix
    res.error = error
    return res


def coordinatesToSrc(coordinates):
    return np.array([coordinates['tl'], coordinates['tr'], coordinates['bl'], coordinates['br']])


print("Coordinates in pickle file:")
# pickle files must be opened in binary mode
with open('coordinates.p', 'rb') as fp:
    c = pickle.load(fp)

for name, coord in c.items():
    print("\t", name, coord[0], coord[1])

transform = create_perspective_transform(coordinatesToSrc(c), screenDrawCorners, True)

print("Metrics")
print(metricsSize)
print("Test halfway point:")
x = ((c['tl'][0]+c['bl'][0])/2 + (c['tr'][0]+c['br'][0])/2) / 2
y = ((c['tl'][1]+c['tr'][1])/2 + (c['bl'][1]+c['br'][1])/2) / 2
print("\t", x, y)
print(transform((x, y)))
print("tl", transform((c['tl'])))
print("tr", transform((c['tr'])))
print("bl", transform((c['bl'])))
print("br", transform((c['br'])))

(modified file, +307/-228)

@@ -76,6 +76,11 @@ argParser.add_argument(
     default=4,
     help="Nr of total processes (min 3)"
 )
+argParser.add_argument(
+    '--only-metrics',
+    action="store_true",
+    help="Render only metrics instead of the heatmap. Convenient for debugging."
+)
 
 args = argParser.parse_args()
@@ -88,10 +93,7 @@ logger = logging.getLogger(__name__)
 # im = cv2.imread("headPose.jpg");
-spotSize = (100,100)
-spot = Image.open(os.path.join(cur_dir,"spot.png")).convert('L')
-spot = spot.resize(spotSize)
-spot = np.array(spot)
 
 predictor_path = os.path.join(cur_dir,"shape_predictor_68_face_landmarks.dat")
@@ -106,11 +108,28 @@ screenDrawCorners = np.array([[10,60], [90, 60], [10, 110], [90, 110]])
 # metrics matrix
 metricsSize = [1920,1080]
-metricsSize = [1280,800]
-metricsSize = [960,600]
+# metricsSize = [1280,800]
+# metricsSize = [960,600]
+metricsSize = [1080,1080] # no point in having it different from the render size
 dataframe = pd.DataFrame(columns=['x','y'])
 
 renderSize = [1280,800]
+renderSize = [1080,1080]
+# Used to create a black backdrop, instead of the ugly Qt-gray, if necessary
+screenSize = [1920,1080]
+
+spotS = int(100./720*renderSize[1])
+spotSize = (spotS, spotS)
+spot = Image.open(os.path.join(cur_dir,"spot.png")).convert('L')
+spot = spot.resize(spotSize)
+spot = np.array(spot)
+
+backdrop = None
+if screenSize != renderSize:
+    shape = [screenSize[1],screenSize[0], 3]
+    backdrop = np.zeros(shape, dtype=np.uint8)
 
 metrics = None
 if lastMetricsFilename and os.path.isfile(lastMetricsFilename):
@@ -231,7 +250,7 @@ def coordinatesToSrc(coordinates):
 # coordinates of the screen boundaries
 if os.path.exists("coordinates.p"):
     coordinates = pickle.load(open("coordinates.p", "rb"))
-    transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)
+    transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners, True)
     a = [np.array([ 1312.15541183]), np.array([ 244.56278002]), 0]
     logger.info("Loaded coordinates: %s", coordinatesToSrc(coordinates))
@@ -306,6 +325,7 @@ def captureFacesPoints(i):
         # We use this later for calibrating
         currentPoint = None
+        currentVectors = None
         currentPoints = []
 
         if len(dets) > 0:
@@ -420,13 +440,14 @@ def captureFacesPoints(i):
                 # x = translation_vector[0] + rotation_vector[0]* a
                 # y = translation_vector[1] + rotation_vector[1] * a
                 # logger.warn("First {} {},{}".format(a,x,y))
-                a = - translation_vector[2]# / viewDirectionVector[2]
+                a = translation_vector[2] / viewDirectionVector[2]
                 x = translation_vector[0] + viewDirectionVector[0] * a
                 y = translation_vector[1] + viewDirectionVector[1] * a
                 # logger.warn("Second {} {},{}".format(a,x,y))
 
                 point = np.array([x,y])
                 currentPoint = point
+                currentVectors = {'translation': translation_vector, 'rotation': viewDirectionVector}
                 currentPoints.append(point)
 
     td3 = time.time()
@@ -434,7 +455,7 @@ def captureFacesPoints(i):
         # TODO only draw nose line now, so we can change color depending whether on screen or not
 
-        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints}
+        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'currentVectors': currentVectors}
         results['im'] = im if not args.hide_preview else None
 
         try:
@@ -445,11 +466,14 @@ def captureFacesPoints(i):
 def captureVideo():
     c = cv2.VideoCapture(args.camera)
+    # if not doing this we only have jittery 10fps
+    c.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
     # set camera resolution
-    # c.set(3, 1280)
-    # c.set(4, 720)
-    c.set(3, 960)
-    c.set(4, 540)
+    c.set(3, 1280)
+    c.set(4, 720)
+    # c.set(3, 960)
+    # c.set(4, 540)
     logger.debug("Camera FPS: {}".format(c.get(5)))
 
     while True:
@@ -462,225 +486,280 @@ def captureVideo():
         logger.debug("Queue sizes: image: {}, points: {} ".format(photoQueue.qsize(), pointsQueue.qsize()))
 
-processes = []
-for i in range(args.processes - 2):
-    p = multiprocessing.Process(target=captureFacesPoints, args=(i,))
-    p.daemon = True
-    p.start()
-    processes.append(p)
-
-p = multiprocessing.Process(target=captureVideo, args=())
-p.daemon = True
-p.start()
-processes.append(p)
-
-newMetrics = np.zeros((metricsSize[1], metricsSize[0]))
-lastRunTime = 0
-while True:
-    result = None
-    try:
-        te1 = time.time()
-        result = pointsQueue.get()
-        te1b = time.time()
-        im = result['im']
-        currentPoint = result['currentPoint']
-        currentPoints = result['currentPoints']
-    except queue.Empty as e:
-        logger.warn('Result queue empty')
-
-    if result is not None:
-        if not args.hide_preview:
-            # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
-            cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
-            cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
-            cv2.line(im, (10,10), (10,50), (200,200,200), 2)
-            cv2.line(im, (60,10), (60,50), (200,200,200), 2)
-
-            # screen is 16:10
-            cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)
-
-        if transform is None:
-            if not args.hide_preview:
-                cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
-                cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
-                cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
-                cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
-            tm1 = 0
-            tm2 = 0
-            tm3 = 0
-            tm4 = 0
-        else:
-            for point in currentPoints:
-                # check if within coordinates:
-                # dot1 = np.dot(coordinates['tl'] - point, coordinates['tl'] - coordinates['br'])
-                # dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
-                # pointIn3 = [point[0], point[1], 0]
-                # targetPoint = np.dot(pointIn3, transformationMatrix)
-                # logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
-                targetPoint = transform(point)
-                logger.info("Looking at {} {}".format(point, targetPoint) )
-                # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
-                # from 1920x1080 to 80x50
-                if not args.hide_preview:
-                    miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10), int(targetPoint[1] / metricsSize[1] * 50 + 60))
-                    cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
-                targetInt = (int(targetPoint[0]), int(targetPoint[1]))
-                # check if point fits on screen:
-                # if so, measure it
-                if targetInt[0]+spotSize[0] >= 0 and targetInt[1]+spotSize[1] >= 0 and targetInt[0]-spotSize[0] < metricsSize[0] and targetInt[1]-spotSize[0] < metricsSize[1]:
-                    dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
-                    logger.info("Put metric {},{} in metrix of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
-                    #TODO: make it one numpy array action:
-                    for sx in range(spotSize[0]):
-                        for sy in range(spotSize[1]):
-                            mx = targetInt[0] + sx - (spotSize[0]-1)/2
-                            my = targetInt[1] + sy - (spotSize[1]-1)/2
-                            if mx >= 0 and my >= 0 and mx < metricsSize[0] and my < metricsSize[1]:
-                                newMetrics[my,mx] += spot[sx,sy] #/ 20
-
-    # after we collected all new metrics, blur them foor smoothness
-    # and add to all metrics collected
-    tm3 = time.time()
-    # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
-    tm4 = time.time()
-    # logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)
-
-    # Display webcam image with overlays
-    te2 = time.time()
-    if result is not None and not args.hide_preview:
-        cv2.imshow("Output", im)
-    te3 = time.time()
-    logger.debug("showed webcam image in %fs", te3-te2)
-    logger.debug("Rendering took %fs", te3-te1)
-    logger.debug("Waited took %fs", te1b-te1)
-
-    # blur smooth the heatmap
-    # logger.debug("Max blurred metrics: %f", np.max(metrics))
-
-    # update the heatmap output
-    tm21 = time.time()
-    t = tm21
-    diffT = min(1, t - lastRunTime)
-    lastRunTime = t
-    # animDuration = 1
-    # factor = animDuration
-    metrics = metrics + newMetrics*diffT
-    newMetrics *= (1-diffT)
-    print('MAXES', np.max(metrics), np.max(newMetrics), diffT, t - lastRunTime)
-
-    # smooth impact of first hits by having at least 0.05
-    normalisedMetrics = metrics / (max(255*7 ,np.max(metrics)))
-    # convert to colormap, thanks to: https://stackoverflow.com/a/10967471
-    normalisedMetricsColored = np.uint8(cm.nipy_spectral(normalisedMetrics)*255)
-    normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_RGB2BGR)
-
-    tm22 = time.time()
-    logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
-    # logger.info(normalisedMetrics)
-    tm23 = time.time()
-    logger.info("Size: {}".format(normalisedMetricsColoredBGR.shape))
-    cv2.imshow("test",normalisedMetricsColoredBGR)
-    # image = Image.fromarray(normalisedMetricsColored)
-    # wpercent = (imageWindowSize[0] / float(image.size[0]))
-    # hsize = int((float(image.size[1]) * float(wpercent)))
-    # renderImage = image.resize((renderSize[0], renderSize[1]))
-    # print(renderImage.size, "lala")
-    # if args.queue_length:
-    #     imageQueue.append(image)
-    #     if len(imageQueue) > args.queue_length:
-    #         logger.warn("Use image from queue :-)")
-    #         image = imageQueue.pop(0)
-    # tkpi = ImageTk.PhotoImage(renderImage)
-    # imageCanvas.delete("IMG")
-    # imagesprite = imageCanvas.create_image(renderSize[0]/2, renderSize[1]/2,image=tkpi, tags="IMG")
-    # imageWindowRoot.update()
-    tm24 = time.time()
-    logger.debug("PIL image generated in %fs", tm24 - tm23)
-    # logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)
-
-    if not args.hide_graph:
-        te4 = time.time()
-        axes.clear()
-        if(len(dataframe) > 2):
-            g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
-        canvas.draw()
-        windowRoot.update()
-        te5 = time.time()
-        logger.debug("Drew graph & updated window in %fs", te5-te4)
-
-    if args.output_dir:
-        # save output to dir
-        now = tm24 # time.time()
-        if now - lastSaveTime > args.save_interval:
-            filename = os.path.join(
-                args.output_dir,
-                "frame{}.png".format(
-                    datetime.datetime.now().replace(microsecond=0).isoformat()
-                )
-            )
-            cv2.imwrite(filename, normalisedMetricsColoredBGR)
-            # image.save(filename)
-            with open(lastMetricsFilename, 'wb') as fp:
-                pickle.dump( metrics, fp )
-            logger.debug("Saved frame to {}".format(filename))
-            lastSaveTime = now
-
-    # (optionally) very slowly fade out previous metrics:
-    metrics = metrics * .9997
-
-    keyPress = cv2.waitKey(5)
-
-    if keyPress==27:
-        break
-    elif keyPress == ord('d'):
-        logger.setLevel(logging.DEBUG)
-    elif keyPress > -1 and currentPoint is not None:
-        recalculate = False
-        if keyPress == ord('1'):
-            coordinates['tl'] = currentPoint
-            recalculate = True
-            logger.warn('Calibrate 1')
-        elif keyPress == ord('2'):
-            coordinates['tr'] = currentPoint
-            recalculate = True
-            logger.warn('Calibrate 2')
-        elif keyPress == ord('3'):
-            coordinates['bl'] = currentPoint
-            recalculate = True
-            logger.warn('Calibrate 3')
-        elif keyPress == ord('4'):
-            coordinates['br'] = currentPoint
-            recalculate = True
-            logger.warn('Calibrate 4')
-        elif keyPress == ord('t') and transform is not None:
-            logger.info("Coordinates {}".format(coordinates) )
-            logger.info("Drawing area {}".format(screenDrawCorners))
-            logger.info("Test point {}".format(currentPoint ))
-            logger.info("Transformed point {}".format(transform(currentPoint)))
-
-        if recalculate is True and not any (x is None for x in coordinates.values()):
-            logger.debug(coordinates.values())
-            pickle.dump( coordinates, open( "coordinates.p", "wb" ) )
-            logger.info("Saved coordinates")
-
-            transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)
-
-    duration = time.time()-te1
-    fps = 1/duration
-    logger.info("Rendering loop %fs %ffps", duration, fps)
-cv2.destroyAllWindows()
+if __name__ == '__main__':
+    processes = []
+    for i in range(args.processes - 2):
+        p = multiprocessing.Process(target=captureFacesPoints, args=(i,))
+        p.daemon = True
+        p.start()
+        processes.append(p)
+
+    p = multiprocessing.Process(target=captureVideo, args=())
+    p.daemon = True
+    p.start()
+    processes.append(p)
+
+    newMetrics = np.zeros((metricsSize[1], metricsSize[0]))
+    lastRunTime = 0
+
+    while True:
+        result = None
+        te1 = time.time()
+        try:
+            result = pointsQueue.get()
+            te1b = time.time()
+            im = result['im']
+            currentPoint = result['currentPoint']
+            currentPoints = result['currentPoints']
+            currentVectors = result['currentVectors']
+        except queue.Empty as e:
+            logger.warn('Result queue empty')
+
+        tr1 = time.time()
+        if result is not None:
+            if not args.hide_preview:
+                # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
+                cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
+                cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
+                cv2.line(im, (10,10), (10,50), (200,200,200), 2)
+                cv2.line(im, (60,10), (60,50), (200,200,200), 2)
+
+                # screen is 16:10
+                cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)
+
+            if transform is None:
+                if not args.hide_preview:
+                    cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
+                    cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
+                    cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
+                    cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
+                tm1 = 0
+                tm2 = 0
+                tm3 = 0
+                tm4 = 0
+            else:
+                for point in currentPoints:
+                    # check if within coordinates:
+                    # dot1 = np.dot(coordinates['tl'] - point, coordinates['tl'] - coordinates['br'])
+                    # dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
+                    # pointIn3 = [point[0], point[1], 0]
+                    # targetPoint = np.dot(pointIn3, transformationMatrix)
+                    # logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
+                    targetPoint = transform(point)
+                    logger.info("Looking at {} {}".format(point, targetPoint) )
+                    # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
+                    # from 1920x1080 to 80x50
+                    if not args.hide_preview:
+                        miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10), int(targetPoint[1] / metricsSize[1] * 50 + 60))
+                        cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
+                    targetInt = (int(targetPoint[0]), int(targetPoint[1]))
+                    # check if point fits on screen:
+                    # if so, measure it
+                    if targetInt[0]+spotSize[0] >= 0 and targetInt[1]+spotSize[1] >= 0 and targetInt[0]-spotSize[0] < metricsSize[0] and targetInt[1]-spotSize[1] < metricsSize[1]:
+                        if not args.hide_graph:
+                            dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
+                        logger.info("Put metric {},{} in metrics of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
+                        # newMetrics[targetInt[1]-1,targetInt[0]-1] += 1
+                        # print("MAX",np.max(newMetrics))
+                        #TODO: make it one numpy array action:
+                        for sx in range(spotSize[0]):
+                            for sy in range(spotSize[1]):
+                                # integer division, so the indices stay ints
+                                mx = targetInt[0] + sx - (spotSize[0]-1)//2
+                                my = targetInt[1] + sy - (spotSize[1]-1)//2
+                                if mx >= 0 and my >= 0 and mx < metricsSize[0] and my < metricsSize[1]:
+                                    newMetrics[my,mx] += spot[sx,sy] #/ 20
+
+        # after we collected all new metrics, blur them for smoothness
+        # and add to all metrics collected
+        tm3 = time.time()
+        # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
+        tm4 = time.time()
+        # logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)
+
+        # Display webcam image with overlays
+        te2 = time.time()
+        if result is not None and not args.hide_preview:
+            cv2.imshow("Output", im)
+        te3 = time.time()
+        logger.debug("Pre processing took: {}s".format(te2-tr1))
+        logger.debug("showed webcam image in %fs", te3-te2)
+        logger.debug("Rendering took %fs", te3-te1)
+        logger.debug("Waited took %fs", te1b-te1)
+
+        # blur smooth the heatmap
+        # logger.debug("Max blurred metrics: %f", np.max(metrics))
+
+        # update the heatmap output
+        tm21 = time.time()
+        t = tm21
+        diffT = min(1, t - lastRunTime)
+        lastRunTime = t
+        # animDuration = 1
+        # factor = animDuration
+        metrics = metrics + newMetrics*diffT
+        newMetrics *= (1-diffT)
+        print('MAXES', np.max(metrics), np.max(newMetrics), diffT, t - lastRunTime)
+
+        # smooth impact of first hits by having at least 0.05
+        normalisedMetrics = metrics / (max(255*7 ,np.max(metrics)))
+        # convert to colormap, thanks to: https://stackoverflow.com/a/10967471
+        if args.only_metrics:
+            # output only metrics instead of heatmap. Useful for debugging O:)
+            nmax = np.max(newMetrics)
+            renderMetrics = newMetrics/nmax if nmax > 0 else newMetrics
+            normalisedMetricsColored = np.uint8(renderMetrics *255 )
+            normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_GRAY2BGR)
+            # draw grid lines
+            for i in range(int(metricsSize[0]/100)):
+                cv2.line(normalisedMetricsColoredBGR, (i*100, 0), (i*100, metricsSize[1]), (150,150,150), 1)
+        else:
+            normalisedMetricsColored = np.uint8(cm.nipy_spectral(normalisedMetrics)*255)
+            normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_RGB2BGR)
+
+        if currentPoint is not None and args.verbose:
+            cv2.putText(normalisedMetricsColoredBGR, "x: {}".format(currentPoint[0]), (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
+            cv2.putText(normalisedMetricsColoredBGR, "y: {}".format(currentPoint[1]), (10,90), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
+            cv2.putText(normalisedMetricsColoredBGR, "pos: x: {}, y: {}, z: {}".format(
+                currentVectors['translation'][0],
+                currentVectors['translation'][1],
+                currentVectors['translation'][2]
+            ), (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
+            cv2.putText(
+                normalisedMetricsColoredBGR,
+                "rot: x: {}, y: {}, z: {}".format(
+                    currentVectors['rotation'][0],
+                    currentVectors['rotation'][1],
+                    currentVectors['rotation'][2],
+                ), (10,130), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
+            targetPoint = transform(currentPoint)
+            logger.info("Are we really looking at {}".format(targetPoint))
+            cv2.circle(normalisedMetricsColoredBGR, targetPoint, 2, (0,255,0), -1)
+            cv2.line(
+                normalisedMetricsColoredBGR,
+                (metricsSize[0]//2, metricsSize[1]//2),
+                tuple(targetPoint),
+                (255,0,0), 2
+            )
+            # cv2.putText(normalisedMetricsColoredBGR, "z: {}".format(currentPoint[2]), (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
+
+        tm22 = time.time()
+        logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
+        # logger.info(normalisedMetrics)
+        tm23 = time.time()
+        # normalisedMetricsColoredBGR = cv2.resize(normalisedMetricsColoredBGR, tuple(renderSize))
+        if backdrop is not None:
+            # integer division, since these become slice indices
+            dx = (screenSize[0] - renderSize[0]) // 2
+            dy = (screenSize[1] - renderSize[1]) // 2
+            print(dx, dy)
+            backdrop[dy:dy+renderSize[1], dx:dx+renderSize[0]] = normalisedMetricsColoredBGR
+            renderImage = backdrop
+        else:
+            renderImage = normalisedMetricsColoredBGR
+        cv2.imshow("test", renderImage)
+        # image = Image.fromarray(normalisedMetricsColored)
+        # wpercent = (imageWindowSize[0] / float(image.size[0]))
+        # hsize = int((float(image.size[1]) * float(wpercent)))
+        # renderImage = image.resize((renderSize[0], renderSize[1]))
+        # print(renderImage.size, "lala")
+
+        # if args.queue_length:
+        #     imageQueue.append(image)
+        #     if len(imageQueue) > args.queue_length:
+        #         logger.warn("Use image from queue :-)")
+        #         image = imageQueue.pop(0)
+
+        # tkpi = ImageTk.PhotoImage(renderImage)
+        # imageCanvas.delete("IMG")
+        # imagesprite = imageCanvas.create_image(renderSize[0]/2, renderSize[1]/2,image=tkpi, tags="IMG")
+        # imageWindowRoot.update()
+        tm24 = time.time()
+        logger.debug("Render image generated in %fs", tm24 - tm23)
+        # logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)
+
+        if not args.hide_graph:
+            te4 = time.time()
+            axes.clear()
+            if(len(dataframe) > 2):
+                g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
+            canvas.draw()
+            windowRoot.update()
+            te5 = time.time()
+            logger.debug("Drew graph & updated window in %fs", te5-te4)
+
+        if args.output_dir:
+            # save output to dir
+            now = tm24 # time.time()
+            if now - lastSaveTime > args.save_interval:
+                filename = os.path.join(
+                    args.output_dir,
+                    "frame{}.png".format(
+                        datetime.datetime.now().replace(microsecond=0).isoformat()
+                    )
+                )
+                cv2.imwrite(filename, normalisedMetricsColoredBGR)
+                # image.save(filename)
+
+                with open(lastMetricsFilename, 'wb') as fp:
+                    pickle.dump( metrics, fp )
+
+                logger.debug("Saved frame to {}".format(filename))
+                lastSaveTime = now
+
+        # (optionally) very slowly fade out previous metrics:
+        metrics = metrics * .9997
+
+        keyPress = cv2.waitKey(5)
+
+        if keyPress==27:
+            break
+        elif keyPress == ord('d'):
+            logger.setLevel(logging.DEBUG)
+        elif keyPress > -1 and currentPoint is not None:
+            recalculate = False
+            if keyPress == ord('1'):
+                coordinates['tl'] = currentPoint
+                recalculate = True
+                logger.warn('Calibrate 1')
+            elif keyPress == ord('2'):
+                coordinates['tr'] = currentPoint
+                recalculate = True
+                logger.warn('Calibrate 2')
+            elif keyPress == ord('3'):
+                coordinates['bl'] = currentPoint
+                recalculate = True
+                logger.warn('Calibrate 3')
+            elif keyPress == ord('4'):
+                coordinates['br'] = currentPoint
+                recalculate = True
+                logger.warn('Calibrate 4')
+            elif keyPress == ord('t') and transform is not None:
+                logger.info("Coordinates {}".format(coordinates) )
+                logger.info("Drawing area {}".format(screenDrawCorners))
+                logger.info("Test point {}".format(currentPoint ))
+                logger.info("Transformed point {}".format(transform(currentPoint)))
+
+            if recalculate is True and not any (x is None for x in coordinates.values()):
+                logger.debug(coordinates.values())
+                pickle.dump( coordinates, open( "coordinates.p", "wb" ) )
+                logger.info("Saved coordinates")
+
+                transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners, True)
+
+        duration = time.time()-te1
+        fps = 1/duration
+        logger.info("Rendering loop %fs %ffps", duration, fps)
+
+    cv2.destroyAllWindows()

output/.gitignore (vendored, deleted, -3)

@@ -1,3 +0,0 @@
-*
-!.gitignore