First working multiprocessing
This commit is contained in:
parent 02627cf897
commit fd76f7a97d
1 changed file with 212 additions and 159 deletions

head_pose.py (371 changed lines)
@@ -21,9 +21,11 @@ else:
     import tkinter as Tk
 import time
 import datetime
+import Queue

 import coloredlogs
 import argparse
+import multiprocessing

 argParser = argparse.ArgumentParser(description='Draw a heatmap')
 argParser.add_argument(
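The new Queue import is there for its exception types, not for its queue class: in Python 2, put_nowait() and get_nowait() on a multiprocessing.Queue raise Queue.Full and Queue.Empty from the stdlib Queue module, which is exactly what the worker code added further down catches. A minimal sketch of the pattern (queue size and payload are made up):

    import Queue            # Python 2 stdlib: supplies the Full/Empty exception types
    import multiprocessing

    q = multiprocessing.Queue(maxsize=2)

    def offer(item):
        # put_nowait() raises Queue.Full once maxsize items are waiting;
        # dropping the item keeps the producer from ever blocking.
        try:
            q.put_nowait(item)
        except Queue.Full:
            pass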
@@ -65,6 +67,12 @@ argParser.add_argument(
     default=0,
     help="Nr of frames to keep in queue (adds a delay)"
 )
+argParser.add_argument(
+    '--processes',
+    type=int,
+    default=4,
+    help="Nr of total processes (min 3)"
+)

 args = argParser.parse_args()

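The "min 3" in the new --processes flag follows from how the budget is split later in this commit: one process runs the main render loop, one runs captureVideo, and the remaining args.processes - 2 run captureFacesPoints workers, so having at least one worker requires at least three processes overall. A hedged sketch of that arithmetic (this validation itself is not in the commit):

    n_workers = args.processes - 2  # main loop + camera producer take the other two
    if n_workers < 1:
        argParser.error("--processes must be at least 3")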
@@ -75,14 +83,6 @@ coloredlogs.install(

 logger = logging.getLogger(__name__)

-# Read Image
-#c = cv2.VideoCapture(args.camera)
-c = cv2.VideoCapture(args.camera)
-# set camera resolution
-c.set(3, 1280)
-c.set(4, 720)
-#c.set(3, 480)
-#c.set(4, 320)
 # im = cv2.imread("headPose.jpg");

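The magic numbers in c.set(3, 1280) and c.set(4, 720) are OpenCV capture property ids: 3 is the frame width, 4 the frame height, and c.get(5), used later in captureVideo, is the FPS property. With named constants (spelled cv2.cv.CV_CAP_PROP_* on old OpenCV 2.4 builds, plain cv2.CAP_PROP_* on OpenCV 3+) the removed block reads:

    import cv2

    c = cv2.VideoCapture(0)                  # device index assumed
    c.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)    # property id 3
    c.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)    # property id 4
    print(c.get(cv2.CAP_PROP_FPS))           # property id 5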
@@ -93,8 +93,6 @@ if args.output_dir:
 else:
     lastMetricsFilename = None

-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor(predictor_path)

 screenDrawCorners = np.array([[10,60], [90, 60], [10, 110], [90, 110]])

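The module-level detector and predictor go away because each worker now constructs its own pair inside captureFacesPoints: dlib's detector and shape predictor are not straightforward to share across process boundaries, so building them per process rather than inheriting one shared instance is the usual multiprocessing pattern. A minimal sketch:

    import dlib

    def worker(predictor_path):
        # one detector/predictor per process, created after the process starts
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(predictor_path)
        # ... detection loop as in captureFacesPoints ...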
@@ -271,154 +269,206 @@ if args.output_dir:
 if args.queue_length:
     imageQueue = []

+lock = multiprocessing.Lock()
+photoQueue = multiprocessing.Queue(maxsize=args.processes)
+pointsQueue = multiprocessing.Queue(maxsize=args.processes)
+
+def captureFacesPoints(i):
+    logger.info("Start capturer {}".format(i))
+    # dedicated detector & predictor instances:
+    detector = dlib.get_frontal_face_detector()
+    predictor = dlib.shape_predictor(predictor_path)
+
+    while True:
+        t1 = time.time()
+        im = photoQueue.get(block=True, timeout=10)
+        if im is None:
+            continue
+        logger.debug("Got photo in {}".format(i))
+        size = im.shape
+        t2 = time.time()
+        logger.debug("Captured frame in %fs", t2-t1)
+
+        # Docs: Ask the detector to find the bounding boxes of each face. The 1 in the
+        # second argument indicates that we should upsample the image 1 time. This
+        # will make everything bigger and allow us to detect more faces.
+        dets = detector(im, 1)
+        t3 = time.time()
+        logger.debug("Number of faces detected: {} - took {}s".format(len(dets), t3-t2))
+
+        # We use this later for calibrating
+        currentPoint = None
+        currentPoints = []
+
+        if len(dets) > 0:
+
+            for d in dets:
+                td1 = time.time()
+                shape = predictor(im, d)
+                td2 = time.time()
+                logger.debug("Found face points in %fs", td2-td1)
+
+                # 2D image points. If you change the image, you need to change this vector
+                image_points = np.array([
+                    (shape.part(30).x, shape.part(30).y),    # Nose tip
+                    (shape.part(8).x,  shape.part(8).y),     # Chin
+                    (shape.part(36).x, shape.part(36).y),    # Left eye left corner
+                    (shape.part(45).x, shape.part(45).y),    # Right eye right corner
+                    (shape.part(48).x, shape.part(48).y),    # Left mouth corner
+                    (shape.part(54).x, shape.part(54).y)     # Right mouth corner
+                ], dtype="double")
+
+                # 3D model points.
+                model_points = np.array([
+                    (0.0, 0.0, 0.0),          # Nose tip
+                    (0.0, -330.0, -65.0),     # Chin
+                    (-225.0, 170.0, -135.0),  # Left eye left corner
+                    (225.0, 170.0, -135.0),   # Right eye right corner
+                    (-150.0, -150.0, -125.0), # Left mouth corner
+                    (150.0, -150.0, -125.0)   # Right mouth corner
+                ])
+
+                # Camera internals
+                focal_length = size[1]
+                center = (size[1]/2, size[0]/2)
+                camera_matrix = np.array(
+                    [[focal_length, 0, center[0]],
+                     [0, focal_length, center[1]],
+                     [0, 0, 1]], dtype="double"
+                )
+
+                # logger.info("Camera Matrix :\n {0}".format(camera_matrix))
+
+                dist_coeffs = np.zeros((4,1))  # Assuming no lens distortion
+                (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
+
+                if not success:
+                    logger.info("Error determining PnP {}".format(success))
+                    continue
+
+                logger.debug("Rotation Vector:\n %s", rotation_vector)
+                logger.debug("Translation Vector:\n {0}".format(translation_vector))
+
+                # Project a 3D point (0, 0, 1000.0) onto the image plane.
+                # We use this to draw a line sticking out of the nose
+                (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
+
+                for p in image_points:
+                    cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
+
+                p1 = (int(image_points[0][0]), int(image_points[0][1]))
+                p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
+                cv2.line(im, p1, p2, (255,0,0), 2)
+
+                rotMatrix = np.zeros([3,3])
+                cv2.Rodrigues(rotation_vector, rotMatrix, jacobian=0)
+
+                # Find rotation: https://stackoverflow.com/a/15029416
+                # not used anymore :-)
+                # rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
+                # ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
+                # rz = np.arctan2(rotMatrix[1,0], rotMatrix[0,0])
+                # logger.info("rotation {} {} {}".format(rx, ry, rz))
+                # ry = - np.arcsin(rotMatrix[0,2])
+                # rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
+                # rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
+                # logger.info("rotation ml {} {} {}".format(rx, ry, rz))  # seems better?
+                viewDirectionVector = np.dot(np.array([0.0, 0.0, 1.0]), rotMatrix)
+
+                if not args.hide_preview:
+                    # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
+                    mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
+                    mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
+                    mapPosZ = int((translation_vector[2] + 0) / 10000 * 40)
+                    cv2.circle(im, (mapPosZ + 10, mapPosX + 10), 2, (0,0,255), -1)
+                    cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
+                    # make it an _amazing_ stick figurine for the side view
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60, mapPosY + 20), (0,0,255), 1)
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 55, mapPosY + 25), (0,0,255), 1)
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 65, mapPosY + 25), (0,0,255), 1)
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 55, mapPosY + 10), (0,0,255), 1)
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 65, mapPosY + 10), (0,0,255), 1)
+                    # draw rotation vector
+                    cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
+
+                    cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(viewDirectionVector[2] * 100), mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255,255,0), 1)
+                    cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(viewDirectionVector[2] * 100), mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255,0,255), 1)
+
+                # Translation vector gives position in space:
+                # x, y, z: 0,0,0 is center of camera
+                # line: (x,y,z) = f(a) = (t1 + r1*a, t2 + r2*a, t3 + r3*a)
+                # Screen: (x,y,z) = (x,y,0)
+                # Intersection:
+                # x = t1 + r1 * a
+                # y = t2 + r2 * a
+                # z = t3 + r3 * a = 0
+                # => a = -t3 / r3
+                # substitute found a in x,y
+
+                # seems to be wrong?
+                # a = - translation_vector[2]  # / rotation_vector[2]
+                # x = translation_vector[0] + rotation_vector[0] * a
+                # y = translation_vector[1] + rotation_vector[1] * a
+                # logger.warn("First {} {},{}".format(a,x,y))
+                a = - translation_vector[2]  # / viewDirectionVector[2]
+                x = translation_vector[0] + viewDirectionVector[0] * a
+                y = translation_vector[1] + viewDirectionVector[1] * a
+                # logger.warn("Second {} {},{}".format(a,x,y))
+                point = np.array([x,y])
+
+                currentPoint = point
+                currentPoints.append(point)
+
+        td3 = time.time()
+        logger.debug("Timer: All other face drawing stuff in %fs", td3-td2)
+
+        # TODO only draw nose line now, so we can change color depending whether on screen or not
+
+        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'im': im}
+
+        try:
+            pointsQueue.put_nowait(results)
+        except Queue.Full as e:
+            logger.critical("Result queue full?")
+        # not applicable to multiprocessing.Queue in py2.7: photoQueue.task_done()
+
+def captureVideo():
+    c = cv2.VideoCapture(args.camera)
+    # set camera resolution
+    c.set(3, 1280)
+    c.set(4, 720)
+    logger.debug("Camera FPS: {}".format(c.get(5)))
+
+    while True:
+        _, im = c.read()
+        try:
+            photoQueue.put_nowait(im)
+        except Queue.Full as e:
+            logger.debug("Photo queue full")
+            time.sleep(.05)
+        logger.debug("Queue sizes: image: {}, points: {} ".format(photoQueue.qsize(), pointsQueue.qsize()))
+
+processes = []
+for i in range(args.processes - 2):
+    p = multiprocessing.Process(target=captureFacesPoints, args=(i,))
+    p.daemon = True
+    p.start()
+    processes.append(p)
+
+p = multiprocessing.Process(target=captureVideo, args=())
+p.daemon = True
+p.start()
+processes.append(p)
+
 while True:
-    if args.hide_preview:
-        # if preview is hidden, we can always re-raise the image window
-        imageWindowRoot.lift()
-
-    t1 = time.time()
-    _, im = c.read()
-    size = im.shape
-    t2 = time.time()
-    logger.debug("Captured frame in %fs", t2-t1)
-    # Docs: Ask the detector to find the bounding boxes of each face. The 1 in the
-    # second argument indicates that we should upsample the image 1 time. This
-    # will make everything bigger and allow us to detect more faces.
-    dets = detector(im, 1)
-    t3 = time.time()
-    logger.debug("Number of faces detected: {} - took {}s".format(len(dets), t3-t2))
-
-    # We use this later for calibrating
-    currentPoint = None
-    currentPoints = []
-
-    if len(dets) > 0:
-
-        for d in dets:
-            td1 = time.time()
-            shape = predictor(im, d)
-            td2 = time.time()
-            logger.debug("Found face points in %fs", td2-td1)
-
-            # 2D image points. If you change the image, you need to change this vector
-            image_points = np.array([
-                (shape.part(30).x, shape.part(30).y),    # Nose tip
-                (shape.part(8).x,  shape.part(8).y),     # Chin
-                (shape.part(36).x, shape.part(36).y),    # Left eye left corner
-                (shape.part(45).x, shape.part(45).y),    # Right eye right corner
-                (shape.part(48).x, shape.part(48).y),    # Left mouth corner
-                (shape.part(54).x, shape.part(54).y)     # Right mouth corner
-            ], dtype="double")
-
-            # 3D model points.
-            model_points = np.array([
-                (0.0, 0.0, 0.0),          # Nose tip
-                (0.0, -330.0, -65.0),     # Chin
-                (-225.0, 170.0, -135.0),  # Left eye left corner
-                (225.0, 170.0, -135.0),   # Right eye right corner
-                (-150.0, -150.0, -125.0), # Left mouth corner
-                (150.0, -150.0, -125.0)   # Right mouth corner
-            ])
-
-            # Camera internals
-            focal_length = size[1]
-            center = (size[1]/2, size[0]/2)
-            camera_matrix = np.array(
-                [[focal_length, 0, center[0]],
-                 [0, focal_length, center[1]],
-                 [0, 0, 1]], dtype="double"
-            )
-
-            # logger.info("Camera Matrix :\n {0}".format(camera_matrix))
-
-            dist_coeffs = np.zeros((4,1))  # Assuming no lens distortion
-            (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
-
-            if not success:
-                logger.info("Error determining PnP {}".format(success))
-                continue
-
-            logger.debug("Rotation Vector:\n %s", rotation_vector)
-            logger.info("Translation Vector:\n {0}".format(translation_vector))
-
-            # Project a 3D point (0, 0, 1000.0) onto the image plane.
-            # We use this to draw a line sticking out of the nose
-            (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
-
-            for p in image_points:
-                cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
-
-            p1 = (int(image_points[0][0]), int(image_points[0][1]))
-            p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
-            cv2.line(im, p1, p2, (255,0,0), 2)
-
-            rotMatrix = np.zeros([3,3])
-            cv2.Rodrigues(rotation_vector, rotMatrix, jacobian=0)
-
-            # Find rotation: https://stackoverflow.com/a/15029416
-            # not used anymore :-)
-            # rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
-            # ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
-            # rz = np.arctan2(rotMatrix[1,0], rotMatrix[0,0])
-            # logger.info("rotation {} {} {}".format(rx, ry, rz))
-            # ry = - np.arcsin(rotMatrix[0,2])
-            # rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
-            # rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
-            # logger.info("rotation ml {} {} {}".format(rx, ry, rz))  # seems better?
-            viewDirectionVector = np.dot(np.array([0.0, 0.0, 1.0]), rotMatrix)
-
-            if not args.hide_preview:
-                # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
-                mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
-                mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
-                mapPosZ = int((translation_vector[2] + 0) / 10000 * 40)
-                cv2.circle(im, (mapPosZ + 10, mapPosX + 10), 2, (0,0,255), -1)
-                cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
-                # make it an _amazing_ stick figurine for the side view
-                cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60, mapPosY + 20), (0,0,255), 1)
-                cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 55, mapPosY + 25), (0,0,255), 1)
-                cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 65, mapPosY + 25), (0,0,255), 1)
-                cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 55, mapPosY + 10), (0,0,255), 1)
-                cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 65, mapPosY + 10), (0,0,255), 1)
-                # draw rotation vector
-                cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)
-
-                cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(viewDirectionVector[2] * 100), mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255,255,0), 1)
-                cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(viewDirectionVector[2] * 100), mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255,0,255), 1)
-
-            # Translation vector gives position in space:
-            # x, y, z: 0,0,0 is center of camera
-            # line: (x,y,z) = f(a) = (t1 + r1*a, t2 + r2*a, t3 + r3*a)
-            # Screen: (x,y,z) = (x,y,0)
-            # Intersection:
-            # x = t1 + r1 * a
-            # y = t2 + r2 * a
-            # z = t3 + r3 * a = 0
-            # => a = -t3 / r3
-            # substitute found a in x,y
-
-            # seems to be wrong?
-            a = - translation_vector[2]  # / rotation_vector[2]
-            x = translation_vector[0] + rotation_vector[0] * a
-            y = translation_vector[1] + rotation_vector[1] * a
-            logger.warn("First {} {},{}".format(a,x,y))
-            a = - translation_vector[2]  # / viewDirectionVector[2]
-            x = translation_vector[0] + viewDirectionVector[0] * a
-            y = translation_vector[1] + viewDirectionVector[1] * a
-            logger.warn("Second {} {},{}".format(a,x,y))
-            point = np.array([x,y])
-
-            currentPoint = point
-            currentPoints.append(point)
-
-    td3 = time.time()
-    logger.debug("Timer: All other face drawing stuff in %fs", td3-td2)
-
-    # TODO only draw nose line now, so we can change color depending whether on screen or not
-
-    # processed all faces, now draw on screen:
     te1 = time.time()
+    result = pointsQueue.get()
+    im = result['im']
+    currentPoint = result['currentPoint']
+    currentPoints = result['currentPoints']
+
     if not args.hide_preview:
         # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
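The hunk above is the heart of the commit: a bounded producer/consumer pipeline in which captureVideo pushes frames into photoQueue, a pool of captureFacesPoints workers turns frames into pose results on pointsQueue, and the main loop consumes whatever result is ready. Stripped of the vision code, the topology looks roughly like this (grab_frame and estimate_pose are hypothetical stand-ins):

    import multiprocessing
    import Queue  # Python 2: Full/Empty exception types

    photoQueue = multiprocessing.Queue(maxsize=4)
    pointsQueue = multiprocessing.Queue(maxsize=4)

    def producer():
        while True:
            frame = grab_frame()              # hypothetical camera read
            try:
                photoQueue.put_nowait(frame)  # drop frames if workers lag behind
            except Queue.Full:
                pass

    def worker(i):
        while True:
            frame = photoQueue.get(block=True, timeout=10)
            try:
                pointsQueue.put_nowait(estimate_pose(frame))  # hypothetical
            except Queue.Full:
                pass

    if __name__ == '__main__':
        procs = [multiprocessing.Process(target=worker, args=(i,)) for i in range(2)]
        procs.append(multiprocessing.Process(target=producer))
        for p in procs:
            p.daemon = True  # children die with the main process
            p.start()
        while True:
            result = pointsQueue.get()  # main loop blocks until a result is ready

One consequence of this design is that results from different workers may arrive out of capture order; the commit accepts that in exchange for keeping the render loop unblocked.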
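The screen-intersection comments inside the worker are worth unpacking: the head sits at t = (t1, t2, t3) with the gaze along direction r = (r1, r2, r3), and the screen is approximated by the plane z = 0, so t3 + r3*a = 0 gives a = -t3/r3, after which x = t1 + r1*a and y = t2 + r2*a. The committed code sets a = -translation_vector[2] with the division by viewDirectionVector[2] commented out (and flags it "seems to be wrong?"); that shortcut is only exact when r3 = 1, i.e. when looking straight along the camera axis. A worked example of the full formula with made-up numbers:

    import numpy as np

    t = np.array([100.0, -50.0, 800.0])  # head position in camera coordinates
    r = np.array([0.1, 0.05, -0.8])      # view direction (the code's viewDirectionVector)

    a = -t[2] / r[2]        # t3 + r3*a = 0  =>  a = 1000.0
    x = t[0] + r[0] * a     # 200.0
    y = t[1] + r[1] * a     # 0.0  -> gaze hits the z = 0 plane at (200, 0)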
@@ -469,17 +519,18 @@ while True:
     # after we collected all new metrics, blur them for smoothness
     # and add to all metrics collected
     tm3 = time.time()
-    metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
+    # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
+    metrics = metrics + newMetrics
     tm4 = time.time()
     logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)

     # Display webcam image with overlays
     te2 = time.time()
-    logger.debug("Drew on screen in %fs", te2-te1)
     if not args.hide_preview:
         cv2.imshow("Output", im)
     te3 = time.time()
     logger.debug("showed webcam image in %fs", te3-te2)
+    logger.debug("Rendering took %fs", te3-te1)

     # blur smooth the heatmap
     # logger.debug("Max blurred metrics: %f", np.max(metrics))
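Swapping the per-frame gaussian_filter call for plain accumulation moves the blur out of the hot loop. Because a Gaussian blur is linear, summing raw frames and blurring once gives the same heatmap as blurring every frame and summing, so nothing is lost by deferring the blur to draw time. A sketch of the deferred version (scipy assumed, matching the gaussian_filter already used by the script; newMetrics stands for one frame's contribution):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    metrics = np.zeros((720, 1280))

    # per frame: cheap accumulation only
    metrics = metrics + newMetrics

    # at draw time: one blur over the accumulated heatmap
    display = gaussian_filter(metrics, sigma=13)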
@@ -584,6 +635,8 @@ while True:

     transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)

+    duration = time.time()-te1
+    fps = 1/duration
+    logger.info("Rendering loop %fs %ffps", duration, fps)

 cv2.destroyAllWindows()