Fixes and sort of mini fade
This commit is contained in:
parent df6f7ce8db
commit d58e4fed01

1 changed file with 10 additions and 6 deletions

head_pose.py
@@ -125,7 +125,7 @@ if metrics is None:
     metrics = np.zeros((metricsSize[1], metricsSize[0])) # (y, x)
     logger.warn("New metrics")
 
-screenDrawCorners = np.array([[0,0], [1919,0], [0, 1079], [1919,1079]])
+screenDrawCorners = np.array([[0,0], [metricsSize[0]-1,0], [0, metricsSize[1]-1], [metricsSize[0]-1,metricsSize[1]-1]])
 
 def create_perspective_transform_matrix(src, dst):
     """ Creates a perspective transformation matrix which transforms points
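The corner array now follows metricsSize instead of the hard-coded 1919x1079 screen, so the mapping onto the metrics canvas stays correct when the canvas size changes. A minimal sketch of the same idea using OpenCV's built-in helpers rather than the script's own create_perspective_transform_matrix (the metricsSize and srcCorners values below are made up for illustration):

import cv2
import numpy as np

metricsSize = (1920, 1080)  # assumed (width, height)
screenDrawCorners = np.float32([[0, 0],
                                [metricsSize[0] - 1, 0],
                                [0, metricsSize[1] - 1],
                                [metricsSize[0] - 1, metricsSize[1] - 1]])
# hypothetical source quad, e.g. calibrated screen corners as seen by the camera
srcCorners = np.float32([[100, 80], [1800, 60], [120, 1000], [1830, 1040]])

M = cv2.getPerspectiveTransform(srcCorners, screenDrawCorners)
pt = cv2.perspectiveTransform(np.float32([[[960, 540]]]), M)
print(pt)  # where the centre of the source quad lands on the metrics canvas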
@@ -363,6 +363,7 @@ def captureFacesPoints(i):
     (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
 
     for p in image_points:
+        # face points
         cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
 
     p1 = ( int(image_points[0][0]), int(image_points[0][1]))
@@ -382,7 +383,7 @@ def captureFacesPoints(i):
     # rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
     # rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
     # logger.info("rotation ml {} {} {}".format(rx, ry, rz) )# seems better?
-    viewDirectionVector = np.dot(np.array([0.0, 0.0, 1.0]), rotMatrix)
+    viewDirectionVector = np.dot(np.array([0.0, 0.0, 100.0]), rotMatrix)
 
     if not args.hide_preview:
         # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
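The view direction is the camera-space forward axis rotated by the head pose; the change scales it from a unit vector to a length of 100, presumably so later arithmetic works with a longer ray. A small self-contained sketch (the rotation_vector here is an assumed stand-in for solvePnP output):

import cv2
import numpy as np

rotation_vector = np.array([[0.1], [0.2], [0.0]])  # assumed head rotation (Rodrigues vector)
rotMatrix, _ = cv2.Rodrigues(rotation_vector)
viewDirectionVector = np.dot(np.array([0.0, 0.0, 100.0]), rotMatrix)
print(viewDirectionVector)  # a length-100 ray pointing out of the face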
@@ -439,7 +440,7 @@ def captureFacesPoints(i):
     try:
         pointsQueue.put_nowait(results)
     except Queue.Full as e:
-        logger.critical("Reslt queue full?")
+        logger.warn("Result queue full?")
     # not applicable to multiprocessing.queue in p2.7: photoQueue.task_done()
 
 def captureVideo():
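put_nowait() raises Full as soon as the queue is at capacity, so a slow consumer costs the producer one dropped result and a warning instead of blocking the capture loop. A minimal sketch of that hand-off (Python 3 spelling; the script itself uses Python 2.7's Queue module, and the maxsize is an assumption):

import multiprocessing
import queue

pointsQueue = multiprocessing.Queue(maxsize=2)  # assumed capacity

def push(results):
    try:
        pointsQueue.put_nowait(results)
    except queue.Full:
        print("Result queue full?")  # drop this frame's results rather than block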
@@ -447,6 +448,8 @@ def captureVideo():
     # set camera resoltion
     c.set(3, 1280)
     c.set(4, 720)
+    c.set(3, 960)
+    c.set(4, 540)
     logger.debug("Camera FPS: {}".format(c.get(5)))
 
     while True:
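The numeric property IDs in c.set()/c.get() are OpenCV VideoCapture properties: 3 is the frame width, 4 the frame height, and 5 the FPS. With the named constants, the new 960x540 setting reads as follows (the camera index 0 is an assumption):

import cv2

c = cv2.VideoCapture(0)
c.set(cv2.CAP_PROP_FRAME_WIDTH, 960)    # property 3
c.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)   # property 4
print("Camera FPS: {}".format(c.get(cv2.CAP_PROP_FPS)))  # property 5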
@@ -520,7 +523,7 @@ while True:
     # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
     # from 1920x1080 to 80x50
     if not args.hide_preview:
-        miniTargetPoint = (int(targetPoint[0] / 1920 * 80 + 10), int(targetPoint[1] / 1080 * 50 + 60))
+        miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10), int(targetPoint[1] / metricsSize[1] * 50 + 60))
         cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
     targetInt = (int(targetPoint[0]), int(targetPoint[1]))
     # check if point fits on screen:
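The mini-map coordinate now follows metricsSize instead of the fixed 1920x1080 canvas, so the green dot stays proportional to the 80x50 floorplan drawn at offset (10, 60). A quick worked example (metricsSize and targetPoint are assumed values):

metricsSize = (1920, 1080)      # assumed (width, height)
targetPoint = (960.0, 540.0)    # assumed target in metrics coordinates
miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10),
                   int(targetPoint[1] / metricsSize[1] * 50 + 60))
print(miniTargetPoint)          # (50, 85): the centre of the canvas maps to the centre of the mini-map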
@@ -572,6 +575,7 @@ while True:
 
     metrics = metrics + newMetrics*diffT
     newMetrics *= (1-diffT)
+    print('MAXES', np.max(metrics), np.max(newMetrics))
 
     # smooth impact of first hits by having at least 0.05
     normalisedMetrics = metrics / (max(255*7 ,np.max(metrics)))
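This hunk is the "sort of mini fade" from the commit message: every frame a diffT-sized slice of the fresh hit map is folded into the accumulated metrics while the fresh buffer itself decays, and the new print reports both maxima. A self-contained sketch of the blend (array shape, fill value, and diffT are assumptions):

import numpy as np

metrics = np.zeros((1080, 1920))            # accumulated heat map (y, x)
newMetrics = np.full((1080, 1920), 255.0)   # assumed fresh hits for one frame
diffT = 0.1                                 # assumed frame-time fraction

for _ in range(5):
    metrics = metrics + newMetrics * diffT  # blend a slice of the new hits in
    newMetrics *= (1 - diffT)               # fade the fresh buffer
    print('MAXES', np.max(metrics), np.max(newMetrics))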
@@ -603,7 +607,7 @@ while True:
     # imageWindowRoot.update()
     tm24 = time.time()
     logger.debug("PIL image generated in %fs", tm24 - tm23)
-    logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)
+    # logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)
 
     if not args.hide_graph:
         te4 = time.time()