Detects head pose from webcam
This commit is contained in:
commit
8126e135e4
2 changed files with 112 additions and 0 deletions
17
README.md
Normal file
17
README.md
Normal file
|
@ -0,0 +1,17 @@
|
|||
Translates head poses of viewers into a heat map.
|
||||
|
||||
Head-pose detection adapted from [1]
|
||||
|
||||
TODO:
|
||||
- Camera calibration [2],[3]
|
||||
- Calibrating sequence for the screen/projection
|
||||
-
|
||||
|
||||
[1]: https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
|
||||
[2]: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
|
||||
[3]: https://docs.opencv.org/2.4/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
|
||||
|
||||
|
||||
# Install:
|
||||
|
||||
`wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2`
|
95
head_pose.py
Normal file
95
head_pose.py
Normal file
|
@ -0,0 +1,95 @@
|
|||
#!/usr/bin/env python
"""Estimate head pose from webcam frames with dlib landmarks + cv2.solvePnP.

For each detected face, solves a PnP problem between six canonical 3D model
points and their 2D landmark positions, then draws the landmarks and a line
projecting out of the nose tip to visualize head orientation. Press Esc to
quit.

Adapted from:
https://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
"""

import cv2
import dlib
import numpy as np

# Pre-trained 68-point facial landmark model (see README for download link).
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"

# Generic 3D head model points, nose tip at the origin (arbitrary units;
# only the relative geometry matters for pose estimation).
MODEL_POINTS = np.array([
    (0.0, 0.0, 0.0),           # Nose tip
    (0.0, -330.0, -65.0),      # Chin
    (-225.0, 170.0, -135.0),   # Left eye, left corner
    (225.0, 170.0, -135.0),    # Right eye, right corner
    (-150.0, -150.0, -125.0),  # Left mouth corner
    (150.0, -150.0, -125.0),   # Right mouth corner
])

# dlib 68-landmark indices corresponding to MODEL_POINTS, in the same order.
LANDMARK_IDS = (30, 8, 36, 45, 48, 54)


def _camera_matrix(size):
    """Approximate camera intrinsics from the frame shape.

    Uses the frame width as the focal length and the frame center as the
    principal point — a common approximation when no calibration is
    available (see the TODO about calibration in the README).
    """
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    return np.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double")


def _estimate_pose(shape, camera_matrix):
    """Solve PnP for one face.

    Returns (image_points, nose_end_point2D): the six 2D landmark
    coordinates used for the fit, and the projection of a 3D point
    1000 units in front of the nose (used to draw the direction line).
    """
    # 2D pixel coordinates of the six pose landmarks.
    image_points = np.array(
        [(shape.part(i).x, shape.part(i).y) for i in LANDMARK_IDS],
        dtype="double")

    dist_coeffs = np.zeros((4, 1))  # assume no lens distortion
    success, rotation_vector, translation_vector = cv2.solvePnP(
        MODEL_POINTS, image_points, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)
    if not success:
        # Degenerate landmark configuration; skip drawing for this face.
        return image_points, None

    print("Rotation Vector:\n {0}".format(rotation_vector))
    print("Translation Vector:\n {0}".format(translation_vector))

    # Project a 3D point (0, 0, 1000) onto the image plane; the segment from
    # the nose tip to this point visualizes where the head is pointing.
    nose_end_point2D, _jacobian = cv2.projectPoints(
        np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
        camera_matrix, dist_coeffs)
    return image_points, nose_end_point2D


def _draw_face(im, image_points, nose_end_point2D):
    """Draw the landmark dots and the nose-direction line onto the frame."""
    for p in image_points:
        cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
    if nose_end_point2D is not None:
        p1 = (int(image_points[0][0]), int(image_points[0][1]))
        p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
        cv2.line(im, p1, p2, (255, 0, 0), 2)


def main():
    """Capture webcam frames and overlay head-pose estimates until Esc."""
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(PREDICTOR_PATH)

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise RuntimeError("Cannot open webcam (device 0)")

    try:
        while True:
            # Check the grab result: a disconnected camera otherwise crashes
            # with an AttributeError on `im.shape` (im is None).
            ok, im = cap.read()
            if not ok:
                break

            camera_matrix = _camera_matrix(im.shape)
            print("Camera Matrix :\n {0}".format(camera_matrix))

            # Second argument 1: upsample the image once so smaller faces
            # can still be detected (per the dlib docs).
            dets = detector(im, 1)
            print("Number of faces detected: {}".format(len(dets)))

            for d in dets:
                shape = predictor(im, d)
                image_points, nose_end = _estimate_pose(shape, camera_matrix)
                _draw_face(im, image_points, nose_end)

            cv2.imshow("Output", im)
            # Mask to 8 bits: on some platforms waitKey returns extra high
            # bits, which would make the Esc (27) comparison never match.
            if cv2.waitKey(5) & 0xFF == 27:
                break
    finally:
        # Always release the camera and close windows, even on error.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
Loading…
Reference in a new issue