#!/usr/bin/env python
import cv2
import dlib
import numpy as np

# Open the default webcam (a still image can be used instead, see below)
c = cv2.VideoCapture(0)
# im = cv2.imread("headPose.jpg")

predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

while True:
    ret, im = c.read()
    if not ret:
        break
    size = im.shape

    # Docs: Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(im, 1)
    print("Number of faces detected: {}".format(len(dets)))
    if len(dets) > 0:
        for d in dets:
            shape = predictor(im, d)
            print(shape.part(30).x, shape.part(54))

            # 2D image points. If you change the image, you need to change this vector.
            image_points = np.array([
                (shape.part(30).x, shape.part(30).y),  # Nose tip
                (shape.part(8).x, shape.part(8).y),    # Chin
                (shape.part(36).x, shape.part(36).y),  # Left eye left corner
                (shape.part(45).x, shape.part(45).y),  # Right eye right corner
                (shape.part(48).x, shape.part(48).y),  # Left mouth corner
                (shape.part(54).x, shape.part(54).y)   # Right mouth corner
            ], dtype="double")

            # 3D model points.
            model_points = np.array([
                (0.0, 0.0, 0.0),           # Nose tip
                (0.0, -330.0, -65.0),      # Chin
                (-225.0, 170.0, -135.0),   # Left eye left corner
                (225.0, 170.0, -135.0),    # Right eye right corner
                (-150.0, -150.0, -125.0),  # Left mouth corner
                (150.0, -150.0, -125.0)    # Right mouth corner
            ])

            # Camera internals
            focal_length = size[1]
            center = (size[1] / 2, size[0] / 2)
            camera_matrix = np.array(
                [[focal_length, 0, center[0]],
                 [0, focal_length, center[1]],
                 [0, 0, 1]], dtype="double"
            )
            # print("Camera Matrix :\n {0}".format(camera_matrix))

            dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
            (success, rotation_vector, translation_vector) = cv2.solvePnP(
                model_points, image_points, camera_matrix, dist_coeffs,
                flags=cv2.SOLVEPNP_ITERATIVE)
            if not success:
                print("Error determining PnP", success)
                continue

            print("Rotation Vector:\n {0}".format(rotation_vector))
            print("Translation Vector:\n {0}".format(translation_vector))

            # Project a 3D point (0, 0, 1000.0) onto the image plane.
            # We use this to draw a line sticking out of the nose.
            (nose_end_point2D, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
                translation_vector, camera_matrix, dist_coeffs)

            for p in image_points:
                cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            p1 = (int(image_points[0][0]), int(image_points[0][1]))
            p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
            cv2.line(im, p1, p2, (255, 0, 0), 2)

            rotMatrix, _ = cv2.Rodrigues(rotation_vector)

            # Find rotation: https://stackoverflow.com/a/15029416
            rx = np.arctan2(rotMatrix[2, 1], rotMatrix[2, 2])
            ry = np.arctan2(-rotMatrix[2, 0],
                            np.sqrt(np.square(rotMatrix[2, 1]) + np.square(rotMatrix[2, 2])))
            rz = np.arctan2(rotMatrix[1, 0], rotMatrix[0, 0])
            print("rotation", rx, ry, rz)

            ry = -np.arcsin(rotMatrix[0, 2])
            rx = np.arctan2(rotMatrix[1, 2] / np.cos(ry), rotMatrix[2, 2] / np.cos(ry))
            rz = np.arctan2(rotMatrix[0, 1] / np.cos(ry), rotMatrix[0, 0] / np.cos(ry))
            print("rotation ml", rx, ry, rz)  # seems better?
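            # Added sketch (not in the original script): the angles above are in
            # radians, which is hard to eyeball while the loop runs; degrees read
            # more naturally when checking the two decompositions against each other.
            print("rotation ml (deg)", np.degrees(rx), np.degrees(ry), np.degrees(rz))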
            # rotatedVector = np.dot(rotMatrix, translation_vector)
            # print("rvec", rotatedVector)

            # Draw a little floorplan: x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
            mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
            mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
            mapPosZ = int((translation_vector[2] + 0) / 10000 * 40)
            cv2.circle(im, (mapPosZ + 10, mapPosX + 10), 2, (0, 0, 255), -1)
            cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0, 0, 255), -1)
            # Make it an _amazing_ stick figurine for the side view
            cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60, mapPosY + 20), (0, 0, 255), 1)
            cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 55, mapPosY + 25), (0, 0, 255), 1)
            cv2.line(im, (mapPosZ + 60, mapPosY + 20), (mapPosZ + 65, mapPosY + 25), (0, 0, 255), 1)
            cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 55, mapPosY + 10), (0, 0, 255), 1)
            cv2.line(im, (mapPosZ + 60, mapPosY + 15), (mapPosZ + 65, mapPosY + 10), (0, 0, 255), 1)

            # Draw the rotation vector
            cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0, 0, 255), -1)
            # cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(rotation_vector[2] * 5), mapPosX + 10 + int(rotation_vector[0] * 5)), (255, 255, 0), 1)
            # cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(rotation_vector[2] * 5), mapPosY + 10 + int(rotation_vector[1] * 200)), (255, 255, 0), 1)
            # print(rotMatrix)
            viewDirectionVector = np.dot(np.array([0.0, 0.0, 1000.0]), rotMatrix)
            cv2.line(im, (mapPosZ + 10, mapPosX + 10),
                     (mapPosZ + 10 + int(viewDirectionVector[2] * 100),
                      mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255, 255, 0), 1)
            cv2.line(im, (mapPosZ + 60, mapPosY + 10),
                     (mapPosZ + 60 + int(viewDirectionVector[2] * 100),
                      mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255, 0, 255), 1)

            # Translation vector gives the position in space:
            # x, y, z: (0, 0, 0) is the center of the camera.
            # Line:   (x, y, z) = f(a) = (t1 + r1*a, t2 + r2*a, t3 + r3*a)
            # Screen: (x, y, z) = (x, y, 0)
            # Intersection:
            #   x = t1 + r1 * a
            #   y = t2 + r2 * a
            #   z = t3 + r3 * a = 0  =>  a = -t3 / r3
            # Substitute the found a in x, y:
            a = -translation_vector[2] / rotation_vector[2]
            x = translation_vector[0] + rotation_vector[0] * a
            y = translation_vector[1] + rotation_vector[1] * a

            a = -translation_vector[2] / viewDirectionVector[2]
            x = translation_vector[0] + viewDirectionVector[0] * a
            y = translation_vector[1] + viewDirectionVector[1] * a
            print(a, x, y)

    # Draw frames for the little floorplan (10 -> 50) and the side plan (60 -> 100), 40x40 px each
    cv2.rectangle(im, (9, 9), (51, 51), (255, 255, 255), 1)
    cv2.rectangle(im, (59, 9), (101, 51), (255, 255, 255), 1)
    cv2.line(im, (10, 10), (10, 50), (200, 200, 200), 2)
    cv2.line(im, (60, 10), (60, 50), (200, 200, 200), 2)

    # Display image
    cv2.imshow("Output", im)
    keyPress = cv2.waitKey(5)
    if keyPress == 27:
        break

c.release()
cv2.destroyAllWindows()
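
# A minimal standalone sketch (my addition, not part of the original script) of
# the screen-intersection math derived in the comments above: intersect the line
# p(a) = t + a*r with the camera plane z = 0. The function name is hypothetical.
def intersect_screen_plane(t, r):
    """Return the (x, y) where the ray t + a*r crosses z = 0, assuming r[2] != 0."""
    a = -t[2] / r[2]
    return t[0] + a * r[0], t[1] + a * r[1]

# Example: a head at (100, 50, 1000) mm looking straight back along -z crosses
# the screen plane at its own (x, y):
#   intersect_screen_plane([100.0, 50.0, 1000.0], [0.0, 0.0, -1.0])  # -> (100.0, 50.0)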