First working multiprocessing
parent 02627cf897
commit fd76f7a97d
1 changed file with 212 additions and 159 deletions
head_pose.py
@@ -21,9 +21,11 @@ else:
    import tkinter as Tk
import time
import datetime
import Queue

import coloredlogs
import argparse
import multiprocessing

argParser = argparse.ArgumentParser(description='Draw a heatmap')
argParser.add_argument(
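Since this is Python 2, the capitalised Queue module is imported purely for its exception classes: multiprocessing.Queue raises Queue.Full and Queue.Empty on non-blocking operations, which the capture and worker loops further down catch. A minimal sketch of that interplay, independent of this file:

import Queue
import multiprocessing

q = multiprocessing.Queue(maxsize=1)
q.put_nowait('frame 1')
try:
    q.put_nowait('frame 2')   # the queue is already full
except Queue.Full:
    print('dropped a frame, just like captureVideo() below does')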
@@ -65,6 +67,12 @@ argParser.add_argument(
    default=0,
    help="Nr of frames to keep in queue (adds a delay)"
)
argParser.add_argument(
    '--processes',
    type=int,
    default=4,
    help="Nr of total processes (min 3)"
)

args = argParser.parse_args()
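The new --processes flag counts every process in the program, not just the analysis workers: as the spawning code later in this diff shows, args.processes - 2 worker processes are started, with the remaining two slots taken by the dedicated capture process and the main rendering loop, hence the "min 3" in the help text. A small illustration of that budget:

import argparse

argParser = argparse.ArgumentParser(description='Draw a heatmap')
argParser.add_argument('--processes', type=int, default=4,
                       help="Nr of total processes (min 3)")
args = argParser.parse_args(['--processes', '5'])
print(args.processes - 2)   # 3 analysis workers; capture + main loop use the other two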
@@ -75,14 +83,6 @@ coloredlogs.install(

logger = logging.getLogger(__name__)

# Read Image
#c = cv2.VideoCapture(args.camera)
c = cv2.VideoCapture(args.camera)
# set camera resolution
c.set(3, 1280)
c.set(4, 720)
#c.set(3, 480)
#c.set(4, 320)
# im = cv2.imread("headPose.jpg");
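The bare numbers in c.set(3, 1280) and c.set(4, 720) are OpenCV capture property IDs. A more readable equivalent sketch using the named constants (cv2.CAP_PROP_* in OpenCV 3+; older cv2 builds contemporary with this Python 2 code spell them cv2.cv.CV_CAP_PROP_*):

import cv2

c = cv2.VideoCapture(0)                 # 0: default camera, standing in for args.camera
c.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)   # property id 3
c.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)   # property id 4
print(c.get(cv2.CAP_PROP_FPS))          # property id 5, queried later in captureVideo()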
@@ -93,8 +93,6 @@ if args.output_dir:
else:
    lastMetricsFilename = None

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

screenDrawCorners = np.array([[10,60], [90, 60], [10, 110], [90, 110]])
@@ -271,16 +269,26 @@ if args.output_dir:
if args.queue_length:
    imageQueue = []

while True:
    if args.hide_preview:
        # if preview is hidden, we can always re-raise the image window
        imageWindowRoot.lift()
lock = multiprocessing.Lock()
photoQueue = multiprocessing.Queue(maxsize=args.processes)
pointsQueue = multiprocessing.Queue(maxsize=args.processes)

def captureFacesPoints(i):
    logger.info("Start capturer {}".format(i))
    # dedicated detector & predictor instances:
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    while True:
        t1 = time.time()
        _, im = c.read()
        im = photoQueue.get(block=True, timeout=10)
        if im is None:
            continue
        logger.debug("Got photo in {}".format(i))
        size = im.shape
        t2 = time.time()
        logger.debug("Captured frame in %fs", t2-t1)

        # Docs: Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
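This is the core of the commit: a producer/consumer pipeline in which captureFacesPoints() workers pull frames from photoQueue and push results to pointsQueue. Each worker builds its own dlib detector and predictor, presumably because dlib objects cannot be shared across process boundaries. A stripped-down sketch of the same pattern, with a hypothetical analyze() standing in for the dlib work:

import multiprocessing
import Queue

photoQueue = multiprocessing.Queue(maxsize=4)
pointsQueue = multiprocessing.Queue(maxsize=4)

def analyze(im):
    # stand-in for the detector/predictor pipeline
    return {'im': im}

def worker(i):
    while True:
        im = photoQueue.get(block=True, timeout=10)   # raises Queue.Empty if starved
        if im is None:
            continue
        try:
            pointsQueue.put_nowait(analyze(im))
        except Queue.Full:
            pass   # drop the result rather than stall the pipeline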
@@ -340,7 +348,7 @@ while True:
            continue

        logger.debug("Rotation Vector:\n %s", rotation_vector)
        logger.info("Translation Vector:\n {0}".format(translation_vector))
        logger.debug("Translation Vector:\n {0}".format(translation_vector))

        # Project a 3D point (0, 0, 1000.0) onto the image plane.
        # We use this to draw a line sticking out of the nose
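The comment refers to cv2.projectPoints, which maps 3D model points through the solved pose onto the image plane; the projected nose-end point and the detected nose tip give the two ends of the drawn line. A minimal sketch with assumed intrinsics and a neutral pose:

import numpy as np
import cv2

rotation_vector = np.zeros((3, 1))                     # assumed solved pose
translation_vector = np.array([[0.0], [0.0], [500.0]])
camera_matrix = np.array([[640.0, 0.0, 320.0],         # assumed focal length / optical center
                          [0.0, 640.0, 240.0],
                          [0.0, 0.0, 1.0]])
dist_coeffs = np.zeros((4, 1))                         # assume no lens distortion

nose_end_3d = np.array([(0.0, 0.0, 1000.0)])
nose_end_2d, _ = cv2.projectPoints(nose_end_3d, rotation_vector, translation_vector,
                                   camera_matrix, dist_coeffs)
print(nose_end_2d.ravel())                             # pixel coordinates of the line's far end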
@@ -399,14 +407,14 @@ while True:
        # substitute found a in x,y

        # seems to be wrong?
        a = - translation_vector[2]  # / rotation_vector[2]
        x = translation_vector[0] + rotation_vector[0] * a
        y = translation_vector[1] + rotation_vector[1] * a
        logger.warn("First {} {},{}".format(a,x,y))
        # a = - translation_vector[2]  # / rotation_vector[2]
        # x = translation_vector[0] + rotation_vector[0] * a
        # y = translation_vector[1] + rotation_vector[1] * a
        # logger.warn("First {} {},{}".format(a,x,y))
        a = - translation_vector[2]  # / viewDirectionVector[2]
        x = translation_vector[0] + viewDirectionVector[0] * a
        y = translation_vector[1] + viewDirectionVector[1] * a
        logger.warn("Second {} {},{}".format(a,x,y))
        # logger.warn("Second {} {},{}".format(a,x,y))
        point = np.array([x,y])

        currentPoint = point
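What this block computes is a ray/plane intersection: start at the head position (translation_vector), step along a direction vector until the screen plane z = 0, i.e. solve t_z + v_z * a = 0 for a, then substitute a back into x and y. The "seems to be wrong?" variant walked along rotation_vector, which is an axis-angle pose rather than a direction, so the commit comments it out in favour of viewDirectionVector; note the division by the direction's z component is still commented out in both. A self-contained sketch of the full formula:

import numpy as np

def intersect_screen_plane(position, direction):
    # point where the ray position + a * direction crosses the z = 0 plane
    a = -position[2] / direction[2]
    return np.array([position[0] + direction[0] * a,
                     position[1] + direction[1] * a])

# hypothetical values: head half a metre from the screen, gaze slightly left
print(intersect_screen_plane(np.array([0.0, 0.0, 500.0]),
                             np.array([-0.1, 0.0, -1.0])))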
@@ -417,8 +425,50 @@ while True:

        # TODO only draw nose line now, so we can change color depending on whether it is on screen or not

        # processed all faces, now draw on screen:
        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'im': im}

        try:
            pointsQueue.put_nowait(results)
        except Queue.Full as e:
            logger.critical("Result queue full?")
        # not applicable to multiprocessing.Queue in py2.7: photoQueue.task_done()

def captureVideo():
    c = cv2.VideoCapture(args.camera)
    # set camera resolution
    c.set(3, 1280)
    c.set(4, 720)
    logger.debug("Camera FPS: {}".format(c.get(5)))

    while True:
        _, im = c.read()
        try:
            photoQueue.put_nowait(im)
        except Queue.Full as e:
            logger.debug("Photo queue full")
            time.sleep(.05)
        logger.debug("Queue sizes: image: {}, points: {}".format(photoQueue.qsize(), pointsQueue.qsize()))


processes = []
for i in range(args.processes - 2):
    p = multiprocessing.Process(target=captureFacesPoints, args=(i,))
    p.daemon = True
    p.start()
    processes.append(p)

p = multiprocessing.Process(target=captureVideo, args=())
p.daemon = True
p.start()
processes.append(p)


while True:
    te1 = time.time()
    result = pointsQueue.get()
    im = result['im']
    currentPoint = result['currentPoint']
    currentPoints = result['currentPoints']

    if not args.hide_preview:
        # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
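Setting p.daemon = True marks each child as a daemon, so it is terminated with the parent instead of being orphaned when the main loop exits; no explicit join or shutdown signalling is needed for this always-on pipeline. A minimal illustration of the daemon behaviour:

import multiprocessing
import time

def heartbeat():
    while True:
        print('worker alive')
        time.sleep(1)

if __name__ == '__main__':
    p = multiprocessing.Process(target=heartbeat)
    p.daemon = True   # child dies automatically when the parent exits
    p.start()
    time.sleep(2)     # parent exits here and takes the daemon down with it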
@@ -469,17 +519,18 @@ while True:
    # after we collected all new metrics, blur them for smoothness
    # and add to all metrics collected
    tm3 = time.time()
    metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
    # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)
    metrics = metrics + newMetrics
    tm4 = time.time()
    logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)

    # Display webcam image with overlays
    te2 = time.time()
    logger.debug("Drew on screen in %fs", te2-te1)
    if not args.hide_preview:
        cv2.imshow("Output", im)
    te3 = time.time()
    logger.debug("showed webcam image in %fs", te3-te2)
    logger.debug("Rendering took %fs", te3-te1)

    # blur smooth the heatmap
    # logger.debug("Max blurred metrics: %f", np.max(metrics))
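The performance change here: gaussian_filter (scipy.ndimage's Gaussian blur) was being applied to newMetrics on every frame with sigma = 13, which is expensive on a screen-sized array, so the commit accumulates raw metrics instead and leaves smoothing for later. A sketch of the cheaper ordering, assuming metrics is the heatmap accumulator:

import numpy as np
from scipy.ndimage import gaussian_filter

metrics = np.zeros((720, 1280))

# per frame: accumulate raw gaze hits, which is cheap
metrics[360, 640] += 1.0

# only when actually rendering the heatmap: blur once
heatmap = gaussian_filter(metrics, sigma=13)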
@@ -584,6 +635,8 @@ while True:

    transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)


    duration = time.time()-te1
    fps = 1/duration
    logger.info("Rendering loop %fs %ffps", duration, fps)

cv2.destroyAllWindows()
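create_perspective_transform appears to be a project-local helper mapping heatmap coordinates onto the small preview floorplan; for four point pairs, OpenCV's built-in cv2.getPerspectiveTransform produces the same kind of 3x3 homography. A sketch with assumed source corners:

import numpy as np
import cv2

src = np.float32([[0, 0], [1280, 0], [0, 720], [1280, 720]])   # assumed screen corners
dst = np.float32([[10, 60], [90, 60], [10, 110], [90, 110]])   # screenDrawCorners from this file
transform = cv2.getPerspectiveTransform(src, dst)              # 3x3 homography matrix
print(transform)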