Test if we get more up-to-date results with different queuing system

This commit is contained in:
Ruben van de Ven 2020-10-02 14:09:20 +02:00
parent 16c37ee750
commit 21bda67665
1 changed file with 48 additions and 20 deletions


@@ -1,4 +1,4 @@
-from multiprocessing import Process, Queue
+from multiprocessing import Process, Queue, JoinableQueue
 from queue import Empty, Full
 import cv2
 import logging
@@ -28,11 +28,19 @@ font = ImageFont.truetype(fontfile, 30)
 font_s = ImageFont.truetype(fontfile, 20)
 countdown_font = ImageFont.truetype(fontfile, 160)
 
+class Request():
+    def __init__(self, image, cam_time = None, for_snapshot=False):
+        self.image = image
+        self.time = cam_time
+        self.for_snapshot = for_snapshot
+
 class Result():
-    def __init__(self, algorithm, image, confidence_threshold = 0.5):
+    def __init__(self, algorithm, image, time, for_snapshot, confidence_threshold = 0.5):
         self.algorithm = algorithm
         self.visualisation = image
         self.detections = []
+        self.time = time
+        self.for_snapshot = for_snapshot
         self.confidence_threshold = confidence_threshold
 
     def add_detection(self, startX, startY, endX, endY, confidence):
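
The new Request class travels with each frame so that the capture time and snapshot flag survive the trip through a worker and come back attached to the Result. A minimal sketch of that round trip, assuming the two classes above (the queue wiring and the image variable are placeholders, not the project's code):

    from multiprocessing import JoinableQueue
    import time

    q = JoinableQueue(maxsize=2)
    q.put(Request(image, cam_time=time.time()))   # producer side: image is a captured frame

    request = q.get()                             # worker side
    result = Result('dnn', request.image, request.time, request.for_snapshot)
    q.task_done()                                 # acknowledge, so a fresh frame can be offered
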
@@ -62,7 +70,7 @@ class Result():
         '''
         Draw on a specified canvas
         '''
-        color = draw_colors[self.algorithm]
+        color = draw_colors[self.algorithm]
         for detection in self.detections:
             self.draw_detection(draw, detection, color)
@@ -98,7 +106,7 @@ class Result():
         factor_y = height / self.visualisation.shape[0]
         inter = cv2.INTER_NEAREST if self.algorithm in ['dnn', 'haar'] else cv2.INTER_CUBIC
         img = cv2.resize(img, (width, height), interpolation=inter)
-        result = Result(self.algorithm, img, self.confidence_threshold)
+        result = Result(self.algorithm, img, self.time, self.for_snapshot, self.confidence_threshold)
         for d in self.detections:
             result.add_detection(
                 int(d['startX'] * factor_x),
@@ -131,6 +139,8 @@ def record(device_id, q1,q2, q3, q4, resolution, rotate):
             exit()
 
+        timestamp = time.time()
+
         if rotate is not None:
             image = cv2.rotate(image, rotate)
@@ -148,17 +158,22 @@ def record(device_id, q1,q2, q3, q4, resolution, rotate):
            # ignore if processing doesn't keep up
            pass
        try:
-            q2.put_nowait(image)
+            # frames generally come in faster than the processing takes.
+            # so we want to only put in a frame after processing is done
+            if q2._unfinished_tasks.get_value() == 0:
+                q2.put_nowait(Request(image, timestamp))
        except Full as e:
            # ignore if processing doesn't keep up
            pass
        try:
-            q3.put_nowait(image)
+            if q3._unfinished_tasks.get_value() == 0:
+                q3.put_nowait(Request(image, timestamp))
        except Full as e:
            # ignore if processing doesn't keep up
            pass
        try:
-            q4.put_nowait(image)
+            if q4._unfinished_tasks.get_value() == 0:
+                q4.put_nowait(Request(image, timestamp))
        except Full as e:
            # ignore if processing doesn't keep up
            pass
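
The gate above peeks at _unfinished_tasks, the internal semaphore that JoinableQueue increments on put() and decrements on task_done(). That is a private CPython detail, and Semaphore.get_value() is not implemented on every platform (macOS raises NotImplementedError). A more portable sketch of the same "only feed an idle worker" idea, using an explicit shared counter (GatedQueue and its names are hypothetical, not part of this commit):

    from multiprocessing import JoinableQueue, Value

    class GatedQueue:
        """Sketch: hand a frame to the worker only once the previous one is acknowledged."""
        def __init__(self, maxsize=2):
            self.queue = JoinableQueue(maxsize=maxsize)
            self.pending = Value('i', 0)  # frames handed out but not yet task_done()

        def offer(self, request):
            # producer side, called from record()
            with self.pending.get_lock():
                if self.pending.value == 0:
                    self.queue.put_nowait(request)  # may still raise queue.Full
                    self.pending.value += 1

        def task_done(self):
            # consumer side, called after processing a request
            with self.pending.get_lock():
                self.pending.value -= 1
            self.queue.task_done()
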
@@ -217,9 +232,10 @@ def process1_hog(in_q, out_q):
     process_this_frame = True
     while True:
-        if process_this_frame:
+        # if process_this_frame:
         # Grab a single frame of video
-        frame = in_q.get()
+        request = in_q.get()
+        frame = request.image
 
         frame = cv2.cvtColor(src=frame, code=cv2.COLOR_BGR2GRAY)
         viz_frame = cv2.resize(frame, (0, 0), fx=visualisation_factor, fy=visualisation_factor)
@@ -236,7 +252,8 @@ def process1_hog(in_q, out_q):
         # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
         # rgb_small_frame = det_frame[:, :, ::-1]
         # dets, scores, idxs = face_detector.run(rgb_small_frame, 1, -2)
-        dets, scores, idxs = face_detector.run(det_frame, 1, -2)
+        # dets, scores, idxs = face_detector.run(det_frame, 1, -2)
+        dets, scores, idxs = face_detector.run(det_frame)
         # print(dets, scores, idxs)
 
         hog_image_rescaled = (hog_image_rescaled.astype('float32') * 255).astype('uint8')
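
Assuming face_detector is dlib's get_frontal_face_detector(), the dropped arguments were upsample_num_times=1 (upsample the image once, finding smaller faces) and adjust_threshold=-2 (accept weaker detections). The bare call falls back to dlib's defaults, which is cheaper per frame:

    # assuming: face_detector = dlib.get_frontal_face_detector()
    dets, scores, idxs = face_detector.run(det_frame, 1, -2)  # upsample once, lowered threshold: more, smaller, weaker detections
    dets, scores, idxs = face_detector.run(det_frame)         # defaults (0, 0.0): no upsampling, standard threshold
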
@@ -250,7 +267,7 @@ def process1_hog(in_q, out_q):
         colored_image = cv2.cvtColor(colored_image, cv2.COLOR_RGB2BGR)
 
         # result = Result('hog', hog_image_rescaled, 0)
-        result = Result('hog', colored_image, 0)
+        result = Result('hog', colored_image, request.time, request.for_snapshot, 0)
 
         # Display the results
         for i, rectangle in enumerate(dets):
@@ -280,9 +297,10 @@ def process1_hog(in_q, out_q):
         # Display the resulting image
         out_q.put(result)
+        in_q.task_done()
 
         # print(cgray.shape)
-        process_this_frame = not process_this_frame
+        # process_this_frame = not process_this_frame
 
 def process2_dnn(in_q, out_q):
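
With the frame-skipping toggle commented out, all three workers now share the same blocking shape: take a Request, emit a Result, and acknowledge with task_done() so record() will offer the next frame. Roughly (detect is a stand-in for each algorithm's body, not a function in the commit):

    def worker(in_q, out_q, detect):
        # generic consumer loop: the shape shared by process1_hog, process2_dnn and process3_haar
        while True:
            request = in_q.get()       # blocks until record() offers a frame
            result = detect(request)   # a Result carrying request.time and request.for_snapshot
            out_q.put(result)
            in_q.task_done()           # re-opens record()'s gate for a fresh frame
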
@@ -298,7 +316,8 @@ def process2_dnn(in_q, out_q):
     logger.info("Loaded")
 
     while True:
-        image = in_q.get()
+        request = in_q.get()
+        image = request.image
         (h, w) = image.shape[:2]
 
         image_small = cv2.resize(image, (300, 300))
@@ -311,7 +330,7 @@ def process2_dnn(in_q, out_q):
         detections = net.forward()
         # idxs = np.argsort(detections[0])[::-1][:5]
 
-        result = Result('dnn', image)
+        result = Result('dnn', image, request.time, request.for_snapshot)
 
         for i in range(0, detections.shape[2]):
             # extract the confidence (i.e., probability) associated with the
@@ -329,6 +348,7 @@ def process2_dnn(in_q, out_q):
             # draw_detection(image, startX, startY, endX, endY, confidence, draw_colors['dnn'])
 
         out_q.put(result)
+        in_q.task_done()
 
 def process3_haar(in_q, out_q, cascade_file):
     from cffi import FFI
@@ -376,7 +396,8 @@ def process3_haar(in_q, out_q, cascade_file):
     faceCascade = cv2.CascadeClassifier(cascade_file)
 
     while True:
-        frame = in_q.get()
+        request = in_q.get()
+        frame = request.image
 
         (height_orig, width_orig) = frame.shape[:2]
         scale_factor = 4
@@ -419,7 +440,7 @@ def process3_haar(in_q, out_q, cascade_file):
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         img = cv2.resize(img, (width_orig, height_orig))
 
-        result = Result('haar', img)
+        result = Result('haar', img, request.time, request.for_snapshot)
 
         for face in faces:
             x1, y1, w, h = face
@@ -430,9 +451,9 @@ def process3_haar(in_q, out_q, cascade_file):
             # draw_detection(img, x1 * scale_factor, y1 * scale_factor, x2 * scale_factor, y2 * scale_factor, 1, draw_colors['haar'],)
             result.add_detection(x1 * scale_factor, y1 * scale_factor, x2 * scale_factor, y2 * scale_factor, 1)
         # print(img)
-
         out_q.put(result)
+        in_q.task_done()
 
 def draw_stats(image, results, padding):
     pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
@@ -564,6 +585,13 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
                 # TODO wait for frame to be processed. Eg. if I move and make a pic, it should use the last frame...
                 # SNAP!
                 # output_res = (image_res[0] *2, image_res[1] * 2)
+
+                req = Request(images[0], time.time(), for_snapshot=True)
+                q2.put_nowait(req)
+                q3.put_nowait(req)
+                q4.put_nowait(req)
+
                 output_res = image_res # no scaling needed anymore
                 pil_im = Image.fromarray(cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB))
                 pil_im = pil_im.resize(output_res)
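
These put_nowait() calls are why the webcam queues grow to maxsize=2 below: record() keeps at most one unacknowledged frame in flight, leaving the second slot free for the snapshot Request. They are not wrapped in try/except here, so queue.Full would propagate if both slots were somehow taken; a defensive variant might look like this (hypothetical, not part of the commit):

    from queue import Full

    for q in (q2, q3, q4):
        try:
            q.put_nowait(req)
        except Full:
            logger.warning('worker still busy; snapshot frame not queued')
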
@@ -605,9 +633,9 @@ def main(camera_id, rotate, fullscreen, cascade_file, output_dir):
     # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
     # TODO: queue maxsize, or preferably some sort of throttled queue (like zmq high water mark)
     q_webcam1 = Queue(maxsize=1)
-    q_webcam2 = Queue(maxsize=1)
-    q_webcam3 = Queue(maxsize=1)
-    q_webcam4 = Queue(maxsize=1)
+    q_webcam2 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
+    q_webcam3 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
+    q_webcam4 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
     q_process1 = Queue(maxsize=1)
     q_process2 = Queue(maxsize=1)
     q_process3 = Queue(maxsize=1)
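
Taken together, the commit replaces the fire-and-forget Queue(maxsize=1) hand-off with a JoinableQueue handshake: record() offers a frame only when the worker has acknowledged the previous one, so each Result reflects a recent frame rather than a stale backlog. A self-contained sketch of that handshake (toy producer/consumer, not the project's code; it leans on the same private _unfinished_tasks attribute as the diff, which is a CPython internal):

    from multiprocessing import Process, JoinableQueue
    import time

    def producer(q, frames=100):
        # stand-in for record(): offer a frame only when the consumer is idle
        for n in range(frames):
            if q._unfinished_tasks.get_value() == 0:  # same gate as record() above
                q.put((n, time.time()))
            time.sleep(1 / 30)                        # ~30 fps camera

    def consumer(q):
        # stand-in for a detection worker
        while True:
            n, t = q.get()
            time.sleep(0.1)                           # slow "detection"
            print(f'frame {n} processed {time.time() - t:.2f}s after capture')
            q.task_done()                             # re-open the gate

    if __name__ == '__main__':
        q = JoinableQueue(maxsize=2)
        Process(target=consumer, args=(q,), daemon=True).start()
        producer(q)

The printed staleness stays near one detection interval, which is the "more up-to-date results" the commit message is testing for.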