@@ -5,13 +5,17 @@ import logging
 import argparse
 import numpy as np
 import time
+from PIL import ImageFont, ImageDraw, Image
+import os

 draw_colors = {
-    'dnn': (255,0,0),
+    'hog': (255,0,0),
     'haar': (0,255,0),
-    'hog': (0,0,255),
+    'dnn': (0,0,255),
 }

+font = ImageFont.truetype("/home/ruben/Documents/Projecten/2018/PATH/presentation/lib/font/source-sans-pro/source-sans-pro-regular.ttf", 30)
+
 class Result():
     def __init__(self, algorithm, image, confidence_threshold = 0.5):
         self.algorithm = algorithm
@@ -28,13 +32,54 @@ class Result():
             'confidence': confidence
         })
         return self

+    def draw_detections(self):
+        color = draw_colors[self.algorithm]
+        cv2_im_rgb = cv2.cvtColor(self.visualisation,cv2.COLOR_BGR2RGB)
+        # Pass the image to PIL
+        pil_im = Image.fromarray(cv2_im_rgb)
+        draw = ImageDraw.Draw(pil_im, 'RGBA')
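+        # 'RGBA' mode lets the fill/outline colours carry a fourth alpha
+        # byte, so low-confidence detections can be drawn semi-transparently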
+
+        for detection in self.detections:
+            self.draw_detection(draw, detection, color)
+
+        self.visualisation = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
+
+    def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple):
+        width = 2
+
+        if detection['confidence'] > self.confidence_threshold:
+            # draw the bounding box of the face along with the associated
+            # probability
+            text = "{:.2f}%".format(detection['confidence'] * 100)
+            y = detection['startY'] - 40 if detection['startY'] - 40 > 10 else detection['startY'] + 10
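+            # i.e. place the label just above the box, unless that would fall
+            # off the top of the frame, in which case place it inside the box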
+
+            draw.text((detection['startX'], y), text, font=font, fill=color)
+            # cv2.putText(self.visualisation, text, (detection['startX'], y),
+            # cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2, lineType = cv2.LINE_AA)
+
+            alpha = 1
+        else:
+            # At least 30% opacity
+            alpha = max(.3, detection['confidence'])
+
+        color = list(color)
+        color.append(int(alpha*255))
+        color = tuple(color)
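+        # `color` is now an (R, G, B, A) tuple; ImageDraw accepts the alpha
+        # byte because the drawer was created in 'RGBA' mode above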
+
+        draw.rectangle((detection['startX'], detection['startY'], detection['endX'], detection['endY']), outline=color, width=width)
+        # cv2.rectangle(rect_img, (0, 0),
+        # (sub_img.shape[1]-int(width/2), sub_img.shape[0]-int(width/2)),
+        # color, width)
+
+    def draw_detections_cv2(self):
         color = draw_colors[self.algorithm]
         for detection in self.detections:
-            self.draw_detection(detection, color)
+            self.draw_detection_cv2(detection, color)

-    def draw_detection(self, detection, color=(0,0,255)):
+    def draw_detection_cv2(self, detection, color=(0,0,255)):
         # First we crop the sub-rect from the image
         sub_img = self.visualisation[detection['startY']:detection['endY'], detection['startX']:detection['endX']]
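+        # NB: basic numpy slicing returns a view, not a copy, so anything
+        # drawn directly onto sub_img writes through into self.visualisation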
@@ -88,11 +133,28 @@ class Result():
-def record(device_id, q1,q2, q3, q4):
+def record(device_id, q1,q2, q3, q4, resolution, rotate):
     capture = cv2.VideoCapture(device_id)
+
+    is_rotated_90 = rotate in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]
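+    # the camera itself always delivers landscape frames: when the output is
+    # rotated by a quarter turn, request the swapped resolution so the frame
+    # matches `resolution` again after cv2.rotate() below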
+    capture.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[1] if is_rotated_90 else resolution[0])
+    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[0] if is_rotated_90 else resolution[1])
+
     while True:
         ret, image = capture.read()
         logging.debug('r')
         if image is None:
             logging.critical("Error with camera?")
             exit()

+        if rotate is not None:
+            image = cv2.rotate(image, rotate)
+
+        # print(image.shape[:2], image.shape[1::-1])
+        if image.shape[1::-1] != resolution:
+            logging.warning(f"Camera resolution seems wrong: {image.shape[1::-1]} instead of {resolution}")
+
         try:
             q1.put_nowait(image)
         except Full as e:
@@ -155,6 +217,10 @@ def process1_hog(in_q, out_q):
     from skimage import data, exposure
-    import matplotlib.pyplot as plt
     import dlib
+    import matplotlib.pyplot as plt
+
+    # Get the color map by name:
+    cm = plt.get_cmap('plasma')

     face_detector = dlib.get_frontal_face_detector()
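+    # dlib's frontal face detector is itself HOG-based (HOG features plus a
+    # linear SVM), so this process both runs HOG detection and visualises it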
@@ -187,9 +253,17 @@ def process1_hog(in_q, out_q):
         # print(dets, scores, idxs)

         hog_image_rescaled = (hog_image_rescaled.astype('float32') * 255).astype('uint8')
-        hog_image_rescaled = cv2.cvtColor(hog_image_rescaled, cv2.COLOR_GRAY2BGR)
+        # hog_image_rescaled = cv2.cvtColor(hog_image_rescaled, cv2.COLOR_GRAY2BGR)
+        # blue background:
+        # hog_image_rescaled[:,:,0] = 200
+
+        # Apply the colormap like a function to any array:
+        colored_image = (cm(hog_image_rescaled) * 255).astype('uint8')
+        colored_image = cv2.cvtColor(colored_image, cv2.COLOR_RGBA2BGR)
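+        # cm() maps the uint8 intensities through the 'plasma' colormap and
+        # returns RGBA floats in [0, 1], hence the *255 scaling and the
+        # RGBA2BGR conversion back to a 3-channel BGR image for OpenCV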

-        result = Result('hog', hog_image_rescaled, 0)
+        # result = Result('hog', hog_image_rescaled, 0)
+        result = Result('hog', colored_image, 0)

         # Display the results
         for i, rectangle in enumerate(dets):
@@ -269,10 +343,11 @@ def process2_dnn(in_q, out_q):
         out_q.put(result)

-def process3_haar(in_q, out_q):
+def process3_haar(in_q, out_q, cascade_file):
     from cffi import FFI
     from PIL import Image
     import cv2
+    import os

     logger = logging.getLogger('haar')
@@ -281,11 +356,12 @@ def process3_haar(in_q, out_q):
     int test(int);

     typedef void* haarclassifier;
-    haarclassifier classifier_new();
+    haarclassifier classifier_new(char *filename);
     void scan_image(haarclassifier, size_t width,size_t height, char *input, char *buffer, size_t length, bool debug);
     """)

-    C = ffi.dlopen("/home/ruben/Documents/Projecten/2020/rust/testproject/target/debug/libvisual_haarcascades_lib.so")
+    dir_path = os.path.dirname(os.path.realpath(__file__))
+    C = ffi.dlopen(os.path.join(dir_path,"../visualhaar/target/debug/libvisual_haarcascades_lib.so"))
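+    # resolving the library path relative to this file, rather than to an
+    # absolute home directory, keeps the load independent of where the
+    # script is started from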

     # print(C.test(9))
     # i = Image.open("Marjo.jpg")
@@ -293,11 +369,14 @@ def process3_haar(in_q, out_q):
     # height = i.size[0]

     # use the rust lib to draw the visualisation
-    haar = C.classifier_new()
+    filename = cascade_file.encode('ascii')
+    fn = ffi.new("char[]", filename)
+    haar = C.classifier_new(fn)
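+    # NB: the buffer from ffi.new() is freed once `fn` is garbage collected,
+    # so keep the reference around if the Rust side holds on to the pointer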
     logger.info("Initialised haar classifier")

     # opencv for the actual detections
-    faceCascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt2.xml')
+    faceCascade = cv2.CascadeClassifier(cascade_file)

     while True:
         frame = in_q.get()
@@ -358,12 +437,16 @@ def process3_haar(in_q, out_q):
         # print(img)
         out_q.put(result)

-def display(image_res, q1, q2, q3, q4):
+def display(image_res, q1, q2, q3, q4, fullscreen = False):
     prev_image1 = np.zeros((image_res[1],image_res[0],3), np.uint8)
     prev_image2 = np.zeros((image_res[1],image_res[0],3), np.uint8)
     prev_image3 = np.zeros((image_res[1],image_res[0],3), np.uint8)
     prev_image4 = np.zeros((image_res[1],image_res[0],3), np.uint8)

+    if fullscreen:
+        cv2.namedWindow("output", cv2.WND_PROP_FULLSCREEN)
+        cv2.setWindowProperty("output",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
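+        # the name passed to namedWindow() has to match the one used by
+        # cv2.imshow() below, hence the lowercase "output" in both places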

     while True:
         logging.debug('r')
         try:
@@ -403,14 +486,27 @@ def display(image_res, q1, q2, q3, q4):
         img_concate_Verti1 = np.concatenate((image1,image2),axis=0)
         img_concate_Verti2 = np.concatenate((image3,image4),axis=0)
         grid_img = np.concatenate((img_concate_Verti1,img_concate_Verti2),axis=1)
-        cv2.imshow("Output", grid_img)
+        cv2.imshow("output", grid_img)

         # Hit 'q' on the keyboard to quit!
-        if cv2.waitKey(1) & 0xFF == ord('q'):
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord('q'):
             break
+        if key == ord(' '):
+            # TODO save frame
+            pass
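+        # reading waitKey() once into `key` means several keys can be checked
+        # against the same poll, instead of calling waitKey() once per key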

-def main(camera_id):
+def main(camera_id, rotate, fullscreen, cascade_file):
     image_size = (int(1920/2), int(1080/2))

+    if not os.path.exists(cascade_file):
+        raise RuntimeError(f"Cannot load OpenCV haar-cascade file '{cascade_file}'")
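+    # fail fast in the parent: a bad cascade path would otherwise only
+    # surface after the worker processes have been spawned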
+
+    is_rotated_90 = rotate in [cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE]
+    if is_rotated_90:
+        image_size = (image_size[1], image_size[0])
+
     # TODO should we use queues here at all?
     # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
     # TODO: queue maxsize, or preferably some sort of throttled queue (like zmq high water mark)
@@ -422,11 +518,11 @@ def main(camera_id):
     q_process2 = Queue(maxsize=1)
     q_process3 = Queue(maxsize=1)

-    p1 = Process(target=record, args=(camera_id, q_webcam1, q_webcam2,q_webcam3,q_webcam4))
-    p2 = Process(target=display, args=(image_size, q_webcam1, q_process1, q_process2, q_process3 ))
+    p1 = Process(target=record, args=(camera_id, q_webcam1, q_webcam2,q_webcam3,q_webcam4, image_size, rotate))
+    p2 = Process(target=display, args=(image_size, q_webcam1, q_process1, q_process2, q_process3, fullscreen ))
     p3 = Process(target=process1_hog, args=(q_webcam2, q_process1,))
     p4 = Process(target=process2_dnn, args=(q_webcam3, q_process2,))
-    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,))
+    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,cascade_file))

     p1.start()
     p2.start()