Compare commits


1 commit

Author: Ruben van de Ven
SHA1: 21bda67665
Message: Test if we get more up-to-date results with different queuing system
Date: 2020-10-02 14:09:20 +02:00
7 changed files with 127 additions and 254 deletions

.vscode/launch.json vendored

@@ -17,9 +17,8 @@
             "request": "launch",
             "program": "mirror.py",
             "args": [
-                "--windowed",
-                "--output", "/tmp/face_saves",
-                "--camera", "0",
+                // "--fullscreen",
+                "--camera", "2",
             ],
             "console": "integratedTerminal"
         }

README.md

@@ -11,55 +11,21 @@ A `mirror` which shows which faces are detected through three different facial d
 The installation in Windows can be done, though it is quite elaborate:
-* Install python3.8
-* Install VS C++ build tools
+* Install rustup-init
+* Install VS C++
+* Install python3
 * Install Cmake (needed for python dlib)
   + make sure to add it to path
 * Install git
   + including ssh deploy key
-* `git clone https://git.rubenvandeven.com/r/face_recognition`
+* `git clone https://git.rubenvandeven.com/r/face_detector`
 * `cd face_recognition`
+* `git submodules init`
+* `git submodules update`
 * `pip install virtualenv`
 * `virtualenv.exe venv`
-  + Might be that you need to run: `C:\Users\DP Medialab\AppData\Roaming\Python\Python39\Scripts\virtualenv.exe` (see pip output)
 * `.\venv\Scripts\activate`
-  + Might be that you need to first run `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
 * `cd .\dnn\face_detector`
 * `python.exe .\download_weights.py`
-* `cd ..\..`
-* `pip.exe install -r requirements.txt`
 * `cd .\visualhaar`
-* Either one of:
-  + Compile rust library
-    * Install rustup-init
-    * `git submodules init`
-    * `git submodules update`
-    * `cargo build --lib --release`
-  + Download dll from https://git.rubenvandeven.com/r/visualhaar/releases
-+ Fetch `SourceSansPro-Regular.ttf` from the internet
-+ Make the installer:
-  * Either one of:
-    * `& 'C:\Users\DP Medialab\AppData\Roaming\Python\Python38\Scripts\pyinstaller.exe' .\mirror.py --add-binary '.\visualhaar\target\release\visual_haarcascades_lib.dll;.' --add-data '.\haarcascade_frontalface_alt2.xml;.' --add-data '.\SourceSansPro-Regular.ttf;.' --add-data 'dnn;dnn'`
-    * `& '.\venv\Scripts\pyinstaller.exe' .\mirror.py --add-binary '.\visualhaar\target\release\visual_haarcascades_lib.dll;.' --add-data '.\haarcascade_frontalface_alt2.xml;.' --add-data '.\SourceSansPro-Regular.ttf;.' --add-data 'dnn;dnn' --hidden-import 'scipy.spatial.transform._rotation_groups' --hidden-import 'skimage.filters.rank.core_cy_3d'`
-  * `Compress-Archive -LiteralPath .\dist\mirror -DestinationPath .\dist\mirror.zip`
-+ We could also [use wine for cross compilation](https://www.andreafortuna.org/2017/12/27/how-to-cross-compile-a-python-script-into-a-windows-executable-on-linux/) from Linux
-  - make sure wine is set to pose as Windows 10 (`winecfg`)
-  - `wine ~/Downloads/python-3.9.0-amd64.exe` (or whichever version you use)
-  - Install for all users
+* `cargo build --lib --release`
-
-### On windows in VirtualBox
-See [this](https://askubuntu.com/questions/4875/how-can-i-use-my-webcam-with-ubuntu-running-in-virtualbox/1237808#1237808) on getting the webcam working in the VM:
-1. Install extension pack: `sudo apt install virtualbox-ext-pack`
-2. `VBoxManage list webcams`
-3. `VBoxManage controlvm "WIn10" webcam attach .3`
-
-## Instructor help
-If screen stays black: is the camera on?
-Enable camera through keyboard (MSI laptops: fn+F6). Then go to Settings/Instellingen -> Privacy instellingen voor camera -> Grant apps access to camera.
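The steps above leave several runtime artefacts next to the code (cascade XML, font file, DNN weights, the compiled visualhaar library). A hedged preflight sketch, using only names taken from the README, could confirm they are all in place before launching; this helper is illustrative, not part of the repository:

```python
# Hypothetical preflight check for the artefacts produced by the steps above,
# resolved relative to the checkout directory.
import os

REQUIRED = [
    "haarcascade_frontalface_alt2.xml",               # OpenCV cascade
    "SourceSansPro-Regular.ttf",                      # fetched font
    os.path.join("dnn", "face_detector"),             # downloaded weights
    os.path.join("visualhaar", "target", "release"),  # cargo build output
]

missing = [path for path in REQUIRED if not os.path.exists(path)]
if missing:
    raise SystemExit("Missing artefacts: " + ", ".join(missing))
```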

face_recognition/comparison.py

@@ -1,4 +1,4 @@
-from multiprocessing import Process, Queue
+from multiprocessing import Process, Queue, JoinableQueue
 from queue import Empty, Full
 import cv2
 import logging
@@ -9,12 +9,11 @@ import math
 import datetime
 from PIL import ImageFont, ImageDraw, Image
 import os
-import sys
 
 draw_colors = {
-    'hog': (198,65,124),
+    'hog': (255,255,255), #(198,65,124),
     'haar': (255,255,255),
-    'dnn': (251,212,36),
+    'dnn': (255,255,255) #(251,212,36),
 }
 
 titles = {
@@ -23,20 +22,25 @@ titles = {
     'dnn' : "Neural network",
 }
 
-project_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..')
-fontfile = os.path.join(project_dir, "SourceSansPro-Regular.ttf")
+fontfile = "SourceSansPro-Regular.ttf"
 
 font = ImageFont.truetype(fontfile, 30)
 font_s = ImageFont.truetype(fontfile, 20)
 countdown_font = ImageFont.truetype(fontfile, 160)
 
+class Request():
+    def __init__(self, image, cam_time = None, for_snapshot=False):
+        self.image = image
+        self.time = cam_time
+        self.for_snapshot = False
+
 class Result():
-    def __init__(self, algorithm, image, confidence_threshold = 0.5):
+    def __init__(self, algorithm, image, time, for_snapshot, confidence_threshold = 0.5):
         self.algorithm = algorithm
         self.visualisation = image
         self.detections = []
+        self.time = time
+        self.for_snapshot = for_snapshot
         self.confidence_threshold = confidence_threshold
 
     def add_detection(self, startX, startY, endX, endY, confidence):
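The new Request/Result pairing threads the capture timestamp and snapshot flag from the camera process through to each detector's output. A small sketch of how a consumer could use that metadata, assuming the classes above (this helper is illustrative, not in the commit):

```python
# Illustrative consumer-side check: with a capture time on every Result,
# detections computed from frames older than the one currently on screen
# can be dropped instead of drawn.
def is_stale(result, last_shown_time):
    return result.time is not None and result.time < last_shown_time
```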
@@ -49,50 +53,42 @@ class Result():
             })
         return self
 
-    def draw_detections(self, include_title = False, coloured=False):
+    def draw_detections(self, include_title = False):
         cv2_im_rgb = cv2.cvtColor(self.visualisation,cv2.COLOR_BGR2RGB)
         # Pass the image to PIL
         pil_im = Image.fromarray(cv2_im_rgb)
         draw = ImageDraw.Draw(pil_im, 'RGBA')
 
-        self.draw_detections_on(draw, coloured)
+        self.draw_detections_on(draw)
 
         if include_title:
-            color = draw_colors[self.algorithm] if coloured else (255,255,255)
-            draw.text((10,10), titles[self.algorithm], fill=color, font=font, stroke_width=1, stroke_fill=(0,0,0,100))
+            draw.text((10,10), titles[self.algorithm], fill=draw_colors[self.algorithm], font=font)
 
         return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
-    def draw_detections_on(self, draw: ImageDraw, coloured=False, onlyIfConfident=False):
+    def draw_detections_on(self, draw: ImageDraw):
         '''
         Draw on a specified canvas
         '''
-        color = draw_colors[self.algorithm] if coloured else (255,255,255)
+        color = draw_colors[self.algorithm]
         for detection in self.detections:
-            self.draw_detection(draw, detection, color, onlyIfConfident)
+            self.draw_detection(draw, detection, color)
 
-    def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple, onlyIfConfident: bool = False):
-        width = 2
-
+    def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple):
         if detection['confidence'] > self.confidence_threshold:
+            width = 8
             # draw the bounding box of the face along with the associated
             # probability
-            text = "{:.0f}%".format(detection['confidence'] * 100)
+            text = "{:.2f}%".format(detection['confidence'] * 100)
             y = detection['startY'] - 40 if detection['startY'] - 40 > 10 else detection['startY'] + 10
-            draw.text((detection['startX'], y), text, font=font, fill=color, stroke_fill=(0,0,0,100), stroke_width=1)
+            draw.text((detection['startX'], y), text, font=font, fill=color)
             # cv2.putText(self.visualisation, text, (detection['startX'], y),
             # cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2, lineType = cv2.LINE_AA)
             alpha = 1
-            draw.rectangle((detection['startX']-1, detection['startY']-1, detection['endX']+1, detection['endY']+1), outline=(0,0,0,100), width=1)
-            draw.rectangle((detection['startX']+width, detection['startY']+width, detection['endX']-width, detection['endY']-width), outline=(0,0,0,100), width=1)
-        elif onlyIfConfident:
-            # Only draw if above threshold, so this should be ignored.
-            return
         else:
+            width = int(detection['confidence'] * 10 * 8)
             # At least 10% opacity
             alpha = max(.2, detection['confidence'])
@@ -102,35 +98,23 @@ class Result():
         draw.rectangle((detection['startX'], detection['startY'], detection['endX'], detection['endY']), outline=color, width=width)
 
-    def resize(self, width, height, flip=False):
+    def resize(self, width, height):
         # TODO resize to new target incl all detections
         img = self.visualisation
         factor_x = width / self.visualisation.shape[1]
         factor_y = height / self.visualisation.shape[0]
         inter = cv2.INTER_NEAREST if self.algorithm in ['dnn', 'haar'] else cv2.INTER_CUBIC
         img = cv2.resize(img, (width, height), interpolation=inter)
-
-        if flip:
-            img = cv2.flip(img, 1)
-
-        result = Result(self.algorithm, img, self.confidence_threshold)
+        result = Result(self.algorithm, img, self.time, self.for_snapshot, self.confidence_threshold)
         for d in self.detections:
-            if flip:
-                result.add_detection(
-                    int(width - d['endX'] * factor_x),
-                    int(d['startY'] * factor_y),
-                    int(width - d['startX'] * factor_x),
-                    int(d['endY'] * factor_y),
-                    d['confidence']
-                )
-            else:
-                result.add_detection(
-                    int(d['startX'] * factor_x),
-                    int(d['startY'] * factor_y),
-                    int(d['endX'] * factor_x),
-                    int(d['endY'] * factor_y),
-                    d['confidence']
-                )
+            result.add_detection(
+                int(d['startX'] * factor_x),
+                int(d['startY'] * factor_y),
+                int(d['endX'] * factor_x),
+                int(d['endY'] * factor_y),
+                d['confidence']
+            )
         return result
 
     def count_detections(self):
@@ -152,9 +136,11 @@ def record(device_id, q1,q2, q3, q4, resolution, rotate):
         ret, image = capture.read()
         if image is None:
             logging.critical("Error with camera?")
-            sys.exit()
+            exit()
+
+        timestamp = time.time()
 
         if rotate is not None:
             image = cv2.rotate(image, rotate)
@@ -172,17 +158,22 @@ def record(device_id, q1,q2, q3, q4, resolution, rotate):
             # ignore if processing doesn't keep up
             pass
         try:
-            q2.put_nowait(image)
+            # frames generally come in faster than the processing takes.
+            # so we want to only put in a frame after processing is done
+            if q2._unfinished_tasks.get_value() == 0:
+                q2.put_nowait(Request(image, timestamp))
         except Full as e:
             # ignore if processing doesn't keep up
             pass
         try:
-            q3.put_nowait(image)
+            if q3._unfinished_tasks.get_value() == 0:
+                q3.put_nowait(Request(image, timestamp))
         except Full as e:
             # ignore if processing doesn't keep up
             pass
         try:
-            q4.put_nowait(image)
+            if q4._unfinished_tasks.get_value() == 0:
+                q4.put_nowait(Request(image, timestamp))
         except Full as e:
             # ignore if processing doesn't keep up
             pass
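record() now refuses to enqueue a frame while a detector is still busy, by peeking at the JoinableQueue's count of unacknowledged tasks. A standalone sketch of that pattern follows; note that `_unfinished_tasks` is a CPython implementation detail (a Semaphore behind JoinableQueue, and `get_value()` is unavailable on some platforms such as macOS), so this is best-effort rather than a public API. The name `offer_latest` is illustrative:

```python
from multiprocessing import JoinableQueue
from queue import Full

def offer_latest(q: JoinableQueue, item) -> bool:
    """Enqueue only when the worker has task_done()'d everything so far;
    otherwise drop the item (here: skip a camera frame)."""
    if q._unfinished_tasks.get_value() == 0:  # CPython internal, not public API
        try:
            q.put_nowait(item)
            return True
        except Full:
            pass  # raced with another producer; treat the worker as busy
    return False
```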
@@ -223,9 +214,9 @@ def draw_detection(image, startX, startY, endX, endY, confidence, color=(0,0,255
 
 def process1_hog(in_q, out_q):
-    # from skimage.feature import hog as hog_orig
+    from skimage.feature import hog as hog_orig
     from .hog import hog # use modified version for viz
-    from skimage import exposure
+    from skimage import data, exposure
     import matplotlib.pyplot as plt
     import dlib
     import matplotlib.pyplot as plt
@@ -236,17 +227,18 @@ def process1_hog(in_q, out_q):
     face_detector = dlib.get_frontal_face_detector()
 
     visualisation_factor = 1
-    detection_factor = .3
+    detection_factor = .4
 
     process_this_frame = True
     while True:
-        if process_this_frame:
+        # if process_this_frame:
         # Grab a single frame of video
-        frame = in_q.get()
+        request = in_q.get()
+        frame = request.image
         frame = cv2.cvtColor(src=frame, code=cv2.COLOR_BGR2GRAY)
 
-        # viz_frame = cv2.resize(frame, (0, 0), fx=visualisation_factor, fy=visualisation_factor)
+        viz_frame = cv2.resize(frame, (0, 0), fx=visualisation_factor, fy=visualisation_factor)
         det_frame = cv2.resize(frame, (0, 0), fx=detection_factor, fy=detection_factor)
 
         start = time.time()
@@ -260,7 +252,8 @@ def process1_hog(in_q, out_q):
         # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
         # rgb_small_frame = det_frame[:, :, ::-1]
         # dets, scores, idxs = face_detector.run(rgb_small_frame, 1, -2)
-        dets, scores, idxs = face_detector.run(det_frame, 1, -2)
+        # dets, scores, idxs = face_detector.run(det_frame, 1, -2)
+        dets, scores, idxs = face_detector.run(det_frame)
         # print(dets, scores, idxs)
 
         hog_image_rescaled = (hog_image_rescaled.astype('float32') * 255).astype('uint8')
@@ -274,7 +267,7 @@ def process1_hog(in_q, out_q):
         colored_image = cv2.cvtColor(colored_image, cv2.COLOR_RGB2BGR)
 
         # result = Result('hog', hog_image_rescaled, 0)
-        result = Result('hog', colored_image, 0)
+        result = Result('hog', colored_image, request.time, request.for_snapshot, 0)
 
         # Display the results
         for i, rectangle in enumerate(dets):
@@ -304,9 +297,10 @@ def process1_hog(in_q, out_q):
 
         # Display the resulting image
         out_q.put(result)
+        in_q.task_done()
         # print(cgray.shape)
 
-        process_this_frame = not process_this_frame
+        # process_this_frame = not process_this_frame
 
 
 def process2_dnn(in_q, out_q):
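Every detector process now follows the same loop: take a Request, emit a Result stamped with the original capture time, then call task_done() so record() will offer a fresh frame. Schematically (illustrative names, simplified from the repository's code):

```python
def detector_loop(in_q, out_q, detect):
    while True:
        request = in_q.get()            # blocks until record() offers a frame
        result = detect(request.image)  # hog / dnn / haar specific work
        out_q.put((result, request.time, request.for_snapshot))
        in_q.task_done()                # acknowledges, so record() enqueues again
```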
@@ -322,7 +316,8 @@ def process2_dnn(in_q, out_q):
     logger.info("Loaded")
 
     while True:
-        image = in_q.get()
+        request = in_q.get()
+        image = request.image
 
         (h, w) = image.shape[:2]
         image_small = cv2.resize(image, (300, 300))
@@ -335,7 +330,7 @@ def process2_dnn(in_q, out_q):
         detections = net.forward()
         # idxs = np.argsort(detections[0])[::-1][:5]
 
-        result = Result('dnn', image)
+        result = Result('dnn', image, request.time, request.for_snapshot)
 
         for i in range(0, detections.shape[2]):
             # extract the confidence (i.e., probability) associated with the
@@ -353,8 +348,9 @@ def process2_dnn(in_q, out_q):
             # draw_detection(image, startX, startY, endX, endY, confidence, draw_colors['dnn'])
 
         out_q.put(result)
+        in_q.task_done()
 
-def process3_haar(in_q, out_q, cascade_file, library_filename = None):
+def process3_haar(in_q, out_q, cascade_file):
     from cffi import FFI
     from PIL import Image
     import cv2
@@ -371,27 +367,19 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
     void scan_image(haarclassifier, size_t width,size_t height, char *input, char *buffer, size_t length, size_t min_face_factor, bool debug);
     """)
 
-    if library_filename is not None:
-        C = ffi.dlopen(library_filename)
-    else:
-        lib_path = os.path.join(project_dir, "visualhaar", "target", "release")
-        possible_paths = [
-            os.path.join(lib_path, "libvisual_haarcascades_lib.so"),
-            os.path.join(lib_path, "visual_haarcascades_lib.dll"),
-            os.path.join(project_dir, "visual_haarcascades_lib.dll"),
-        ]
-
-        existing_paths = [p for p in possible_paths if os.path.exists(p)]
-
-        if not len(existing_paths):
-            raise RuntimeError("Visual haarcascades library is not found")
-
-        logger.debug(f"Using library: {existing_paths[0]}")
-        C = ffi.dlopen(existing_paths[0])
+    dir_path = os.path.dirname(os.path.realpath(__file__))
+    lib_path = os.path.join(dir_path, "..", "visualhaar", "target", "release")
+    so_path = os.path.join(lib_path, "libvisual_haarcascades_lib.so")
+    dll_path = os.path.join(lib_path, "visual_haarcascades_lib.dll")
+
+    if os.path.exists(so_path):
+        C = ffi.dlopen(so_path)
+    elif os.path.exists(dll_path):
+        C = ffi.dlopen(dll_path)
+    else:
+        raise RuntimeException("Visual haarcascades library is not found")
 
     # print(C.test(9))
     # i = Image.open("Marjo.jpg")
     # width = i.size[0]
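The replacement loader probes two hard-coded build paths and dlopens whichever exists. A compact equivalent as a sketch (the cdef line stands in for the real declarations); one caveat worth flagging: Python's builtin exception is RuntimeError, there is no RuntimeException, so the new else branch above would itself raise a NameError if ever reached.

```python
import os
from cffi import FFI

def load_visualhaar(lib_dir):
    ffi = FFI()
    ffi.cdef("int test(int a);")  # must mirror the library's actual header
    for name in ("libvisual_haarcascades_lib.so", "visual_haarcascades_lib.dll"):
        path = os.path.join(lib_dir, name)
        if os.path.exists(path):
            return ffi, ffi.dlopen(path)
    raise RuntimeError("Visual haarcascades library is not found")
```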
@@ -408,7 +396,8 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
     faceCascade = cv2.CascadeClassifier(cascade_file)
 
     while True:
-        frame = in_q.get()
+        request = in_q.get()
+        frame = request.image
 
         (height_orig, width_orig) = frame.shape[:2]
         scale_factor = 4
@@ -438,7 +427,7 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
         start = time.time()
         C.scan_image(haar, width, height, buffer2, buffer, buffer_len, 5, False)
         logger.info(f"Visualised scan into buffer: {buffer}")
-        # print(f"duration: {time.time() - start}s")
+        print(f"duration: {time.time() - start}s")
 
         img = Image.frombuffer(pixel_format, (width, height), ffi.buffer(buffer),
             "raw", pixel_format, 0, 1)
@@ -451,7 +440,7 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         img = cv2.resize(img, (width_orig, height_orig))
 
-        result = Result('haar', img)
+        result = Result('haar', img, request.time, request.for_snapshot)
 
         for face in faces:
             x1, y1, w, h = face
@@ -462,17 +451,14 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
             # draw_detection(img, x1 * scale_factor, y1 * scale_factor, x2 * scale_factor, y2 * scale_factor, 1, draw_colors['haar'],)
             result.add_detection(x1 * scale_factor, y1 * scale_factor, x2 * scale_factor, y2 * scale_factor, 1)
 
         # print(img)
         out_q.put(result)
+        in_q.task_done()
 
-def draw_stats(image, results, padding, coloured=False, drawDetections=False):
+def draw_stats(image, results, padding):
     pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
     draw = ImageDraw.Draw(pil_im, 'RGBA')
-    draw_stats_on_canvas(draw, results, padding, coloured, drawDetections)
-    return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
-
-def draw_stats_on_canvas(draw, results, padding, coloured=False, drawDetections=False):
     for i, result in enumerate(results):
         if result is None:
             continue
@@ -481,11 +467,9 @@ def draw_stats_on_canvas(draw, results, padding, coloured=False, drawDetections=
         txt = "face" if c == 1 else "faces"
         txt = f"{result.algorithm.ljust(5)} {c} {txt}"
         height = padding + 25
-        colour = draw_colors[result.algorithm] if coloured else (255,255,255)
-        draw.text((padding, draw.im.size[1] - (i+1)*height - padding), txt, fill=colour, font=font, stroke_width=2, stroke_fill=(0,0,0))
-
-        if drawDetections:
-            result.draw_detections_on(draw, coloured, onlyIfConfident=True)
+        draw.text((padding, pil_im.size[1] - i*height - height), txt, fill=draw_colors[result.algorithm], font=font_s, stroke_width=1, stroke_fill=(0,0,0))
+
+    return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
 def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
@@ -572,9 +556,7 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
                 grid_img.shape[1] - padding - preview_width:grid_img.shape[1] - padding] = cv2.resize(image, (preview_width, preview_height), cv2.INTER_CUBIC)
 
             # statistics
-            # for the plain webcam image (no viz), draw all detected faces.
-            drawDetections = (selectPreview.imageIdx == 0)
-            grid_img = draw_stats(grid_img, results, padding, coloured=True, drawDetections=drawDetections)
+            grid_img = draw_stats(grid_img, results, padding)
 
             pil_im = Image.fromarray(cv2.cvtColor(grid_img, cv2.COLOR_BGR2RGB))
             draw = ImageDraw.Draw(pil_im, 'RGBA')
@@ -582,7 +564,7 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             if countdown_until:
                 duration = math.ceil(countdown_until - time.time())
                 w, h = draw.textsize(f"{duration}", font=countdown_font)
-                draw.text(((grid_img.shape[1]-w)/2,(grid_img.shape[0]-h)/2), f"{duration}", fill="white", stroke="black", font=countdown_font, stroke_width=1, stroke_fill=(0,0,0,100))
+                draw.text(((grid_img.shape[1]-w)/2,(grid_img.shape[0]-h)/2), f"{duration}", fill="white", stroke="black", font=countdown_font)
 
             grid_img = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
@@ -593,63 +575,48 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             # Hit 'q' on the keyboard to quit!
             key = cv2.waitKey(1) & 0xFF
-            if key == ord('q') or key == 27: # key 27: escape
+            if key == ord('q'):
                 break
 
-            # TODO: the truth value of an array with more than one element is ambiguous, use a.any or a.all() (in other words: override_image is None)
-            if key == ord(' ') and override_image is None:
+            if key == ord(' ') and not override_image:
                 countdown_until = time.time() + 3 # seconds of countdown
 
-            # SNAP! SAVE FRAMES
             if countdown_until is not None and time.time() > countdown_until:
                 countdown_until = None
                 # TODO wait for frame to be processed. Eg. if I move and make a pic, it should use the last frame...
+                # SNAP!
                 # output_res = (image_res[0] *2, image_res[1] * 2)
+                req = Request(images[0], time.time(), for_snapshot=True)
+                q2.put_nowait(req)
+                q3.put_nowait(req)
+                q4.put_nowait(req)
                 output_res = image_res # no scaling needed anyore
-                pil_im = Image.fromarray(cv2.cvtColor(cv2.flip(images[0],1), cv2.COLOR_BGR2RGB))
+                pil_im = Image.fromarray(cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB))
                 pil_im = pil_im.resize(output_res)
 
-                # base name for all images
-                name = datetime.datetime.now().isoformat(timespec='seconds').replace(':','-')
-                # filename of clean frame
-                filename = os.path.join(output_dir, f'{name}-frame.jpg')
-                pil_im.save(filename)
-
-                # now draw all results to the main image
                 draw = ImageDraw.Draw(pil_im, 'RGBA')
 
                 for result in results:
                     if result is None:
                         continue
-                    result.resize(output_res[0], output_res[1], flip=True).draw_detections_on(draw, coloured=True)
-
-                draw_stats_on_canvas(draw, results, padding, coloured=True)
+                    result.resize(output_res[0], output_res[1]).draw_detections_on(draw)
 
                 override_image = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
                 override_until = time.time() + 5
                 logger.info("Show frame until %f", override_until)
 
                 # save images:
-                filename = os.path.join(output_dir, f'{name}-all.png')
-                print(f"Save to {filename}")
-                r=cv2.imwrite(filename, override_image)
-                if not r:
-                    raise RuntimeError(f"Could not save image {filename}")
-
-                # finally, store each visualisation with the results
+                name = datetime.datetime.now().isoformat(timespec='seconds')
+                cv2.imwrite(os.path.join(output_dir, f'{name}.png'),override_image)
                 for result in results:
-                    result_img = result.draw_detections(include_title = True)
-                    filename = os.path.join(output_dir, f'{name}-{result.algorithm}.png')
-                    r = cv2.imwrite(filename, result_img)
-                    if not r:
-                        raise RuntimeError(f"Could not save image {filename}")
+                    cv2.imwrite(os.path.join(output_dir, f'{name}-{result.algorithm}.png'),result.visualisation)
 
-def main(camera_id, rotate, fullscreen, cascade_file, output_dir, visualhaar_lib = None):
+def main(camera_id, rotate, fullscreen, cascade_file, output_dir):
     image_size = (1920, 1080) #(int(1920/2), int(1080/2))
 
     if not os.path.exists(cascade_file):
@@ -666,9 +633,9 @@ def main(camera_id, rotate, fullscreen, cascade_file, output_dir, visualhaar_lib
     # https://docs.python.org/3/library/multiprocessing.html#programming-guidelines
     # TODO: queue maxsize, or prefrabily some sort of throttled queue (like zmq hight water mark)
     q_webcam1 = Queue(maxsize=1)
-    q_webcam2 = Queue(maxsize=1)
-    q_webcam3 = Queue(maxsize=1)
-    q_webcam4 = Queue(maxsize=1)
+    q_webcam2 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
+    q_webcam3 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
+    q_webcam4 = JoinableQueue(maxsize=2) # size is 2 so that record() can add an image on snapshot
     q_process1 = Queue(maxsize=1)
     q_process2 = Queue(maxsize=1)
     q_process3 = Queue(maxsize=1)
@@ -677,7 +644,7 @@ def main(camera_id, rotate, fullscreen, cascade_file, output_dir, visualhaar_lib
     p2 = Process(target=display, args=(image_size, q_webcam1, q_process1, q_process2, q_process3, fullscreen, output_dir ))
     p3 = Process(target=process1_hog, args=(q_webcam2, q_process1,))
     p4 = Process(target=process2_dnn, args=(q_webcam3, q_process2,))
-    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,cascade_file, visualhaar_lib))
+    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,cascade_file))
 
     p1.start()
     p2.start()
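main() wires the pipeline as one camera producer, three detector workers, and a display consumer, all linked by bounded queues. The shape, reduced to a sketch with illustrative names (the real code also passes camera settings, cascade paths, and so on):

```python
from multiprocessing import Process, Queue, JoinableQueue

def build_pipeline(record, display, detectors):
    q_preview = Queue(maxsize=1)                             # record -> display
    work_qs = [JoinableQueue(maxsize=2) for _ in detectors]  # record -> workers
    result_qs = [Queue(maxsize=1) for _ in detectors]        # workers -> display
    procs = [Process(target=record, args=(q_preview, *work_qs)),
             Process(target=display, args=(q_preview, *result_qs))]
    procs += [Process(target=d, args=(wq, rq))
              for d, wq, rq in zip(detectors, work_qs, result_qs)]
    return procs
```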


@@ -1,32 +0,0 @@
-from PIL import ImageFont, ImageDraw, Image
-import cv2
-import numpy as np
-
-text_to_show = "The quick brown fox jumps over the lazy dog"
-
-# Load image in OpenCV
-image = cv2.imread("Me.jpg")
-
-# Convert the image to RGB (OpenCV uses BGR)
-cv2_im_rgb = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
-
-# Pass the image to PIL
-pil_im = Image.fromarray(cv2_im_rgb)
-
-draw = ImageDraw.Draw(pil_im)
-
-# Draw the text
-draw.text((10, 700), text_to_show, font=font)
-
-# Get back the image to OpenCV
-cv2_im_processed = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
-
-cv2.imshow('Fonts', cv2_im_processed)
-
-cv2.waitKey(0)
-cv2.destroyAllWindows()
-
-def get_font(filename, size):
-    return ImageFont.truetype(filename, size)
-
-def draw_text(img, ):
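The deleted scratch file drew PIL text onto an OpenCV image but referenced `font` without defining it. For reference, a self-contained version of the same round trip might read as follows ("Me.jpg" and the font path are placeholders):

```python
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

image = cv2.imread("Me.jpg")
font = ImageFont.truetype("SourceSansPro-Regular.ttf", 40)

# OpenCV stores BGR; PIL expects RGB
pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
ImageDraw.Draw(pil_im).text((10, 700), "The quick brown fox jumps over the lazy dog", font=font)

cv2.imshow('Fonts', cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows()
```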

mirror.py

@@ -1,27 +1,22 @@
 import argparse
 import face_recognition.comparison
 import cv2
-from multiprocessing import freeze_support
-import os
 
 if __name__ == '__main__':
-    freeze_support() # support pyinstaller on Windows
-    parser = argparse.ArgumentParser(description='Visualise face recognition algorithms.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser = argparse.ArgumentParser(description='Visualise face recognition algorithms.')
     parser.add_argument('--camera', '-c', type=int, default=0,
                         help='Numeric id of the camera')
-    parser.add_argument('--windowed', '-w', action='store_true',
-                        help='Display output windowed instead of fullscreen')
+    parser.add_argument('--fullscreen', '-f', action='store_true',
+                        help='Display output full screen')
     parser.add_argument('--clockwise', action='store_true',
                         help='Rotate clockwise')
     parser.add_argument('--counter-clockwise', action='store_true',
                         help='Rotate counter clockwise')
-    parser.add_argument('--cascade', default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'haarcascade_frontalface_alt2.xml'),
+    parser.add_argument('--cascade', default='haarcascade_frontalface_alt2.xml',
                         help='Cascade XML file to use (opencv format)')
-    parser.add_argument('--output', metavar="DIRECTORY", default=os.path.expanduser("~/Desktop/faces"),
+    parser.add_argument('--output', default='saves',
                         help='Directory to store images (after pressing spacebar)')
-    parser.add_argument('--visualhaar-lib', metavar="LIBRARY", default=None,
-                        help='path/filename for visualhaar library (.so on linux, .dll on windows)\nSee: https://git.rubenvandeven.com/r/visualhaar/releases')
 
     args = parser.parse_args()
@@ -31,8 +26,4 @@ if __name__ == '__main__':
     if args.counter_clockwise:
         rotate = cv2.ROTATE_90_COUNTERCLOCKWISE
 
-    if not os.path.exists(args.output):
-        print("Making directory:", args.output)
-        os.mkdir(args.output)
-
-    face_recognition.comparison.main(args.camera, rotate, not args.windowed, args.cascade, args.output, args.visualhaar_lib)
+    face_recognition.comparison.main(args.camera, rotate, args.fullscreen, args.cascade, args.output)
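After this change the entry point simply parses flags and hands off to the comparison module. Invoking the same code path programmatically, assuming the new main() signature shown above (a sketch mirroring the new defaults, equivalent to `python mirror.py --camera 2 --fullscreen`):

```python
import face_recognition.comparison

face_recognition.comparison.main(
    camera_id=2,
    rotate=None,  # or cv2.ROTATE_90_CLOCKWISE / cv2.ROTATE_90_COUNTERCLOCKWISE
    fullscreen=True,
    cascade_file='haarcascade_frontalface_alt2.xml',
    output_dir='saves',
)
```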

requirements.txt

@@ -1,25 +1,7 @@
-altgraph==0.17
-cffi==1.14.4
-cycler==0.10.0
-decorator==4.4.2
-dlib==19.21.1
-future==0.18.2
-imageio==2.9.0
-kiwisolver==1.3.1
-matplotlib==3.3.3
-networkx==2.5
-numpy==1.19.3
-opencv-python==4.5.1.48
-pefile==2019.4.18
-Pillow==8.1.0
-pycparser==2.20
-pyinstaller==4.1
-pyinstaller-hooks-contrib==2020.11
-pyparsing==2.4.7
-python-dateutil==2.8.1
-PyWavelets==1.1.1
-pywin32-ctypes==0.2.0
-scikit-image==0.18.1
-scipy==1.6.0
-six==1.15.0
-tifffile==2020.12.8
+scipy
+numpy
+dlib
+Pillow
+opencv-python
+cffi
+scikit-image

visualhaar

@@ -1 +1 @@
-Subproject commit 1319e644b1f59debe46be866d18209d2a6089e1b
+Subproject commit ac1aea1d68f346be194e64a3275629a177327f2e