Changes after feedback + better Windows installer explanation in Readme

commit 12ce9e1751
parent b3b407a99e

4 changed files with 56 additions and 21 deletions

.vscode/launch.json (vendored): 2 changes
```diff
@@ -17,7 +17,7 @@
     "request": "launch",
     "program": "mirror.py",
     "args": [
-        // "--windowed",
+        "--windowed",
         "--output", "/tmp/face_saves",
         "--camera", "0",
     ],
```
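The debug configuration now passes `--windowed` to `mirror.py` instead of keeping it commented out. As a rough sketch of how these three flags are typically declared with argparse (option names taken from the launch config above; the real parser in `mirror.py` may use different defaults or extra options):

```python
import argparse

# Hypothetical sketch: only the option names come from the launch config;
# defaults and help texts are illustrative.
parser = argparse.ArgumentParser(description="Face detection mirror")
parser.add_argument("--windowed", action="store_true",
                    help="run in a window instead of fullscreen")
parser.add_argument("--output", metavar="DIR", default="/tmp/face_saves",
                    help="directory in which captured frames are saved")
parser.add_argument("--camera", type=int, default=0,
                    help="index of the capture device to open")
args = parser.parse_args()
```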
README.md: 20 changes
```diff
@@ -12,18 +12,22 @@ A `mirror` which shows which faces are detected through three different facial d
 The installation in Windows can be done, though it is quite elaborate:
 
 * Install python3
-* Install VS C++
+* Install VS C++ build tools
 * Install Cmake (needed for python dlib)
   + make sure to add it to path
 * Install git
   + including ssh deploy key
-* `git clone https://git.rubenvandeven.com/r/face_detector`
+* `git clone https://git.rubenvandeven.com/r/face_recognition`
 * `cd face_recognition`
 * `pip install virtualenv`
 * `virtualenv.exe venv`
+  + Might be that you need to run: `C:\Users\DP Medialab\AppData\Roaming\Python\Python39\Scripts\virtualenv.exe` (see pip output)
 * `.\venv\Scripts\activate`
+  + Might be that you need to first run `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
 * `cd .\dnn\face_detector`
 * `python.exe .\download_weights.py`
+* `cd ..\..`
+* `pip.exe install -r requirements.txt`
 * `cd .\visualhaar`
 * Either one of:
   + Compile rust library
```
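For context on the `download_weights.py` step: once the model files are in `dnn/face_detector`, OpenCV can load the detector directly. A minimal sketch, assuming the Caffe-format res10 SSD files that OpenCV's own face detector sample uses (check the directory for the filenames the script actually fetches):

```python
import cv2

# Assumed filenames: these are what OpenCV's DNN face detector sample ships;
# dnn/face_detector may use different names.
net = cv2.dnn.readNetFromCaffe(
    "dnn/face_detector/deploy.prototxt",
    "dnn/face_detector/res10_300x300_ssd_iter_140000.caffemodel",
)

image = cv2.imread("test.jpg")
# The res10 SSD expects a 300x300 BGR input with the training means subtracted.
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
                             (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()  # shape (1, 1, N, 7); index 2 of each row is the confidence
```

The second README hunk adds packaging and troubleshooting notes: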
```diff
@@ -35,4 +39,16 @@ The installation in Windows can be done, though it is quite elaborate:
   + Make the installer:
 * `& 'C:\Users\DP Medialab\AppData\Roaming\Python\Python38\Scripts\pyinstaller.exe' .\mirror.py --add-binary '.\visualhaar\target\release\visual_haarcascades_lib.dll;.' --add-data '.\haarcascade_frontalface_alt2.xml;.' --add-data '.\SourceSansPro-Regular.ttf;.' --add-data 'dnn;dnn'`
 * `mv '.\dist\mirror\mpl-data' '.\dist\mirror\matplotlib\'`
+* `Compress-Archive -LiteralPath .\dist\mirror -DestinationPath .\dist\mirror.zip`
+  + We could also [use wine for cross compilation](https://www.andreafortuna.org/2017/12/27/how-to-cross-compile-a-python-script-into-a-windows-executable-on-linux/) from Linux
+    - make sure wine is set to pose as Windows 10 (`winecfg`)
+    - `wine ~/Downloads/python-3.9.0-amd64.exe` (or whichever version you use)
+    - Install for all users
+    -
+
+
+## Instructor help
+
+If the screen stays black: is the camera on?
+
+Enable the camera through the keyboard (MSI laptops: fn+F6). Then go to Settings -> Privacy settings for the camera -> Grant apps access to the camera.
```
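The `--add-data 'source;dest'` pairs bundle files into the frozen app (PyInstaller uses `;` as the separator on Windows and `:` elsewhere). At run time the bundled files sit next to the executable, or under `sys._MEIPASS` for one-file builds; the standard PyInstaller lookup idiom looks like this (a sketch, not necessarily how `mirror.py` resolves its data files):

```python
import os
import sys

def resource_path(relative: str) -> str:
    """Resolve a bundled data file both in development and in a PyInstaller build."""
    # One-file builds unpack into a temp dir exposed as sys._MEIPASS;
    # in development (and one-dir builds) files sit next to this script.
    base = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base, relative)

cascade_file = resource_path("haarcascade_frontalface_alt2.xml")
font_file = resource_path("SourceSansPro-Regular.ttf")
```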
mirror.py

```diff
@@ -9,6 +9,7 @@ import math
 import datetime
 from PIL import ImageFont, ImageDraw, Image
 import os
+import sys
 
 draw_colors = {
     'hog': (198,65,124),
```
```diff
@@ -62,22 +63,22 @@ class Result():
 
         return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
-    def draw_detections_on(self, draw: ImageDraw, coloured=False):
+    def draw_detections_on(self, draw: ImageDraw, coloured=False, onlyIfConfident=False):
         '''
         Draw on a specified canvas
         '''
         color = draw_colors[self.algorithm] if coloured else (255,255,255)
         for detection in self.detections:
-            self.draw_detection(draw, detection, color)
+            self.draw_detection(draw, detection, color, onlyIfConfident)
 
-    def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple):
+    def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple, onlyIfConfident: bool = False):
 
 
         if detection['confidence'] > self.confidence_threshold:
             width = 8
             # draw the bounding box of the face along with the associated
             # probability
-            text = "{:.2f}%".format(detection['confidence'] * 100)
+            text = "{:.0f}%".format(detection['confidence'] * 100)
             y = detection['startY'] - 40 if detection['startY'] - 40 > 10 else detection['startY'] + 10
 
             draw.text((detection['startX'], y), text, font=font, fill=color, stroke_fill=(0,0,0,100), stroke_width=1)
```
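The confidence label drops its decimals; for example:

```python
confidence = 0.8712
print("{:.2f}%".format(confidence * 100))  # old label: 87.12%
print("{:.0f}%".format(confidence * 100))  # new label: 87%
```

The next hunk adds the early-out for unconfident detections: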
```diff
@@ -87,6 +88,9 @@ class Result():
             alpha = 1
             draw.rectangle((detection['startX']-1, detection['startY']-1, detection['endX']+1, detection['endY']+1), outline=(0,0,0,100), width=1)
             draw.rectangle((detection['startX']+width, detection['startY']+width, detection['endX']-width, detection['endY']-width), outline=(0,0,0,100), width=1)
+        elif onlyIfConfident:
+            # only detections above the threshold should be drawn, so skip this one
+            return
         else:
             width = int(detection['confidence'] * 10 * 8)
             # At least 10% opacity
```
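With the new `elif`, `onlyIfConfident=True` turns the draw call into a filter: detections at or below `confidence_threshold` are skipped entirely instead of being rendered as faint boxes. A hypothetical calling fragment (canvas setup elided; `results` holds `Result` instances as above):

```python
# Hypothetical usage: render only the confident detections of each result
# onto a PIL ImageDraw canvas; low-confidence ones are silently dropped.
for result in results:
    if result is None:
        continue
    result.draw_detections_on(draw, coloured=True, onlyIfConfident=True)
```

In `record()`, the hard exit on camera failure becomes explicit: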
```diff
@@ -148,7 +152,7 @@ def record(device_id, q1,q2, q3, q4, resolution, rotate):
     ret, image = capture.read()
     if image is None:
         logging.critical("Error with camera?")
-        exit()
+        sys.exit()
 
 
     if rotate is not None:
```
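This pairs with the `import sys` added at the top of the file: the builtin `exit()` is installed by the `site` module for interactive use and can be missing in embedded or frozen (PyInstaller) interpreters, while `sys.exit()` always exists and raises a clean `SystemExit`. A small illustration (the helper name is hypothetical):

```python
import sys

def ensure_frame(frame):
    # sys.exit() raises SystemExit; passing a string prints it to stderr
    # and exits with status 1, which suits a fatal camera error.
    if frame is None:
        sys.exit("Error with camera?")
```

In `process3_haar()`, the per-frame timing print is silenced: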
```diff
@@ -434,7 +438,7 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
     start = time.time()
     C.scan_image(haar, width, height, buffer2, buffer, buffer_len, 5, False)
     logger.info(f"Visualised scan into buffer: {buffer}")
-    print(f"duration: {time.time() - start}s")
+    # print(f"duration: {time.time() - start}s")
 
     img = Image.frombuffer(pixel_format, (width, height), ffi.buffer(buffer),
                            "raw", pixel_format, 0, 1)
```
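For reference, `Image.frombuffer` wraps the pixel buffer the Rust library filled without copying it pixel by pixel; the trailing `"raw", pixel_format, 0, 1` arguments are the raw decoder's mode, stride (0 = tightly packed rows) and orientation (1 = top-to-bottom). A self-contained sketch of the same call:

```python
from PIL import Image

# Stand-in for the FFI-filled buffer: one tightly packed RGB frame.
width, height = 4, 2
pixel_format = "RGB"
buf = bytes(width * height * 3)

img = Image.frombuffer(pixel_format, (width, height), buf,
                       "raw", pixel_format, 0, 1)
print(img.size)  # (4, 2)
```

The `draw_stats` helpers then grow the `drawDetections` flag: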
```diff
@@ -462,13 +466,13 @@ def process3_haar(in_q, out_q, cascade_file, library_filename = None):
     # print(img)
     out_q.put(result)
 
-def draw_stats(image, results, padding, coloured=False):
+def draw_stats(image, results, padding, coloured=False, drawDetections=False):
     pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
     draw = ImageDraw.Draw(pil_im, 'RGBA')
-    draw_stats_on_canvas(draw, results, padding, coloured)
+    draw_stats_on_canvas(draw, results, padding, coloured, drawDetections)
     return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
-def draw_stats_on_canvas(draw, results, padding, coloured=False):
+def draw_stats_on_canvas(draw, results, padding, coloured=False, drawDetections=False):
    for i, result in enumerate(results):
        if result is None:
            continue
```
```diff
@@ -478,8 +482,10 @@ def draw_stats_on_canvas(draw, results, padding, coloured=False):
        txt = f"{result.algorithm.ljust(5)} {c} {txt}"
        height = padding + 25
        colour = draw_colors[result.algorithm] if coloured else (255,255,255)
-       draw.text((padding, draw.im.size[1] - i*height - height), txt, fill=colour, font=font_s, stroke_width=1, stroke_fill=(0,0,0))
+       draw.text((padding, draw.im.size[1] - (i+1)*height - padding), txt, fill=colour, font=font, stroke_width=2, stroke_fill=(0,0,0))
 
+       if drawDetections:
+           result.draw_detections_on(draw, coloured, onlyIfConfident=True)
 
 
 def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
```
```diff
@@ -566,7 +572,9 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
     grid_img.shape[1] - padding - preview_width:grid_img.shape[1] - padding] = cv2.resize(image, (preview_width, preview_height), cv2.INTER_CUBIC)
 
     # statistics
-    grid_img = draw_stats(grid_img, results, padding)
+    # for the plain webcam image (no viz), draw all detected faces
+    drawDetections = (selectPreview.imageIdx == 0)
+    grid_img = draw_stats(grid_img, results, padding, coloured=True, drawDetections=drawDetections)
     pil_im = Image.fromarray(cv2.cvtColor(grid_img, cv2.COLOR_BGR2RGB))
     draw = ImageDraw.Draw(pil_im, 'RGBA')
 
```
```diff
@@ -585,19 +593,30 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
 
         # Hit 'q' on the keyboard to quit!
         key = cv2.waitKey(1) & 0xFF
-        if key == ord('q'):
+        if key == ord('q') or key == 27:  # key 27: escape
             break
```
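`cv2.waitKey` returns -1 when no key is pressed, and the `& 0xFF` mask reduces the platform-dependent key code to one byte so it can be compared with `ord()`. Escape has no printable character, hence the literal 27:

```python
import cv2

key = cv2.waitKey(1) & 0xFF   # -1 (no key) masks to 255, which matches neither test below
if key == ord('q') or key == 27:   # 27 is the ASCII code of the escape key
    print("quit requested")
```

The same hunk continues with the space-key handler: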
```diff
-        if key == ord(' ') and not override_image:
+        # TODO: the truth value of an array with more than one element is ambiguous, use a.any() or a.all() (i.e. test override_image is None)
+        if key == ord(' ') and override_image is None:
             countdown_until = time.time() + 3 # seconds of countdown
 
```
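The rewrite matters because `override_image` holds a numpy array once a snapshot is on screen, and arrays refuse boolean conversion:

```python
import numpy as np

override_image = np.zeros((4, 4, 3), dtype=np.uint8)

# "if not override_image:" raises:
#   ValueError: The truth value of an array with more than one element
#   is ambiguous. Use a.any() or a.all()
if override_image is None:
    print("no snapshot pending, countdown may start")
else:
    print("a snapshot is currently displayed")
```

The hunk then continues with the snapshot-saving branch: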
```diff
+        # SNAP! SAVE FRAMES
         if countdown_until is not None and time.time() > countdown_until:
             countdown_until = None
             # TODO wait for frame to be processed. Eg. if I move and make a pic, it should use the last frame...
-            # SNAP!
             # output_res = (image_res[0] *2, image_res[1] * 2)
             output_res = image_res # no scaling needed anymore
             pil_im = Image.fromarray(cv2.cvtColor(cv2.flip(images[0],1), cv2.COLOR_BGR2RGB))
             pil_im = pil_im.resize(output_res)
 
+            # base name for all images
+            name = datetime.datetime.now().isoformat(timespec='seconds').replace(':','-')
+
+            # filename of clean frame
+            filename = os.path.join(output_dir, f'{name}-frame.jpg')
+            pil_im.save(filename)
+
+            # now draw all results to the main image
             draw = ImageDraw.Draw(pil_im, 'RGBA')
 
             for result in results:
```
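The `isoformat(timespec='seconds').replace(':','-')` base name is shared by the clean frame and every per-algorithm image, and the `replace` keeps it valid on Windows, where `:` is forbidden in filenames:

```python
import datetime

name = datetime.datetime.now().isoformat(timespec='seconds').replace(':', '-')
print(name)                  # e.g. 2021-03-29T14-07-02
print(f'{name}-frame.jpg')   # the clean snapshot
print(f'{name}-all.png')     # the composite with all detections drawn
```

The final mirror.py hunk reuses that base name for the composite image: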
```diff
@@ -613,13 +632,13 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             logger.info("Show frame until %f", override_until)
 
             # save images:
-            name = datetime.datetime.now().isoformat(timespec='seconds').replace(':','-')
-            filename = os.path.join(output_dir, f'{name}.png')
+            filename = os.path.join(output_dir, f'{name}-all.png')
             print(f"Save to {filename}")
             r = cv2.imwrite(filename, override_image)
             if not r:
                 raise RuntimeError(f"Could not save image {filename}")
 
+            # finally, store each visualisation with the results
             for result in results:
                 result_img = result.draw_detections(include_title = True)
                 filename = os.path.join(output_dir, f'{name}-{result.algorithm}.png')
```
visualhaar (submodule)

```diff
@@ -1 +1 @@
-Subproject commit a6ac50c3b3b7ba43cc4c18e40e9f004f01027328
+Subproject commit 1319e644b1f59debe46be866d18209d2a6089e1b
```