Styling and saves now flip back
commit 2af58fc170
parent 357d481b97
3 changed files with 79 additions and 47 deletions
README.md (13 changes)
@@ -11,21 +11,24 @@ A `mirror` which shows which faces are detected through three different facial d
 The installation in Windows can be done, though it is quite elaborate:
 
-* Install rustup-init
-* Install VS C++
 * Install python3
+* Install VS C++
 * Install Cmake (needed for python dlib)
   + make sure to add it to path
 * Install git
   + including ssh deploy key
 * `git clone https://git.rubenvandeven.com/r/face_detector`
 * `cd face_recognition`
-* `git submodules init`
-* `git submodules update`
 * `pip install virtualenv`
 * `virtualenv.exe venv`
 * `.\venv\Scripts\activate`
 * `cd .\dnn\face_detector`
 * `python.exe .\download_weights.py`
 * `cd .\visualhaar`
-* `cargo build --lib --release`
+* Either one of:
+  + Compile rust library
+    * Install rustup-init
+    * `git submodules init`
+    * `git submodules update`
+    * `cargo build --lib --release`
+  + Download dll from https://git.rubenvandeven.com/r/visualhaar/releases
 
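Either route yields the shared library that the Python process loads through cffi. The `--visualhaar-lib` option added in this commit (see the argparse hunk below) lets the program point at a downloaded copy instead of the default `visualhaar/target/release` build directory. The remaining hunks touch the Python side: the comparison module and the command-line entry point.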
@@ -11,9 +11,9 @@ from PIL import ImageFont, ImageDraw, Image
 import os
 
 draw_colors = {
-    'hog': (255,255,255), #(198,65,124),
+    'hog': (198,65,124),
     'haar': (255,255,255),
-    'dnn': (255,255,255) #(251,212,36),
+    'dnn': (251,212,36),
 }
 
 titles = {
@@ -45,42 +45,47 @@ class Result():
         })
         return self
 
-    def draw_detections(self, include_title = False):
+    def draw_detections(self, include_title = False, coloured=False):
         cv2_im_rgb = cv2.cvtColor(self.visualisation,cv2.COLOR_BGR2RGB)
         # Pass the image to PIL
         pil_im = Image.fromarray(cv2_im_rgb)
         draw = ImageDraw.Draw(pil_im, 'RGBA')
 
-        self.draw_detections_on(draw)
+        self.draw_detections_on(draw, coloured)
 
         if include_title:
-            draw.text((10,10), titles[self.algorithm], fill=draw_colors[self.algorithm], font=font)
+            color = draw_colors[self.algorithm] if coloured else (255,255,255)
+            draw.text((10,10), titles[self.algorithm], fill=color, font=font, stroke_width=1, stroke_fill=(0,0,0,100))
 
         return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
-    def draw_detections_on(self, draw: ImageDraw):
+    def draw_detections_on(self, draw: ImageDraw, coloured=False):
         '''
         Draw on a specified canvas
         '''
-        color = draw_colors[self.algorithm]
+        color = draw_colors[self.algorithm] if coloured else (255,255,255)
         for detection in self.detections:
             self.draw_detection(draw, detection, color)
 
     def draw_detection(self, draw: ImageDraw, detection: dict, color: tuple):
-        width = 2
 
         if detection['confidence'] > self.confidence_threshold:
+            width = 8
             # draw the bounding box of the face along with the associated
             # probability
             text = "{:.2f}%".format(detection['confidence'] * 100)
             y = detection['startY'] - 40 if detection['startY'] - 40 > 10 else detection['startY'] + 10
 
-            draw.text((detection['startX'], y), text, font=font, fill=color)
+            draw.text((detection['startX'], y), text, font=font, fill=color, stroke_fill=(0,0,0,100), stroke_width=1)
             # cv2.putText(self.visualisation, text, (detection['startX'], y),
             # cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2, lineType = cv2.LINE_AA)
 
             alpha = 1
+            draw.rectangle((detection['startX']-1, detection['startY']-1, detection['endX']+1, detection['endY']+1), outline=(0,0,0,100), width=1)
+            draw.rectangle((detection['startX']+width, detection['startY']+width, detection['endX']-width, detection['endY']-width), outline=(0,0,0,100), width=1)
         else:
+            width = int(detection['confidence'] * 10 * 8)
+
             # At least 10% opacity
             alpha = max(.2, detection['confidence'])
 
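The detection labels and titles now get a thin dark outline via Pillow's `stroke_width`/`stroke_fill` arguments to `ImageDraw.text` (available in recent Pillow releases, 6.2+). A minimal, self-contained sketch of the effect; the font path, sizes and colours here are illustrative, not the project's:

from PIL import Image, ImageDraw, ImageFont

# Sketch: outlined text on an RGBA drawing context, as used for the
# detection labels. Any TrueType font available on your system will do.
im = Image.new('RGB', (320, 80), (30, 30, 30))
draw = ImageDraw.Draw(im, 'RGBA')
font = ImageFont.truetype("DejaVuSans.ttf", 28)  # illustrative font path
draw.text((10, 10), "99.12%", font=font, fill=(251, 212, 36),
          stroke_width=1, stroke_fill=(0, 0, 0, 100))  # 1px dark outline
im.save('label_preview.png')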
@@ -90,23 +95,35 @@ class Result():
 
         draw.rectangle((detection['startX'], detection['startY'], detection['endX'], detection['endY']), outline=color, width=width)
 
-    def resize(self, width, height):
+    def resize(self, width, height, flip=False):
         # TODO resize to new target incl all detections
         img = self.visualisation
         factor_x = width / self.visualisation.shape[1]
         factor_y = height / self.visualisation.shape[0]
         inter = cv2.INTER_NEAREST if self.algorithm in ['dnn', 'haar'] else cv2.INTER_CUBIC
         img = cv2.resize(img, (width, height), interpolation=inter)
 
+        if flip:
+            img = cv2.flip(img, 1)
+
         result = Result(self.algorithm, img, self.confidence_threshold)
         for d in self.detections:
-            result.add_detection(
-                int(d['startX'] * factor_x),
-                int(d['startY'] * factor_y),
-                int(d['endX'] * factor_x),
-                int(d['endY'] * factor_y),
-                d['confidence']
-            )
+            if flip:
+                result.add_detection(
+                    int(width - d['endX'] * factor_x),
+                    int(d['startY'] * factor_y),
+                    int(width - d['startX'] * factor_x),
+                    int(d['endY'] * factor_y),
+                    d['confidence']
+                )
+            else:
+                result.add_detection(
+                    int(d['startX'] * factor_x),
+                    int(d['startY'] * factor_y),
+                    int(d['endX'] * factor_x),
+                    int(d['endY'] * factor_y),
+                    d['confidence']
+                )
         return result
 
     def count_detections(self):
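The coordinate handling in the `flip` branch is worth spelling out: mirroring horizontally maps x to `width - x`, which also swaps which edge of the box is the left one, so the old `endX` becomes the new `startX` and vice versa. A quick standalone check (the numbers are made up):

# Horizontal mirror: x -> width - x, so startX/endX swap roles.
width, factor_x = 100, 1.0            # illustrative target width, no scaling
d = {'startX': 20, 'endX': 50}

startX = int(width - d['endX'] * factor_x)    # 100 - 50 = 50
endX = int(width - d['startX'] * factor_x)    # 100 - 20 = 80
assert startX < endX                          # box stays left-to-right ordered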
@@ -330,7 +347,7 @@ def process2_dnn(in_q, out_q):
 
     out_q.put(result)
 
-def process3_haar(in_q, out_q, cascade_file):
+def process3_haar(in_q, out_q, cascade_file, library_filename = None):
     from cffi import FFI
     from PIL import Image
     import cv2
@@ -347,18 +364,22 @@ def process3_haar(in_q, out_q, cascade_file):
     void scan_image(haarclassifier, size_t width,size_t height, char *input, char *buffer, size_t length, size_t min_face_factor, bool debug);
     """)
 
-    dir_path = os.path.dirname(os.path.realpath(__file__))
-
-    lib_path = os.path.join(dir_path, "..", "visualhaar", "target", "release")
-    so_path = os.path.join(lib_path, "libvisual_haarcascades_lib.so")
-    dll_path = os.path.join(lib_path, "visual_haarcascades_lib.dll")
-
-    if os.path.exists(so_path):
-        C = ffi.dlopen(so_path)
-    elif os.path.exists(dll_path):
-        C = ffi.dlopen(dll_path)
+    if library_filename is not None:
+        C = ffi.dlopen(library_filename)
+
     else:
-        raise RuntimeException("Visual haarcascades library is not found")
+        dir_path = os.path.dirname(os.path.realpath(__file__))
+
+        lib_path = os.path.join(dir_path, "..", "visualhaar", "target", "release")
+        so_path = os.path.join(lib_path, "libvisual_haarcascades_lib.so")
+        dll_path = os.path.join(lib_path, "visual_haarcascades_lib.dll")
+
+        if os.path.exists(so_path):
+            C = ffi.dlopen(so_path)
+        elif os.path.exists(dll_path):
+            C = ffi.dlopen(dll_path)
+        else:
+            raise RuntimeError("Visual haarcascades library is not found")
 
     # print(C.test(9))
     # i = Image.open("Marjo.jpg")
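Library loading now prefers an explicit path over the old search of the cargo build directory, and the bogus `RuntimeException` is corrected to Python's built-in `RuntimeError` along the way. For reference, the cffi pattern in isolation; the declaration and path are illustrative fragments, not the library's full interface:

from cffi import FFI

ffi = FFI()
ffi.cdef("int test(int);")  # illustrative fragment of the C interface

try:
    # dlopen() takes any absolute or relative path to a .so/.dll
    C = ffi.dlopen("visualhaar/target/release/libvisual_haarcascades_lib.so")
except OSError:
    # cffi raises OSError when the shared object cannot be found or loaded
    raise RuntimeError("Visual haarcascades library is not found")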
@@ -434,10 +455,13 @@ def process3_haar(in_q, out_q, cascade_file):
     # print(img)
     out_q.put(result)
 
-def draw_stats(image, results, padding):
+def draw_stats(image, results, padding, coloured=False):
     pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
     draw = ImageDraw.Draw(pil_im, 'RGBA')
+    draw_stats_on_canvas(draw, results, padding, coloured)
+    return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
+def draw_stats_on_canvas(draw, results, padding, coloured=False):
     for i, result in enumerate(results):
         if result is None:
             continue
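The stats renderer is split in two: `draw_stats` keeps its image-in/image-out signature for existing callers, while the new `draw_stats_on_canvas` draws onto an `ImageDraw` canvas the caller already holds, letting `display` stamp the stats straight onto the snapshot without an extra OpenCV/PIL round-trip.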
@@ -446,9 +470,9 @@ def draw_stats(image, results, padding):
         txt = "face" if c == 1 else "faces"
         txt = f"{result.algorithm.ljust(5)} {c} {txt}"
         height = padding + 25
-        draw.text((padding, pil_im.size[1] - i*height - height), txt, fill=draw_colors[result.algorithm], font=font_s, stroke_width=1, stroke_fill=(0,0,0))
+        colour = draw_colors[result.algorithm] if coloured else (255,255,255)
+        draw.text((padding, draw.im.size[1] - i*height - height), txt, fill=colour, font=font_s, stroke_width=1, stroke_fill=(0,0,0))
 
-    return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
 
 def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
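Two knock-on fixes in this hunk: `pil_im` is no longer in scope inside the extracted helper, so the canvas height is read from `draw.im.size[1]` instead, and the colour conversion that used to end this function moved up into `draw_stats`.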
@@ -543,7 +567,7 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
         if countdown_until:
             duration = math.ceil(countdown_until - time.time())
             w, h = draw.textsize(f"{duration}", font=countdown_font)
-            draw.text(((grid_img.shape[1]-w)/2,(grid_img.shape[0]-h)/2), f"{duration}", fill="white", stroke="black", font=countdown_font)
+            draw.text(((grid_img.shape[1]-w)/2,(grid_img.shape[0]-h)/2), f"{duration}", fill="white", stroke="black", font=countdown_font, stroke_width=1, stroke_fill=(0,0,0,100))
 
         grid_img = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
@@ -565,7 +589,7 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             # SNAP!
             # output_res = (image_res[0] *2, image_res[1] * 2)
             output_res = image_res # no scaling needed anyore
-            pil_im = Image.fromarray(cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB))
+            pil_im = Image.fromarray(cv2.cvtColor(cv2.flip(images[0],1), cv2.COLOR_BGR2RGB))
             pil_im = pil_im.resize(output_res)
             draw = ImageDraw.Draw(pil_im, 'RGBA')
 
@@ -573,7 +597,9 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
                 if result is None:
                     continue
 
-                result.resize(output_res[0], output_res[1]).draw_detections_on(draw)
+                result.resize(output_res[0], output_res[1], flip=True).draw_detections_on(draw, coloured=True)
 
+            draw_stats_on_canvas(draw, results, padding, coloured=True)
+
             override_image = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
             override_until = time.time() + 5
@@ -583,12 +609,13 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             name = datetime.datetime.now().isoformat(timespec='seconds')
             cv2.imwrite(os.path.join(output_dir, f'{name}.png'), override_image)
             for result in results:
-                cv2.imwrite(os.path.join(output_dir, f'{name}-{result.algorithm}.png'), result.visualisation)
+                result_img = result.draw_detections(include_title = True)
+                cv2.imwrite(os.path.join(output_dir, f'{name}-{result.algorithm}.png'), result_img)
 
 
 
 
-def main(camera_id, rotate, fullscreen, cascade_file, output_dir):
+def main(camera_id, rotate, fullscreen, cascade_file, output_dir, visualhaar_lib = None):
     image_size = (1920, 1080) #(int(1920/2), int(1080/2))
 
     if not os.path.exists(cascade_file):
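The per-algorithm snapshots are now passed through `draw_detections(include_title = True)` before saving, so the stored PNGs carry the bounding boxes and algorithm title instead of the bare `visualisation` buffer.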
@@ -616,7 +643,7 @@ def main(camera_id, rotate, fullscreen, cascade_file, output_dir):
     p2 = Process(target=display, args=(image_size, q_webcam1, q_process1, q_process2, q_process3, fullscreen, output_dir ))
     p3 = Process(target=process1_hog, args=(q_webcam2, q_process1,))
     p4 = Process(target=process2_dnn, args=(q_webcam3, q_process2,))
-    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,cascade_file))
+    p5 = Process(target=process3_haar, args=(q_webcam4, q_process3,cascade_file, visualhaar_lib))
 
     p1.start()
     p2.start()
@@ -15,8 +15,10 @@ if __name__ == '__main__':
         help='Rotate counter clockwise')
     parser.add_argument('--cascade', default='haarcascade_frontalface_alt2.xml',
         help='Cascade XML file to use (opencv format)')
-    parser.add_argument('--output', default='saves',
+    parser.add_argument('--output', metavar="DIRECTORY", default='saves',
        help='Directory to store images (after pressing spacebar)')
+    parser.add_argument('--visualhaar-lib', metavar="LIBRARY", default=None,
+        help='path/filename for visualhaar library (.so on linux, .dll on windows)\nSee: https://git.rubenvandeven.com/r/visualhaar/releases')
 
     args = parser.parse_args()
 
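With this flag, a prebuilt library from the releases page can be used directly, e.g. `--visualhaar-lib C:\Downloads\visual_haarcascades_lib.dll` (path illustrative); when the flag is omitted, the loader falls back to the cargo build directory as before.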
@@ -26,4 +28,4 @@ if __name__ == '__main__':
     if args.counter_clockwise:
         rotate = cv2.ROTATE_90_COUNTERCLOCKWISE
 
-    face_recognition.comparison.main(args.camera, rotate, args.fullscreen, args.cascade, args.output)
+    face_recognition.comparison.main(args.camera, rotate, args.fullscreen, args.cascade, args.output, args.visualhaar_lib)