changes for windows compatibility

parent e21376465f
commit a7ea09fb52

3 changed files with 87 additions and 57 deletions
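The gist of the Windows fix in the diff below is that the haarcascade visualisation library is no longer loaded from one hard-coded Linux `.so` path; the code now probes for whichever build (`.so` or `.dll`) is present next to the repository. A minimal sketch of that pattern, not taken from the commit — the `load_visualhaar` helper and its `lib_dir` argument are illustrative:

```python
import os
from cffi import FFI

ffi = FFI()

def load_visualhaar(lib_dir):
    # Probe for the platform-specific build of the shared library and
    # open whichever one is actually present.
    candidates = [
        os.path.join(lib_dir, "libvisual_haarcascades_lib.so"),  # Linux build
        os.path.join(lib_dir, "visual_haarcascades_lib.dll"),    # Windows build
    ]
    for path in candidates:
        if os.path.exists(path):
            return ffi.dlopen(path)
    raise RuntimeError("Visual haarcascades library is not found")
```

Probing by file existence keeps a single code path for both platforms and fails early with a clear error when neither build is available.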
@@ -10,12 +10,21 @@ from PIL import ImageFont, ImageDraw, Image
 import os
 
 draw_colors = {
-    'hog': (255,0,0),
-    'haar': (0,255,0),
-    'dnn': (0,0,255),
+    'hog': (198,65,124),
+    'haar': (255,255,255),
+    'dnn': (251,212,36),
 }
 
-font = ImageFont.truetype("/home/ruben/Documents/Projecten/2018/PATH/presentation/lib/font/source-sans-pro/source-sans-pro-regular.ttf", 30)
+titles = {
+    'hog' : "Histogram of oriented gradients",
+    'haar' : "Haar cascades",
+    'dnn' : "Neural network",
+}
+
+fontfile = "SourceSansPro-Regular.ttf"
+
+font = ImageFont.truetype(fontfile, 30)
+font_s = ImageFont.truetype(fontfile, 20)
 
 class Result():
     def __init__(self, algorithm, image, confidence_threshold = 0.5):
@@ -34,13 +43,17 @@ class Result():
         })
         return self
 
-    def draw_detections(self):
+    def draw_detections(self, include_title = False):
         cv2_im_rgb = cv2.cvtColor(self.visualisation,cv2.COLOR_BGR2RGB)
         # Pass the image to PIL
         pil_im = Image.fromarray(cv2_im_rgb)
         draw = ImageDraw.Draw(pil_im, 'RGBA')
 
         self.draw_detections_on(draw)
+
+        if include_title:
+            draw.text((10,10), titles[self.algorithm], fill=draw_colors[self.algorithm], font=font)
+
         return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
 
     def draw_detections_on(self, draw: ImageDraw):
@@ -94,6 +107,9 @@ class Result():
         )
         return result
 
+    def count_detections(self):
+        detections = [d for d in self.detections if d['confidence'] > self.confidence_threshold]
+        return len(detections)
 
 
 def record(device_id, q1,q2, q3, q4, resolution, rotate):
@@ -324,7 +340,17 @@ def process3_haar(in_q, out_q, cascade_file):
     """)
 
     dir_path = os.path.dirname(os.path.realpath(__file__))
-    C = ffi.dlopen(os.path.join(dir_path,"../visualhaar/target/debug/libvisual_haarcascades_lib.so"))
+
+    lib_path = os.path.join(dir_path, "..", "visualhaar", "target", "debug")
+    so_path = os.path.join(lib_path, "libvisual_haarcascades_lib.so")
+    dll_path = os.path.join(lib_path, "visual_haarcascades_lib.dll")
+
+    if os.path.exists(so_path):
+        C = ffi.dlopen(so_path)
+    elif os.path.exists(dll_path):
+        C = ffi.dlopen(dll_path)
+    else:
+        raise RuntimeException("Visual haarcascades library is not found")
 
     # print(C.test(9))
     # i = Image.open("Marjo.jpg")
@@ -400,62 +426,65 @@ def process3_haar(in_q, out_q, cascade_file):
         # print(img)
         out_q.put(result)
 
+def draw_stats(image, results):
+    pil_im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+    draw = ImageDraw.Draw(pil_im, 'RGBA')
+
+    for i, result in enumerate(results):
+        if result is None:
+            continue
+
+        c = result.count_detections()
+        txt = "face" if c == 1 else "faces"
+        txt = f"{result.algorithm.ljust(5)} {c} {txt}"
+        draw.text((10, pil_im.size[1] - i*25 - 50), txt, fill=draw_colors[result.algorithm], font=font_s, stroke_width=1, stroke_fill=(0,0,0))
+
+    return cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
+
+
 def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
     logger = logging.getLogger('display')
 
     empty_image = np.zeros((image_res[1],image_res[0],3), np.uint8)
-    prev_image1 = None
-    prev_result2 = None
-    prev_result3 = None
-    prev_result4 = None
 
+    results = [None, None, None]
+    result_queues = [q2, q3, q4]
+    images = [empty_image, empty_image, empty_image, empty_image]
+
+    override_image = None
+    override_until = None
+
     if fullscreen:
         cv2.namedWindow("output", cv2.WND_PROP_FULLSCREEN)
         cv2.setWindowProperty("output",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
 
-    override_image = None
-    override_until = None
-
     while True:
         logging.debug('r')
         try:
-            image1 = q1.get_nowait()
-            image1 = cv2.resize(image1, (image_res[0], image_res[1]))
-            prev_image1 = image1
+            image = q1.get_nowait()
+            images[0] = cv2.resize(image, (image_res[0], image_res[1]))
         except Empty as e:
-            image1 = prev_image1 if prev_image1 is not None else empty_image
-        try:
-            result2 = q2.get_nowait()
-            result2 = result2.resize(image_res[0], image_res[1])
-            prev_result2 = result2
-        except Empty as e:
-            result2 = prev_result2
-        finally:
-            image2 = result2.draw_detections() if result2 is not None else empty_image
-        try:
-            result3 = q3.get_nowait()
-            result3 = result3.resize(image_res[0], image_res[1])
-            prev_result3 = result3
-        except Empty as e:
-            result3 = prev_result3
-        finally:
-            image3 = result3.draw_detections() if result3 is not None else empty_image
-        try:
-            result4 = q4.get_nowait()
-            result4 = result4.resize(image_res[0], image_res[1])
-            prev_result4 = result4
-        except Empty as e:
-            result4 = prev_result4
-        finally:
-            image4 = result4.draw_detections() if result4 is not None else empty_image
+            pass
 
+        for idx, queue in enumerate(result_queues):
+            try:
+                result = queue.get_nowait()
+                results[idx] = result.resize(image_res[0], image_res[1])
+                images[idx+1] = results[idx].draw_detections(include_title=True)
+            except Empty as e:
+                pass
+            finally:
+                pass
+
         if override_image is not None and override_until > time.time():
            cv2.imshow("output", override_image)
         else:
            override_image = None
 
-            img_concate_Verti1 = np.concatenate((image1,image2),axis=0)
-            img_concate_Verti2 = np.concatenate((image3,image4),axis=0)
+            images[0] = draw_stats(images[0], results)
+
+            img_concate_Verti1 = np.concatenate((images[0],images[1]),axis=0)
+            img_concate_Verti2 = np.concatenate((images[2],images[3]),axis=0)
             grid_img = np.concatenate((img_concate_Verti1,img_concate_Verti2),axis=1)
             cv2.imshow("output", grid_img)
 
@@ -466,17 +495,15 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
         if key == ord(' '):
             # TODO wait for frame to be processed. Eg. if I move and make a pic, it should use the last frame...
             output_res = (image_res[0] *2, image_res[1] * 2)
-            pil_im = Image.fromarray(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
+            pil_im = Image.fromarray(cv2.cvtColor(images[0], cv2.COLOR_BGR2RGB))
             pil_im = pil_im.resize(output_res)
             draw = ImageDraw.Draw(pil_im, 'RGBA')
 
-            if result2 is not None:
-                result2.resize(output_res[0], output_res[1]).draw_detections_on(draw)
-            if result3 is not None:
-                result3.resize(output_res[0], output_res[1]).draw_detections_on(draw)
-            if result4 is not None:
-                result4.resize(output_res[0], output_res[1]).draw_detections_on(draw)
+            for result in results:
+                if result is None:
+                    continue
 
+                result.resize(output_res[0], output_res[1]).draw_detections_on(draw)
 
             override_image = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
             override_until = time.time() + 5
@@ -485,9 +512,9 @@ def display(image_res, q1, q2, q3, q4, fullscreen, output_dir):
             # save images:
             name = datetime.datetime.now().isoformat(timespec='seconds')
             cv2.imwrite(os.path.join(output_dir, f'{name}.png'),override_image)
-            cv2.imwrite(os.path.join(output_dir, f'{name}-hog.png'),result2.visualisation)
-            cv2.imwrite(os.path.join(output_dir, f'{name}-dnn.png'),result3.visualisation)
-            cv2.imwrite(os.path.join(output_dir, f'{name}-haar.png'),result4.visualisation)
+            for result in results:
+                cv2.imwrite(os.path.join(output_dir, f'{name}-{result.algorithm}.png'),result.visualisation)
+
 
 def main(camera_id, rotate, fullscreen, cascade_file, output_dir):
     image_size = (int(1920/2), int(1080/2))
@@ -2,3 +2,6 @@ scipy
 numpy
 dlib
 Pillow
+opencv-python
+cffi
+scikit-image
@@ -1 +1 @@
-Subproject commit 7de5440484842c147944ae123fa689333846dde7
+Subproject commit 928da82d24de1ae2cef268c140f9992b0614806b