# Live visualisation of various facial recognition algorithms.
from cffi import FFI
from PIL import Image
import numpy as np
import cv2
import time

# --- FFI setup -------------------------------------------------------------
# Declarations matching the Rust cdylib's exported C ABI.
ffi = FFI()
ffi.cdef("""
int test(int);
typedef void* haarclassifier;
haarclassifier classifier_new(char *filename);
void scan_image(haarclassifier, size_t width,size_t height, char *input, char *buffer, size_t length, bool debug);
""")

# Shared library built from the Rust crate in ./visualhaar (debug profile).
C = ffi.dlopen("visualhaar/target/debug/libvisual_haarcascades_lib.so")
print(C.test(9))  # sanity check: the library loaded and is callable

# --- Camera ----------------------------------------------------------------
capture = cv2.VideoCapture(2)
print("Buffer", capture.get(cv2.CAP_PROP_BUFFERSIZE))
# Keep the driver-side buffer minimal so each iteration sees a fresh frame.
capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)

# --- Classifier ------------------------------------------------------------
# Build the classifier ONCE, outside the loop: parsing the cascade XML per
# frame dominated each iteration's runtime in the original code.
filename = "/home/ruben/Documents/Projecten/2020/rust/testproject/haarcascade_frontalface_alt2.xml".encode('ascii')
fn = ffi.new("char[]", filename)
print("Initialise...")
haar = C.classifier_new(fn)

pixel_format = "RGB"  # The scanner only supports this one format.
bytes_per_pixel = 3

# --- Main loop: grab, scan, display ----------------------------------------
while True:
    ret, frame = capture.read()
    if not ret:
        # Grab failed (camera unplugged / busy); frame would be None.
        break
    print(frame.shape)

    # Downscale 4x — the scan cost is proportional to the pixel count and
    # this keeps the visualisation interactive.
    width = frame.shape[1] // 4
    height = frame.shape[0] // 4
    image = cv2.resize(frame, (width, height))

    buffer_len = width * height * bytes_per_pixel
    out_buffer = ffi.new("char[]", buffer_len)       # filled by the Rust side
    in_buffer = ffi.from_buffer("char[]", image.tobytes())

    print('scan!')
    start = time.time()
    C.scan_image(haar, width, height, in_buffer, out_buffer, buffer_len, True)
    print(f"scanned in {time.time() - start}s", out_buffer)

    # Wrap the raw output buffer as an image without an extra copy step.
    img = Image.frombuffer(pixel_format, (width, height), ffi.buffer(out_buffer),
                           "raw", pixel_format, 0, 1)
    result = np.array(img)
    # The library emits RGB; cv2.imshow expects BGR, so reverse the channels.
    result = result[:, :, ::-1]

    # Stack camera frame above the classifier visualisation.
    img_concate_Verti1 = np.concatenate((image, result), axis=0)
    cv2.imshow("image", img_concate_Verti1)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()