# mvp1/scan_faces.py

import io
import picamera
import cv2
import numpy as np
import datetime
import time
import Adafruit_CharLCD as LCD
# Init camera
camera = picamera.PiCamera()
camera.resolution = (1280, 720)
# Init classifier, various options:
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml (~22sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml (>25sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml (~21sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml (~14sec, seems to miss many faces)
classifier = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml')
# Set FPS (though the RPi is probably too slow to meet it ;-)
FPS = 1.0
frameTimeDelta = datetime.timedelta(seconds=1.0/FPS)
# Init LCD
lcd_rs = 27
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
                           lcd_columns, lcd_rows, lcd_backlight)
prevFaceCount = 0
totalUse = 0
while True:
print(time.time(),"GO")
start = datetime.datetime.utcnow()
stream = io.BytesIO()
camera.capture(stream, format='jpeg')
print(time.time(),"captured")
buff = np.fromstring(stream.getvalue(), dtype=np.uint8)
image = cv2.imdecode(buff,1)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print(time.time(),"grayed")
faces = classifier.detectMultiScale(gray, 1.2, 5, minSize=(30,20))
# TODO: time passed since last * prevFaceCount = usage
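    # One possible reading of the TODO above, left commented out so the
    # script's behaviour is unchanged: accumulate usage as person-seconds,
    # i.e. the faces seen in the previous frame multiplied by the time that
    # has passed since that frame. lastFrameTime and personSeconds are
    # illustrative names, not part of the original script.
    #~ now = datetime.datetime.utcnow()
    #~ personSeconds += prevFaceCount * (now - lastFrameTime).total_seconds()
    #~ lastFrameTime = now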
print(time.time(),"Found {} faces".format(len(faces)))
lcd.clear()
lcd.message("Current {:>8}\nTotal {:>10}".format(len(faces), totalUse))
if len(faces) < 1 and prevFaceCount < 1:
lcd.set_backlight(0)
else:
lcd.set_backlight(1)
end = datetime.datetime.utcnow()
prevFaceCount = len(faces)
totalUse += len(faces)
if end - start < frameTimeDelta:
waitTime = frameTimeDelta - (end-start)
print("wait {}".waitTime.total_seconds())
time.sleep(waitTime.total_seconds())