# mvp1/scan_faces.py — face scanner (Python, executable script, ~120 lines, 3.3 KiB)
#!/usr/bin/python
import io
import picamera
import cv2
import numpy as np
import datetime
import time
import os
from si_prefix import si_format
import Adafruit_CharLCD as LCD
# Set FPS (though RPi is probably to slow to meet it ;-)
FPS = 1.0
# Time budget for one capture+detect cycle (used by the commented-out pacing code).
frameTimeDelta = datetime.timedelta(seconds=1.0/FPS)
# Backlight is switched off after this long without a detected face.
dimTimeDelta = datetime.timedelta(seconds=10)
# Init LCD
# GPIO pin assignments for the HD44780-style character LCD
# (presumably BCM numbering, as the Adafruit library expects — confirm against wiring).
lcd_rs = 27
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# Display geometry: 16 columns x 2 rows.
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Show startup progress on the display; dots are appended as init stages complete.
lcd.clear()
lcd.message("Init scanner.")
# Init camera
camera = picamera.PiCamera()
# 720p capture: detection below runs on the full frame.
camera.resolution = (1280, 720)
lcd.clear()
lcd.message("Init scanner..")
# Init classifier, various options (timings below are per-frame on the Pi):
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml (~22sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml (>25sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml (~21sec)
#~ /usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml (~14sec, seems to miss many faces)
# alt2 chosen as the speed/accuracy compromise per the measurements above.
classifier = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml')
lcd.clear()
lcd.message("Init scanner...")
prevFaceCount = 0
totalUse = 0.0  # accumulated "use" in face-seconds; restored from the log below
# Make sure the log file exists; seed it with a zero-usage record.
# (Original had a missing ':' here and wrote the seed line without a newline,
# which broke the backwards last-line scan below.)
if not os.path.exists("scan_face.log"):
    with open("scan_face.log", "w") as f:
        # Trailing newline keeps the log line-oriented for the reader below.
        f.write("{},{},{}\n".format(time.time(), 0, 0))
# Get the last line of the log file and restore 'total use' from it.
with open("scan_face.log", "rb") as f:
    try:
        f.seek(-2, os.SEEK_END)        # jump to the second-last byte
        while f.read(1) != b"\n":      # until the previous EOL is found...
            f.seek(-2, os.SEEK_CUR)    # ...jump back the read byte plus one more
    except (IOError, OSError):
        # File holds only one line (or is tiny): the backwards scan seeks
        # before byte 0 — just read from the start instead.
        f.seek(0)
    # File is opened in binary mode, so decode before string operations.
    last = f.readline().decode("ascii", "replace")
bits = last.split(",")
# float(), not the raw string: the main loop adds face-seconds to this.
totalUse = float(bits[2])
log = open("scan_face.log", "a")
lastFaceTime = datetime.datetime.utcnow()
# Main loop: capture a frame, count faces, accumulate face-seconds,
# update the LCD and the log, and manage the backlight.
while True:
    print(time.time(), "GO")
    start = datetime.datetime.utcnow()
    # Capture a JPEG into memory — no disk round-trip.
    stream = io.BytesIO()
    camera.capture(stream, format='jpeg')
    print(time.time(), "captured")
    # np.frombuffer replaces the deprecated np.fromstring (same bytes -> uint8 view).
    buff = np.frombuffer(stream.getvalue(), dtype=np.uint8)
    image = cv2.imdecode(buff, 1)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    print(time.time(), "grayed")
    # scaleFactor=1.2, minNeighbors=5: coarse but fast enough for ~1 FPS on a Pi.
    faces = classifier.detectMultiScale(gray, 1.2, 5, minSize=(30, 20))
    print(time.time(), "Found {} faces".format(len(faces)))
    end = datetime.datetime.utcnow()
    # take the frame as being representative of whole frame:
    # each visible face contributes scanDuration face-seconds of "use".
    scanDuration = (end - start).total_seconds()
    # float() guards against totalUse having been restored from the log as a string.
    totalUse = float(totalUse) + len(faces) * scanDuration
    lcd.clear()
    #~ lcd.message("viewers {:>8}\nview-min. {:>7.2f}".format(len(faces), totalUse/60))
    lcd.message("{:>7} viewers \n{:>7}view-sec".format(len(faces), si_format(totalUse,precision=1)))
    # One record per frame; the trailing newline keeps the log line-oriented
    # so the startup last-line reader works (original omitted it).
    log.write("{},{},{}\n".format(time.time(), len(faces), int(totalUse)))
    # Flush through to disk so an abrupt power-off loses at most one record.
    log.flush()
    os.fsync(log.fileno())
    # Dim the backlight after dimTimeDelta without a face; relight on detection.
    if len(faces) < 1 and end - lastFaceTime > dimTimeDelta:
        lcd.set_backlight(0)
    else:
        lcd.set_backlight(1)
    if len(faces) > 0:
        lastFaceTime = end
    #~ if end - start < frameTimeDelta:
        #~ waitTime = frameTimeDelta - (end-start)
        #~ print("wait {}".format(waitTime.total_seconds()))
        #~ time.sleep(waitTime.total_seconds())