Rudimentary face compositing

Ruben van de Ven 2019-02-03 17:43:32 +01:00
commit ba5939bfe0
10 changed files with 512 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,3 @@
build
affdex-sdk

.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "sdk-samples"]
path = sdk-samples
url = git@gitlab.com:rubenvandeven/affdex-sdk-cpp-samples.git

README.md Normal file

@@ -0,0 +1,48 @@
# Gender Bias

Uses a modified version of Affectiva's [cpp-sdk-samples](https://github.com/Affectiva/cpp-sdk-samples/).
## Install
Install dependencies:
```bash
sudo apt install libopencv-dev libboost-system-dev libboost-filesystem-dev libboost-date-time-dev libboost-regex-dev libboost-thread-dev libboost-timer-dev libboost-chrono-dev libboost-serialization-dev libboost-log-dev libboost-program-options-dev
sudo apt install cmake build-essential git gzip
# the Raspberry Pi is ARM:
wget http://download.affectiva.com/linux/arm/affdex-cpp-sdk-3.1-40-linux-arm7.tar.gz
mkdir affdex-sdk
tar -xzvf affdex-cpp-sdk-*.tar.gz -C affdex-sdk
rm affdex-cpp-sdk-*.tar.gz
```
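The Python helpers (`echoserver.py` and `portrait_compositor.py`) need the packages pinned in `requirements.txt`. Assuming pip is available on the Pi (and that `picamera`, `Pillow` and `numpy` are already installed system-wide, as they are not pinned), a typical install is:
```bash
pip install -r requirements.txt
```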
Build:
```bash
mkdir build && cd build
cmake -DOpenCV_DIR=/usr/ -DBOOST_ROOT=/usr/ -DAFFDEX_DIR=~/gender_detection/affdex-sdk ~/gender_detection/sdk-samples
make
```
To avoid wearing out the SD card too soon, mount `/tmp` as tmpfs; this folder is also used to store camera frames for analysis. Add to `/etc/fstab`:
```
# tmpfs for /tmp, so we save the sd-card a bit
tmpfs /tmp tmpfs defaults,noatime,nosuid 0 0
tmpfs /var/log tmpfs defaults,noatime,mode=1777,size=64m 0 0
```
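The mounts can then be activated without a reboot:
```bash
sudo mount -a
findmnt /tmp   # should report FSTYPE tmpfs
```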
Register the services from `supervisord.conf` with supervisor (note that the symlink target must be an absolute path):

`sudo ln -s /home/pi/specimens_of_composite_portraiture/supervisord.conf /etc/supervisor/conf.d/specimens.conf`
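Then have supervisor pick up the new config:
```bash
sudo supervisorctl reread
sudo supervisorctl update
```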
Install fonts:
`cp fonts/*/*.ttf ~/.fonts/`
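and refresh the font cache so the newly copied fonts are found:
```bash
fc-cache -f ~/.fonts
```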
## Test
Quirks in either Raspbian or the video demo require two environment variables to be set before running; omitting them results in a segfault.
```bash
export LC_ALL=$LANG
export LD_PRELOAD=/usr/lib/arm-linux-gnueabihf/libopencv_core.so.2.4
~/gender_detection/build/video-demo/video-demo --input ~/gender_detection/image_test.jpg --data ~/gender_detection/affdex-sdk/data --draw=0 --numFaces=20
```
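On success, `video-demo` writes its detections to a CSV next to the input image (the compositor reads `/tmp/piimage.csv`). A hypothetical row for illustration; the values and the column order are assumptions, and only these columns are read by `portrait_compositor.py`:
```
faceId,x,y,width,height,gender,yaw
0,712,301,215,215,female,-23.5
```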

echoserver.py Normal file

@@ -0,0 +1,34 @@
import tornado.websocket
import tornado.web
import tornado.ioloop
import os

web_dir = os.path.join(os.path.split(__file__)[0], 'www')

# This WebSocketHandler rebroadcasts every incoming message
# to all connected clients.
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    connections = set()

    # a client connected
    def open(self):
        self.connections.add(self)
        print("New client connected")

    # a client sent a message: relay it to every connection
    def on_message(self, message):
        for con in self.connections:
            con.write_message(message)

    # a client disconnected
    def on_close(self):
        self.connections.remove(self)
        print("Client disconnected")

application = tornado.web.Application([
    (r"/ws", WebSocketHandler),
    (r"/(.*)", tornado.web.StaticFileHandler, {"path": web_dir, "default_filename": 'index.html'}),
], debug=True)

application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
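For a quick smoke test of the relay, a sketch using the `websocket-client` package from `requirements.txt`; since the handler broadcasts to all open connections including the sender, the sent message should come straight back:

```python
from websocket import create_connection

ws = create_connection("ws://localhost:8888/ws")
ws.send('{"type": "gender", "name": "female_front", "case_count": 1}')
print(ws.recv())  # the broadcast is echoed back to us as well
ws.close()
```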

portrait_compositor.py Normal file

@@ -0,0 +1,272 @@
import picamera
import io, os
import datetime
import csv
from subprocess import Popen, PIPE
from PIL import Image
import numpy as np
import cPickle as pickle
import requests
import time
import thread
from websocket import create_connection
import logging
import json

camera = picamera.PiCamera()
camera.rotation = 180
camera.resolution = (1920, 1080)
# camera.resolution = (1280, 720)
outputResolution = (1000, 1000)

# the binary genders as output by Affectiva
genders = ['male', 'female', 'unknown']
perspectives = ['side', 'front']
# yields 'male_side', 'female_side', 'unknown_side', 'male_front', ...
gender_perspectives = [g+"_"+p for p in perspectives for g in genders]

curdir = os.path.dirname(os.path.abspath(__file__))
tmpimage = '/tmp/piimage.jpg'
tmpimageResults = '/tmp/piimage.csv'
cmd = [
    os.path.join(curdir, 'build/video-demo/video-demo'),
    '--input', tmpimage,
    '--data', os.path.join(curdir, 'affdex-sdk/data'),
    '--draw', '0',
    '--numFaces', '20',
]
# without these variables video-demo yields a segmentation fault
environment = {
    'LC_LANG': 'en_GB.UTF-8',
    'LD_PRELOAD': '/usr/lib/arm-linux-gnueabihf/libopencv_core.so.2.4',
}
def updateStats(type, name, count, image_filename):
    params = {
        'type': type,
        'name': name,
        'time': int(time.time()),
        'case_count': int(count),
    }
    # push the update to the local websocket server for the display
    try:
        ws = create_connection("ws://localhost:8888/ws")
        js = json.dumps({
            'type': type,
            'name': name,
            'img_src': os.path.basename(image_filename),
            'case_count': int(count),
        })
        ws.send(js)
    except Exception as e:
        raise

    url = 'https://artstats.rubenvandeven.com/composites/views.php'
    if count % 10 == 0:
        # only send every one in x images, so that the server can never
        # retrace _exact_ faces by comparing the sent frames.
        with open(image_filename, 'rb') as fp:
            print('send request including image')
            r = requests.post(
                url,
                files={'image': fp},
                params=params
            )
    else:
        print('send request')
        r = requests.post(
            url,
            params=params
        )
class CompositeImage:
    def __init__(self, name, resolution):
        self.name = name
        self.count = 0
        self.resolution = resolution
        self.image = np.zeros((resolution[0], resolution[1], 3))
        # use state to determine whether a save is necessary
        self.state_dirty = True

    def addFace(self, img):
        img_a = np.array(img.resize(self.resolution))
        self.count += 1
        # incremental mean: new = old * (n-1)/n + x/n
        self.image = (self.image * (self.count - 1)/float(self.count) + img_a / float(self.count))
        self.state_dirty = True

    def restore(self, i, dir):
        '''
        Restore from pickle nr
        '''
        self.count = i
        name = self.get_frame_filename(self.count)
        img_file = os.path.join(dir, name)
        print("\trestore {}".format(img_file))
        self.image = np.array(Image.open(img_file)).astype('float64')
        self.state_dirty = False

    def get_image(self):
        return Image.fromarray(self.image.astype('uint8'), 'RGB')

    def get_frame_filename(self, i):
        return "{}-{}x{}-{}.png".format(self.name, self.resolution[0], self.resolution[1], i)

    def save_image(self, dir):
        if self.state_dirty is False:
            return
        name = self.get_frame_filename(self.count)
        filename = os.path.join(dir, name)
        self.get_image().save(filename)
        # notify the stats endpoint & local display without blocking the capture loop
        thread.start_new_thread(updateStats, ('gender', self.name, self.count, filename))
        self.state_dirty = False
class CompositeCollection:
    """
    Store/save the composite images
    """
    def __init__(self, names, size, target_dir = None):
        self.id = "{}-{}x{}".format("-".join(names), size[0], size[1])
        self.names = names
        self.size = size
        self.target_dir = os.path.dirname(os.path.abspath(__file__)) if target_dir is None else target_dir
        self.load()

    def get_pickle_filename(self):
        return os.path.join(self.target_dir, self.id + ".p")

    def load(self):
        pickle_file_name = self.get_pickle_filename()
        composites = {}
        try:
            with open(pickle_file_name, "rb") as fp:
                data = pickle.load(fp)
                for name in data['c']:
                    composites[name] = CompositeImage(name, self.size)
                    composites[name].restore(data['c'][name], self.target_dir)
        except Exception as e:
            print("Create new composite", e)
            for name in self.names:
                composites[name] = CompositeImage(name, self.size)
        self.composites = composites

    def save(self):
        data = { 'size' : self.size, 'c': {} }
        for name in self.composites:
            data['c'][name] = self.composites[name].count
        with open(self.get_pickle_filename(), "wb") as fp:
            print("Save", data)
            pickle.dump(data, fp)

    def save_img(self, name):
        self.get(name).save_image(self.target_dir)

    def get_as_percentages(self, precision = 3):
        # iterate over the composite objects, not the dict keys
        total = sum([c.count for c in self.composites.values()])
        percentages = {}
        if total < 1:
            # assert: in the beginning, we were all made equal
            for c in self.composites.values():
                percentages[c.name] = round(100.0 / len(self.composites), precision)
        else:
            for c in self.composites.values():
                # force float division, as the counts are integers
                percentages[c.name] = round(100.0 * c.count / total, precision)
        return percentages

    def get(self, name):
        return self.composites[name]

    def clean(self):
        # keep only recent frames on disk: remove frames 5-10 saves back
        for name in self.names:
            c = self.get(name)
            start = max(0, c.count - 10)
            end = max(0, c.count - 5)
            for i in range(start, end):
                filename = os.path.join(self.target_dir, c.get_frame_filename(i))
                if os.path.exists(filename):
                    print("Clean {}".format(filename))
                    os.unlink(filename)
def append_face(row, image, composites):
    # yaw (in degrees) distinguishes side views (as we will never be able to use 90 :-( )
    suffix = 'side' if abs(float(row['yaw'])) > 20 else 'front'
    # print('yaw:', float(row['yaw']))
    name = "{}_{}".format(row['gender'], suffix)
    if name not in composites.names:
        return
    composite = composites.get(name)

    # TODO: matrix transform the image, to skew the face into being a flat-ish surface
    # This might yield less blurry composites

    # crop the image, but keep it bigger than the found face
    grow_x = .2 # in every direction, so .2 becomes 1.4 * width
    grow_y = grow_x
    face_w = int(row['width'])
    face_h = int(row['height'])
    face_x = int(row['x'])
    face_y = int(row['y'])
    # we go square:
    size_x = max(face_w, face_h) * (1 + grow_x * 2)
    size_y = size_x
    dx = (face_w - size_x) / 2
    dy = (face_h - size_y) / 2
    # PIL.Image handles cropping outside the canvas by filling with black/transparent
    x = face_x + dx
    y = face_y + dy
    print('crop')
    i = image.crop((x, y, x + size_x, y + size_y))
    # mirror right-facing side views so all side portraits align
    if suffix == 'side' and float(row['yaw']) < 0:
        print('\tflip')
        i = i.transpose(Image.FLIP_LEFT_RIGHT)
    print('add')
    composite.addFace(i)
    print('added')
composites = CompositeCollection(gender_perspectives, outputResolution, os.path.join(curdir, 'output'))

while True:
    start = datetime.datetime.utcnow()
    # stream = io.BytesIO()
    camera.capture(tmpimage, format='jpeg')
    process = Popen(cmd, env=environment)
    process.wait()
    img = Image.open(tmpimage)
    os.unlink(tmpimage)
    with open(tmpimageResults) as csvfile:
        print("open csv")
        data = csv.DictReader(csvfile)
        faces = 0
        for row in data:
            if row['faceId'] == 'nan':
                # not a valid face
                continue
            faces += 1
            print("append face")
            append_face(row, img, composites)
    if faces > 0:
        print("save :-)")
        for name in composites.names:
            print("\tsave img '{}'".format(name))
            composites.save_img(name)
        # save the pickle after the images, so they can be restored
        composites.save()
        composites.clean()
        # TODO: trigger output update
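The compositing itself is an incremental mean: after n faces, each pixel holds the average of all n aligned crops. A standalone sketch of the update rule used in CompositeImage.addFace, on toy data rather than camera frames:

```python
import numpy as np

# three dummy "frames" with constant pixel values 30, 60 and 90
frames = [np.full((2, 2, 3), v, dtype='float64') for v in (30, 60, 90)]
mean = np.zeros((2, 2, 3))
for n, frame in enumerate(frames, start=1):
    # identical to addFace: new = old * (n-1)/n + x/n
    mean = mean * (n - 1) / float(n) + frame / float(n)
print(mean[0, 0, 0])  # 60.0, the mean of 30, 60 and 90
```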

requirements.txt Normal file

@@ -0,0 +1,3 @@
websocket-client==0.54
requests==2.12
tornado==5.1

sdk-samples Submodule

@@ -0,0 +1 @@
Subproject commit 7ac4d35c508bf02751ef122a7fc8a8dda922a287

supervisord.conf Normal file

@@ -0,0 +1,15 @@
[program:echoserver]
command=python /home/pi/specimens_of_composite_portraiture/echoserver.py
directory=/home/pi/specimens_of_composite_portraiture
startsecs=2
user=pi
autorestart=true

[program:portraits]
command=python /home/pi/specimens_of_composite_portraiture/portrait_compositor.py
directory=/home/pi/specimens_of_composite_portraiture
startsecs=7
user=pi
autorestart=true

www/index.html Normal file

@@ -0,0 +1,132 @@
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
  <meta charset="utf-8">
  <title>Specimens of (involuntary) composite portraiture</title>
  <style media="screen">
    html{
      /* height: 100vh;
      width: 100vw; */
    }
    body{
      font-family: "CMU Serif";
      border: solid 2px black;
      margin: 20px;
      width: calc(100vw - 44px);
      height: calc(100vh - 44px);
      /* box-sizing: border-box; */
      transform: rotate(.2deg);
      font-size: 25pt;
    }
    h1,h2{
      font-family: "OSP-DIN";
      text-align: center;
      text-transform: uppercase;
      border-bottom: solid 2px black;
      margin-top: 5px;
      margin-bottom: 5px;
      padding-bottom: 3px;
      padding-top: 0;
      font-weight: normal;
    }
    section#gender{
      height: 45%;
      border-bottom: solid 4px black;
    }
    section#ethnicity{
      height: 48%;
      border-bottom: solid 2px black;
    }
    h1{
      font-size: 46pt;
    }
    .of{
      font-size: 70%;
    }
    .star{
      font-size: 40%;
      display: inline-block;
      position: relative;
      top: -12px;
    }
    #gender h2{
      letter-spacing: 5px;
    }
    #ethnicity h2{
      letter-spacing: 2px;
    }
    #affdex{
      position: absolute;
      top: calc(100% - 18px);
      right: 20px;
      font-size: 14pt;
    }
    #graph{
      display: flex;
      flex-direction: row;
      margin: 0 10px;
    }
    #graph > div{
      width: 20%;
    }
    #graph .background{
      border: solid 2px black;
      height: 20px;
      border-right: none;
    }
    #graph div:last-child .background{
      border-right: solid 2px black;
    }
    #graph .cases{
      text-align: center;
    }
  </style>
</head>
<body>
  <h1>Specimens <span class='of'>of</span> (Involuntary) Composite Portraiture</h1>
  <section id='gender'>
    <h2>Gender Differentiation<span class='star'>*</span></h2>
    <div class='genders'>
      test
    </div>
  </section>
  <section id='ethnicity'>
    <h2>Reinforced Ethnical Stereotypes</h2>
    <div id='graph'>
      <div id='graph--black-african'>
        <div class='background'></div>
        <div class='cases'><span class='nr'>0</span> <span class='txt'>cases</span></div>
      </div>
      <div id='graph--caucasian'>
        <div class='background'></div>
        <div class='cases'><span class='nr'>0</span> <span class='txt'>cases</span></div>
      </div>
      <div id='graph--east-asian'>
        <div class='background'></div>
        <div class='cases'><span class='nr'>0</span> <span class='txt'>cases</span></div>
      </div>
      <div id='graph--hispanic'>
        <div class='background'></div>
        <div class='cases'><span class='nr'>0</span> <span class='txt'>cases</span></div>
      </div>
      <div id='graph--south-asian'>
        <div class='background'></div>
        <div class='cases'><span class='nr'>0</span> <span class='txt'>cases</span></div>
      </div>
    </div>
  </section>
  <div id='affdex'>* based on the 2017 edition of Affectiva's gender detection toolkit</div>
</body>
</html>

www/reconnecting-websocket.min.js vendored Normal file

@@ -0,0 +1 @@
!function(a,b){"function"==typeof define&&define.amd?define([],b):"undefined"!=typeof module&&module.exports?module.exports=b():a.ReconnectingWebSocket=b()}(this,function(){function a(b,c,d){function l(a,b){var c=document.createEvent("CustomEvent");return c.initCustomEvent(a,!1,!1,b),c}var e={debug:!1,automaticOpen:!0,reconnectInterval:1e3,maxReconnectInterval:3e4,reconnectDecay:1.5,timeoutInterval:2e3};d||(d={});for(var f in e)this[f]="undefined"!=typeof d[f]?d[f]:e[f];this.url=b,this.reconnectAttempts=0,this.readyState=WebSocket.CONNECTING,this.protocol=null;var h,g=this,i=!1,j=!1,k=document.createElement("div");k.addEventListener("open",function(a){g.onopen(a)}),k.addEventListener("close",function(a){g.onclose(a)}),k.addEventListener("connecting",function(a){g.onconnecting(a)}),k.addEventListener("message",function(a){g.onmessage(a)}),k.addEventListener("error",function(a){g.onerror(a)}),this.addEventListener=k.addEventListener.bind(k),this.removeEventListener=k.removeEventListener.bind(k),this.dispatchEvent=k.dispatchEvent.bind(k),this.open=function(b){h=new WebSocket(g.url,c||[]),b||k.dispatchEvent(l("connecting")),(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","attempt-connect",g.url);var d=h,e=setTimeout(function(){(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","connection-timeout",g.url),j=!0,d.close(),j=!1},g.timeoutInterval);h.onopen=function(){clearTimeout(e),(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","onopen",g.url),g.protocol=h.protocol,g.readyState=WebSocket.OPEN,g.reconnectAttempts=0;var d=l("open");d.isReconnect=b,b=!1,k.dispatchEvent(d)},h.onclose=function(c){if(clearTimeout(e),h=null,i)g.readyState=WebSocket.CLOSED,k.dispatchEvent(l("close"));else{g.readyState=WebSocket.CONNECTING;var d=l("connecting");d.code=c.code,d.reason=c.reason,d.wasClean=c.wasClean,k.dispatchEvent(d),b||j||((g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","onclose",g.url),k.dispatchEvent(l("close")));var e=g.reconnectInterval*Math.pow(g.reconnectDecay,g.reconnectAttempts);setTimeout(function(){g.reconnectAttempts++,g.open(!0)},e>g.maxReconnectInterval?g.maxReconnectInterval:e)}},h.onmessage=function(b){(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","onmessage",g.url,b.data);var c=l("message");c.data=b.data,k.dispatchEvent(c)},h.onerror=function(b){(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","onerror",g.url,b),k.dispatchEvent(l("error"))}},1==this.automaticOpen&&this.open(!1),this.send=function(b){if(h)return(g.debug||a.debugAll)&&console.debug("ReconnectingWebSocket","send",g.url,b),h.send(b);throw"INVALID_STATE_ERR : Pausing to reconnect websocket"},this.close=function(a,b){"undefined"==typeof a&&(a=1e3),i=!0,h&&h.close(a,b)},this.refresh=function(){h&&h.close()}}return a.prototype.onopen=function(){},a.prototype.onclose=function(){},a.prototype.onconnecting=function(){},a.prototype.onmessage=function(){},a.prototype.onerror=function(){},a.debugAll=!1,a.CONNECTING=WebSocket.CONNECTING,a.OPEN=WebSocket.OPEN,a.CLOSING=WebSocket.CLOSING,a.CLOSED=WebSocket.CLOSED,a});