310 lines
No EOL
7.9 KiB
JavaScript
310 lines
No EOL
7.9 KiB
JavaScript
// Startup diagnostics: log the libraries and the origin this sketch runs with.
console.log('p5 version:', p5);
console.log('ml5 version:', ml5);
console.log(location.origin);

// Images posted from the parent frame, keyed by asset id
// (see the commented-out message handler further down).
let assets = {};
|
|
|
|
// p5 draw loop — intentionally empty: rendering is driven from the
// asynchronous face-detection callback (handleResults) instead.
var draw = function () {
};
|
|
|
|
// var gotResults = function(err, result) {
|
|
// if (err) {
|
|
// console.log(err)
|
|
// return
|
|
// }
|
|
// };
|
|
|
|
// function code_error(type, error) {
|
|
// window.parent.postMessage({
|
|
// 'type': type,
|
|
// 'error': error.message,
|
|
// 'name': error.name,
|
|
// 'line': error.lineNumber - 2, // seems it gives wrong line numbers
|
|
// 'column': error.columnNumber
|
|
// }, '*');
|
|
|
|
// }
|
|
|
|
// function no_code_error(type){
|
|
// window.parent.postMessage({
|
|
// 'type': type,
|
|
// 'error': null
|
|
// }, '*');
|
|
// }
|
|
|
|
// window.addEventListener("message", function (e) {
|
|
// if (e.origin !== window.location.origin) { // handler param is `e`, not `event`
|
|
// console.error("Invalid origin of message. Ignored");
|
|
// return;
|
|
// }
|
|
|
|
// console.debug("receive", e.data);
|
|
|
|
// switch (e.data.action) {
|
|
// case 'asset':
|
|
// if(e.data.content === null){
|
|
// delete assets[e.data.id];
|
|
// } else {
|
|
// assets[e.data.id] = loadImage(e.data.content);
|
|
// }
|
|
|
|
// break;
|
|
// case 'code':
|
|
// let f = new Function("");
|
|
// try {
|
|
// f = new Function(e.data.draw);
|
|
// no_code_error('syntax');
|
|
// } catch (error) {
|
|
// code_error('syntax', error);
|
|
// // window.parent.postMessage({'syntax': error.lineNumber});
|
|
// }
|
|
// handleResults = f;
|
|
// break;
|
|
|
|
// default:
|
|
// console.error("Invalid action", e.data.action);
|
|
// break;
|
|
// }
|
|
|
|
// });
|
|
|
|
|
|
let faceapi;            // ml5 faceApi model handle (created in setup)
var video;              // p5 capture element for the webcam
var lastFrame;          // offscreen buffer holding the frame the detections were computed on
var detections = [];    // latest parsed detection results (see parseDetectionResults)
var factor_x, factor_y; // scale factors from video coordinates to canvas coordinates
|
|
|
|
|
|
// function pause() {
|
|
// if (running)
|
|
// running = false;
|
|
// else {
|
|
// running = true;
|
|
// faceapi.detect(gotResults);
|
|
// }
|
|
// }
|
|
|
|
// by default all options are set to true
|
|
// by default all options are set to true
// All face-api model weights are served from the hosting page's origin.
const detection_options = (() => {
  const modelBase = window.parent.location.origin + '/assets/faceapi';
  return {
    withLandmarks: true,
    withDescriptors: false,
    minConfidence: 0.5,
    Mobilenetv1Model: modelBase,
    FaceLandmarkModel: modelBase,
    FaceLandmark68TinyNet: modelBase,
    FaceRecognitionModel: modelBase,
    TinyFaceDetectorModel: modelBase,
  };
})();
|
|
|
|
|
|
|
|
// p5 entry point: create the canvas, start webcam capture and
// initialise the ml5 faceApi detector.
function setup() {
  // createCanvas(1280,720, WEBGL);
  createCanvas(540,420);
  smooth();
  noFill();

  // NOTE(review): push() has no matching pop(), and this translate only
  // makes sense for the (commented-out) WEBGL mode, whose origin is the
  // canvas centre — confirm intent.
  push();
  translate(-width/2, -height/2);

  let constraints = {
    video: {
      width: { min: 720 },
      height: { min: 540 }
    },
    audio: false
  };

  video = createCapture(constraints);
  // NOTE(review): video.width/height are likely 0 until the camera is
  // approved; gotResults resizes lastFrame once the real size is known.
  lastFrame = createGraphics(video.width, video.height);

  console.log(video);
  // HeadGazeSetup(video);
  // video.size(width, height);
  video.hide(); // Hide the video element, and just show the canvas
  faceapi = ml5.faceApi(video, detection_options, modelReady);
  textAlign(RIGHT);
}
|
|
|
|
// ml5 callback fired when the face-api models finish loading:
// start the first detection pass (gotResults then re-queues itself).
function modelReady() {
  faceapi.detect(gotResults);
}
|
|
|
|
// Default frame renderer: pulsing red background with the current
// video frame inset by 10px on every side.
var handleResults = function () {
  const pulse = (millis() / 100) % 255;
  background(pulse, 0, 0);
  image(video, -width / 2 + 10, -height / 2 + 10, width - 20, height - 20);
};
|
|
|
|
|
|
// Async callback for faceapi.detect(): stores the latest detections for the
// draw code, snapshots the current video frame, and immediately re-queues
// the next detection pass (continuous detection loop).
// Declared with `var` — the original bare assignment created an implicit
// global, which throws in strict mode.
// @param {Error|null} err  detection error, if any
// @param {Array} result    raw face-api detection results
var gotResults = function (err, result) {
  if (err) {
    console.log(err);
    return;
  }

  // store data for async draw function
  // TODO results to more compatible format
  detections = parseDetectionResults(result);

  // size of video becomes known only after camera approval
  if (lastFrame.width != video.width || lastFrame.height != video.height) {
    console.log('Resizing canvas');
    lastFrame.resizeCanvas(video.width, video.height);
  }

  // keep a copy of the frame these detections were computed on
  lastFrame.image(video, 0, 0, video.width, video.height);

  // scale factors from video coordinates to canvas coordinates
  factor_x = width / video.width;
  factor_y = height / video.height;

  // re-queue: detect again on the next available frame
  faceapi.detect(gotResults);
};
|
|
|
|
// Draw a purple outline around every detection's aligned bounding box.
// Coordinates come straight from the raw face-api box.
function drawBox(detections) {
  for (const detection of detections) {
    const box = detection.alignedRect._box;

    noFill();
    stroke(161, 95, 251);
    strokeWeight(2);
    rect(box._x, box._y, box._width, box._height);
  }
}
|
|
|
|
// Trace every facial landmark group of a single detection.
// Eyes, nose and mouth are drawn closed; brows and jaw stay open.
function drawLandmarks(detection) {
  const {
    mouth,
    nose,
    leftEye,
    rightEye,
    leftEyeBrow,
    rightEyeBrow,
    jawOutline,
  } = detection.parts;

  drawPart(mouth, true);
  drawPart(nose, true);
  drawPart(leftEye, true);
  drawPart(leftEyeBrow, false);
  drawPart(rightEye, true);
  drawPart(rightEyeBrow, false);
  drawPart(jawOutline, false);
}
|
|
|
|
// Render one landmark group as a polyline; `closed` joins the last
// point back to the first.
function drawPart(feature, closed) {
  beginShape();
  feature.forEach((point) => vertex(point.x, point.y));
  if (closed === true) {
    endShape(CLOSE);
  } else {
    endShape();
  }
}
|
|
|
|
/**
 * Wrapper around p5.js color class.
 * @param {*} c color, either as array, or string (css name or HEX string)
 * @returns {p5.Color} the constructed p5 color object
 */
function getColor(c) {
  const channels = Array.isArray(c) ? c : [c];
  return new p5.Color(p5.instance, channels);
}
|
|
|
|
// A few named colors prepared up front for drawing code.
// NOTE(review): getColor runs at script load and reads p5.instance,
// which may not exist before p5 has initialised — confirm.
var colors = {
  red: getColor('red'),
  blue: getColor('blue'),
  green: getColor('green'),
};
|
|
|
|
/**
 * Approximate distance between two faces, in pixels, not meters.. for now.
 * We cheat a little: take centers, visualise circles with
 * r = max(width, height) / 2 and find the distance between these circles
 * (negative when they overlap).
 * @param {{box: {x, y, width, height}}} face1
 * @param {{box: {x, y, width, height}}} face2
 * @returns {number} centre distance minus both radii, in pixels
 */
function faceDistance(face1, face2) {
  // BUG FIX: the old code built `box1`/`box2` with the comma operator
  // (a Python-tuple habit), so they were dead code, and every local
  // leaked as an implicit global. All locals are now const.
  const c1 = {
    x: face1.box.x + face1.box.width / 2,
    y: face1.box.y + face1.box.height / 2,
  };
  const c2 = {
    x: face2.box.x + face2.box.width / 2,
    y: face2.box.y + face2.box.height / 2,
  };

  const r1 = Math.max(face1.box.width, face1.box.height) / 2;
  const r2 = Math.max(face2.box.width, face2.box.height) / 2;

  return Math.hypot(c1.x - c2.x, c1.y - c2.y) - r1 - r2;
}
|
|
|
|
/**
 * Axis-aligned bounding box of points and/or sets of points.
 * Implements the previously-stubbed contract: arguments may be single
 * points ({x, y}) or arrays of points, mixed freely.
 * @param {...(Object|Object[])} items points or arrays of points
 * @returns {{top: number, left: number, width: number, height: number}|undefined}
 *   the enclosing box, or undefined when no points were given
 */
function getBoundingBox(...items) {
  // Flatten one level: each argument is either a point or an array of points.
  const points = items.flatMap((item) => (Array.isArray(item) ? item : [item]));
  if (points.length === 0) return undefined;

  let minX = Infinity;
  let minY = Infinity;
  let maxX = -Infinity;
  let maxY = -Infinity;
  for (const { x, y } of points) {
    minX = Math.min(minX, x);
    minY = Math.min(minY, y);
    maxX = Math.max(maxX, x);
    maxY = Math.max(maxY, y);
  }

  return { top: minY, left: minX, width: maxX - minX, height: maxY - minY };
}
|
|
|
|
/**
 * Convert raw face-api results into plain canvas-space detections:
 * every coordinate is scaled by the global factor_x / factor_y.
 * @param {Array} results raw ml5 faceApi detection results
 * @returns {Array<{landmarks, parts, box}>} simplified detections
 */
function parseDetectionResults(results) {
  const parsed = [];

  for (const result of results) {
    const rawBox = result.alignedRect._box;
    const detection = {
      'landmarks': result.landmarks._positions.map((pos) => parseCoordinate(pos)),
      'parts': {},
      'box': {
        x: rawBox._x * factor_x,
        y: rawBox._y * factor_y,
        width: rawBox._width * factor_x,
        height: rawBox._height * factor_y,
      },
    };

    for (const partName in result.parts) {
      detection.parts[partName] = result.parts[partName].map((pos) => parseCoordinate(pos));
    }

    parsed.push(detection);
  }

  return parsed;
}
|
|
|
|
/**
 * face api detector returns coordinates with _x and _y attributes.
 * We convert this to the canvas's coordinates by scaling with the
 * global factor_x / factor_y.
 * @param {{_x: number, _y: number}} position raw detector coordinate
 * @returns {{x: number, y: number}} coordinate in canvas space
 */
function parseCoordinate(position) {
  const { _x, _y } = position;
  return {
    x: _x * factor_x,
    y: _y * factor_y,
  };
}