console.log('p5 version:', p5);
console.log('ml5 version:', ml5);
console.log(location.origin);

let assets = {};

var draw = function () {
  // Begin by drawing the video frame at position x = 0, y = 0,
  // filling the whole width and height of the canvas.
  image(lastFrame, 0, 0, width, height);

  for (let detection of detections) {
    push();
    let transformed = transformDetection(detection);
    translate(transformed.origin.x, transformed.origin.y);
    rotate(transformed.angle);
    try {
      drawMask(transformed);
    } catch (error) {
      console.error(error);
    }
    pop();
  }
};

// Placeholder: a patch is expected to override this with its own mask drawing.
var drawMask = function (detection) {
};

// var gotResults = function(err, result) {
//   if (err) {
//     console.log(err)
//     return
//   }
// };

// function code_error(type, error) {
//   window.parent.postMessage({
//     'type': type,
//     'error': error.message,
//     'name': error.name,
//     'line': error.lineNumber - 2, // seems it gives wrong line numbers
//     'column': error.columnNumber
//   }, '*');
// }

// function no_code_error(type){
//   window.parent.postMessage({
//     'type': type,
//     'error': null
//   }, '*');
// }

// window.addEventListener("message", function (e) {
//   if (e.origin !== window.location.origin) {
//     console.error("Invalid origin of message. Ignored");
//     return;
//   }
//   console.debug("receive", e.data);
//   switch (e.data.action) {
//     case 'asset':
//       if (e.data.content === null) {
//         delete assets[e.data.id];
//       } else {
//         assets[e.data.id] = loadImage(e.data.content);
//       }
//       break;
//     case 'code':
//       let f = new Function("");
//       try {
//         f = new Function(e.data.draw);
//         no_code_error('syntax');
//       } catch (error) {
//         code_error('syntax', error);
//         // window.parent.postMessage({'syntax': error.lineNumber});
//       }
//       handleResults = f;
//       break;
//     default:
//       console.error("Invalid action", e.data.action);
//       break;
//   }
// });

let faceapi;
var video;
var lastFrame;
var detections = [];
var factor_x, factor_y;

// function pause() {
//   if (running)
//     running = false;
//   else {
//     running = true;
//     faceapi.detect(gotResults);
//   }
// }

// By default all options are set to true.
const detection_options = {
  withLandmarks: true,
  withDescriptors: false,
  minConfidence: 0.5,
  Mobilenetv1Model: window.parent.location.origin + '/assets/faceapi',
  FaceLandmarkModel: window.parent.location.origin + '/assets/faceapi',
  FaceLandmark68TinyNet: window.parent.location.origin + '/assets/faceapi',
  FaceRecognitionModel: window.parent.location.origin + '/assets/faceapi',
  TinyFaceDetectorModel: window.parent.location.origin + '/assets/faceapi',
}

function setupAssets() {
  // Placeholder. Override in patch...
}
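// Example only (not part of the patch protocol above): a sketch of how a patch
// could fill in setupAssets() and drawMask(). The asset id 'sticker' and its URL
// are hypothetical; a real patch would receive images via the (currently
// commented out) 'asset' message or load its own.
var exampleSetupAssets = function () {
  // assets['sticker'] = loadImage('/assets/sticker.png'); // hypothetical asset
};

var exampleDrawMask = function (detection) {
  // drawMask() receives a transformed detection (see transformDetection()):
  // the origin lies between the eyes and the face is rotated upright, so the
  // padded box can be used directly to place something over the face.
  noFill();
  stroke(161, 95, 251);
  rect(detection.box.x, detection.box.y, detection.box.width, detection.box.height);
  // if (assets['sticker']) {
  //   image(assets['sticker'], detection.box.x, detection.box.y,
  //         detection.box.width, detection.box.height);
  // }
};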
function setup() {
  // createCanvas(1280,720, WEBGL);
  createCanvas(540, 420);
  smooth();
  noFill();
  push();
  translate(-width / 2, -height / 2);

  let constraints = {
    video: {
      width: { min: 720 },
      height: { min: 540 }
    },
    audio: false
  };
  video = createCapture(constraints);
  lastFrame = createGraphics(video.width, video.height);
  console.log(video);
  // HeadGazeSetup(video);
  // video.size(width, height);
  video.hide(); // Hide the video element, and just show the canvas

  faceapi = ml5.faceApi(video, detection_options, modelReady);
  textAlign(RIGHT);
  setupAssets();
}

function modelReady() {
  faceapi.detect(gotResults);
}

var handleResults = function () {
  // background(parseInt(Math.random()*255),parseInt(Math.random()*255),parseInt(Math.random()*255));
  background((millis() / 100) % 255, 0, 0);
  image(video, -width / 2 + 10, -height / 2 + 10, width - 20, height - 20);
};

var gotResults = function (err, result) {
  if (err) {
    console.log(err);
    return;
  }

  // The size of the video becomes known only after camera approval, so the
  // scale factors are updated here, before the results are parsed.
  factor_x = width / video.width;
  factor_y = height / video.height;

  // Store data for the async draw function.
  // TODO: results to more compatible format
  detections = parseDetectionResults(result);

  if (lastFrame.width != video.width || lastFrame.height != video.height) {
    console.log('Resizing canvas');
    lastFrame.resizeCanvas(video.width, video.height);
  }
  // lastFrame.background('red');
  lastFrame.image(video, 0, 0, video.width, video.height);

  faceapi.detect(gotResults);
}

function drawBox(detections) {
  for (let i = 0; i < detections.length; i++) {
    const alignedRect = detections[i].alignedRect;
    const x = alignedRect._box._x;
    const y = alignedRect._box._y;
    const boxWidth = alignedRect._box._width;
    const boxHeight = alignedRect._box._height;

    noFill();
    stroke(161, 95, 251);
    strokeWeight(2);
    rect(x, y, boxWidth, boxHeight);
  }
}

function drawLandmarks(detection) {
  const mouth = detection.parts.mouth;
  const nose = detection.parts.nose;
  const leftEye = detection.parts.leftEye;
  const rightEye = detection.parts.rightEye;
  const rightEyeBrow = detection.parts.rightEyeBrow;
  const leftEyeBrow = detection.parts.leftEyeBrow;
  const jawOutline = detection.parts.jawOutline;

  strokePoints(mouth, CLOSE);
  strokePoints(nose, CLOSE);
  strokePoints(leftEye, CLOSE);
  strokePoints(leftEyeBrow, OPEN);
  strokePoints(rightEye, CLOSE);
  strokePoints(rightEyeBrow, OPEN);
  strokePoints(jawOutline, OPEN);
}

function strokePoints(points, closed) {
  if (typeof closed === 'undefined') {
    closed = CLOSE;
  }
  beginShape();
  for (let i = 0; i < points.length; i++) {
    const x = points[i].x;
    const y = points[i].y;
    vertex(x, y);
  }
  endShape(closed);
}

function drawPoints(points, radius) {
  if (typeof radius === 'undefined') {
    radius = 2;
  }
  for (let i = 0; i < points.length; i++) {
    const x = points[i].x;
    const y = points[i].y;
    circle(x, y, radius);
  }
}
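// Example only (not called anywhere in this file): the helpers above can be
// combined to outline every parsed detection, e.g. from a custom draw override.
var exampleDrawAllLandmarks = function () {
  noFill();
  stroke(161, 95, 251);
  strokeWeight(2);
  for (let detection of detections) {
    drawLandmarks(detection);      // uses detection.parts.* in canvas coordinates
    drawPoints(detection.points, 2);
  }
};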
function faceDistance(face1, face2) {
  // Distance between faces, in pixels, not meters... for now.
  // We cheat a little: take the centers, imagine a circle with
  // r = max(width, height) / 2 around each face, and return the
  // distance between these circles.
  const c1 = {
    x: face1.box.x + face1.box.width / 2,
    y: face1.box.y + face1.box.height / 2,
  };
  const c2 = {
    x: face2.box.x + face2.box.width / 2,
    y: face2.box.y + face2.box.height / 2,
  };
  const r1 = Math.max(face1.box.width, face1.box.height) / 2;
  const r2 = Math.max(face2.box.width, face2.box.height) / 2;
  const dx = c1.x - c2.x;
  const dy = c1.y - c2.y;
  return Math.sqrt(Math.pow(dx, 2) + Math.pow(dy, 2)) - r1 - r2;
}

function mergePoints() {
  // A point should be {x: , y: }.
  // Collect all points in the arguments, flattening arrays of points:
  let points = [];
  for (let arg of arguments) {
    if (Array.isArray(arg)) {
      points.push(...arg);
    } else {
      points.push(arg);
    }
  }
  return points;
}

function getBoundingBox() {
  // The arguments contain points, or sets of points. Find their bounding box.
  const points = mergePoints(...arguments);
  const xs = points.map((point) => point.x);
  const ys = points.map((point) => point.y);
  const minx = Math.min(...xs);
  const miny = Math.min(...ys);
  return {
    x: minx,
    y: miny,
    width: Math.max(...xs) - minx,
    height: Math.max(...ys) - miny,
  };
}

function parseDetectionResults(results) {
  let detections = [];
  for (let result of results) {
    const landmarks = result.landmarks._positions.map((pos) => parseCoordinate(pos));
    let detection = {
      'points': landmarks, // TODO: rotation
      'parts': {},
      'box': {
        x: result.alignedRect._box._x * factor_x,
        y: result.alignedRect._box._y * factor_y,
        width: result.alignedRect._box._width * factor_x,
        height: result.alignedRect._box._height * factor_y,
      },
    };
    for (let idx in result.parts) {
      detection.parts[idx] = result.parts[idx].map((pos) => parseCoordinate(pos));
    }
    detection['center'] = {
      x: detection.box.x + detection.box.width / 2,
      y: detection.box.y + detection.box.height / 2,
    };
    detections.push(detection);
  }
  return detections;
}

/**
 * The face api detector returns coordinates with _x and _y attributes.
 * We convert these to the canvas's coordinates.
 * @param {Object} position {_x: , _y: }
 */
function parseCoordinate(position) {
  return {
    x: position._x * factor_x,
    y: position._y * factor_y,
  };
}

function transformDetection(original) {
  let b = original.points[36]; // outer point on left eye
  let a = original.points[45]; // outer point on right eye
  let cx = a.x / 2 + b.x / 2;
  let cy = a.y / 2 + b.y / 2;
  let angle = atan2(a.y - b.y, a.x - b.x);

  let detection = {
    'points': original.points.map(p => transformPoint(p, cx, cy, angle)),
    'origin': { x: cx, y: cy },
    'angle': angle,
    original: original
  };

  let bbox = getBoundingBox(detection.points);
  const padding_x = bbox.width * .1;
  const padding_y = bbox.height * .1;
  detection['box'] = {
    x: bbox.x - padding_x,
    y: bbox.y - padding_y,
    width: bbox.width * 1.2,
    height: bbox.height * 1.2
  };
  return detection;
}

function transformPoint(p, cx, cy, angle) {
  const px = p.x - cx;
  const py = p.y - cy;
  return {
    x: px * cos(-angle) - py * sin(-angle),
    y: px * sin(-angle) + py * cos(-angle)
  };
}
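// Example only (not wired into the sketch above): faceDistance() works on the
// parsed detections, so a patch could for instance react when two faces
// (almost) touch. The default threshold of 20 pixels is an arbitrary illustration.
var exampleFindClosePairs = function (maxDistance) {
  if (typeof maxDistance === 'undefined') {
    maxDistance = 20;
  }
  let pairs = [];
  for (let i = 0; i < detections.length; i++) {
    for (let j = i + 1; j < detections.length; j++) {
      if (faceDistance(detections[i], detections[j]) < maxDistance) {
        pairs.push([detections[i], detections[j]]);
      }
    }
  }
  return pairs;
};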