moodmeter/opencv-webcam-demo/opencv-webcam-demo.cpp

#include <iostream>
#include <memory>
#include <chrono>
#include <fstream>
#include <sstream>   // std::stringstream (used by getAsJson)
#include <map>
#include <cstdio>    // snprintf
#include <unistd.h>  // usleep
#include <boost/filesystem.hpp>
#include <boost/timer/timer.hpp>
#include <boost/program_options.hpp>
#include <boost/algorithm/string.hpp>
#include <opencv2/opencv.hpp> // cv::Mat, imread (may also come in via the listener headers)
#include "Frame.h"
#include "Face.h"
#include "FrameDetector.h"
#include "PhotoDetector.h"
#include "AffdexException.h"
#include "AFaceListener.hpp"
#include "PlottingImageListener.hpp"
#include "LoggingImageListener.hpp"
#include "StatusListener.hpp"

using namespace std;
using namespace affdex;
using namespace cv;
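
// Overview: despite the file name, this build does not grab from a webcam.
// It watches a directory for numbered images (frame%06d.jpg or
// segment%06d.jpg, produced by another process), runs each image through the
// Affdex PhotoDetector, and writes the face metrics alongside it as
// frame%06d.json / segment%06d.json. Frames that already have a JSON file
// are skipped, so the tool can resume after a restart.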
// Component-wise minimum over all feature points; together with maxPoint()
// below this yields the corners of the face bounding box.
// Assumes points is non-empty.
FeaturePoint minPoint(const VecFeaturePoint &points)
{
    FeaturePoint ret = *points.begin();
    for (const auto &p : points)
    {
        if (p.x < ret.x) ret.x = p.x;
        if (p.y < ret.y) ret.y = p.y;
    }
    return ret;
}

// Component-wise maximum over all feature points. Assumes points is non-empty.
FeaturePoint maxPoint(const VecFeaturePoint &points)
{
    FeaturePoint ret = *points.begin();
    for (const auto &p : points)
    {
        if (p.x > ret.x) ret.x = p.x;
        if (p.y > ret.y) ret.y = p.y;
    }
    return ret;
}
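
// getAsJson() serializes one processed frame. Illustrative output shape
// (the values here are invented; the key set matches the loops below):
//   {"t":12,"nr":42,"faces":[{"pitch":-3.1,"yaw":0.2,"roll":1.0,
//    "joy":98.2,...,"engagement":77.0,"smile":99.1,...,"jawDrop":0.0,
//    "rect":{"x":120,"y":80,"w":260,"h":310},"ioDistance":182.4,"id":0}]}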
std::string getAsJson(int framenr, const std::map<FaceId, Face> &faces, const double timeStamp)
{
    std::stringstream ss;
    ss << "{" << "\"t\":" << timeStamp << ",";
    ss << "\"nr\":" << framenr << ",";
    ss << "\"faces\":[";
    int i(0);
    for (auto &face_id_pair : faces)
    {
        Face f = face_id_pair.second;
        if (i > 0) { ss << ","; }
        i++;
        ss << "{";
        // The three loops below walk the Orientation, Emotions and Expressions
        // structs as flat float arrays; the name lists must stay in the same
        // order as the SDK's member declarations (see the note after this
        // function).
        float *values = (float *)&f.measurements.orientation;
        for (std::string angle : { "pitch", "yaw", "roll" })
        {
            ss << "\"" << angle << "\":" << (*values) << ",";
            values++;
        }
        values = (float *)&f.emotions;
        for (std::string emotion : {
            "joy", "fear", "disgust", "sadness", "anger",
            "surprise", "contempt", "valence", "engagement"
        })
        {
            ss << "\"" << emotion << "\":" << (*values) << ",";
            values++;
        }
        values = (float *)&f.expressions;
        for (std::string expression : {
            "smile", "innerBrowRaise", "browRaise", "browFurrow", "noseWrinkle",
            "upperLipRaise", "lipCornerDepressor", "chinRaise", "lipPucker", "lipPress",
            "lipSuck", "mouthOpen", "smirk", "eyeClosure", "attention", "eyeWiden", "cheekRaise",
            "lidTighten", "dimpler", "lipStretch", "jawDrop"
        })
        {
            ss << "\"" << expression << "\":" << (*values) << ",";
            values++;
        }
        // Bounding box from the extreme feature points.
        FeaturePoint tl = minPoint(f.featurePoints);
        FeaturePoint br = maxPoint(f.featurePoints);
        ss << "\"rect\":{\"x\":" << tl.x << ",\"y\":" << tl.y
           << ",\"w\":" << (br.x - tl.x) << ",\"h\":" << (br.y - tl.y) << "},";
        ss << "\"ioDistance\":" << f.measurements.interocularDistance << ",";
        ss << "\"id\":" << f.id;
        ss << "}";
    }
    ss << "]"; // faces
    ss << "}";
    return ss.str();
}
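
// Caveat: the float-pointer walk above assumes Orientation, Emotions and
// Expressions are plain structs of tightly packed floats declared in exactly
// the listed order. If the SDK layout ever changes, the safer (if wordier)
// equivalent is explicit member access, e.g. (member names as declared in
// the Affdex SDK headers):
//   ss << "\"joy\":" << f.emotions.joy << ",\"fear\":" << f.emotions.fear << ...;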
/// <summary>
/// Demo of the Affdex SDK PhotoDetector class: watches a directory for
/// numbered frames, processes each one, and writes per-frame JSON metrics.
/// </summary>
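// Example invocation (paths are illustrative):
//   ./opencv-webcam-demo --data /path/to/affdex/data --frameOutput /tmp/frames \
//       --numFaces 4 --draw 0 --segments 1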
int main(int argc, char **argv)
{
    namespace po = boost::program_options; // abbreviate namespace
    std::cerr << "Hit ESCAPE key to exit app..." << endl;
    shared_ptr<PhotoDetector> frameDetector;
    try {
        affdex::path DATA_FOLDER;
        int process_framerate = 30; // parsed from the CLI but unused by PhotoDetector
        int buffer_length = 2;      // parsed from the CLI but unused by PhotoDetector
        unsigned int nFaces = 1;
        bool draw_display = true;
        bool use_segments = false;
        int faceDetectorMode = (int)FaceDetectorMode::SMALL_FACES;
        boost::filesystem::path imgPath("~/emo_in_file.jpg"); // only referenced by the disabled --file option below
        boost::filesystem::path outPath("~/output/");
        const int precision = 2;
        std::cerr.precision(precision);
        std::cout.precision(precision);
        po::options_description description("Demo of the Affdex SDK PhotoDetector class (watching a directory for frames and processing them).");
        description.add_options()
            ("help,h", po::bool_switch()->default_value(false), "Display this help message.")
#ifdef _WIN32
            ("data,d", po::wvalue< affdex::path >(&DATA_FOLDER)->default_value(affdex::path(L"data"), std::string("data")), "Path to the data folder")
#else // _WIN32
            ("data,d", po::value< affdex::path >(&DATA_FOLDER)->default_value(affdex::path("data"), std::string("data")), "Path to the data folder")
#endif // _WIN32
            ("pfps", po::value< int >(&process_framerate)->default_value(30), "Processing framerate.")
            ("bufferLen", po::value< int >(&buffer_length)->default_value(30), "Process buffer size.")
            ("faceMode", po::value< int >(&faceDetectorMode)->default_value((int)FaceDetectorMode::SMALL_FACES), "Face detector mode (large faces vs small faces).")
            ("numFaces", po::value< unsigned int >(&nFaces)->default_value(1), "Number of faces to be tracked.")
            ("draw", po::value< bool >(&draw_display)->default_value(true), "Draw metrics on screen.")
            ("segments", po::value< bool >(&use_segments)->default_value(use_segments), "Use 'segments' rather than 'frames' (influences detecting frame%06d or segment%06d).")
            //~ ("file,f", po::value< boost::filesystem::path >(&imgPath)->default_value(imgPath), "Filename of image that is watched/tracked for changes.")
            ("frameOutput,o", po::value< boost::filesystem::path >(&outPath)->default_value(outPath), "Directory to store the frames in (and JSON).")
            ;
        po::variables_map args;
        try
        {
            po::store(po::command_line_parser(argc, argv).options(description).run(), args);
            if (args["help"].as<bool>())
            {
                std::cout << description << std::endl;
                return 0;
            }
            po::notify(args);
        }
        catch (po::error &e)
        {
            std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
            std::cerr << "For help, use the -h option." << std::endl << std::endl;
            return 1;
        }
        if (!boost::filesystem::exists(DATA_FOLDER))
        {
            std::cerr << "Folder doesn't exist: " << std::string(DATA_FOLDER.begin(), DATA_FOLDER.end()) << std::endl << std::endl;
            std::cerr << "Try specifying the data folder through the command line." << std::endl;
            std::cerr << description << std::endl;
            return 1;
        }
        if (!boost::filesystem::exists(outPath))
        {
            std::cerr << "Folder doesn't exist: " << outPath.native() << std::endl << std::endl;
            std::cerr << "Try specifying the output folder through the command line." << std::endl;
            std::cerr << description << std::endl;
            return 1;
        }
        std::ofstream csvFileStream;
        std::cerr << "Initializing Affdex PhotoDetector" << endl;
        shared_ptr<FaceListener> faceListenPtr(new AFaceListener());
        shared_ptr<PlottingImageListener> listenPtr(new PlottingImageListener(csvFileStream, draw_display)); // Instantiate the image listener
        shared_ptr<StatusListener> videoListenPtr(new StatusListener());
        frameDetector = make_shared<PhotoDetector>(nFaces, (affdex::FaceDetectorMode)faceDetectorMode); // Instantiate the PhotoDetector
        // Configure which classifiers run.
        frameDetector->setDetectAllEmotions(true);
        frameDetector->setDetectAllExpressions(true);
        frameDetector->setDetectAllEmojis(false);
        frameDetector->setDetectAllAppearances(false);
        frameDetector->setClassifierPath(DATA_FOLDER);
        frameDetector->setImageListener(listenPtr.get());
        frameDetector->setFaceListener(faceListenPtr.get());
        frameDetector->setProcessStatusListener(videoListenPtr.get());
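        // Note: the set*Listener() calls hand the detector raw pointers; the
        // shared_ptrs above must stay alive for as long as the detector runs.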
        std::cout << "Max num of faces set to: " << frameDetector->getMaxNumberFaces() << std::endl;
        std::string mode;
        switch (frameDetector->getFaceDetectorMode())
        {
        case FaceDetectorMode::LARGE_FACES:
            mode = "LARGE_FACES";
            break;
        case FaceDetectorMode::SMALL_FACES:
            mode = "SMALL_FACES";
            break;
        default:
            mode = "UNKNOWN";
            break;
        }
        std::cout << "Face detector mode set to: " << mode << std::endl;
        // Start the detector's processing thread.
        frameDetector->start();
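        // Producer/consumer handshake: another process drops numbered JPEGs
        // into outPath, and the loops below consume them in order. frameNrIn
        // is the next image to read, frameNrOut the next JSON to write;
        // frameNrIn is only allowed to run one frame ahead of frameNrOut, so
        // each async result is written under the number of the image that
        // produced it.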
        int frameNrIn = 1;
        int frameNrOut = 1;
        // Resume support: skip past frames that already have a JSON file from a previous run.
        while (true) {
            char buff[100];
            snprintf(buff, sizeof(buff), (use_segments ? "segment%06d.json" : "frame%06d.json"), frameNrIn);
            boost::filesystem::path jsonPath = outPath / buff;
            if (boost::filesystem::exists(jsonPath)) {
                frameNrIn++;
                frameNrOut++;
            } else {
                break;
            }
        }
        int seconds = 1; // synthetic timestamp: one "second" per processed frame
        while (true) {
            char buff[100];
            snprintf(buff, sizeof(buff), (use_segments ? "segment%06d.jpg" : "frame%06d.jpg"), frameNrIn);
            boost::filesystem::path imgPath = outPath / buff;
            if (!boost::filesystem::exists(imgPath) || frameNrIn > frameNrOut) {
                // Wait for the next image to appear, and for the pending
                // result to be written out (frameNrOut has to catch up).
                usleep(50000); // sleep 1/20 s to avoid a busy loop
            } else {
                std::cerr << "Read " << imgPath.native() << std::endl;
                snprintf(buff, sizeof(buff), (use_segments ? "segment%06d.json" : "frame%06d.json"), frameNrIn);
                boost::filesystem::path jsonPath = outPath / buff;
                // Don't redo existing JSONs.
                if (!boost::filesystem::exists(jsonPath)) {
                    cv::Mat img = imread(imgPath.native(), 1); // 1 = force 3-channel BGR
                    if (img.empty()) {
                        usleep(50000); // the producer may still be writing the file; retry
                        continue;
                    }
                    // Wrap the pixels in an Affdex Frame with the synthetic timestamp.
                    Frame f(img.size().width, img.size().height, img.data, Frame::COLOR_FORMAT::BGR, seconds);
                    seconds++;
                    frameDetector->process(f); // hand the frame to the detector; results arrive async
                } else {
                    frameNrOut++; // no result will arrive for this frame; keep the counters in step
                }
                frameNrIn++;
            }
            // Drain one async result per pass, if available.
            if (listenPtr->getDataSize() > 0)
            {
                std::pair<Frame, std::map<FaceId, Face> > dataPoint = listenPtr->getData();
                Frame frame = dataPoint.first;
                std::map<FaceId, Face> faces = dataPoint.second;
                // Draw metrics to the GUI.
                if (draw_display)
                {
                    listenPtr->draw(faces, frame);
                }
                std::string json = getAsJson(frameNrOut, faces, frame.getTimestamp());
                std::cout << json << std::endl;
                // Store the JSON next to the image it belongs to.
                snprintf(buff, sizeof(buff), (use_segments ? "segment%06d.json" : "frame%06d.json"), frameNrOut);
                boost::filesystem::path targetFilename = outPath / buff;
                std::ofstream out(targetFilename.native());
                std::cerr << "write " << targetFilename.native() << std::endl;
                out << json << "\n";
                out.close();
                frameNrOut++;
            }
        }
std::cerr << "Stopping FrameDetector Thread" << endl;
frameDetector->stop(); //Stop frame detector thread
}
    catch (AffdexException &ex)
    {
        std::cerr << "Encountered an AffdexException: " << ex.what() << std::endl;
        return 1;
    }
    catch (std::runtime_error &err)
    {
        std::cerr << "Encountered a runtime error: " << err.what() << std::endl;
        return 1;
    }
    catch (std::exception &ex)
    {
        std::cerr << "Encountered an exception: " << ex.what() << std::endl;
        return 1;
    }
    catch (...)
    {
        std::cerr << "Encountered an unhandled exception." << std::endl;
        return 1;
    }
    return 0;
}