Add emojis to the output file

parent 4b4e1e05ba
commit 0d8c63f959

3 changed files with 107 additions and 77 deletions
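In summary, as the hunks below show: the PlottingImageListener sample gains an `emojis` name list, its CSV output gains a `dominantEmoji` column plus one score column per emoji, the on-screen overlay gains an EMOJIS section, and both demo main() functions opt in to the new classifier with setDetectAllEmojis(true).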
@@ -24,10 +24,10 @@ using namespace affdex;
 class PlottingImageListener : public ImageListener
 {
 
     std::mutex mMutex;
     std::deque<std::pair<Frame, std::map<FaceId, Face> > > mDataArray;
 
     double mCaptureLastTS;
     double mCaptureFPS;
     double mProcessLastTS;
@@ -38,17 +38,18 @@ class PlottingImageListener : public ImageListener
     const int spacing = 10;
     const float font_size = 0.5f;
     const int font = cv::FONT_HERSHEY_COMPLEX_SMALL;
 
     std::vector<std::string> expressions;
     std::vector<std::string> emotions;
+    std::vector<std::string> emojis;
     std::vector<std::string> headAngles;
 
     std::map<affdex::Glasses, std::string> glassesMap;
     std::map<affdex::Gender, std::string> genderMap;
 
 public:
 
 
     PlottingImageListener(std::ofstream &csv, const bool draw_display)
         : fStream(csv), mDrawDisplay(draw_display), mStartT(std::chrono::system_clock::now()),
         mCaptureLastTS(-1.0f), mCaptureFPS(-1.0f),
@@ -59,35 +60,45 @@ public:
             "upperLipRaise", "lipCornerDepressor", "chinRaise", "lipPucker", "lipPress",
             "lipSuck", "mouthOpen", "smirk", "eyeClosure", "attention"
         };
 
         emotions = {
             "joy", "fear", "disgust", "sadness", "anger",
             "surprise", "contempt", "valence", "engagement"
         };
 
         headAngles = { "pitch", "yaw", "roll" };
 
 
+        emojis = std::vector<std::string> {
+            "relaxed", "smiley", "laughing",
+            "kissing", "disappointed",
+            "rage", "smirk", "wink",
+            "stuckOutTongueWinkingEye", "stuckOutTongue",
+            "flushed", "scream"
+        };
 
         genderMap = std::map<affdex::Gender, std::string> {
             { affdex::Gender::Male, "male" },
             { affdex::Gender::Female, "female" },
             { affdex::Gender::Unknown, "unknown" },
 
         };
 
         glassesMap = std::map<affdex::Glasses, std::string> {
             { affdex::Glasses::Yes, "glasses" },
             { affdex::Glasses::No, "no glasses" }
         };
 
-        fStream << "TimeStamp,faceId,interocularDistance,glasses,gender,";
+        fStream << "TimeStamp,faceId,interocularDistance,glasses,gender,dominantEmoji,";
         for (std::string angle : headAngles) fStream << angle << ",";
         for (std::string emotion : emotions) fStream << emotion << ",";
         for (std::string expression : expressions) fStream << expression << ",";
+        for (std::string emoji : emojis) fStream << emoji << ",";
         fStream << std::endl;
         fStream.precision(4);
         fStream << std::fixed;
     }
 
     FeaturePoint minPoint(VecFeaturePoint points)
     {
         VecFeaturePoint::iterator it = points.begin();
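For reference, the header row these loops now emit reads as follows (a sketch assembled from the vectors above; the expression names that precede "upperLipRaise" fall outside this hunk and are elided with …):

    TimeStamp,faceId,interocularDistance,glasses,gender,dominantEmoji,pitch,yaw,roll,joy,fear,disgust,sadness,anger,surprise,contempt,valence,engagement,…,upperLipRaise,lipCornerDepressor,chinRaise,lipPucker,lipPress,lipSuck,mouthOpen,smirk,eyeClosure,attention,relaxed,smiley,laughing,kissing,disappointed,rage,smirk,wink,stuckOutTongueWinkingEye,stuckOutTongue,flushed,scream,

Every writer loop appends a trailing comma, so this row (and each data row below it) ends with an empty final column.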
@@ -99,7 +110,7 @@ public:
         }
         return ret;
     };
 
     FeaturePoint maxPoint( VecFeaturePoint points)
     {
         VecFeaturePoint::iterator it = points.begin();
@@ -111,27 +122,27 @@ public:
         }
         return ret;
     };
 
 
     double getProcessingFrameRate()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mProcessFPS;
     }
 
     double getCaptureFrameRate()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mCaptureFPS;
     }
 
     int getDataSize()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mDataArray.size();
 
     }
 
     std::pair<Frame, std::map<FaceId, Face>> getData()
     {
         std::lock_guard<std::mutex> lg(mMutex);
@@ -139,7 +150,7 @@ public:
         mDataArray.pop_front();
         return dpoint;
     }
 
     void onImageResults(std::map<FaceId, Face> faces, Frame image) override
     {
         std::lock_guard<std::mutex> lg(mMutex);
@@ -150,58 +161,68 @@ public:
         mProcessFPS = 1.0f / (seconds - mProcessLastTS);
         mProcessLastTS = seconds;
     };
 
     void onImageCapture(Frame image) override
     {
         std::lock_guard<std::mutex> lg(mMutex);
         mCaptureFPS = 1.0f / (image.getTimestamp() - mCaptureLastTS);
         mCaptureLastTS = image.getTimestamp();
     };
 
     void outputToFile(const std::map<FaceId, Face> faces, const double timeStamp)
     {
         if (faces.empty())
         {
-            fStream << timeStamp << "nan,nan,no glasses,unknown,";
+            fStream << timeStamp << "nan,nan,no glasses,unknown, unknown,";
             for (std::string angle : headAngles) fStream << "nan,";
             for (std::string emotion : emotions) fStream << "nan,";
             for (std::string expression : expressions) fStream << "nan,";
+            for (std::string emoji : emojis) fStream << "nan,";
             fStream << std::endl;
         }
         for (auto & face_id_pair : faces)
         {
             Face f = face_id_pair.second;
 
             fStream << timeStamp << ","
                 << f.id << ","
                 << f.measurements.interocularDistance << ","
                 << glassesMap[f.appearance.glasses] << ","
-                << genderMap[f.appearance.gender] << ",";
+                << genderMap[f.appearance.gender] << ","
+                << affdex::EmojiToString(f.emojis.dominantEmoji) << ",";
 
             float *values = (float *)&f.measurements.orientation;
             for (std::string angle : headAngles)
             {
                 fStream << (*values) << ",";
                 values++;
             }
 
             values = (float *)&f.emotions;
             for (std::string emotion : emotions)
             {
                 fStream << (*values) << ",";
                 values++;
             }
 
             values = (float *)&f.expressions;
             for (std::string expression : expressions)
             {
                 fStream << (*values) << ",";
                 values++;
             }
 
+            values = (float *)&f.emojis;
+            for (std::string emoji : emojis)
+            {
+                fStream << (*values) << ",";
+                values++;
+            }
+
             fStream << std::endl;
         }
     }
 
     void drawValues(const float * first, const std::vector<std::string> names,
         const int x, int &padding, const cv::Scalar clr,
         cv::Mat img)
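Worth noting: the new emoji block reuses the file's cast-and-walk idiom, reinterpreting &f.emojis as a packed array of floats and stepping through it once per name in `emojis`. That only works if the SDK struct stores one float per emoji, in exactly the order of the name vector. A minimal self-contained sketch of the idiom, using a hypothetical three-field struct rather than the real affdex::Emojis type:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical scores struct: all-float fields, declared in the same order
    // as the name list passed in below. The idiom silently breaks if either
    // side drifts out of sync.
    struct Scores { float relaxed; float smiley; float laughing; };

    void writeScores(std::ostream &out, const Scores &s,
                     const std::vector<std::string> &names)
    {
        // Walk the struct's fields as if they were a float array.
        const float *values = reinterpret_cast<const float *>(&s);
        for (const std::string &name : names)
        {
            out << name << "=" << *values << ","; // pair each field with its column name
            ++values;
        }
        out << "\n";
    }

    int main()
    {
        Scores s{ 0.1f, 0.7f, 0.2f };
        writeScores(std::cout, s, { "relaxed", "smiley", "laughing" });
        return 0;
    }

This prints relaxed=0.1,smiley=0.7,laughing=0.2, — the same positional pairing the CSV writer relies on for its emoji columns.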
@@ -217,18 +238,18 @@ public:
             first++;
         }
     }
 
     void draw(const std::map<FaceId, Face> faces, Frame image)
     {
         std::shared_ptr<byte> imgdata = image.getBGRByteArray();
         cv::Mat img = cv::Mat(image.getHeight(), image.getWidth(), CV_8UC3, imgdata.get());
 
         const int left_margin = 30;
 
 
-        cv::Scalar clr = cv::Scalar(255, 255, 255);
+        cv::Scalar clr = cv::Scalar(0, 0, 255);
         cv::Scalar header_clr = cv::Scalar(255, 0, 0);
 
         for (auto & face_id_pair : faces)
         {
             Face f = face_id_pair.second;
@@ -239,49 +260,56 @@ public:
             }
             FeaturePoint tl = minPoint(points);
             FeaturePoint br = maxPoint(points);
 
             //Output the results of the different classifiers.
             int padding = tl.y + 10;
 
             cv::putText(img, "APPEARANCE", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
             cv::putText(img, genderMap[f.appearance.gender], cv::Point(br.x, padding += spacing), font, font_size, clr);
             cv::putText(img, glassesMap[f.appearance.glasses], cv::Point(br.x, padding += spacing), font, font_size, clr);
 
 
 
             Orientation headAngles = f.measurements.orientation;
 
             char strAngles[100];
             sprintf(strAngles, "Pitch: %3.2f Yaw: %3.2f Roll: %3.2f Interocular: %3.2f",
                 headAngles.pitch, headAngles.yaw, headAngles.roll, f.measurements.interocularDistance);
 
 
 
             char fId[10];
             sprintf(fId, "ID: %i", f.id);
             cv::putText(img, fId, cv::Point(br.x, padding += spacing), font, font_size, clr);
 
             cv::putText(img, "MEASUREMENTS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
 
             cv::putText(img, strAngles, cv::Point(br.x, padding += spacing), font, font_size, clr);
 
+            cv::putText(img, "EMOJIS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
+
+            cv::putText(img, "dominantEmoji: " + affdex::EmojiToString(f.emojis.dominantEmoji),
+                cv::Point(br.x, padding += spacing), font, font_size, clr);
+
+            drawValues((float *)&f.emojis, emojis, br.x, padding, clr, img);
+
             cv::putText(img, "EXPRESSIONS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
 
             drawValues((float *)&f.expressions, expressions, br.x, padding, clr, img);
 
             cv::putText(img, "EMOTIONS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
 
             drawValues((float *)&f.emotions, emotions, br.x, padding, clr, img);
 
         }
         char fps_str[50];
         sprintf(fps_str, "capture fps: %2.0f", mCaptureFPS);
         cv::putText(img, fps_str, cv::Point(img.cols - 110, img.rows - left_margin - spacing), font, font_size, clr);
         sprintf(fps_str, "process fps: %2.0f", mProcessFPS);
         cv::putText(img, fps_str, cv::Point(img.cols - 110, img.rows - left_margin), font, font_size, clr);
 
         cv::imshow("analyze video", img);
         cv::waitKey(5);
     }
 
 };
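On the display side the commit mirrors the CSV change: affdex::EmojiToString(f.emojis.dominantEmoji) labels the strongest emoji, and the per-emoji scores are rendered through the same drawValues helper already used for expressions and emotions, so both the overlay and the CSV are driven by the one `emojis` name list.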
@@ -126,6 +126,7 @@ int main(int argsc, char ** argsv)
     //Initialize detectors
     frameDetector->setDetectAllEmotions(true);
    frameDetector->setDetectAllExpressions(true);
+    frameDetector->setDetectAllEmojis(true);
     frameDetector->setDetectGender(true);
     frameDetector->setDetectGlasses(true);
     frameDetector->setClassifierPath(DATA_FOLDER);
@@ -35,11 +35,11 @@ int main(int argsc, char ** argsv)
     bool loop = false;
     unsigned int nFaces = 1;
     int faceDetectorMode = (int)FaceDetectorMode::SMALL_FACES;
 
     const int precision = 2;
     std::cerr.precision(precision);
     std::cout.precision(precision);
 
     namespace po = boost::program_options; // abbreviate namespace
     po::options_description description("Project for demoing the Windows SDK VideoDetector class (processing video files).");
     description.add_options()
@@ -76,7 +76,7 @@ int main(int argsc, char ** argsv)
         std::cerr << "For help, use the -h option." << std::endl << std::endl;
         return 1;
     }
 
     // Parse and check the data folder (with assets)
     if (!boost::filesystem::exists(DATA_FOLDER))
     {
@@ -89,20 +89,20 @@ int main(int argsc, char ** argsv)
     {
         //Initialize the video file detector
         VideoDetector videoDetector(process_framerate, nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
 
         //Initialize out file
         boost::filesystem::path csvPath(videoPath);
         csvPath.replace_extension(".csv");
         std::ofstream csvFileStream(csvPath.c_str());
 
         if (!csvFileStream.is_open())
         {
             std::cerr << "Unable to open csv file " << csvPath << std::endl;
             return 1;
         }
 
 
 
         std::cout << "Max num of faces set to: " << videoDetector.getMaxNumberFaces() << std::endl;
         std::string mode;
         switch (videoDetector.getFaceDetectorMode())
@@ -116,13 +116,14 @@ int main(int argsc, char ** argsv)
         default:
             break;
         }
 
         std::cout << "Face detector mode set to: " << mode << std::endl;
         shared_ptr<PlottingImageListener> listenPtr(new PlottingImageListener(csvFileStream, draw_display));
 
         //Activate all the detectors
         videoDetector.setDetectAllEmotions(true);
         videoDetector.setDetectAllExpressions(true);
+        videoDetector.setDetectAllEmojis(true);
         videoDetector.setDetectGender(true);
         videoDetector.setDetectGlasses(true);
         //Set the location of the data folder and license file
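As in the frame-detector demo above, emoji classification is switched on alongside the other classifiers before videoDetector.start() is called (which, per the comment below, initializes the detectors only once); without setDetectAllEmojis(true) the emoji fields the listener reads would presumably never be populated.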
@@ -130,16 +131,16 @@ int main(int argsc, char ** argsv)
         videoDetector.setLicensePath(LICENSE_PATH);
         //Add callback functions implementations
         videoDetector.setImageListener(listenPtr.get());
 
 
         videoDetector.start(); //Initialize the detectors .. call only once
 
         do
         {
             shared_ptr<StatusListener> videoListenPtr = std::make_shared<StatusListener>();
             videoDetector.setProcessStatusListener(videoListenPtr.get());
             videoDetector.process(videoPath); //Process a video
 
             //For each frame processed
             while (videoListenPtr->isRunning())
             {
@@ -148,34 +149,34 @@ int main(int argsc, char ** argsv)
                 std::pair<Frame, std::map<FaceId, Face> > dataPoint = listenPtr->getData();
                 Frame frame = dataPoint.first;
                 std::map<FaceId, Face> faces = dataPoint.second;
 
 
                 //Draw on the GUI
                 if (draw_display)
                 {
                     listenPtr->draw(faces, frame);
                 }
 
                 std::cerr << "timestamp: " << frame.getTimestamp()
                     << " cfps: " << listenPtr->getCaptureFrameRate()
                     << " pfps: " << listenPtr->getProcessingFrameRate()
                     << " faces: " << faces.size() << endl;
 
                 //Output metrics to file
                 listenPtr->outputToFile(faces, frame.getTimestamp());
                 }
             }
         } while(loop);
 
         videoDetector.stop();
         csvFileStream.close();
 
         std::cout << "Output written to file: " << csvPath << std::endl;
     }
     catch (AffdexException ex)
     {
         std::cerr << ex.what();
     }
 
     return 0;
 }