diff --git a/opencv-webcam-demo/opencv-webcam-demo.cpp b/opencv-webcam-demo/opencv-webcam-demo.cpp
index fd9636c..92e5e33 100644
--- a/opencv-webcam-demo/opencv-webcam-demo.cpp
+++ b/opencv-webcam-demo/opencv-webcam-demo.cpp
@@ -19,9 +19,12 @@
using namespace std;
using namespace affdex;
-///
-/// Project for demoing the Windows SDK CameraDetector class (grabbing and processing frames from the camera).
-///
+///
+/// Project for demoing the Windows SDK CameraDetector class (grabbing and processing frames from the camera).
+///
+
+// Forward declaration; getAsJson is defined below main().
+std::string getAsJson(const std::map<FaceId, Face> &faces, const double timeStamp);
int main(int argsc, char ** argsv)
{
namespace po = boost::program_options; // abbreviate namespace
@@ -115,8 +115,9 @@ int main(int argsc, char ** argsv)
//Initialize detectors
frameDetector->setDetectAllEmotions(true);
frameDetector->setDetectAllExpressions(true);
- frameDetector->setDetectAllEmojis(true);
- frameDetector->setDetectAllAppearances(true);
+ frameDetector->setDetectAllEmojis(false);
+ frameDetector->setDetectAllAppearances(false);
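+	// Emoji and appearance metrics are not used by the JSON output below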
frameDetector->setClassifierPath(DATA_FOLDER);
frameDetector->setImageListener(listenPtr.get());
frameDetector->setFaceListener(faceListenPtr.get());
@@ -153,6 +153,7 @@ int main(int argsc, char ** argsv)
frameDetector->start();
do{
+
cv::Mat img;
if (!webcam.read(img)) //Capture an image from the camera
{
@@ -184,13 +185,16 @@
listenPtr->draw(faces, frame);
}
- std::cerr << "timestamp: " << frame.getTimestamp()
- << " cfps: " << listenPtr->getCaptureFrameRate()
- << " pfps: " << listenPtr->getProcessingFrameRate()
- << " faces: " << faces.size() << endl;
+ // std::cerr << "timestamp: " << frame.getTimestamp()
+ // << " cfps: " << listenPtr->getCaptureFrameRate()
+ // << " pfps: " << listenPtr->getProcessingFrameRate()
+ // << " faces: " << faces.size() << endl;
//Output metrics to the file
//listenPtr->outputToFile(faces, frame.getTimestamp());
+
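+ // Emit this frame's metrics as one JSON object per line on stdout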
+ std::cout << getAsJson(faces, frame.getTimestamp()) << std::endl;
}
@@ -227,3 +230,72 @@ int main(int argsc, char ** argsv)
return 0;
}
+
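+// Serialize one frame's face metrics to a single-line JSON object:
+// {"t":<timestamp>,"faces":[{...},...]}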
+std::string getAsJson(const std::map<FaceId, Face> &faces, const double timeStamp)
+{
+ std::stringstream ss;
+ ss << "{" << "'t':" << timeStamp << ",";
+ ss << "'faces':[";
+
+ int i(0);
+
+ // Metric names, listed in the same order as the float fields of the
+ // corresponding affdex structs (mirroring the lists in the sample's
+ // PlottingImageListener)
+ static const std::vector<std::string> headAngles { "pitch", "yaw", "roll" };
+ static const std::vector<std::string> emotions { "joy", "fear", "disgust", "sadness", "anger",
+ "surprise", "contempt", "valence", "engagement" };
+ static const std::vector<std::string> expressions { "smile", "innerBrowRaise", "browRaise",
+ "browFurrow", "noseWrinkle", "upperLipRaise", "lipCornerDepressor", "chinRaise", "lipPucker",
+ "lipPress", "lipSuck", "mouthOpen", "smirk", "eyeClosure", "attention" };
+ for (auto & face_id_pair : faces)
+ {
+ Face f = face_id_pair.second;
+
+ if(i > 0) { ss << ","; }
+ i++;
+
+ ss << "{";
+
+ // fStream << timeStamp << ","
+ // << f.id << ","
+ // << f.measurements.interocularDistance << ","
+ // << glassesMap[f.appearance.glasses] << ","
+ // << ageMap[f.appearance.age] << ","
+ // << ethnicityMap[f.appearance.ethnicity] << ","
+ // << genderMap[f.appearance.gender] << ","
+ // << affdex::EmojiToString(f.emojis.dominantEmoji) << ",";
+
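+ // The affdex metric structs hold consecutive floats, so step through their fields with a raw pointer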
+ const float *values = (const float *)&f.measurements.orientation;
+ for (const std::string & angle : headAngles)
+ {
+ ss << "\"" << angle << "\":" << (*values) << ",";
+ values++;
+ }
+
+ values = (const float *)&f.emotions;
+ for (const std::string & emotion : emotions)
+ {
+ ss << "\"" << emotion << "\":" << (*values) << ",";
+ values++;
+ }
+
+ values = (const float *)&f.expressions;
+ for (const std::string & expression : expressions)
+ {
+ ss << "\"" << expression << "\":" << (*values) << ",";
+ values++;
+ }
+
+ ss << "'ioDistance':"<< f.measurements.interocularDistance << ",";
+ ss << "'id':"<< f.id;
+ ss << "}";
+ }
+
+ ss << "]"; // faces
+ ss << "}";
+ return ss.str();
+}