From 0d8c63f95994ab30160ee67dcaee9a85295a7134 Mon Sep 17 00:00:00 2001
From: Abdelrahman Mahmoud
Date: Tue, 5 Apr 2016 11:13:23 -0400
Subject: [PATCH] Add emojis to the output file

---
 common/PlottingImageListener.hpp          | 140 +++++++++++++---------
 opencv-webcam-demo/opencv-webcam-demo.cpp |   1 +
 video-demo/video-demo.cpp                 |  43 +++----
 3 files changed, 107 insertions(+), 77 deletions(-)

diff --git a/common/PlottingImageListener.hpp b/common/PlottingImageListener.hpp
index cbf3638..02d2eec 100644
--- a/common/PlottingImageListener.hpp
+++ b/common/PlottingImageListener.hpp
@@ -24,10 +24,10 @@ using namespace affdex;
 
 class PlottingImageListener : public ImageListener
 {
-
+
     std::mutex mMutex;
     std::deque<std::pair<Frame, std::map<FaceId, Face> > > mDataArray;
-
+
     double mCaptureLastTS;
     double mCaptureFPS;
     double mProcessLastTS;
@@ -38,17 +38,18 @@ class PlottingImageListener : public ImageListener
     const int spacing = 10;
     const float font_size = 0.5f;
     const int font = cv::FONT_HERSHEY_COMPLEX_SMALL;
-
+
     std::vector<std::string> expressions;
     std::vector<std::string> emotions;
+    std::vector<std::string> emojis;
     std::vector<std::string> headAngles;
-
+
     std::map<affdex::Glasses, std::string> glassesMap;
     std::map<affdex::Gender, std::string> genderMap;
-
+
 public:
-
-
+
+
     PlottingImageListener(std::ofstream &csv, const bool draw_display)
         : fStream(csv), mDrawDisplay(draw_display), mStartT(std::chrono::system_clock::now()),
         mCaptureLastTS(-1.0f), mCaptureFPS(-1.0f),
@@ -59,35 +60,45 @@ public:
             "upperLipRaise", "lipCornerDepressor", "chinRaise", "lipPucker",
             "lipPress", "lipSuck", "mouthOpen", "smirk", "eyeClosure", "attention"
         };
-
+
         emotions = {
             "joy", "fear", "disgust", "sadness", "anger",
             "surprise", "contempt", "valence", "engagement"
         };
-
+
         headAngles = { "pitch", "yaw", "roll" };
-
+
+
+        emojis = std::vector<std::string> {
+            "relaxed", "smiley", "laughing",
+            "kissing", "disappointed",
+            "rage", "smirk", "wink",
+            "stuckOutTongueWinkingEye", "stuckOutTongue",
+            "flushed", "scream"
+        };
+
         genderMap = std::map<affdex::Gender, std::string> {
             { affdex::Gender::Male, "male" },
             { affdex::Gender::Female, "female" },
             { affdex::Gender::Unknown, "unknown" },
-
+
         };
-
+
         glassesMap = std::map<affdex::Glasses, std::string> {
             { affdex::Glasses::Yes, "glasses" },
             { affdex::Glasses::No, "no glasses" }
         };
-
-        fStream << "TimeStamp,faceId,interocularDistance,glasses,gender,";
+
+        fStream << "TimeStamp,faceId,interocularDistance,glasses,gender,dominantEmoji,";
         for (std::string angle : headAngles) fStream << angle << ",";
         for (std::string emotion : emotions) fStream << emotion << ",";
         for (std::string expression : expressions) fStream << expression << ",";
+        for (std::string emoji : emojis) fStream << emoji << ",";
         fStream << std::endl;
         fStream.precision(4);
         fStream << std::fixed;
     }
-
+
     FeaturePoint minPoint(VecFeaturePoint points)
     {
         VecFeaturePoint::iterator it = points.begin();
@@ -99,7 +110,7 @@ public:
         }
         return ret;
     };
-
+
     FeaturePoint maxPoint( VecFeaturePoint points)
     {
         VecFeaturePoint::iterator it = points.begin();
@@ -111,27 +122,27 @@ public:
         }
         return ret;
     };
-
-
+
+
     double getProcessingFrameRate()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mProcessFPS;
     }
-
+
     double getCaptureFrameRate()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mCaptureFPS;
     }
-
+
     int getDataSize()
     {
         std::lock_guard<std::mutex> lg(mMutex);
         return mDataArray.size();
-
     }
-
+
     std::pair<Frame, std::map<FaceId, Face>> getData()
     {
         std::lock_guard<std::mutex> lg(mMutex);
@@ -139,7 +150,7 @@ public:
         mDataArray.pop_front();
        return dpoint;
     }
-
+
     void onImageResults(std::map<FaceId, Face> faces, Frame image) override
     {
         std::lock_guard<std::mutex> lg(mMutex);
@@ -150,58 +161,68 @@ public:
         mProcessFPS = 1.0f / (seconds - mProcessLastTS);
         mProcessLastTS = seconds;
     };
-
+
     void onImageCapture(Frame image) override
     {
         std::lock_guard<std::mutex> lg(mMutex);
         mCaptureFPS = 1.0f / (image.getTimestamp() - mCaptureLastTS);
         mCaptureLastTS = image.getTimestamp();
     };
-
+
     void outputToFile(const std::map<FaceId, Face> faces, const double timeStamp)
     {
         if (faces.empty())
         {
-            fStream << timeStamp << "nan,nan,no glasses,unknown,";
+            fStream << timeStamp << ",nan,nan,no glasses,unknown,unknown,";
             for (std::string angle : headAngles) fStream << "nan,";
             for (std::string emotion : emotions) fStream << "nan,";
             for (std::string expression : expressions) fStream << "nan,";
+            for (std::string emoji : emojis) fStream << "nan,";
             fStream << std::endl;
         }
         for (auto & face_id_pair : faces)
         {
             Face f = face_id_pair.second;
-
+
             fStream << timeStamp << ","
                 << f.id << ","
                 << f.measurements.interocularDistance << ","
                 << glassesMap[f.appearance.glasses] << ","
-                << genderMap[f.appearance.gender] << ",";
-
+                << genderMap[f.appearance.gender] << ","
+                << affdex::EmojiToString(f.emojis.dominantEmoji) << ",";
+
             float *values = (float *)&f.measurements.orientation;
             for (std::string angle : headAngles)
             {
                 fStream << (*values) << ",";
                 values++;
             }
-
+
             values = (float *)&f.emotions;
             for (std::string emotion : emotions)
             {
                 fStream << (*values) << ",";
                 values++;
             }
-
+
             values = (float *)&f.expressions;
             for (std::string expression : expressions)
             {
                 fStream << (*values) << ",";
                 values++;
             }
+
+            values = (float *)&f.emojis;
+            for (std::string emoji : emojis)
+            {
+                fStream << (*values) << ",";
+                values++;
+            }
+
             fStream << std::endl;
         }
     }
-
+
     void drawValues(const float * first, const std::vector<std::string> names,
         const int x, int &padding, const cv::Scalar clr, cv::Mat img)
@@ -217,18 +238,18 @@ public:
             first++;
         }
     }
-
+
     void draw(const std::map<FaceId, Face> faces, Frame image)
     {
         std::shared_ptr<unsigned char> imgdata = image.getBGRByteArray();
         cv::Mat img = cv::Mat(image.getHeight(), image.getWidth(), CV_8UC3, imgdata.get());
-
+
         const int left_margin = 30;
-
-
-        cv::Scalar clr = cv::Scalar(255, 255, 255);
+
+
+        cv::Scalar clr = cv::Scalar(0, 0, 255);
         cv::Scalar header_clr = cv::Scalar(255, 0, 0);
-
+
         for (auto & face_id_pair : faces)
         {
             Face f = face_id_pair.second;
@@ -239,49 +260,56 @@ public:
             }
             FeaturePoint tl = minPoint(points);
             FeaturePoint br = maxPoint(points);
-
+
             //Output the results of the different classifiers.
             int padding = tl.y + 10;
-
+
             cv::putText(img, "APPEARANCE", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
             cv::putText(img, genderMap[f.appearance.gender], cv::Point(br.x, padding += spacing), font, font_size, clr);
             cv::putText(img, glassesMap[f.appearance.glasses], cv::Point(br.x, padding += spacing), font, font_size, clr);
-
-
-
+
+
+
             Orientation headAngles = f.measurements.orientation;
-
+
             char strAngles[100];
             sprintf(strAngles, "Pitch: %3.2f Yaw: %3.2f Roll: %3.2f Interocular: %3.2f",
                 headAngles.pitch, headAngles.yaw, headAngles.roll, f.measurements.interocularDistance);
-
-
-
+
+
+
             char fId[10];
             sprintf(fId, "ID: %i", f.id);
             cv::putText(img, fId, cv::Point(br.x, padding += spacing), font, font_size, clr);
-
+
             cv::putText(img, "MEASUREMENTS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
-
+
             cv::putText(img, strAngles, cv::Point(br.x, padding += spacing), font, font_size, clr);
-
+
+            cv::putText(img, "EMOJIS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
+
+            cv::putText(img, "dominantEmoji: " + affdex::EmojiToString(f.emojis.dominantEmoji),
+                cv::Point(br.x, padding += spacing), font, font_size, clr);
+
+            drawValues((float *)&f.emojis, emojis, br.x, padding, clr, img);
+
             cv::putText(img, "EXPRESSIONS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
-
+
             drawValues((float *)&f.expressions, expressions, br.x, padding, clr, img);
-
+
             cv::putText(img, "EMOTIONS", cv::Point(br.x, padding += (spacing * 2)), font, font_size, header_clr);
-
+
             drawValues((float *)&f.emotions, emotions, br.x, padding, clr, img);
-
+
         }
         char fps_str[50];
         sprintf(fps_str, "capture fps: %2.0f", mCaptureFPS);
         cv::putText(img, fps_str, cv::Point(img.cols - 110, img.rows - left_margin - spacing), font, font_size, clr);
         sprintf(fps_str, "process fps: %2.0f", mProcessFPS);
         cv::putText(img, fps_str, cv::Point(img.cols - 110, img.rows - left_margin), font, font_size, clr);
-
+
         cv::imshow("analyze video", img);
         cv::waitKey(5);
     }
-
+
 };
diff --git a/opencv-webcam-demo/opencv-webcam-demo.cpp b/opencv-webcam-demo/opencv-webcam-demo.cpp
index 9d6c05b..e32f744 100644
--- a/opencv-webcam-demo/opencv-webcam-demo.cpp
+++ b/opencv-webcam-demo/opencv-webcam-demo.cpp
@@ -126,6 +126,7 @@ int main(int argsc, char ** argsv)
     //Initialize detectors
     frameDetector->setDetectAllEmotions(true);
     frameDetector->setDetectAllExpressions(true);
+    frameDetector->setDetectAllEmojis(true);
     frameDetector->setDetectGender(true);
     frameDetector->setDetectGlasses(true);
     frameDetector->setClassifierPath(DATA_FOLDER);
diff --git a/video-demo/video-demo.cpp b/video-demo/video-demo.cpp
index 4e9d29d..72b5ac9 100644
--- a/video-demo/video-demo.cpp
+++ b/video-demo/video-demo.cpp
@@ -35,11 +35,11 @@ int main(int argsc, char ** argsv)
     bool loop = false;
     unsigned int nFaces = 1;
     int faceDetectorMode = (int)FaceDetectorMode::SMALL_FACES;
-
+
     const int precision = 2;
     std::cerr.precision(precision);
     std::cout.precision(precision);
-
+
     namespace po = boost::program_options; // abbreviate namespace
     po::options_description description("Project for demoing the Windows SDK VideoDetector class (processing video files).");
     description.add_options()
@@ -76,7 +76,7 @@ int main(int argsc, char ** argsv)
         std::cerr << "For help, use the -h option." << std::endl << std::endl;
         return 1;
     }
-
+
     // Parse and check the data folder (with assets)
     if (!boost::filesystem::exists(DATA_FOLDER))
     {
@@ -89,20 +89,20 @@ int main(int argsc, char ** argsv)
     {
         //Initialize the video file detector
         VideoDetector videoDetector(process_framerate, nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
-
+
         //Initialize out file
         boost::filesystem::path csvPath(videoPath);
         csvPath.replace_extension(".csv");
         std::ofstream csvFileStream(csvPath.c_str());
-
+
         if (!csvFileStream.is_open())
         {
             std::cerr << "Unable to open csv file " << csvPath << std::endl;
             return 1;
         }
-
-
-
+
+
+
         std::cout << "Max num of faces set to: " << videoDetector.getMaxNumberFaces() << std::endl;
         std::string mode;
         switch (videoDetector.getFaceDetectorMode())
@@ -116,13 +116,14 @@ int main(int argsc, char ** argsv)
         default:
             break;
         }
-
+
         std::cout << "Face detector mode set to: " << mode << std::endl;
         shared_ptr<PlottingImageListener> listenPtr(new PlottingImageListener(csvFileStream, draw_display));
-
+
         //Activate all the detectors
         videoDetector.setDetectAllEmotions(true);
         videoDetector.setDetectAllExpressions(true);
+        videoDetector.setDetectAllEmojis(true);
         videoDetector.setDetectGender(true);
         videoDetector.setDetectGlasses(true);
         //Set the location of the data folder and license file
@@ -130,16 +131,16 @@ int main(int argsc, char ** argsv)
         videoDetector.setClassifierPath(DATA_FOLDER);
         videoDetector.setLicensePath(LICENSE_PATH);
         //Add callback functions implementations
         videoDetector.setImageListener(listenPtr.get());
-
-
+
+
         videoDetector.start();    //Initialize the detectors .. call only once
-
+
         do
         {
             shared_ptr<StatusListener> videoListenPtr = std::make_shared<StatusListener>();
             videoDetector.setProcessStatusListener(videoListenPtr.get());
             videoDetector.process(videoPath);    //Process a video
-
+
            //For each frame processed
            while (videoListenPtr->isRunning())
            {
@@ -148,34 +149,34 @@ int main(int argsc, char ** argsv)
                std::pair<Frame, std::map<FaceId, Face> > dataPoint = listenPtr->getData();
                Frame frame = dataPoint.first;
                std::map<FaceId, Face> faces = dataPoint.second;
-
-
+
+
                //Draw on the GUI
                if (draw_display)
                {
                    listenPtr->draw(faces, frame);
                }
-
+
                std::cerr << "timestamp: " << frame.getTimestamp()
                    << " cfps: " << listenPtr->getCaptureFrameRate()
                    << " pfps: " << listenPtr->getProcessingFrameRate()
                    << " faces: " << faces.size() << endl;
-
+
                //Output metrics to file
                listenPtr->outputToFile(faces, frame.getTimestamp());
            }
        } while (loop);
-
+
        videoDetector.stop();
        csvFileStream.close();
-
+
        std::cout << "Output written to file: " << csvPath << std::endl;
    }
    catch (AffdexException ex)
    {
        std::cerr << ex.what();
    }
-
+
    return 0;
}
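
Note on the (float *)&f.emojis pattern used in outputToFile() and draw(): like
the existing emotion and expression output, it walks a scores struct as a flat
float array and pairs each float with a name from the `emojis` vector. That
only holds while the struct is standard-layout, packs one float per listed
emoji with no padding, and declares those floats in exactly the order of the
name list; any non-float member (such as the dominant-emoji field) has to sit
after the floats being read. Below is a minimal standalone sketch of that
assumption; the Scores struct and its values are hypothetical stand-ins, not
the SDK's emoji scores type.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for a scores struct like the one behind f.emojis.
    struct Scores
    {
        float relaxed;
        float smiley;
        float laughing;
    };

    int main()
    {
        // Compile-time guard: no padding between the floats, otherwise the
        // pointer walk below would read garbage bytes.
        static_assert(sizeof(Scores) == 3 * sizeof(float), "unexpected padding in Scores");

        const std::vector<std::string> names = { "relaxed", "smiley", "laughing" };
        Scores s = { 10.0f, 90.0f, 55.0f };

        // The same walk outputToFile() performs: treat the struct as an
        // array of floats ordered like the name list.
        const float *values = (const float *)&s;
        for (const std::string & name : names)
        {
            std::cout << name << "," << (*values) << std::endl;
            values++;
        }
        return 0;
    }

If a later SDK release reordered the members or inserted a non-float field
before the scores, the CSV columns would silently misalign, so the name list
and the struct layout have to be kept in step by hand.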