From 6e15e81777887d92921df48bc9b41739d7dcf2cd Mon Sep 17 00:00:00 2001 From: Tadas Baltrusaitis Date: Fri, 30 Mar 2018 09:36:32 +0100 Subject: [PATCH] Working towards coping without AU and eye models. Fixes to the visualizer. --- .gitignore | 2 + exe/FaceLandmarkImg/FaceLandmarkImg.cpp | 16 ++++- exe/FaceLandmarkVid/FaceLandmarkVid.cpp | 5 ++ .../FaceLandmarkVidMulti.cpp | 19 ++++-- exe/FeatureExtraction/FeatureExtraction.cpp | 14 +++++ gui/OpenFaceDemo/MainWindow.xaml.cs | 60 +++++++++++-------- .../include/RecorderOpenFaceParameters.h | 1 + lib/local/Utilities/src/Visualizer.cpp | 59 ++++++++++++++++-- 8 files changed, 139 insertions(+), 37 deletions(-) diff --git a/.gitignore b/.gitignore index 47e3d15..05c5849 100644 --- a/.gitignore +++ b/.gitignore @@ -95,3 +95,5 @@ lib/3rdParty/CameraEnumerator/Release/ lib/local/Utilities/Release/ exe/FaceLandmarkVidMulti/processed/ matlab_runners/Demos/processed/multi_face_aligned/ +exe/releases/OpenFace_0.4.1_win_x64_landmarks/ +exe/releases/OpenFace_* diff --git a/exe/FaceLandmarkImg/FaceLandmarkImg.cpp b/exe/FaceLandmarkImg/FaceLandmarkImg.cpp index 3599b1a..f43e4f8 100644 --- a/exe/FaceLandmarkImg/FaceLandmarkImg.cpp +++ b/exe/FaceLandmarkImg/FaceLandmarkImg.cpp @@ -127,18 +127,30 @@ int main (int argc, char **argv) captured_image = image_reader.GetNextImage(); + if (!face_model.eye_model) + { + cout << "WARNING: no eye model found" << endl; + } + + if (face_analyser.GetAUClassNames().size() == 0 && face_analyser.GetAURegNames().size() == 0) + { + cout << "WARNING: no Action Unit models found" << endl; + } + cout << "Starting tracking" << endl; while (!captured_image.empty()) { Utilities::RecorderOpenFaceParameters recording_params(arguments, false, false, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy); + if (!face_model.eye_model) + { + recording_params.setOutputGaze(false); + } Utilities::RecorderOpenFace open_face_rec(image_reader.name, recording_params, arguments); 
visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy); - if (recording_params.outputGaze() && !face_model.eye_model) - cout << "WARNING: no eye model defined, but outputting gaze" << endl; // Making sure the image is in uchar grayscale cv::Mat_<uchar> grayscale_image = image_reader.GetGrayFrame(); diff --git a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp index 2666579..95864ad 100644 --- a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp +++ b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp @@ -104,6 +104,11 @@ int main (int argc, char **argv) // The modules that are being used for tracking LandmarkDetector::CLNF face_model(det_parameters.model_location); + if (!face_model.eye_model) + { + cout << "WARNING: no eye model found" << endl; + } + // Open a sequence Utilities::SequenceCapture sequence_reader; diff --git a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp index abd8c93..48eb686 100644 --- a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp +++ b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp @@ -158,6 +158,16 @@ int main (int argc, char **argv) face_analysis_params.OptimizeForImages(); FaceAnalysis::FaceAnalyser face_analyser(face_analysis_params); + if (!face_model.eye_model) + { + cout << "WARNING: no eye model found" << endl; + } + + if (face_analyser.GetAUClassNames().size() == 0 && face_analyser.GetAURegNames().size() == 0) + { + cout << "WARNING: no Action Unit models found" << endl; + } + // Open a sequence Utilities::SequenceCapture sequence_reader; @@ -185,12 +195,13 @@ int main (int argc, char **argv) Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(), sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps); - // Do not do AU detection on multi-face case as it is not supported - recording_params.setOutputAUs(false); + if 
(!face_model.eye_model) + { + recording_params.setOutputGaze(false); + } + Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments); - if (recording_params.outputGaze() && !face_model.eye_model) - cout << "WARNING: no eye model defined, but outputting gaze" << endl; if (sequence_reader.IsWebcam()) { diff --git a/exe/FeatureExtraction/FeatureExtraction.cpp b/exe/FeatureExtraction/FeatureExtraction.cpp index 94caecb..17ff95f 100644 --- a/exe/FeatureExtraction/FeatureExtraction.cpp +++ b/exe/FeatureExtraction/FeatureExtraction.cpp @@ -121,6 +121,16 @@ int main (int argc, char **argv) FaceAnalysis::FaceAnalyserParameters face_analysis_params(arguments); FaceAnalysis::FaceAnalyser face_analyser(face_analysis_params); + if (!face_model.eye_model) + { + cout << "WARNING: no eye model found" << endl; + } + + if (face_analyser.GetAUClassNames().size() == 0 && face_analyser.GetAURegNames().size() == 0) + { + cout << "WARNING: no Action Unit models found" << endl; + } + Utilities::SequenceCapture sequence_reader; // A utility for visualizing the results @@ -150,6 +160,10 @@ int main (int argc, char **argv) Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(), sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps); + if (!face_model.eye_model) + { + recording_params.setOutputGaze(false); + } Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments); if (recording_params.outputGaze() && !face_model.eye_model) diff --git a/gui/OpenFaceDemo/MainWindow.xaml.cs b/gui/OpenFaceDemo/MainWindow.xaml.cs index 052c725..68a51f5 100644 --- a/gui/OpenFaceDemo/MainWindow.xaml.cs +++ b/gui/OpenFaceDemo/MainWindow.xaml.cs @@ -259,37 +259,47 @@ namespace OpenFaceDemo { var au_regs = face_analyser.GetCurrentAUsReg(); + if(au_regs.Count > 0) + { + double smile = (au_regs["AU12"] + au_regs["AU06"] + au_regs["AU25"]) / 13.0; + double 
frown = (au_regs["AU15"] + au_regs["AU17"]) / 12.0; - double smile = (au_regs["AU12"] + au_regs["AU06"] + au_regs["AU25"]) / 13.0; - double frown = (au_regs["AU15"] + au_regs["AU17"]) / 12.0; + double brow_up = (au_regs["AU01"] + au_regs["AU02"]) / 10.0; + double brow_down = au_regs["AU04"] / 5.0; - double brow_up = (au_regs["AU01"] + au_regs["AU02"]) / 10.0; - double brow_down = au_regs["AU04"] / 5.0; + double eye_widen = au_regs["AU05"] / 3.0; + double nose_wrinkle = au_regs["AU09"] / 4.0; - double eye_widen = au_regs["AU05"] / 3.0; - double nose_wrinkle = au_regs["AU09"] / 4.0; + Dictionary<int, double> smileDict = new Dictionary<int, double>(); + smileDict[0] = 0.7 * smile_cumm + 0.3 * smile; + smileDict[1] = 0.7 * frown_cumm + 0.3 * frown; + smilePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = smileDict, Confidence = confidence }); - Dictionary<int, double> smileDict = new Dictionary<int, double>(); - smileDict[0] = 0.7 * smile_cumm + 0.3 * smile; - smileDict[1] = 0.7 * frown_cumm + 0.3 * frown; - smilePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = smileDict, Confidence = confidence }); + Dictionary<int, double> browDict = new Dictionary<int, double>(); + browDict[0] = 0.7 * brow_up_cumm + 0.3 * brow_up; + browDict[1] = 0.7 * brow_down_cumm + 0.3 * brow_down; + browPlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = browDict, Confidence = confidence }); - Dictionary<int, double> browDict = new Dictionary<int, double>(); - browDict[0] = 0.7 * brow_up_cumm + 0.3 * brow_up; - browDict[1] = 0.7 * brow_down_cumm + 0.3 * brow_down; - browPlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = browDict, Confidence = confidence }); + Dictionary<int, double> eyeDict = new Dictionary<int, double>(); + eyeDict[0] = 0.7 * widen_cumm + 0.3 * eye_widen; + eyeDict[1] = 0.7 * wrinkle_cumm + 0.3 * nose_wrinkle; + eyePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = eyeDict, Confidence = confidence }); - Dictionary<int, double> eyeDict = new Dictionary<int, double>(); - eyeDict[0] = 0.7 * widen_cumm + 0.3 * eye_widen; - eyeDict[1] = 
0.7 * wrinkle_cumm + 0.3 * nose_wrinkle; - eyePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = eyeDict, Confidence = confidence }); - - smile_cumm = smileDict[0]; - frown_cumm = smileDict[1]; - brow_up_cumm = browDict[0]; - brow_down_cumm = browDict[1]; - widen_cumm = eyeDict[0]; - wrinkle_cumm = eyeDict[1]; + smile_cumm = smileDict[0]; + frown_cumm = smileDict[1]; + brow_up_cumm = browDict[0]; + brow_down_cumm = browDict[1]; + widen_cumm = eyeDict[0]; + wrinkle_cumm = eyeDict[1]; + } + else + { + // If no AUs present disable the AU visualization + MainGrid.ColumnDefinitions[2].Width = new GridLength(0); + eyePlot.Visibility = Visibility.Collapsed; + browPlot.Visibility = Visibility.Collapsed; + smilePlot.Visibility = Visibility.Collapsed; + } Dictionary<int, double> poseDict = new Dictionary<int, double>(); poseDict[0] = -pose[3]; diff --git a/lib/local/Utilities/include/RecorderOpenFaceParameters.h b/lib/local/Utilities/include/RecorderOpenFaceParameters.h index 8d6b176..74efa66 100644 --- a/lib/local/Utilities/include/RecorderOpenFaceParameters.h +++ b/lib/local/Utilities/include/RecorderOpenFaceParameters.h @@ -78,6 +78,7 @@ namespace Utilities float getCy() const { return cy; } void setOutputAUs(bool output_AUs) { this->output_AUs = output_AUs; } + void setOutputGaze(bool output_gaze) { this->output_gaze = output_gaze; } private: diff --git a/lib/local/Utilities/src/Visualizer.cpp b/lib/local/Utilities/src/Visualizer.cpp index d412c37..6dafb4f 100644 --- a/lib/local/Utilities/src/Visualizer.cpp +++ b/lib/local/Utilities/src/Visualizer.cpp @@ -34,6 +34,7 @@ #include #include #include +#include <set> #include "Visualizer.h" #include "VisualizationUtils.h" @@ -236,21 +237,69 @@ void Visualizer::SetObservationActionUnits(const std::vector 0 || au_occurences.size() > 0) { - const int NB_AUS = 17; + std::set<std::string> au_names; + std::map<std::string, double> occurences_map; + std::map<std::string, double> intensities_map; + + for (size_t idx = 0; idx < au_intensities.size(); idx++) + { + 
au_names.insert(au_intensities[idx].first); + intensities_map[au_intensities[idx].first] = au_intensities[idx].second; + } + + for (size_t idx = 0; idx < au_occurences.size(); idx++) + { + au_names.insert(au_occurences[idx].first); + occurences_map[au_occurences[idx].first] = au_occurences[idx].second; + } + const int AU_TRACKBAR_LENGTH = 400; const int AU_TRACKBAR_HEIGHT = 10; const int MARGIN_X = 185; const int MARGIN_Y = 10; - action_units_image = cv::Mat(NB_AUS * (AU_TRACKBAR_HEIGHT + 10) + MARGIN_Y * 2, AU_TRACKBAR_LENGTH + MARGIN_X, CV_8UC3, cv::Scalar(255,255,255)); + const int nb_aus = au_names.size(); + + // Do not reinitialize + if(action_units_image.empty()) + { + action_units_image = cv::Mat(nb_aus * (AU_TRACKBAR_HEIGHT + 10) + MARGIN_Y * 2, AU_TRACKBAR_LENGTH + MARGIN_X, CV_8UC3, cv::Scalar(255,255,255)); + } + else + { + action_units_image.setTo(255); + } std::map<std::string, std::pair<bool, double>> aus; // first, prepare a mapping "AU name" -> { present, intensity } - for (size_t idx = 0; idx < au_intensities.size(); idx++) { - aus[au_intensities[idx].first] = std::make_pair(au_occurences[idx].second != 0, au_intensities[idx].second); + for (auto au_name : au_names) + { + // Insert the intensity and AU presence (as these do not always overlap, check if they exist first) + bool occurence = false; + if (occurences_map.find(au_name) != occurences_map.end()) + { + occurence = occurences_map[au_name] != 0; + } + else + { + // If we do not have an occurrence label, trust the intensity one + occurence = intensities_map[au_name] > 1; + } + double intensity = 0.0; + if (intensities_map.find(au_name) != intensities_map.end()) + { + intensity = intensities_map[au_name]; + } + else + { + // If we do not have an intensity label, trust the occurrence one + intensity = occurences_map[au_name] == 0 ? 
0 : 5; + } + + aus[au_name] = std::make_pair(occurence, intensity); } // then, build the graph @@ -382,12 +431,10 @@ char Visualizer::ShowObservation() } if (vis_aus && !action_units_image.empty()) { - cv::namedWindow("action units", cv::WindowFlags::WINDOW_KEEPRATIO); cv::imshow("action units", action_units_image); } if (vis_track) { - cv::namedWindow("tracking result", cv::WindowFlags::WINDOW_KEEPRATIO); cv::imshow("tracking result", captured_image); } return cv::waitKey(1);