Have FaceLandmarkVidMulti output (only) to JSON on stdout

Ruben van de Ven 2018-05-01 23:00:16 +02:00
parent 92770ee156
commit 0dd61f148a
3 changed files with 110 additions and 58 deletions
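After this change each processed frame yields exactly one line on stdout: a JSON array with one object per successfully tracked face, built in FaceLandmarkVidMulti.cpp below. An illustrative line for two tracked faces (values invented; following OpenFace conventions, gaze_angle and head_rot are in radians and head_pos in millimetres in camera coordinates):

	[{"fid":0, "confidence":0.98, "gaze_angle": [0.02, -0.11], "head_pos": [24.3, -8.1, 412.7], "head_rot": [0.05, 0.21, -0.01]}, {"fid":1, "confidence":0.87, "gaze_angle": [-0.35, 0.04], "head_pos": [-102.6, 12.9, 588.2], "head_rot": [-0.02, -0.44, 0.07]}]

A frame with no successfully tracked face produces an empty array, [].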

FaceLandmarkVidMulti.cpp

@@ -4,7 +4,7 @@
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
@@ -14,21 +14,21 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
@@ -94,7 +94,7 @@ void NonOverlapingDetections(const vector<LandmarkDetector::CLNF>& clnf_models,
	// See if the detections intersect
	cv::Rect_<double> model_rect = clnf_models[model].GetBoundingBox();
	for(int detection = face_detections.size()-1; detection >=0; --detection)
	{
		double intersection_area = (model_rect & face_detections[detection]).area();
@@ -130,17 +130,17 @@ int main (int argc, char **argv)
	vector<LandmarkDetector::FaceModelParameters> det_parameters;
	det_parameters.push_back(det_params);
	// The modules that are being used for tracking
	vector<LandmarkDetector::CLNF> face_models;
	vector<bool> active_models;
-	int num_faces_max = 4;
+	int num_faces_max = 15;
	LandmarkDetector::CLNF face_model(det_parameters[0].model_location);
	face_model.face_detector_HAAR.load(det_parameters[0].face_detector_location);
	face_model.face_detector_location = det_parameters[0].face_detector_location;
	face_models.reserve(num_faces_max);
	face_models.push_back(face_model);
@@ -152,7 +152,7 @@ int main (int argc, char **argv)
		active_models.push_back(false);
		det_parameters.push_back(det_params);
	}
	// Load facial feature extractor and AU analyser (make sure it is static, as we don't reidentify faces)
	FaceAnalysis::FaceAnalyserParameters face_analysis_params(arguments);
	face_analysis_params.OptimizeForImages();
@@ -195,10 +195,16 @@ int main (int argc, char **argv)
		Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(),
			sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps);
+		// For some reason these are not accepted as CLI parameters; since we don't need them, disable them here
+		recording_params.setOutputAUs(false);
+		recording_params.setOutputHOG(false);
+		recording_params.setOutputAlignedFaces(false);
+		recording_params.setOutputTracked(false);
		if (!face_model.eye_model)
		{
			recording_params.setOutputGaze(false);
		}
		Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments);
@@ -217,11 +223,11 @@ int main (int argc, char **argv)
		INFO_STREAM( "Starting tracking");
		while(!captured_image.empty())
		{
			// Reading the images
			cv::Mat_<uchar> grayscale_image = sequence_reader.GetGrayFrame();
			vector<cv::Rect_<double> > face_detections;
			bool all_models_active = true;
@@ -232,10 +238,10 @@ int main (int argc, char **argv)
					all_models_active = false;
				}
			}
			// Get the detections (every 8th frame and when there are free models available for tracking)
			if(frame_count % 8 == 0 && !all_models_active)
			{
				if(det_parameters[0].curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
				{
					vector<double> confidences;
@@ -262,7 +268,7 @@ int main (int argc, char **argv)
				// If the current model has failed more than 4 times in a row, remove it
				if(face_models[model].failures_in_a_row > 4)
				{
					active_models[model] = false;
					face_models[model].Reset();
				}
@@ -270,13 +276,13 @@ int main (int argc, char **argv)
				// If the model is inactive reactivate it with new detections
				if(!active_models[model])
				{
					for(size_t detection_ind = 0; detection_ind < face_detections.size(); ++detection_ind)
					{
						// if it was not taken by another tracker take it (if it is false swap it to true and enter detection, this makes it parallel safe)
						if(face_detections_used[detection_ind].compare_and_swap(true, false) == false)
						{
							// Reinitialise the model
							face_models[model].Reset();
@@ -299,12 +305,15 @@ int main (int argc, char **argv)
						detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_models[model], det_parameters[model]);
					}
				});
			// Keeping track of FPS
			fps_tracker.AddFrame();
			visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+			std::stringstream jsonOutput;
+			jsonOutput << "[";
+			int jsonFaceId = 0;
			// Go through every model and detect eye gaze, record results and visualise the results
			for(size_t model = 0; model < face_models.size(); ++model)
			{
@@ -312,7 +321,7 @@ int main (int argc, char **argv)
				if(active_models[model])
				{
					// Estimate head pose and eye gaze
					cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
					cv::Point3f gaze_direction0(0, 0, 0); cv::Point3f gaze_direction1(0, 0, 0); cv::Vec2d gaze_angle(0, 0);
@@ -337,14 +346,30 @@ int main (int argc, char **argv)
						face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
					}
+					cv::Vec6d head_pose = LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
					// Visualize the features
					visualizer.SetObservationFaceAlign(sim_warped_img);
					visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
					visualizer.SetObservationLandmarks(face_models[model].detected_landmarks, face_models[model].detection_certainty);
-					visualizer.SetObservationPose(LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
+					visualizer.SetObservationPose(head_pose, face_models[model].detection_certainty);
					visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_models[model]), LandmarkDetector::Calculate3DEyeLandmarks(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
					visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
+					if(face_models[model].detection_success && face_model.eye_model) {
+						if(jsonFaceId > 0){
+							jsonOutput << ",";
+						}
+						jsonFaceId++;
+						jsonOutput << "{\"fid\":";
+						jsonOutput << model << ", \"confidence\":" << face_models[model].detection_certainty;
+						// gaze_angle_x, gaze_angle_y: eye gaze direction in radians in world coordinates, averaged over both eyes; an easier format than gaze vectors. Looking left-right changes gaze_angle_x, looking up-down changes gaze_angle_y; looking straight ahead puts both angles close to 0 (within measurement error).
+						jsonOutput << ", \"gaze_angle\": [" << gaze_angle[0] << ", " << gaze_angle[1] << "]";
+						jsonOutput << ", \"head_pos\": [" << head_pose[0] << ", " << head_pose[1] << ", " << head_pose[2] << "]";
+						jsonOutput << ", \"head_rot\": [" << head_pose[3] << ", " << head_pose[4] << ", " << head_pose[5] << "]";
+						jsonOutput << "}";
+					}
					// Output features
					open_face_rec.SetObservationHOG(face_models[model].detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
					open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
@@ -364,9 +389,12 @@ int main (int argc, char **argv)
			}
			visualizer.SetFps(fps_tracker.GetFPS());
+			jsonOutput << "]";
+			std::cout << jsonOutput.str() << std::endl;
			// show visualization and detect key presses
			char character_press = visualizer.ShowObservation();
			// restart the trackers
			if(character_press == 'r')
			{
@@ -389,7 +417,7 @@ int main (int argc, char **argv)
			captured_image = sequence_reader.GetNextFrame();
		}
		frame_count = 0;
		// Reset the model, for the next video
@@ -405,4 +433,3 @@ int main (int argc, char **argv)
	return 0;
}
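Because each frame's array is written with std::endl, stdout is flushed once per frame and a downstream process can consume the stream live. Below is a minimal consumer sketch, not part of this commit; the binary path and the -device argument are assumptions to adjust to your build, and a real JSON parser (e.g. nlohmann/json) would replace the placeholder print.

// json_consumer.cpp -- minimal sketch of a process reading the per-frame JSON lines (POSIX popen).
#include <cstdio>
#include <iostream>
#include <string>

int main()
{
	// Launch the tracker and read its stdout line by line;
	// each line is one JSON array describing the faces in one frame.
	// NOTE: path and arguments are illustrative assumptions.
	FILE* pipe = popen("./bin/FaceLandmarkVidMulti -device 0", "r");
	if (!pipe)
	{
		std::cerr << "failed to start FaceLandmarkVidMulti" << std::endl;
		return 1;
	}
	// 8 KiB comfortably holds a line for the 15-face maximum set above.
	char buffer[8192];
	while (fgets(buffer, sizeof(buffer), pipe))
	{
		std::string frame_json(buffer);
		// Hand frame_json to a real JSON parser here; the sketch only echoes it.
		std::cout << "frame: " << frame_json;
	}
	pclose(pipe);
	return 0;
}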

RecorderOpenFaceParameters.h

@@ -3,7 +3,7 @@
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
@@ -13,21 +13,21 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
@@ -79,9 +79,12 @@ namespace Utilities
		void setOutputAUs(bool output_AUs) { this->output_AUs = output_AUs; }
		void setOutputGaze(bool output_gaze) { this->output_gaze = output_gaze; }
+		void setOutputHOG(bool output_HOG) { this->output_hog = output_HOG; }
+		void setOutputAlignedFaces(bool output_aligned_faces) { this->output_aligned_faces = output_aligned_faces; }
+		void setOutputTracked(bool output_tracked) { this->output_tracked = output_tracked; }
	private:
		// If we are recording results from a sequence each row refers to a frame, if we are recording an image each row is a face
		bool is_sequence;
		// If the data is coming from a webcam
@@ -97,7 +100,7 @@ namespace Utilities
		bool output_hog;
		bool output_tracked;
		bool output_aligned_faces;
		// Some video recording parameters
		std::string output_codec;
		double fps_vid_out;

RecorderOpenFace.cpp

@@ -3,7 +3,7 @@
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
@@ -13,21 +13,21 @@
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
@@ -126,7 +126,7 @@ void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
		hog_recorder.Open(hog_filename);
	}
	// saving the videos
	if (params.outputTracked())
	{
		if (params.isSequence())
@@ -275,8 +275,7 @@ void RecorderOpenFace::SetObservationVisualization(const cv::Mat &vis_track)
void RecorderOpenFace::WriteObservation()
{
-	// Write out the CSV file (it will always be there, even if not outputting anything more but frame/face numbers)
	// Write out the CSV file (it will always be there, even if not outputting anything more but frame/face numbers)
	if(!csv_recorder.isOpen())
	{
		// As we are writing out the header, work out some things like number of landmarks, names of AUs etc.
@@ -312,8 +311,34 @@ void RecorderOpenFace::WriteObservation()
		csv_recorder.Open(csv_filename, params.isSequence(), params.output2DLandmarks(), params.output3DLandmarks(), params.outputPDMParams(), params.outputPose(),
			params.outputAUs(), params.outputGaze(), num_face_landmarks, num_model_modes, num_eye_landmarks, au_names_class, au_names_reg);
	}
+	//
+	// if(landmark_detection_success){
+	//
+	// 	std::cout << "{\"fid\":";
+	// 	std::cout << face_id << ", \"confidence\":" << landmark_detection_confidence;
+	// 	//
+	// 	// std::cout << ", \"gaze_directions\": [[" << gaze_direction0.x << ", " << gaze_direction0.y << ", " << gaze_direction0.z << "]"
+	// 	// 	<< ", [" << gaze_direction1.x << ", " << gaze_direction1.y << ", " << gaze_direction1.z << "]]";
+	//
+	// 	// gaze_angle_x, gaze_angle_y Eye gaze direction in radians in world coordinates averaged for both eyes and converted into more easy to use format than gaze vectors. If a person is looking left-right this will results in the change of gaze_angle_x and, if a person is looking up-down this will result in change of gaze_angle_y, if a person is looking straight ahead both of the angles will be close to 0 (within measurement error)
+	// 	std::cout << ", \"gaze_angle\": [" << gaze_angle[0] << ", " << gaze_angle[1] << "]";
+	// 	//
+	// 	// std::cout << ",\"eye_lmk3d\"[[]";
+	// 	// // Output the 3D eye landmarks
+	// 	// for (auto eye_lmk : eye_landmarks3D)
+	// 	// {
+	// 	// 	std::cout << ", [" << eye_lmk.x << "," << eye_lmk.y << "," << eye_lmk.z << "]";
+	// 	// }
+	// 	// std::cout << "]";
+	//
+	// 	std::cout << ", position: [" << head_pose[0] << ", " << head_pose[1] << ", " << head_pose[2] << "]";
+	// 	// rotation:
+	// 	// std::cout << ", " << pose_estimate[3] << ", " << pose_estimate[4] << ", " << pose_estimate[5];
+	//
+	// 	std::cout << "}" << std::endl;
+	// }
	this->csv_recorder.WriteLine(face_id, frame_number, timestamp, landmark_detection_success,
		landmark_detection_confidence, landmarks_2D, landmarks_3D, pdm_params_local, pdm_params_global, head_pose,
		gaze_direction0, gaze_direction1, gaze_angle, eye_landmarks2D, eye_landmarks3D, au_intensities, au_occurences);
@@ -446,6 +471,3 @@ void RecorderOpenFace::Close()
	metadata_file.close();
}