Have FaceLandmarkVidMulti output (only) to JSON on stdout
This commit is contained in:
parent
92770ee156
commit
0dd61f148a
3 changed files with 110 additions and 58 deletions
|
@ -14,20 +14,20 @@
|
||||||
// reports and manuals, must cite at least one of the following works:
|
// reports and manuals, must cite at least one of the following works:
|
||||||
//
|
//
|
||||||
// OpenFace: an open source facial behavior analysis toolkit
|
// OpenFace: an open source facial behavior analysis toolkit
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
||||||
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
||||||
//
|
//
|
||||||
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
||||||
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
||||||
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
||||||
//
|
//
|
||||||
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
||||||
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
||||||
// in Facial Expression Recognition and Analysis Challenge,
|
// in Facial Expression Recognition and Analysis Challenge,
|
||||||
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
||||||
//
|
//
|
||||||
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
||||||
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
||||||
//
|
//
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
@ -135,7 +135,7 @@ int main (int argc, char **argv)
|
||||||
vector<LandmarkDetector::CLNF> face_models;
|
vector<LandmarkDetector::CLNF> face_models;
|
||||||
vector<bool> active_models;
|
vector<bool> active_models;
|
||||||
|
|
||||||
int num_faces_max = 4;
|
int num_faces_max = 15;
|
||||||
|
|
||||||
LandmarkDetector::CLNF face_model(det_parameters[0].model_location);
|
LandmarkDetector::CLNF face_model(det_parameters[0].model_location);
|
||||||
face_model.face_detector_HAAR.load(det_parameters[0].face_detector_location);
|
face_model.face_detector_HAAR.load(det_parameters[0].face_detector_location);
|
||||||
|
@ -195,11 +195,17 @@ int main (int argc, char **argv)
|
||||||
|
|
||||||
Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(),
|
Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(),
|
||||||
sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps);
|
sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps);
|
||||||
|
// for some reason not accepted as cli parameter, as we don't need it: disable it anyway
|
||||||
|
recording_params.setOutputAUs(false);
|
||||||
|
recording_params.setOutputHOG(false);
|
||||||
|
recording_params.setOutputAlignedFaces(false);
|
||||||
|
recording_params.setOutputTracked(false);
|
||||||
if (!face_model.eye_model)
|
if (!face_model.eye_model)
|
||||||
{
|
{
|
||||||
recording_params.setOutputGaze(false);
|
recording_params.setOutputGaze(false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments);
|
Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments);
|
||||||
|
|
||||||
|
|
||||||
|
@ -305,6 +311,9 @@ int main (int argc, char **argv)
|
||||||
|
|
||||||
visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
|
visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
|
||||||
|
|
||||||
|
std::stringstream jsonOutput;
|
||||||
|
jsonOutput << "[";
|
||||||
|
int jsonFaceId = 0;
|
||||||
// Go through every model and detect eye gaze, record results and visualise the results
|
// Go through every model and detect eye gaze, record results and visualise the results
|
||||||
for(size_t model = 0; model < face_models.size(); ++model)
|
for(size_t model = 0; model < face_models.size(); ++model)
|
||||||
{
|
{
|
||||||
|
@ -337,14 +346,30 @@ int main (int argc, char **argv)
|
||||||
face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
|
face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cv::Vec6d head_pose = LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
|
||||||
|
|
||||||
// Visualize the features
|
// Visualize the features
|
||||||
visualizer.SetObservationFaceAlign(sim_warped_img);
|
visualizer.SetObservationFaceAlign(sim_warped_img);
|
||||||
visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
|
visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
|
||||||
visualizer.SetObservationLandmarks(face_models[model].detected_landmarks, face_models[model].detection_certainty);
|
visualizer.SetObservationLandmarks(face_models[model].detected_landmarks, face_models[model].detection_certainty);
|
||||||
visualizer.SetObservationPose(LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
|
visualizer.SetObservationPose(head_pose, face_models[model].detection_certainty);
|
||||||
visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_models[model]), LandmarkDetector::Calculate3DEyeLandmarks(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
|
visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_models[model]), LandmarkDetector::Calculate3DEyeLandmarks(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
|
||||||
visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
|
visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
|
||||||
|
|
||||||
|
if(face_models[model].detection_success && face_model.eye_model) {
|
||||||
|
if(jsonFaceId > 0){
|
||||||
|
jsonOutput << ",";
|
||||||
|
}
|
||||||
|
jsonFaceId++;
|
||||||
|
jsonOutput << "{\"fid\":";
|
||||||
|
jsonOutput << model << ", \"confidence\":" << face_models[model].detection_certainty;
|
||||||
|
// gaze_angle_x, gaze_angle_y Eye gaze direction in radians in world coordinates averaged for both eyes and converted into more easy to use format than gaze vectors. If a person is looking left-right this will results in the change of gaze_angle_x and, if a person is looking up-down this will result in change of gaze_angle_y, if a person is looking straight ahead both of the angles will be close to 0 (within measurement error)
|
||||||
|
jsonOutput << ", \"gaze_angle\": [" << gaze_angle[0] << ", " << gaze_angle[1] << "]";
|
||||||
|
jsonOutput << ", head_pos: [" << head_pose[0] << ", " << head_pose[1] << ", " << head_pose[2] << "]";
|
||||||
|
jsonOutput << ", head_rot: [" << head_pose[3] << ", " << head_pose[4] << ", " << head_pose[5] << "]";
|
||||||
|
jsonOutput << "}";
|
||||||
|
}
|
||||||
|
|
||||||
// Output features
|
// Output features
|
||||||
open_face_rec.SetObservationHOG(face_models[model].detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
|
open_face_rec.SetObservationHOG(face_models[model].detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
|
||||||
open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
|
open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
|
||||||
|
@ -364,6 +389,9 @@ int main (int argc, char **argv)
|
||||||
}
|
}
|
||||||
visualizer.SetFps(fps_tracker.GetFPS());
|
visualizer.SetFps(fps_tracker.GetFPS());
|
||||||
|
|
||||||
|
jsonOutput << "]";
|
||||||
|
std::cout << jsonOutput.str() << std::endl;
|
||||||
|
|
||||||
// show visualization and detect key presses
|
// show visualization and detect key presses
|
||||||
char character_press = visualizer.ShowObservation();
|
char character_press = visualizer.ShowObservation();
|
||||||
|
|
||||||
|
@ -405,4 +433,3 @@ int main (int argc, char **argv)
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -13,20 +13,20 @@
|
||||||
// reports and manuals, must cite at least one of the following works:
|
// reports and manuals, must cite at least one of the following works:
|
||||||
//
|
//
|
||||||
// OpenFace: an open source facial behavior analysis toolkit
|
// OpenFace: an open source facial behavior analysis toolkit
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
||||||
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
||||||
//
|
//
|
||||||
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
||||||
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
||||||
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
||||||
//
|
//
|
||||||
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
||||||
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
||||||
// in Facial Expression Recognition and Analysis Challenge,
|
// in Facial Expression Recognition and Analysis Challenge,
|
||||||
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
||||||
//
|
//
|
||||||
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
||||||
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
||||||
//
|
//
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
@ -79,6 +79,9 @@ namespace Utilities
|
||||||
|
|
||||||
void setOutputAUs(bool output_AUs) { this->output_AUs = output_AUs; }
|
void setOutputAUs(bool output_AUs) { this->output_AUs = output_AUs; }
|
||||||
void setOutputGaze(bool output_gaze) { this->output_gaze = output_gaze; }
|
void setOutputGaze(bool output_gaze) { this->output_gaze = output_gaze; }
|
||||||
|
void setOutputHOG(bool output_HOG) { this->output_hog = output_HOG; }
|
||||||
|
void setOutputAlignedFaces(bool output_aligned_faces) { this->output_aligned_faces = output_aligned_faces; }
|
||||||
|
void setOutputTracked(bool output_tracked) { this->output_tracked = output_tracked; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
|
|
|
@ -13,20 +13,20 @@
|
||||||
// reports and manuals, must cite at least one of the following works:
|
// reports and manuals, must cite at least one of the following works:
|
||||||
//
|
//
|
||||||
// OpenFace: an open source facial behavior analysis toolkit
|
// OpenFace: an open source facial behavior analysis toolkit
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
|
||||||
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
// in IEEE Winter Conference on Applications of Computer Vision, 2016
|
||||||
//
|
//
|
||||||
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
|
||||||
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
|
||||||
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
// in IEEE International. Conference on Computer Vision (ICCV), 2015
|
||||||
//
|
//
|
||||||
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
|
||||||
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
|
||||||
// in Facial Expression Recognition and Analysis Challenge,
|
// in Facial Expression Recognition and Analysis Challenge,
|
||||||
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
|
||||||
//
|
//
|
||||||
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
|
||||||
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
|
||||||
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
|
||||||
//
|
//
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
@ -275,7 +275,6 @@ void RecorderOpenFace::SetObservationVisualization(const cv::Mat &vis_track)
|
||||||
|
|
||||||
void RecorderOpenFace::WriteObservation()
|
void RecorderOpenFace::WriteObservation()
|
||||||
{
|
{
|
||||||
|
|
||||||
// Write out the CSV file (it will always be there, even if not outputting anything more but frame/face numbers)
|
// Write out the CSV file (it will always be there, even if not outputting anything more but frame/face numbers)
|
||||||
if(!csv_recorder.isOpen())
|
if(!csv_recorder.isOpen())
|
||||||
{
|
{
|
||||||
|
@ -312,6 +311,32 @@ void RecorderOpenFace::WriteObservation()
|
||||||
csv_recorder.Open(csv_filename, params.isSequence(), params.output2DLandmarks(), params.output3DLandmarks(), params.outputPDMParams(), params.outputPose(),
|
csv_recorder.Open(csv_filename, params.isSequence(), params.output2DLandmarks(), params.output3DLandmarks(), params.outputPDMParams(), params.outputPose(),
|
||||||
params.outputAUs(), params.outputGaze(), num_face_landmarks, num_model_modes, num_eye_landmarks, au_names_class, au_names_reg);
|
params.outputAUs(), params.outputGaze(), num_face_landmarks, num_model_modes, num_eye_landmarks, au_names_class, au_names_reg);
|
||||||
}
|
}
|
||||||
|
//
|
||||||
|
// if(landmark_detection_success){
|
||||||
|
//
|
||||||
|
// std::cout << "{\"fid\":";
|
||||||
|
// std::cout << face_id << ", \"confidence\":" << landmark_detection_confidence;
|
||||||
|
// //
|
||||||
|
// // std::cout << ", \"gaze_directions\": [[" << gaze_direction0.x << ", " << gaze_direction0.y << ", " << gaze_direction0.z << "]"
|
||||||
|
// // << ", [" << gaze_direction1.x << ", " << gaze_direction1.y << ", " << gaze_direction1.z << "]]";
|
||||||
|
//
|
||||||
|
// // gaze_angle_x, gaze_angle_y Eye gaze direction in radians in world coordinates averaged for both eyes and converted into more easy to use format than gaze vectors. If a person is looking left-right this will results in the change of gaze_angle_x and, if a person is looking up-down this will result in change of gaze_angle_y, if a person is looking straight ahead both of the angles will be close to 0 (within measurement error)
|
||||||
|
// std::cout << ", \"gaze_angle\": [" << gaze_angle[0] << ", " << gaze_angle[1] << "]";
|
||||||
|
// //
|
||||||
|
// // std::cout << ",\"eye_lmk3d\"[[]";
|
||||||
|
// // // Output the 3D eye landmarks
|
||||||
|
// // for (auto eye_lmk : eye_landmarks3D)
|
||||||
|
// // {
|
||||||
|
// // std::cout << ", [" << eye_lmk.x << "," << eye_lmk.y << "," << eye_lmk.z << "]";
|
||||||
|
// // }
|
||||||
|
// // std::cout << "]";
|
||||||
|
//
|
||||||
|
// std::cout << ", position: [" << head_pose[0] << ", " << head_pose[1] << ", " << head_pose[2] << "]";
|
||||||
|
// // rotation:
|
||||||
|
// // std::cout << ", " << pose_estimate[3] << ", " << pose_estimate[4] << ", " << pose_estimate[5];
|
||||||
|
//
|
||||||
|
// std::cout << "}" << std::endl;
|
||||||
|
// }
|
||||||
|
|
||||||
this->csv_recorder.WriteLine(face_id, frame_number, timestamp, landmark_detection_success,
|
this->csv_recorder.WriteLine(face_id, frame_number, timestamp, landmark_detection_success,
|
||||||
landmark_detection_confidence, landmarks_2D, landmarks_3D, pdm_params_local, pdm_params_global, head_pose,
|
landmark_detection_confidence, landmarks_2D, landmarks_3D, pdm_params_local, pdm_params_global, head_pose,
|
||||||
|
@ -446,6 +471,3 @@ void RecorderOpenFace::Close()
|
||||||
metadata_file.close();
|
metadata_file.close();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue