Merge remote-tracking branch 'origin/master' into feature/Windows-GUI

Conflicts:
	exe/FeatureExtraction/FeatureExtraction.cpp
This commit is contained in:
Tadas Baltrusaitis 2017-10-29 11:40:52 +00:00
commit a8ec41e91f

View file

@@ -55,7 +55,6 @@
 #include <Face_utils.h>
 #include <FaceAnalyser.h>
-#include <FaceAnalyserParameters.h>
 #include <GazeEstimation.h>
 #ifndef CONFIG_DIR
@@ -218,7 +217,7 @@ int main (int argc, char **argv)
 {
 	vector<string> arguments = get_arguments(argc, argv);
 	// Some initial parameters that can be overriden from command line
 	vector<string> input_files, output_files, tracked_videos_output;
@@ -244,6 +243,7 @@ int main (int argc, char **argv)
 		{
 			video_input = false;
 		}
 	}
 	// Grab camera parameters, if they are not defined (approximate values will be used)
@@ -360,6 +360,7 @@ int main (int argc, char **argv)
 		{
 			string curr_img_file = input_image_files[f_n][curr_img];
 			captured_image = cv::imread(curr_img_file, -1);
+			total_frames = input_image_files[f_n].size();
 		}
 		else
 		{
@@ -416,7 +417,9 @@ int main (int argc, char **argv)
 			catch(cv::Exception e)
 			{
 				WARN_STREAM( "Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN. Currently using codec " << output_codec << ", try using an other one (-oc option)");
 			}
 		}
 		int frame_count = 0;
@@ -472,9 +475,6 @@ int main (int argc, char **argv)
 				detection_success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_model, det_parameters);
 			}
-			// Work out the pose of the head from the tracked model
-			cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
 			// Gaze tracking, absolute gaze direction
 			cv::Point3f gazeDirection0(0, 0, -1);
 			cv::Point3f gazeDirection1(0, 0, -1);
@@ -495,8 +495,7 @@ int main (int argc, char **argv)
 			// But only if needed in output
 			if(!output_similarity_align.empty() || hog_output_file.is_open() || output_AUs)
 			{
-				face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, time_stamp, false, !det_parameters.quiet_mode && (visualize_align || visualize_hog));
+				face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, time_stamp, false, !det_parameters.quiet_mode && visualize_hog);
 				face_analyser.GetLatestAlignedFace(sim_warped_img);
 				if(!det_parameters.quiet_mode && visualize_align)
@@ -516,6 +515,9 @@ int main (int argc, char **argv)
 				}
 			}
+			// Work out the pose of the head from the tracked model
+			cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
 			if (hog_output_file.is_open())
 			{
 				output_HOG_frame(&hog_output_file, detection_success, hog_descriptor, num_hog_rows, num_hog_cols);
@@ -732,14 +734,8 @@ void outputAllFeatures(std::ofstream* output_file, bool output_2D_landmarks, boo
 {
 	double confidence = 0.5 * (1 - face_model.detection_certainty);
-	*output_file << std::setprecision(9);
-	*output_file << frame_count + 1 << ", " << time_stamp << ", ";
-	*output_file << std::setprecision(2);
-	*output_file << confidence << ", " << detection_success;
-	*output_file << std::setprecision(5);
+	*output_file << frame_count + 1 << ", " << time_stamp << ", " << confidence << ", " << detection_success;
 	// Output the estimated gaze
 	if (output_gaze)
@@ -763,7 +759,6 @@ void outputAllFeatures(std::ofstream* output_file, bool output_2D_landmarks, boo
 		}
 	}
-	*output_file << std::setprecision(4);
 	// Output the estimated head pose
 	if (output_pose)
 	{
@@ -778,7 +773,6 @@ void outputAllFeatures(std::ofstream* output_file, bool output_2D_landmarks, boo
 		}
 	}
-	*output_file << std::setprecision(4);
 	// Output the detected 2D facial landmarks
 	if (output_2D_landmarks)
 	{