diff --git a/exe/FaceLandmarkImg/FaceLandmarkImg.cpp b/exe/FaceLandmarkImg/FaceLandmarkImg.cpp
index 4771e62..51c24f9 100644
--- a/exe/FaceLandmarkImg/FaceLandmarkImg.cpp
+++ b/exe/FaceLandmarkImg/FaceLandmarkImg.cpp
@@ -191,6 +191,11 @@ int main (int argc, char **argv)
 		Utilities::RecorderOpenFaceParameters recording_params(arguments, false);
 		Utilities::RecorderOpenFace open_face_rec(image_reader.name, recording_params, arguments);
 
+		visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
+
+		if (recording_params.outputGaze() && !face_model.eye_model)
+			cout << "WARNING: no eye model defined, but outputting gaze" << endl;
+
 		// Making sure the image is in uchar grayscale
 		cv::Mat_<uchar> grayscale_image = image_reader.GetGrayFrame();
 
@@ -230,7 +235,7 @@ int main (int argc, char **argv)
 			cv::Point3f gaze_direction1(0, 0, -1);
 			cv::Vec2d gaze_angle(0, 0);
 
-			if (success && det_parameters.track_gaze)
+			if (success && face_model.eye_model)
 			{
 				GazeAnalysis::EstimateGaze(face_model, gaze_direction0, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, true);
 				GazeAnalysis::EstimateGaze(face_model, gaze_direction1, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, false);
@@ -249,13 +254,11 @@ int main (int argc, char **argv)
 			}
 
 			// Displaying the tracking visualizations
-			visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
 			visualizer.SetObservationFaceAlign(sim_warped_img);
 			visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
 			visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.detection_success);
 			visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 			visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy), face_model.detection_certainty);
-			visualizer.ShowObservation();
 
 			// Setting up the recorder output
 			open_face_rec.SetObservationHOG(face_model.detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
@@ -271,6 +274,7 @@ int main (int argc, char **argv)
 			// Grabbing the next frame in the sequence
 			captured_image = image_reader.GetNextImage();
 		}
+		visualizer.ShowObservation();
 
 	}
 
diff --git a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
index b32c767..fa59089 100644
--- a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
+++ b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
@@ -92,7 +92,6 @@ int main (int argc, char **argv)
 	vector<string> arguments = get_arguments(argc, argv);
 
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
-	det_parameters.track_gaze = true;
 
 	// The modules that are being used for tracking
 	LandmarkDetector::CLNF face_model(det_parameters.model_location);
@@ -142,7 +141,8 @@ int main (int argc, char **argv)
 		cv::Point3f gazeDirection0(0, 0, -1);
 		cv::Point3f gazeDirection1(0, 0, -1);
 
-		if (det_parameters.track_gaze && detection_success && face_model.eye_model)
+		// If tracking succeeded and we have an eye model, estimate gaze
+		if (detection_success && face_model.eye_model)
 		{
 			GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
 			GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
diff --git a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
index 66bfee3..292c69f 100644
--- a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
+++ b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
@@ -166,6 +166,9 @@
       <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
     </ProjectReference>
+    <ProjectReference Include="...">
+      <Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+    </ProjectReference>
diff --git a/exe/FeatureExtraction/FeatureExtraction.cpp b/exe/FeatureExtraction/FeatureExtraction.cpp
index 6ccef9e..d90ce95 100644
--- a/exe/FeatureExtraction/FeatureExtraction.cpp
+++ b/exe/FeatureExtraction/FeatureExtraction.cpp
@@ -107,7 +107,6 @@ int main (int argc, char **argv)
 	// Load face landmark detector
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
 	// Always track gaze in feature extraction
-	det_parameters.track_gaze = true;
 	LandmarkDetector::CLNF face_model(det_parameters.model_location);
 
 	// Load facial feature extractor and AU analyser
@@ -137,6 +136,9 @@ int main (int argc, char **argv)
 		Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.fps);
 		Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments);
 
+		if (recording_params.outputGaze() && !face_model.eye_model)
+			cout << "WARNING: no eye model defined, but outputting gaze" << endl;
+
 		captured_image = sequence_reader.GetNextFrame();
 
 		// For reporting progress
@@ -155,7 +157,7 @@ int main (int argc, char **argv)
 			// Gaze tracking, absolute gaze direction
 			cv::Point3f gazeDirection0(0, 0, -1); cv::Point3f gazeDirection1(0, 0, -1); cv::Vec2d gazeAngle(0, 0);
 
-			if (det_parameters.track_gaze && detection_success && face_model.eye_model)
+			if (detection_success && face_model.eye_model)
 			{
 				GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
 				GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
diff --git a/lib/local/LandmarkDetector/include/LandmarkDetectorParameters.h b/lib/local/LandmarkDetector/include/LandmarkDetectorParameters.h
index 77ed168..f8840d0 100644
--- a/lib/local/LandmarkDetector/include/LandmarkDetectorParameters.h
+++ b/lib/local/LandmarkDetector/include/LandmarkDetectorParameters.h
@@ -102,9 +102,6 @@ struct FaceModelParameters
 	// Should the parameters be refined for different scales
 	bool refine_parameters;
 
-	// Using the brand new and experimental gaze tracker
-	bool track_gaze;
-
 	FaceModelParameters();
 	FaceModelParameters(vector<string> &arguments);
diff --git a/lib/local/LandmarkDetector/src/LandmarkDetectorModel.cpp b/lib/local/LandmarkDetector/src/LandmarkDetectorModel.cpp
index 4c2769a..e5f3c6b 100644
--- a/lib/local/LandmarkDetector/src/LandmarkDetectorModel.cpp
+++ b/lib/local/LandmarkDetector/src/LandmarkDetectorModel.cpp
@@ -576,43 +576,37 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 		// Do the hierarchical models in parallel
 		tbb::parallel_for(0, (int)hierarchical_models.size(), [&](int part_model){
 		{
-			// Only do the synthetic eye models if we're doing gaze
-			if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-				hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-				&& !params.track_gaze))
+
+			int n_part_points = hierarchical_models[part_model].pdm.NumberOfPoints();
+
+			vector<pair<int,int>> mappings = this->hierarchical_mapping[part_model];
+
+			cv::Mat_<double> part_model_locs(n_part_points * 2, 1, 0.0);
+
+			// Extract the corresponding landmarks
+			for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
 			{
+				part_model_locs.at<double>(mappings[mapping_ind].second) = detected_landmarks.at<double>(mappings[mapping_ind].first);
+				part_model_locs.at<double>(mappings[mapping_ind].second + n_part_points) = detected_landmarks.at<double>(mappings[mapping_ind].first + this->pdm.NumberOfPoints());
+			}
 
-				int n_part_points = hierarchical_models[part_model].pdm.NumberOfPoints();
+			// Fit the part based model PDM
+			hierarchical_models[part_model].pdm.CalcParams(hierarchical_models[part_model].params_global, hierarchical_models[part_model].params_local, part_model_locs);
 
-				vector<pair<int,int>> mappings = this->hierarchical_mapping[part_model];
+			// Only do this if we don't need to upsample
+			if (params_global[0] > 0.9 * hierarchical_models[part_model].patch_experts.patch_scaling[0])
+			{
+				parts_used = true;
 
-				cv::Mat_<double> part_model_locs(n_part_points * 2, 1, 0.0);
+				this->hierarchical_params[part_model].window_sizes_current = this->hierarchical_params[part_model].window_sizes_init;
 
-				// Extract the corresponding landmarks
-				for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
-				{
-					part_model_locs.at<double>(mappings[mapping_ind].second) = detected_landmarks.at<double>(mappings[mapping_ind].first);
-					part_model_locs.at<double>(mappings[mapping_ind].second + n_part_points) = detected_landmarks.at<double>(mappings[mapping_ind].first + this->pdm.NumberOfPoints());
-				}
+				// Do the actual landmark detection
+				hierarchical_models[part_model].DetectLandmarks(image, hierarchical_params[part_model]);
 
-				// Fit the part based model PDM
-				hierarchical_models[part_model].pdm.CalcParams(hierarchical_models[part_model].params_global, hierarchical_models[part_model].params_local, part_model_locs);
-
-				// Only do this if we don't need to upsample
-				if (params_global[0] > 0.9 * hierarchical_models[part_model].patch_experts.patch_scaling[0])
-				{
-					parts_used = true;
-
-					this->hierarchical_params[part_model].window_sizes_current = this->hierarchical_params[part_model].window_sizes_init;
-
-					// Do the actual landmark detection
-					hierarchical_models[part_model].DetectLandmarks(image, hierarchical_params[part_model]);
-
-				}
-				else
-				{
-					hierarchical_models[part_model].pdm.CalcShape2D(hierarchical_models[part_model].detected_landmarks, hierarchical_models[part_model].params_local, hierarchical_models[part_model].params_global);
-				}
+			}
+			else
+			{
+				hierarchical_models[part_model].pdm.CalcShape2D(hierarchical_models[part_model].detected_landmarks, hierarchical_models[part_model].params_local, hierarchical_models[part_model].params_global);
 			}
 		}
 		});
@@ -625,16 +619,11 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 		{
 			vector<pair<int,int>> mappings = this->hierarchical_mapping[part_model];
 
-			if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-				hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-				&& !params.track_gaze))
+			// Reincorporate the models into main tracker
+			for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
 			{
-				// Reincorporate the models into main tracker
-				for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
-				{
-					detected_landmarks.at<double>(mappings[mapping_ind].first) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second);
-					detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
-				}
+				detected_landmarks.at<double>(mappings[mapping_ind].first) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second);
+				detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
 			}
 		}
diff --git a/lib/local/LandmarkDetector/src/LandmarkDetectorParameters.cpp b/lib/local/LandmarkDetector/src/LandmarkDetectorParameters.cpp
index 08910fe..798a2c4 100644
--- a/lib/local/LandmarkDetector/src/LandmarkDetectorParameters.cpp
+++ b/lib/local/LandmarkDetector/src/LandmarkDetectorParameters.cpp
@@ -148,12 +148,6 @@ FaceModelParameters::FaceModelParameters(vector<string> &arguments)
 			valid[i + 1] = false;
 			i++;
 		}
-		else if (arguments[i].compare("-gaze") == 0)
-		{
-			track_gaze = true;
-
-			valid[i] = false;
-		}
 		else if (arguments[i].compare("-q") == 0)
 		{
 
@@ -267,7 +261,5 @@ void FaceModelParameters::init()
 	// By default use HOG SVM
 	curr_face_detector = HOG_SVM_DETECTOR;
 
-	// The gaze tracking has to be explicitly initialised
-	track_gaze = false;
 
 }
diff --git a/lib/local/Utilities/src/RecorderOpenFace.cpp b/lib/local/Utilities/src/RecorderOpenFace.cpp
index 2feb8ca..24b856a 100644
--- a/lib/local/Utilities/src/RecorderOpenFace.cpp
+++ b/lib/local/Utilities/src/RecorderOpenFace.cpp
@@ -65,7 +65,8 @@ void CreateDirectory(std::string output_path)
 
 		if (!success)
 		{
-			std::cout << "Failed to create a directory..." << p.string() << std::endl;
+			std::cout << "ERROR: failed to create output directory:" << p.string() << ", do you have permission to create directory" << std::endl;
+			exit(1);
 		}
 	}
 }
@@ -85,7 +86,6 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
 		valid[i] = true;
 	}
 
-	string record_root;
 	for (size_t i = 0; i < arguments.size(); ++i)
 	{
 		if (arguments[i].compare("-outroot") == 0)
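
Note (not part of the patch): after this change there is no det_parameters.track_gaze field and no -gaze command-line flag; the eye part models are always fitted, and gaze estimation is gated purely on detection success and on whether the loaded CLNF model contains an eye model. Below is a minimal sketch of the resulting calling pattern, reusing only calls that appear in the diff above; sequence_reader, face_model, recording_params, and detection_success are assumed to be set up as in FeatureExtraction.cpp.

	// Warn early if gaze output was requested but the loaded model has no eye part
	if (recording_params.outputGaze() && !face_model.eye_model)
		cout << "WARNING: no eye model defined, but outputting gaze" << endl;

	// Default gaze vectors, overwritten only when gaze can actually be estimated
	cv::Point3f gazeDirection0(0, 0, -1);
	cv::Point3f gazeDirection1(0, 0, -1);

	// The presence of the eye model, not a track_gaze flag, now decides whether gaze is estimated
	if (detection_success && face_model.eye_model)
	{
		GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
		GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
	}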