Making sure all modules do eye tracking by default. More work on FaceLandmarkImg simplification.

Tadas Baltrusaitis 2017-11-16 09:00:47 +00:00
parent c93004f03d
commit 734c4fd34d
8 changed files with 47 additions and 60 deletions
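In short: the experimental det_parameters.track_gaze switch and its -gaze command-line flag are removed, and every module now estimates gaze by default whenever landmark detection succeeds and the loaded model actually contains an eye model. A minimal sketch of the recurring pattern, using identifiers from the diffs below (the OpenFace setup is elided, so this is illustrative rather than standalone):

// Gaze is estimated per eye (the final boolean selects the eye) whenever
// detection succeeded and the loaded CLNF ships eye part models; the
// removed -gaze flag is no longer needed.
cv::Point3f gazeDirection0(0, 0, -1);
cv::Point3f gazeDirection1(0, 0, -1);

if (detection_success && face_model.eye_model)
{
	GazeAnalysis::EstimateGaze(face_model, gazeDirection0,
		sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
	GazeAnalysis::EstimateGaze(face_model, gazeDirection1,
		sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
}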

View File

@@ -191,6 +191,11 @@ int main (int argc, char **argv)
 	Utilities::RecorderOpenFaceParameters recording_params(arguments, false);
 	Utilities::RecorderOpenFace open_face_rec(image_reader.name, recording_params, arguments);
 
+	visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
+
+	if (recording_params.outputGaze() && !face_model.eye_model)
+		cout << "WARNING: no eye model defined, but outputting gaze" << endl;
+
 	// Making sure the image is in uchar grayscale
 	cv::Mat_<uchar> grayscale_image = image_reader.GetGrayFrame();
@@ -230,7 +235,7 @@ int main (int argc, char **argv)
 	cv::Point3f gaze_direction1(0, 0, -1);
 	cv::Vec2d gaze_angle(0, 0);
 
-	if (success && det_parameters.track_gaze)
+	if (success && face_model.eye_model)
 	{
 		GazeAnalysis::EstimateGaze(face_model, gaze_direction0, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, true);
 		GazeAnalysis::EstimateGaze(face_model, gaze_direction1, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, false);
@@ -249,13 +254,11 @@ int main (int argc, char **argv)
 	}
 
 	// Displaying the tracking visualizations
-	visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
 	visualizer.SetObservationFaceAlign(sim_warped_img);
 	visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
 	visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.detection_success);
 	visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 	visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy), face_model.detection_certainty);
-	visualizer.ShowObservation();
 
 	// Setting up the recorder output
 	open_face_rec.SetObservationHOG(face_model.detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
@@ -271,6 +274,7 @@ int main (int argc, char **argv)
 		// Grabbing the next frame in the sequence
 		captured_image = image_reader.GetNextImage();
 	}
+	visualizer.ShowObservation();
 }
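Taken together, the visualizer edits in this file are mostly a reordering: SetImage moves to the top of the per-image work, the observation setters stay where they were, and ShowObservation is deferred until the image has been fully processed. A rough sketch of the resulting call order (detection and recording steps elided):

visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);

// ... landmark detection, gaze and pose estimation ...

visualizer.SetObservationFaceAlign(sim_warped_img);
visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.detection_success);
visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);

// ... recorder output ...

visualizer.ShowObservation(); // deferred, per the hunk above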

View File

@@ -92,7 +92,6 @@ int main (int argc, char **argv)
 	vector<string> arguments = get_arguments(argc, argv);
 
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
-	det_parameters.track_gaze = true;
 
 	// The modules that are being used for tracking
 	LandmarkDetector::CLNF face_model(det_parameters.model_location);
@@ -142,7 +141,8 @@ int main (int argc, char **argv)
 	cv::Point3f gazeDirection0(0, 0, -1);
 	cv::Point3f gazeDirection1(0, 0, -1);
 
-	if (det_parameters.track_gaze && detection_success && face_model.eye_model)
+	// If tracking succeeded and we have an eye model, estimate gaze
+	if (detection_success && face_model.eye_model)
 	{
 		GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
 		GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);

View File

@@ -166,6 +166,9 @@
     <ProjectReference Include="..\..\lib\local\LandmarkDetector\LandmarkDetector.vcxproj">
       <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
     </ProjectReference>
+    <ProjectReference Include="..\..\lib\local\Utilities\Utilities.vcxproj">
+      <Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+    </ProjectReference>
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">

View File

@@ -107,7 +107,6 @@ int main (int argc, char **argv)
 	// Load face landmark detector
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
 
-	// Always track gaze in feature extraction
-	det_parameters.track_gaze = true;
 	LandmarkDetector::CLNF face_model(det_parameters.model_location);
 
 	// Load facial feature extractor and AU analyser
@@ -137,6 +136,9 @@ int main (int argc, char **argv)
 	Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.fps);
 	Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments);
 
+	if (recording_params.outputGaze() && !face_model.eye_model)
+		cout << "WARNING: no eye model defined, but outputting gaze" << endl;
+
 	captured_image = sequence_reader.GetNextFrame();
 
 	// For reporting progress
@@ -155,7 +157,7 @@ int main (int argc, char **argv)
 	// Gaze tracking, absolute gaze direction
 	cv::Point3f gazeDirection0(0, 0, -1); cv::Point3f gazeDirection1(0, 0, -1); cv::Vec2d gazeAngle(0, 0);
 
-	if (det_parameters.track_gaze && detection_success && face_model.eye_model)
+	if (detection_success && face_model.eye_model)
 	{
 		GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
 		GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
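FeatureExtraction gains the same early sanity check that FaceLandmarkImg received above: if the recorder is configured to output gaze but the loaded model has no eye model, the mismatch is flagged up-front instead of silently recording default gaze values. A hedged sketch of that guard pulled into a helper (the function name is hypothetical and it assumes the OpenFace Utilities and LandmarkDetector headers; the check itself is verbatim from the diff):

static void WarnIfGazeRequestedWithoutEyeModel(const Utilities::RecorderOpenFaceParameters& recording_params,
	const LandmarkDetector::CLNF& face_model)
{
	// outputGaze() reports whether gaze columns were requested;
	// eye_model is only set when the CLNF includes the eye part models.
	if (recording_params.outputGaze() && !face_model.eye_model)
		std::cout << "WARNING: no eye model defined, but outputting gaze" << std::endl;
}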

View File

@@ -102,9 +102,6 @@ struct FaceModelParameters
 	// Should the parameters be refined for different scales
 	bool refine_parameters;
 
-	// Using the brand new and experimental gaze tracker
-	bool track_gaze;
-
 	FaceModelParameters();
 	FaceModelParameters(vector<string> &arguments);

View File

@@ -576,43 +576,37 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 	// Do the hierarchical models in parallel
 	tbb::parallel_for(0, (int)hierarchical_models.size(), [&](int part_model){
 	{
-		// Only do the synthetic eye models if we're doing gaze
-		if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-			hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-			&& !params.track_gaze))
-		{
 		int n_part_points = hierarchical_models[part_model].pdm.NumberOfPoints();
 
 		vector<pair<int, int>> mappings = this->hierarchical_mapping[part_model];
 
 		cv::Mat_<double> part_model_locs(n_part_points * 2, 1, 0.0);
 
 		// Extract the corresponding landmarks
 		for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
 		{
 			part_model_locs.at<double>(mappings[mapping_ind].second) = detected_landmarks.at<double>(mappings[mapping_ind].first);
 			part_model_locs.at<double>(mappings[mapping_ind].second + n_part_points) = detected_landmarks.at<double>(mappings[mapping_ind].first + this->pdm.NumberOfPoints());
 		}
 
 		// Fit the part based model PDM
 		hierarchical_models[part_model].pdm.CalcParams(hierarchical_models[part_model].params_global, hierarchical_models[part_model].params_local, part_model_locs);
 
 		// Only do this if we don't need to upsample
 		if (params_global[0] > 0.9 * hierarchical_models[part_model].patch_experts.patch_scaling[0])
 		{
 			parts_used = true;
 
 			this->hierarchical_params[part_model].window_sizes_current = this->hierarchical_params[part_model].window_sizes_init;
 
 			// Do the actual landmark detection
 			hierarchical_models[part_model].DetectLandmarks(image, hierarchical_params[part_model]);
 		}
 		else
 		{
 			hierarchical_models[part_model].pdm.CalcShape2D(hierarchical_models[part_model].detected_landmarks, hierarchical_models[part_model].params_local, hierarchical_models[part_model].params_global);
 		}
-		}
 	}
 	});
@@ -625,16 +619,11 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 	{
 		vector<pair<int, int>> mappings = this->hierarchical_mapping[part_model];
 
-		if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-			hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-			&& !params.track_gaze))
-		{
 		// Reincorporate the models into main tracker
 		for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
 		{
 			detected_landmarks.at<double>(mappings[mapping_ind].first) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second);
 			detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
 		}
-		}
 	}
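For the indexing in the two hunks above: a model with n landmarks stores them as a 2n x 1 matrix, x coordinates in the first n rows and y coordinates in the last n, so point k's y value sits at row k + n. Each mapping pairs an index in the main model with an index in the part model. A self-contained sketch of the copy into a part model (names follow the diff; this is an illustration, not the library code):

#include <opencv2/core.hpp>
#include <utility>
#include <vector>

void CopyToPartModel(const cv::Mat_<double>& detected_landmarks, int n_main_points,
	const std::vector<std::pair<int, int>>& mappings, int n_part_points,
	cv::Mat_<double>& part_model_locs)
{
	part_model_locs = cv::Mat_<double>(n_part_points * 2, 1, 0.0);
	for (const auto& m : mappings)
	{
		// x coordinate of the mapped point: rows [0, n)
		part_model_locs.at<double>(m.second) = detected_landmarks.at<double>(m.first);
		// y coordinate: rows [n, 2n), hence the + n offsets on both sides
		part_model_locs.at<double>(m.second + n_part_points) =
			detected_landmarks.at<double>(m.first + n_main_points);
	}
}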

View File

@@ -148,12 +148,6 @@ FaceModelParameters::FaceModelParameters(vector<string> &arguments)
 			valid[i + 1] = false;
 			i++;
 		}
-		else if (arguments[i].compare("-gaze") == 0)
-		{
-			track_gaze = true;
-			valid[i] = false;
-		}
 		else if (arguments[i].compare("-q") == 0)
 		{
@@ -267,7 +261,5 @@ void FaceModelParameters::init()
 	// By default use HOG SVM
 	curr_face_detector = HOG_SVM_DETECTOR;
 
-	// The gaze tracking has to be explicitly initialised
-	track_gaze = false;
 }
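The removed -gaze branch followed the parser's usual convention: a recognised flag sets its target member and marks its slot in the parallel valid vector so the consumed argument can be filtered out afterwards. With gaze enabled by default there is nothing left for -gaze to set, so the branch goes away entirely. A minimal sketch of the convention (quiet is an illustrative stand-in for a real member such as the -q handler's target):

#include <string>
#include <vector>

void ParseFlags(const std::vector<std::string>& arguments, std::vector<bool>& valid, bool& quiet)
{
	// valid runs parallel to arguments; entries start out true
	for (size_t i = 0; i < arguments.size(); ++i)
	{
		if (arguments[i].compare("-q") == 0)
		{
			quiet = true;     // set the target of the flag
			valid[i] = false; // mark the argument as consumed
		}
	}
}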

View File

@@ -65,7 +65,8 @@ void CreateDirectory(std::string output_path)
 		if (!success)
 		{
-			std::cout << "Failed to create a directory..." << p.string() << std::endl;
+			std::cout << "ERROR: failed to create output directory:" << p.string() << ", do you have permission to create directory" << std::endl;
+			exit(1);
 		}
 	}
 }
@@ -85,7 +86,6 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
 		valid[i] = true;
 	}
 
-	string record_root;
 	for (size_t i = 0; i < arguments.size(); ++i)
 	{
 		if (arguments[i].compare("-outroot") == 0)
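One more note on the CreateDirectory hunk above: the change turns a logged failure into a fatal one, which is reasonable since every subsequent recorder write would fail anyway. A sketch of the resulting behaviour, assuming boost::filesystem (suggested by the p.string() call; the exists/create_directories calls are a reconstruction, while the message and exit(1) are from the diff):

#include <boost/filesystem.hpp>
#include <cstdlib>
#include <iostream>
#include <string>

void CreateDirectory(std::string output_path)
{
	boost::filesystem::path p(output_path);
	if (!boost::filesystem::exists(p))
	{
		bool success = boost::filesystem::create_directories(p);
		if (!success)
		{
			// Fail fast: nothing downstream can work without the output directory
			std::cout << "ERROR: failed to create output directory:" << p.string()
				<< ", do you have permission to create directory" << std::endl;
			std::exit(1);
		}
	}
}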