Gaze estimation correction with offset; world coordinates are now the default.
Commit 1cfb765bb9 (parent a47cbc5fd8)
4 changed files with 13 additions and 13 deletions
@@ -232,9 +232,9 @@ int main (int argc, char **argv)
 // Get the input output file parameters
 
 // Indicates that rotation should be with respect to camera or world coordinates
-bool use_world_coordinates;
+bool use_camera_coordinates;
 string output_codec; //not used but should
-LandmarkDetector::get_video_input_output_params(input_files, output_files, tracked_videos_output, use_world_coordinates, output_codec, arguments);
+LandmarkDetector::get_video_input_output_params(input_files, output_files, tracked_videos_output, use_camera_coordinates, output_codec, arguments);
 
 bool video_input = true;
 bool images_as_video = false;
@@ -584,13 +584,13 @@ int main (int argc, char **argv)
 
 // Work out the pose of the head from the tracked model
 cv::Vec6d pose_estimate;
-if(use_world_coordinates)
+if(use_camera_coordinates)
 {
-pose_estimate = LandmarkDetector::GetCorrectedPoseWorld(face_model, fx, fy, cx, cy);
+pose_estimate = LandmarkDetector::GetCorrectedPoseCamera(face_model, fx, fy, cx, cy);
 }
 else
 {
-pose_estimate = LandmarkDetector::GetCorrectedPoseCamera(face_model, fx, fy, cx, cy);
+pose_estimate = LandmarkDetector::GetCorrectedPoseWorld(face_model, fx, fy, cx, cy);
 }
 
 if (hog_output_file.is_open())
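The hunk above is the behavioural core of the change: with the flag renamed, the default branch (flag left false) now returns the world coordinate pose, and the camera coordinate pose becomes the opt-in path. A minimal sketch of that selection, assuming the OpenFace LandmarkDetector API as it appears in this diff (the umbrella header name and helper name are assumptions, not part of the commit):

// Sketch only: selects the pose function the way the diff above does after this commit.
#include <opencv2/core/core.hpp>
#include "LandmarkCoreIncludes.h"   // assumed OpenFace umbrella header providing LandmarkDetector::CLNF and the pose functions

cv::Vec6d EstimateHeadPose(const LandmarkDetector::CLNF& face_model, bool use_camera_coordinates,
                           float fx, float fy, float cx, float cy)
{
	// Default after this commit: pose in world coordinates; the camera coordinate
	// variant is only used when the renamed flag has been set (via -camera_coord).
	if (use_camera_coordinates)
	{
		return LandmarkDetector::GetCorrectedPoseCamera(face_model, fx, fy, cx, cy);
	}
	return LandmarkDetector::GetCorrectedPoseWorld(face_model, fx, fy, cx, cy);
}

Either way the result is a 6-vector whose last three elements carry the Euler rotation angles, which is how the gaze code further down reads it.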
@@ -88,7 +88,7 @@ cv::Point3f GetPupilPosition(cv::Mat_<double> eyeLdmks3d){
 
 void FaceAnalysis::EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Point3f& gaze_absolute, float fx, float fy, float cx, float cy, bool left_eye)
 {
-cv::Vec6d headPose = LandmarkDetector::GetPoseCamera(clnf_model, fx, fy, cx, cy);
+cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);
 cv::Vec3d eulerAngles(headPose(3), headPose(4), headPose(5));
 cv::Matx33d rotMat = LandmarkDetector::Euler2RotationMatrix(eulerAngles);
 
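For the gaze estimate the head pose now comes from GetCorrectedPoseWorld rather than GetPoseCamera, but the way the rotation is recovered from the 6-vector is unchanged: elements 3 to 5 are treated as Euler angles and converted to a rotation matrix. A self-contained sketch of that conversion, with a plain OpenCV stand-in for LandmarkDetector::Euler2RotationMatrix (the axis order and angle convention below are an assumption, not taken from the repository; the library's own helper should be preferred):

// Sketch: extract the rotation part of the Vec6d head pose and build a rotation matrix.
#include <cmath>
#include <opencv2/core/core.hpp>

cv::Matx33d HeadRotationFromPose(const cv::Vec6d& head_pose)
{
	// Elements 3-5 hold the Euler rotation angles, as the EstimateGaze hunk above reads them.
	const cv::Vec3d e(head_pose(3), head_pose(4), head_pose(5));
	const cv::Matx33d Rx(1, 0, 0,
	                     0, std::cos(e[0]), -std::sin(e[0]),
	                     0, std::sin(e[0]),  std::cos(e[0]));
	const cv::Matx33d Ry(std::cos(e[1]), 0, std::sin(e[1]),
	                     0, 1, 0,
	                     -std::sin(e[1]), 0, std::cos(e[1]));
	const cv::Matx33d Rz(std::cos(e[2]), -std::sin(e[2]), 0,
	                     std::sin(e[2]),  std::cos(e[2]), 0,
	                     0, 0, 1);
	return Rx * Ry * Rz;   // composition order is an assumption
}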
@@ -117,7 +117,7 @@ void FaceAnalysis::EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Po
 
 cv::Mat faceLdmks3d = clnf_model.GetShape(fx, fy, cx, cy);
 faceLdmks3d = faceLdmks3d.t();
-cv::Mat offset = (cv::Mat_<double>(3, 1) << 0, -3.50, 0);
+cv::Mat offset = (cv::Mat_<double>(3, 1) << 0, -3.50, 7.0);
 int eyeIdx = 1;
 if (left_eye)
 {
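The only change in this hunk is the offset's Z component (0 to 7.0), pushing the assumed eyeball-centre reference further back along the head's own Z axis while keeping the -3.50 Y component. Because the offset is expressed in the head's local frame, it has to be rotated by the head rotation (rotMat above) before it can be added to a point in the landmark frame. An illustrative helper, assuming millimetre units and without reproducing the exact landmark indexing EstimateGaze uses:

// Illustrative only: map a head-frame offset into the landmark frame and apply it.
#include <opencv2/core/core.hpp>

cv::Point3f ApplyHeadFrameOffset(const cv::Point3f& reference_point,
                                 const cv::Matx33d& head_rotation,      // e.g. from Euler2RotationMatrix
                                 const cv::Vec3d& offset_in_head_frame) // e.g. (0, -3.50, 7.0)
{
	// Rotate the fixed offset out of the head's local frame, then translate the reference point by it.
	const cv::Vec3d rotated = head_rotation * offset_in_head_frame;
	return cv::Point3f(reference_point.x + static_cast<float>(rotated[0]),
	                   reference_point.y + static_cast<float>(rotated[1]),
	                   reference_point.z + static_cast<float>(rotated[2]));
}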
@@ -97,7 +97,7 @@ void create_directories(string output_path)
 
 // Extracting the following command line arguments -f, -op, -of, -ov (and possible ordered repetitions)
 void get_video_input_output_params(vector<string> &input_video_files, vector<string> &output_files,
-vector<string> &output_video_files, bool& world_coordinates_pose, string& output_codec, vector<string> &arguments)
+vector<string> &output_video_files, bool& camera_coordinates_pose, string& output_codec, vector<string> &arguments)
 {
 bool* valid = new bool[arguments.size()];
 
@@ -106,8 +106,8 @@ void get_video_input_output_params(vector<string> &input_video_files, vector<str
 valid[i] = true;
 }
 
-// By default use rotation with respect to camera (not world coordinates)
-world_coordinates_pose = false;
+// By default use world coordinate system
+camera_coordinates_pose = false;
 
 // By default use DIVX codec
 output_codec = "DIVX";
@@ -165,9 +165,9 @@ void get_video_input_output_params(vector<string> &input_video_files, vector<str
 valid[i+1] = false;
 i++;
 }
-else if (arguments[i].compare("-world_coord") == 0)
+else if (arguments[i].compare("-camera_coord") == 0)
 {
-world_coordinates_pose = true;
+camera_coordinates_pose = true;
 }
 else if (arguments[i].compare("-oc") == 0)
 {
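On the command-line side, the -world_coord switch is replaced by -camera_coord and the default flips with it: leaving the flag out now yields the world coordinate pose. A minimal sketch of the resulting parsing behaviour, using only what is visible in this diff (the helper name is made up for illustration):

// Sketch: the renamed flag defaults to false (world coordinates) and is only
// flipped to true when "-camera_coord" appears among the arguments.
#include <string>
#include <vector>

bool UseCameraCoordinates(const std::vector<std::string>& arguments)
{
	bool camera_coordinates_pose = false;   // default after this commit: world coordinate pose
	for (const std::string& arg : arguments)
	{
		if (arg.compare("-camera_coord") == 0)
		{
			camera_coordinates_pose = true;
		}
	}
	return camera_coordinates_pose;
}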