commit 038ce5823d

Merge branch 'develop' into feature/Windows-GUI

# Conflicts:
#	exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp
#	lib/local/GazeAnalyser/src/GazeEstimation.cpp
#	lib/local/LandmarkDetector/include/LandmarkDetectorFunc.h
#	lib/local/LandmarkDetector/src/LandmarkDetectorFunc.cpp

7 changed files with 28 additions and 44 deletions
@@ -224,10 +224,8 @@ int main (int argc, char **argv)
 
 	// Get the input output file parameters
 
-	// Indicates that rotation should be with respect to camera or world coordinates
-	bool use_camera_coordinates = false;
 	string output_codec; //not used but should
-	LandmarkDetector::get_video_input_output_params(input_files, output_files, tracked_videos_output, use_camera_coordinates, output_codec, arguments);
+	LandmarkDetector::get_video_input_output_params(input_files, output_files, tracked_videos_output, output_codec, arguments);
 
 	bool video_input = true;
 	bool images_as_video = false;

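Callers no longer pass a camera/world flag into the input parser. A minimal usage sketch of the new signature is shown below; it assumes the LandmarkDetector header that declares this helper is included, and the variable names simply mirror the ones already used in this main().

    #include <string>
    #include <vector>

    void parse_io_example(std::vector<std::string> arguments)
    {
        std::vector<std::string> input_files, output_files, tracked_videos_output;
        std::string output_codec; // filled with "DIVX" by default inside the helper

        // The bool& camera_coordinates_pose parameter has been dropped.
        LandmarkDetector::get_video_input_output_params(
            input_files, output_files, tracked_videos_output, output_codec, arguments);
    }
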
@@ -520,15 +518,7 @@ int main (int argc, char **argv)
 			}
 
 			// Work out the pose of the head from the tracked model
-			cv::Vec6d pose_estimate;
-			if(use_camera_coordinates)
-			{
-				pose_estimate = LandmarkDetector::GetCorrectedPoseCamera(face_model, fx, fy, cx, cy);
-			}
-			else
-			{
-				pose_estimate = LandmarkDetector::GetCorrectedPoseWorld(face_model, fx, fy, cx, cy);
-			}
+			cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
 
 			if (hog_output_file.is_open())
 			{

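The GetCorrectedPoseCamera/GetCorrectedPoseWorld pair is folded into a single GetPose call that returns the pose in world coordinates with the camera at the origin. Per the renamed API in LandmarkDetectorFunc.h below, code that still needs the camera-relative orientation can call GetPoseWRTCamera instead; a short sketch of choosing between the two conventions:

    // GetPose          -> pose in world coordinates, camera at the origin (0,0,0).
    // GetPoseWRTCamera -> same translation, but rotation expressed relative to the
    //                     camera ray, i.e. whether the head is facing the camera.
    cv::Vec6d pose_world  = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
    cv::Vec6d pose_camera = LandmarkDetector::GetPoseWRTCamera(face_model, fx, fy, cx, cy);
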
@@ -1,5 +1,6 @@
 #TBB library
 include_directories(${TBB_ROOT_DIR}/include)
+include_directories(${BLAS_ROOT_DIR})
 
 include_directories(${BOOST_INCLUDE_DIR})
 

@@ -92,7 +92,6 @@ cv::Point3f GazeAnalysis::GetPupilPosition(cv::Mat_<double> eyeLdmks3d){
 void GazeAnalysis::EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Point3f& gaze_absolute, float fx, float fy, float cx, float cy, bool left_eye)
 {
 	cv::Vec6d headPose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
-
 	cv::Vec3d eulerAngles(headPose(3), headPose(4), headPose(5));
 	cv::Matx33d rotMat = LandmarkDetector::Euler2RotationMatrix(eulerAngles);
 

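The gaze estimator only needs the head rotation here: it takes the three Euler angles out of the 6-DOF pose and converts them to a 3x3 rotation matrix via Euler2RotationMatrix. The following is an illustrative sketch of such a conversion, assuming an X-Y-Z rotation order; the library's actual axis convention is not visible in this diff.

    #include <opencv2/core.hpp>
    #include <cmath>

    // Illustrative only; assumes R = Rx(e0) * Ry(e1) * Rz(e2). OpenFace's
    // Euler2RotationMatrix may use a different axis order or composition.
    cv::Matx33d EulerToRotationSketch(const cv::Vec3d& e)
    {
        double c0 = std::cos(e[0]), s0 = std::sin(e[0]);
        double c1 = std::cos(e[1]), s1 = std::sin(e[1]);
        double c2 = std::cos(e[2]), s2 = std::sin(e[2]);

        cv::Matx33d Rx(1, 0, 0,    0, c0, -s0,    0, s0, c0);
        cv::Matx33d Ry(c1, 0, s1,  0, 1, 0,      -s1, 0, c1);
        cv::Matx33d Rz(c2, -s2, 0, s2, c2, 0,     0, 0, 1);

        return Rx * Ry * Rz;
    }
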
@@ -70,10 +70,11 @@ namespace LandmarkDetector
 
 	// Return the current estimate of the head pose in world coordinates with camera at origin (0,0,0)
 	// The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-	cv::Vec6d GetPose(const CLNF& clnf_model, double fx, double fy, double cx, double cy);
+	cv::Vec6d GetPose(const CLNF& clnf_model, float fx, float fy, float cx, float cy);
 
 	// Return the current estimate of the head pose in world coordinates with camera at origin (0,0,0), but with rotation representing if the head is looking at the camera
 	// The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-	cv::Vec6d GetPoseWRTCamera(const CLNF& clnf_model, double fx, double fy, double cx, double cy);
+	cv::Vec6d GetPoseWRTCamera(const CLNF& clnf_model, float fx, float fy, float cx, float cy);
 
 }
 #endif

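Both declarations also switch the camera intrinsics from double to float, which matches get_camera_params below (it already fills float fx, fy, cx, cy), so values read from the command line can be passed straight through without a cast. A hedged sketch of that flow, assuming the LandmarkDetector headers are included, a tracked CLNF instance clnf_model, and a placeholder argument vector:

    int device = 0;
    float fx = 0, fy = 0, cx = 0, cy = 0;
    std::vector<std::string> arguments; // placeholder, typically built from argv

    LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
    cv::Vec6d pose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
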
@@ -53,7 +53,7 @@ namespace LandmarkDetector
 	// Helper functions for parsing the inputs
 	//=============================================================================================
 	void get_video_input_output_params(vector<string> &input_video_file, vector<string> &output_files,
-		vector<string> &output_video_files, bool& camera_coordinates_pose, string &output_codec, vector<string> &arguments);
+		vector<string> &output_video_files, string &output_codec, vector<string> &arguments);
 
 	void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy, vector<string> &arguments);
 

@@ -50,15 +50,15 @@ using namespace LandmarkDetector;
 // which is only correct close to the centre of the image
 // This method returns a corrected pose estimate with respect to world coordinates with camera at origin (0,0,0)
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-cv::Vec6d LandmarkDetector::GetPose(const CLNF& clnf_model, double fx, double fy, double cx, double cy)
+cv::Vec6d LandmarkDetector::GetPose(const CLNF& clnf_model, float fx, float fy, float cx, float cy)
 {
-	if(!clnf_model.detected_landmarks.empty() && clnf_model.params_global[0] != 0)
+	if (!clnf_model.detected_landmarks.empty() && clnf_model.params_global[0] != 0)
 	{
 		// This is used as an initial estimate for the iterative PnP algorithm
 		double Z = fx / clnf_model.params_global[0];
 
-		double X = ((clnf_model.params_global[4] - cx) * (1.0/fx)) * Z;
-		double Y = ((clnf_model.params_global[5] - cy) * (1.0/fy)) * Z;
+		double X = ((clnf_model.params_global[4] - cx) * (1.0 / fx)) * Z;
+		double Y = ((clnf_model.params_global[5] - cy) * (1.0 / fy)) * Z;
 
 		// Correction for orientation
 

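The initial translation estimate above is plain pinhole back-projection: the model scale params_global[0] gives depth as Z = fx / s, and the 2D model centre (params_global[4], params_global[5]) is lifted to 3D with X = (u - cx) / fx * Z and Y = (v - cy) / fy * Z. A small self-contained worked example with invented numbers, just to show the arithmetic:

    #include <cstdio>

    int main()
    {
        double fx = 500.0, fy = 500.0, cx = 320.0, cy = 240.0; // assumed intrinsics
        double scale = 2.0;          // stand-in for clnf_model.params_global[0]
        double u = 400.0, v = 300.0; // stand-in for params_global[4], params_global[5]

        double Z = fx / scale;                  // 250
        double X = ((u - cx) * (1.0 / fx)) * Z; // (80 / 500) * 250 = 40
        double Y = ((v - cy) * (1.0 / fy)) * Z; // (60 / 500) * 250 = 30

        std::printf("Initial estimate: X=%.1f Y=%.1f Z=%.1f\n", X, Y, Z);
        return 0;
    }
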
@@ -89,22 +89,22 @@ cv::Vec6d LandmarkDetector::GetPose(const CLNF& clnf_model, double fx, double fy
 	}
 	else
 	{
-		return cv::Vec6d(0,0,0,0,0,0);
+		return cv::Vec6d(0, 0, 0, 0, 0, 0);
 	}
 }
 
 // Getting a head pose estimate from the currently detected landmarks, with appropriate correction due to perspective projection
 // This method returns a corrected pose estimate with respect to a point camera (NOTE not the world coordinates), which is useful to find out if the person is looking at a camera
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-cv::Vec6d LandmarkDetector::GetPoseWRTCamera(const CLNF& clnf_model, double fx, double fy, double cx, double cy)
+cv::Vec6d LandmarkDetector::GetPoseWRTCamera(const CLNF& clnf_model, float fx, float fy, float cx, float cy)
 {
-	if(!clnf_model.detected_landmarks.empty() && clnf_model.params_global[0] != 0)
+	if (!clnf_model.detected_landmarks.empty() && clnf_model.params_global[0] != 0)
 	{
 
 		double Z = fx / clnf_model.params_global[0];
 
-		double X = ((clnf_model.params_global[4] - cx) * (1.0/fx)) * Z;
-		double Y = ((clnf_model.params_global[5] - cy) * (1.0/fy)) * Z;
+		double X = ((clnf_model.params_global[4] - cx) * (1.0 / fx)) * Z;
+		double Y = ((clnf_model.params_global[5] - cy) * (1.0 / fy)) * Z;
 
 		// Correction for orientation
 

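The rest of GetPoseWRTCamera (cut off in this hunk) applies the "correction for orientation": because only a head directly in front of the lens looks straight down the optical axis, the rotation is re-expressed relative to the ray from the camera to the head. One plausible way to do that, given here purely as a hedged sketch and not necessarily how the library implements it, is to rotate the world-coordinate pose by the yaw and pitch of that ray:

    #include <opencv2/core.hpp>
    #include <cmath>

    // Hedged sketch: re-express a head rotation relative to the camera->head ray.
    // R_world is the head rotation in world coordinates, (X, Y, Z) the head position.
    cv::Matx33d PoseWRTCameraSketch(const cv::Matx33d& R_world, double X, double Y, double Z)
    {
        // Angles of the ray from the camera (at the origin) to the head centre.
        double yaw   = std::atan2(X, Z);
        double pitch = std::atan2(-Y, std::sqrt(X * X + Z * Z));

        // Rotation that points the camera's optical axis at the head.
        cv::Matx33d Ry( std::cos(yaw), 0, std::sin(yaw),
                        0,             1, 0,
                       -std::sin(yaw), 0, std::cos(yaw));
        cv::Matx33d Rx(1, 0,                0,
                       0, std::cos(pitch), -std::sin(pitch),
                       0, std::sin(pitch),  std::cos(pitch));

        // Head rotation expressed in the frame of that ray.
        return (Ry * Rx).t() * R_world;
    }
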
@@ -147,7 +147,7 @@ cv::Vec6d LandmarkDetector::GetPoseWRTCamera(const CLNF& clnf_model, double fx,
 	}
 	else
 	{
-		return cv::Vec6d(0,0,0,0,0,0);
+		return cv::Vec6d(0, 0, 0, 0, 0, 0);
 	}
 }
 

@@ -97,7 +97,7 @@ void create_directories(string output_path)
 
 // Extracting the following command line arguments -f, -op, -of, -ov (and possible ordered repetitions)
 void get_video_input_output_params(vector<string> &input_video_files, vector<string> &output_files,
-	vector<string> &output_video_files, bool& camera_coordinates_pose, string& output_codec, vector<string> &arguments)
+	vector<string> &output_video_files, string& output_codec, vector<string> &arguments)
 {
 	bool* valid = new bool[arguments.size()];
 

@@ -106,9 +106,6 @@ void get_video_input_output_params(vector<string> &input_video_files, vector<str
 		valid[i] = true;
 	}
 
-	// By default use world coordinate system
-	camera_coordinates_pose = false;
-
 	// By default use DIVX codec
 	output_codec = "DIVX";
 

@@ -165,10 +162,6 @@ void get_video_input_output_params(vector<string> &input_video_files, vector<str
 			valid[i+1] = false;
 			i++;
 		}
-		else if (arguments[i].compare("-camera_coord") == 0)
-		{
-			camera_coordinates_pose = true;
-		}
 		else if (arguments[i].compare("-oc") == 0)
 		{
 			if(arguments[i + 1].length() == 4)
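The parser walks the argument vector once, flips valid[] entries to false for every flag (and its value) it consumes, and the removed -camera_coord branch simply drops out of that else-if chain. A stripped-down, hedged sketch of the same pattern, keeping only the -oc codec flag and using std::vector<bool> in place of the raw bool array:

    #include <string>
    #include <vector>

    void parse_codec_flag(std::vector<std::string>& arguments, std::string& output_codec)
    {
        output_codec = "DIVX"; // default, as in the real parser
        std::vector<bool> valid(arguments.size(), true);

        for (size_t i = 0; i < arguments.size(); ++i)
        {
            if (arguments[i] == "-oc" && i + 1 < arguments.size()
                && arguments[i + 1].length() == 4) // FOURCC codes are 4 characters
            {
                output_codec = arguments[i + 1];
                valid[i] = false;     // mark the flag ...
                valid[i + 1] = false; // ... and its value as consumed
                ++i;
            }
        }

        // Keep only the arguments that were not consumed.
        std::vector<std::string> remaining;
        for (size_t i = 0; i < arguments.size(); ++i)
            if (valid[i])
                remaining.push_back(arguments[i]);
        arguments.swap(remaining);
    }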