Code cleanup with visualization.

Tadas Baltrusaitis 2017-11-14 07:54:02 +00:00
parent c7e13bce9e
commit 6500865a0b
6 changed files with 6 additions and 67 deletions

@@ -138,10 +138,6 @@ int main (int argc, char **argv)
 	// The actual facial landmark detection / tracking
 	bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
-	// Visualising the results
-	// Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
-	double detection_certainty = face_model.detection_certainty;
 	// Gaze tracking, absolute gaze direction
 	cv::Point3f gazeDirection0(0, 0, -1);
 	cv::Point3f gazeDirection1(0, 0, -1);
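
Note: the certainty value is no longer read here, but it remains available on the model for anyone who still wants an on-screen readout. A minimal caller-side sketch of such an overlay (the function name, text position and formatting are illustrative, not part of this commit):

#include <opencv2/imgproc.hpp>
#include <string>
// plus the OpenFace header that declares LandmarkDetector::CLNF (file name not shown in this diff)

// Hypothetical overlay of the tracking certainty that used to be read above.
void OverlayCertainty(cv::Mat& display_image, const LandmarkDetector::CLNF& face_model)
{
	double detection_certainty = face_model.detection_certainty;
	std::string text = "Certainty: " + std::to_string(detection_certainty);
	cv::putText(display_image, text, cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255, 0, 0), 1, cv::LINE_AA);
}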

@@ -66,8 +66,6 @@ public:
 	void AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online = false);
-	cv::Mat GetLatestHOGDescriptorVisualisation();
 	double GetCurrentTimeSeconds();
 	// Grab the current predictions about AUs from the face analyser
@@ -132,7 +130,6 @@ private:
 	// Cache of intermediate images
 	cv::Mat aligned_face_for_au;
 	cv::Mat aligned_face_for_output;
-	cv::Mat hog_descriptor_visualisation;
 	bool out_grayscale;
 	// Private members to be used for predictions

@@ -970,11 +970,6 @@ vector<pair<string, double>> FaceAnalyser::PredictCurrentAUsClass(int view)
 	return predictions;
 }
-cv::Mat FaceAnalyser::GetLatestHOGDescriptorVisualisation()
-{
-	return hog_descriptor_visualisation;
-}
 vector<pair<string, double>> FaceAnalyser::GetCurrentAUsClass() const
 {
 	return AU_predictions_class;

@@ -43,7 +43,6 @@ namespace GazeAnalysis
 {
 	void EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Point3f& gaze_absolute, float fx, float fy, float cx, float cy, bool left_eye);
-	void DrawGaze(cv::Mat img, const LandmarkDetector::CLNF& clnf_model, cv::Point3f gazeVecAxisLeft, cv::Point3f gazeVecAxisRight, float fx, float fy, float cx, float cy);
 	// Getting the gaze angle in radians with respect to the world coordinates (camera plane), when looking ahead straight at camera plane the gaze angle will be (0,0)
 	cv::Vec2d GetGazeAngle(cv::Point3f& gaze_vector_1, cv::Point3f& gaze_vector_2);
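
Note: with DrawGaze removed from this header, drawing is left to callers, but the remaining functions still give them everything needed. A minimal caller-side sketch, assuming face_model is a tracked LandmarkDetector::CLNF and fx, fy, cx, cy are the camera intrinsics from the calling code (the wrapper function itself is hypothetical):

#include <opencv2/core.hpp>
// plus the OpenFace header that declares GazeAnalysis::EstimateGaze and GetGazeAngle (file name not shown in this diff)

// Estimate gaze for both eyes and convert to a gaze angle; (0, 0) means looking straight at the camera plane.
cv::Vec2d ComputeGazeAngle(const LandmarkDetector::CLNF& face_model, float fx, float fy, float cx, float cy)
{
	cv::Point3f gazeDirection0(0, 0, -1); // left eye
	cv::Point3f gazeDirection1(0, 0, -1); // right eye
	GazeAnalysis::EstimateGaze(face_model, gazeDirection0, fx, fy, cx, cy, true);
	GazeAnalysis::EstimateGaze(face_model, gazeDirection1, fx, fy, cx, cy, false);
	return GazeAnalysis::GetGazeAngle(gazeDirection0, gazeDirection1);
}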

@@ -48,10 +48,6 @@ using namespace std;
 using namespace GazeAnalysis;
-// For subpixel accuracy drawing
-const int gaze_draw_shiftbits = 4;
-const int gaze_draw_multiplier = 1 << 4;
 cv::Point3f RaySphereIntersect(cv::Point3f rayOrigin, cv::Point3f rayDir, cv::Point3f sphereOrigin, float sphereRadius){
 	float dx = rayDir.x;
@@ -150,47 +146,3 @@ cv::Vec2d GazeAnalysis::GetGazeAngle(cv::Point3f& gaze_vector_1, cv::Point3f& ga
 	return cv::Vec2d(x_angle, y_angle);
 }
-void GazeAnalysis::DrawGaze(cv::Mat img, const LandmarkDetector::CLNF& clnf_model, cv::Point3f gazeVecAxisLeft, cv::Point3f gazeVecAxisRight, float fx, float fy, float cx, float cy)
-{
-	cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
-	int part_left = -1;
-	int part_right = -1;
-	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
-	{
-		if (clnf_model.hierarchical_model_names[i].compare("left_eye_28") == 0)
-		{
-			part_left = i;
-		}
-		if (clnf_model.hierarchical_model_names[i].compare("right_eye_28") == 0)
-		{
-			part_right = i;
-		}
-	}
-	cv::Mat eyeLdmks3d_left = clnf_model.hierarchical_models[part_left].GetShape(fx, fy, cx, cy);
-	cv::Point3f pupil_left = GetPupilPosition(eyeLdmks3d_left);
-	cv::Mat eyeLdmks3d_right = clnf_model.hierarchical_models[part_right].GetShape(fx, fy, cx, cy);
-	cv::Point3f pupil_right = GetPupilPosition(eyeLdmks3d_right);
-	vector<cv::Point3d> points_left;
-	points_left.push_back(cv::Point3d(pupil_left));
-	points_left.push_back(cv::Point3d(pupil_left + gazeVecAxisLeft*50.0));
-	vector<cv::Point3d> points_right;
-	points_right.push_back(cv::Point3d(pupil_right));
-	points_right.push_back(cv::Point3d(pupil_right + gazeVecAxisRight*50.0));
-	cv::Mat_<double> proj_points;
-	cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
-	Utilities::Project(proj_points, mesh_0, fx, fy, cx, cy);
-	cv::line(img, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)gaze_draw_multiplier)),
-		cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)gaze_draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, gaze_draw_shiftbits);
-	cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
-	Utilities::Project(proj_points, mesh_1, fx, fy, cx, cy);
-	cv::line(img, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)gaze_draw_multiplier)),
-		cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)gaze_draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, gaze_draw_shiftbits);
-}
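
Note: the block removed above is also where the sub-pixel drawing idiom built on gaze_draw_shiftbits and gaze_draw_multiplier was used: cv::line interprets point coordinates as fixed-point values with `shift` fractional bits, so they are pre-scaled by 1 << shift before rounding. A standalone sketch of that idiom (the function name and colour are illustrative only):

#include <opencv2/imgproc.hpp>

// Draw an anti-aliased line between two floating-point image positions.
// cv::line treats the integer coordinates as fixed-point numbers with `shift`
// fractional bits, which is why both endpoints are multiplied by 1 << shift.
void DrawSubpixelLine(cv::Mat& img, cv::Point2d p0, cv::Point2d p1)
{
	const int shift = 4;         // 4 fractional bits of precision
	const int mult = 1 << shift; // scale factor of 16
	cv::line(img,
		cv::Point(cvRound(p0.x * mult), cvRound(p0.y * mult)),
		cv::Point(cvRound(p1.x * mult), cvRound(p1.y * mult)),
		cv::Scalar(110, 220, 0), 2, cv::LINE_AA, shift);
}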

@@ -766,7 +766,7 @@ vector<cv::Point2d> CalculateVisibleLandmarks(const CLNF& clnf_model)
 	if (clnf_model.detection_success)
 	{
 		int idx = clnf_model.patch_experts.GetViewIdx(clnf_model.params_global, 0);
-		// Because we only draw visible points, need to find which points patch experts consider visible at a certain orientation
+		// Because we may want to draw visible points, need to find which points patch experts consider visible at a certain orientation
 		return CalculateVisibleLandmarks(clnf_model.detected_landmarks, clnf_model.patch_experts.visibilities[0][idx]);
 	}
 	else
@@ -775,12 +775,12 @@ vector<cv::Point2d> CalculateVisibleLandmarks(const CLNF& clnf_model)
 	}
 }
-// Computing eye landmarks (to be drawn later or in different interfaces)
+// Computing eye landmarks
 vector<cv::Point2d> CalculateVisibleEyeLandmarks(const CLNF& clnf_model)
 {
 	vector<cv::Point2d> to_return;
-	// If the model has hierarchical updates draw those too
 	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
 	{
@@ -803,7 +803,7 @@ vector<cv::Point3d> Calculate3DEyeLandmarks(const CLNF& clnf_model, double fx, d
 {
 	vector<cv::Point3d> to_return;
-	// If the model has hierarchical updates draw those too
 	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
 	{
@@ -824,12 +824,12 @@ vector<cv::Point3d> Calculate3DEyeLandmarks(const CLNF& clnf_model, double fx, d
 	}
 	return to_return;
 }
-// Computing eye landmarks (to be drawn later or in different interfaces)
+// Computing eye landmarks
 vector<cv::Point2d> CalculateAllEyeLandmarks(const CLNF& clnf_model)
 {
 	vector<cv::Point2d> to_return;
-	// If the model has hierarchical updates draw those too
 	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
 	{
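
Note: these helpers now only compute the eye landmarks; any overlay is up to the caller. A sketch of such an overlay, assuming the functions live in the LandmarkDetector namespace (as CLNF does) and that the calling code has a BGR display image (the function name and marker style are illustrative):

#include <opencv2/imgproc.hpp>
#include <vector>
// plus the OpenFace header that declares CalculateAllEyeLandmarks (file name not shown in this diff)

// Hypothetical caller-side overlay: mark every eye landmark on a display image.
void DrawEyeLandmarks(cv::Mat& display_image, const LandmarkDetector::CLNF& clnf_model)
{
	std::vector<cv::Point2d> eye_landmarks = LandmarkDetector::CalculateAllEyeLandmarks(clnf_model);
	for (const cv::Point2d& p : eye_landmarks)
	{
		// Small filled circle at each landmark position
		cv::circle(display_image, cv::Point(cvRound(p.x), cvRound(p.y)), 1, cv::Scalar(0, 0, 255), -1);
	}
}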