Dealing better with non-tracked frames.

Tadas Baltrusaitis 2017-11-25 10:12:34 +00:00
parent c5749cc3ba
commit ea0f658190
2 changed files with 32 additions and 26 deletions


@@ -162,7 +162,7 @@ int main (int argc, char **argv)
 bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
 // Gaze tracking, absolute gaze direction
-cv::Point3f gazeDirection0(0, 0, -1); cv::Point3f gazeDirection1(0, 0, -1); cv::Vec2d gazeAngle(0, 0);
+cv::Point3f gazeDirection0(0, 0, 0); cv::Point3f gazeDirection1(0, 0, 0); cv::Vec2d gazeAngle(0, 0);
 if (detection_success && face_model.eye_model)
 {
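
With the defaults switched from (0, 0, -1) to (0, 0, 0), frames where landmark detection or the eye model fails now carry an all-zero gaze vector instead of a plausible-looking "straight at the camera" direction. A minimal consumer-side sketch of how the zero vector can serve as a "no estimate" sentinel; HasGazeEstimate is a hypothetical helper for illustration, not an OpenFace API:

#include <opencv2/core.hpp>

// Hypothetical helper (illustration only): with the new (0, 0, 0) default,
// downstream code can tell frames with no gaze estimate apart from frames
// with a real (roughly unit-length) gaze direction.
static bool HasGazeEstimate(const cv::Point3f& gaze_direction)
{
	// The all-zero sentinel marks frames where detection or the eye model failed.
	return gaze_direction.dot(gaze_direction) > 1e-12f;
}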


@@ -1081,7 +1081,13 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 {
 int n = this->detected_landmarks.rows/2;
-cv::Mat_<double> shape3d(n*3, 1);
+cv::Mat_<double> outShape(n, 3, 0.0);
+// If the tracking started (otherwise no point reporting 3D shape)
+if(this->tracking_initialised)
+{
+cv::Mat_<double> shape3d(n * 3, 1);
 this->pdm.CalcShape3D(shape3d, this->params_local);
@@ -1097,7 +1103,6 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 // from the weak perspective model can determine the average depth of the object
 double Zavg = fx / params_global[0];
-cv::Mat_<double> outShape(n,3,0.0);
 // this is described in the paper in section 3.4 (equation 10) (of the CLM-Z paper)
 for(int i = 0; i < n; i++)
@@ -1112,6 +1117,7 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 outShape.at<double>(i,2) = (double)Z;
 }
+}
 // The format is 3 rows - n cols
 return outShape.t();
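
For context on the code the new tracking_initialised guard wraps (section 3.4, equation 10 of the CLM-Z paper): the weak-perspective scale stored in params_global[0] implies an average object depth Zavg = fx / s, and each landmark is then back-projected through the pinhole model at its own depth Z = Zavg + z_i. When tracking has not been initialised, outShape stays zero-filled and a 3 x n matrix of zeros is returned. A minimal standalone sketch of that back-projection, with illustrative names and input layout (this is not the OpenFace implementation):

#include <opencv2/core.hpp>
#include <vector>

// Sketch of weak-perspective back-projection: given 2D landmarks, camera
// intrinsics (fx, fy, cx, cy), the weak-perspective scale s and per-landmark
// depth offsets from the 3D model shape, recover metric 3D positions.
std::vector<cv::Point3d> BackProjectWeakPerspective(
	const std::vector<cv::Point2d>& landmarks_2d,   // detected 2D landmarks
	const std::vector<double>& depth_offsets,       // z of the 3D model shape, per landmark
	double scale,                                   // weak-perspective scale s
	double fx, double fy, double cx, double cy)
{
	// Average depth of the object implied by the weak-perspective model.
	const double Zavg = fx / scale;

	std::vector<cv::Point3d> shape_3d(landmarks_2d.size());
	for (size_t i = 0; i < landmarks_2d.size(); ++i)
	{
		// Per-landmark depth: average depth plus the model's depth offset.
		const double Z = Zavg + depth_offsets[i];
		// Invert the pinhole projection at that depth.
		const double X = Z * (landmarks_2d[i].x - cx) / fx;
		const double Y = Z * (landmarks_2d[i].y - cy) / fy;
		shape_3d[i] = cv::Point3d(X, Y, Z);
	}
	return shape_3d;
}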