Simplified validator model.

Tadas Baltrusaitis 2017-07-31 15:55:29 -04:00
parent 7513cf7964
commit 8eb086545c
6 changed files with 31 additions and 96 deletions

@@ -124,7 +124,6 @@ public:
// Keeping track of how many frames the tracker has failed in so far when tracking in videos
// This is useful for knowing when to initialise and reinitialise tracking
int failures_in_a_row;
int success_in_a_row;
// A template of a face that last succeeded with tracking (useful for large motions in video)
cv::Mat_<uchar> face_template;
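
For context, face_template caches the face patch from the last frame where tracking succeeded, so the fit can be re-seeded after large motion. A minimal sketch of how such a cached patch could be re-located in a new frame with stock OpenCV (a hypothetical RelocateFace helper, not the project's own template-update or redetection code):

// Hypothetical sketch: re-locate the cached face patch in a new frame.
// Uses stock OpenCV normalised cross-correlation, not the project's code.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

cv::Rect RelocateFace(const cv::Mat_<uchar>& frame, const cv::Mat_<uchar>& face_template)
{
	cv::Mat response;
	// Normalised cross-correlation is fairly robust to brightness changes between frames
	cv::matchTemplate(frame, face_template, response, cv::TM_CCOEFF_NORMED);

	double best_score = 0; cv::Point best_loc;
	cv::minMaxLoc(response, nullptr, &best_score, nullptr, &best_loc);

	// The best-matching window gives a coarse region for re-initialising the landmark fit
	return cv::Rect(best_loc.x, best_loc.y, face_template.cols, face_template.rows);
}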

@@ -887,8 +887,7 @@ double DetectionValidator::CheckCNN_tbb(const cv::Mat_<double>& warped_img, int
outputs[0] = outputs[0] + output;
}
if(cnn_convolutional_layers[view_id][cnn_layer][0].size() > 20)
{
// TBB pass for the remaining kernels, empirically helps with layers with more kernels
tbb::parallel_for(1, (int)cnn_convolutional_layers[view_id][cnn_layer][in].size(), [&](int k) {
{
@@ -923,42 +922,7 @@ double DetectionValidator::CheckCNN_tbb(const cv::Mat_<double>& warped_img, int
}
}
});
}
else
{
for (size_t k = 1; k < cnn_convolutional_layers[view_id][cnn_layer][in].size(); ++k)
{
cv::Mat_<float> kernel = cnn_convolutional_layers[view_id][cnn_layer][in][k];
// The convolution (with precomputation)
cv::Mat_<float> output;
if (cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second.empty()) // This will only be needed during the first pass
{
std::map<int, cv::Mat_<double> > precomputed_dft;
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first = precomputed_dft.begin()->first;
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second = precomputed_dft.begin()->second;
}
else
{
std::map<int, cv::Mat_<double> > precomputed_dft;
precomputed_dft[cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first] = cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second;
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
}
// Combining the maps
if (in == 0)
{
outputs[k] = output;
}
else
{
outputs[k] = outputs[k] + output;
}
}
}
}
for (size_t k = 0; k < cnn_convolutional_layers[view_id][cnn_layer][0].size(); ++k)
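
For reference, each convolutional unit above is evaluated as a cross-correlation of a per-channel kernel with the warped input, and the per-kernel response maps are summed over the input channels (the outputs[k] accumulation). A minimal sketch of that accumulation using stock cv::matchTemplate, assuming single-channel float maps and leaving out matchTemplate_m's DFT caching and the TBB split:

// Sketch of the per-kernel cross-correlation and channel accumulation,
// using stock cv::matchTemplate instead of the cached matchTemplate_m.
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

std::vector<cv::Mat_<float> > ConvolveLayer(
	const std::vector<cv::Mat_<float> >& input_maps,            // one map per input channel
	const std::vector<std::vector<cv::Mat_<float> > >& kernels) // kernels[in][k]
{
	std::vector<cv::Mat_<float> > outputs(kernels[0].size());
	for (size_t in = 0; in < input_maps.size(); ++in)
	{
		for (size_t k = 0; k < kernels[in].size(); ++k)
		{
			cv::Mat response;
			// Valid-region cross-correlation of one kernel with one input map
			cv::matchTemplate(input_maps[in], kernels[in][k], response, cv::TM_CCORR);

			// Combine the maps across input channels, as in the loop above
			if (in == 0)
				outputs[k] = response;
			else
				outputs[k] = outputs[k] + response;
		}
	}
	return outputs;
}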

@@ -288,36 +288,17 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
CorrectGlobalParametersVideo(grayscale_image, clnf_model, params);
}
// If we are performing face validation, do it every 3 frames due to performance
bool reset_to_true = false;
double old_certainty = 0;
if (params.validate_detections == true && clnf_model.success_in_a_row % 3 != 0)
{
params.validate_detections = false;
reset_to_true = true;
old_certainty = clnf_model.detection_certainty;
}
bool track_success = clnf_model.DetectLandmarks(grayscale_image, depth_image, params);
if (reset_to_true)
{
params.validate_detections = true;
clnf_model.detection_certainty = old_certainty;
}
if(!track_success)
{
// Make a record that tracking failed
clnf_model.failures_in_a_row++;
clnf_model.success_in_a_row = 0;
}
else
{
// indicate that tracking is a success
clnf_model.failures_in_a_row = -1;
clnf_model.success_in_a_row++;
UpdateTemplate(grayscale_image, clnf_model);
}
}
@@ -398,7 +379,6 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
else
{
clnf_model.failures_in_a_row = -1;
clnf_model.success_in_a_row++;
UpdateTemplate(grayscale_image, clnf_model);
return true;
}
@@ -409,14 +389,12 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
if(!clnf_model.tracking_initialised)
{
clnf_model.failures_in_a_row++;
clnf_model.success_in_a_row = 0;
}
// un-initialise the tracking
if( clnf_model.failures_in_a_row > 100)
{
clnf_model.tracking_initialised = false;
clnf_model.success_in_a_row = 0;
}
return clnf_model.detection_success;
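
With success_in_a_row gone, the per-frame bookkeeping above reduces to the failure counter: it is reset to -1 on a successful track, incremented on a failure, and tracking is dropped once it exceeds 100. A minimal sketch of that control flow (hypothetical TrackerState helper, not the CLNF class itself):

// Hypothetical sketch of the simplified per-frame bookkeeping: only the
// failure counter remains after this commit.
struct TrackerState
{
	bool tracking_initialised = false;
	int failures_in_a_row = -1;
};

void UpdateTrackingState(TrackerState& state, bool track_success)
{
	if (track_success)
	{
		// A successful track resets the counter (-1 marks "currently tracking")
		state.failures_in_a_row = -1;
	}
	else
	{
		// Record another failed frame
		state.failures_in_a_row++;
	}

	// After too many consecutive failures, drop the track and wait for re-detection
	if (state.failures_in_a_row > 100)
	{
		state.tracking_initialised = false;
	}
}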

@@ -76,7 +76,6 @@ CLNF::CLNF(const CLNF& other): pdm(other.pdm), params_local(other.params_local.c
this->detection_certainty = other.detection_certainty;
this->model_likelihood = other.model_likelihood;
this->failures_in_a_row = other.failures_in_a_row;
this->success_in_a_row = other.success_in_a_row;
// Load the CascadeClassifier (as it does not have a proper copy constructor)
if(!face_detector_location.empty())
@@ -122,7 +121,6 @@ CLNF & CLNF::operator= (const CLNF& other)
this->detection_certainty = other.detection_certainty;
this->model_likelihood = other.model_likelihood;
this->failures_in_a_row = other.failures_in_a_row;
this->success_in_a_row = other.success_in_a_row;
this->eye_model = other.eye_model;
@@ -166,7 +164,6 @@ CLNF::CLNF(const CLNF&& other)
this->detection_certainty = other.detection_certainty;
this->model_likelihood = other.model_likelihood;
this->failures_in_a_row = other.failures_in_a_row;
this->success_in_a_row = other.success_in_a_row;
pdm = other.pdm;
params_local = other.params_local;
@@ -202,7 +199,6 @@ CLNF & CLNF::operator= (const CLNF&& other)
this->detection_certainty = other.detection_certainty;
this->model_likelihood = other.model_likelihood;
this->failures_in_a_row = other.failures_in_a_row;
this->success_in_a_row = other.success_in_a_row;
pdm = other.pdm;
params_local = other.params_local;
@@ -531,7 +527,6 @@ void CLNF::Read(string main_location)
params_global = cv::Vec6d(1, 0, 0, 0, 0, 0);
failures_in_a_row = -1;
success_in_a_row = 0;
}
@@ -552,7 +547,6 @@ void CLNF::Reset()
params_global = cv::Vec6d(1, 0, 0, 0, 0, 0);
failures_in_a_row = -1;
success_in_a_row = 0;
face_template = cv::Mat_<uchar>();
}

@@ -1,4 +1,4 @@
 Dataset and model, pitch, yaw, roll, mean, median
-biwi error: 7.955, 5.583, 4.402, 5.980, 2.624
-bu error: 2.762, 4.103, 2.568, 3.145, 2.118
-ict error: 3.620, 3.608, 3.626, 3.618, 2.028
+biwi error: 6.870, 5.338, 4.482, 5.563, 2.632
+bu error: 2.785, 4.117, 2.571, 3.158, 2.119
+ict error: 3.481, 3.641, 3.581, 3.568, 2.036