A lot of changes:

- New AU recognition models trained on additional datasets: Bosphorus, UNBC, FERA2011
- Cleaner and clearer separation of static and dynamic AU models
- AU training code cleaned up and instructions added
- Bug fixes in median feature computation
- AU prediction correction (smoothing and shifting) during post-processing
Tadas Baltrusaitis 2016-07-22 09:35:50 -04:00
parent 146dcd5e5b
commit 2128589309
384 changed files with 1273 additions and 606 deletions

.gitignore

@@ -28,3 +28,7 @@ exe/FeatureExtraction/out_bp4d/
x64/Debug/
matlab_runners/Action Unit Experiments/out_unbc/
matlab_runners/Action Unit Experiments/out_bosph/
matlab_runners/Action Unit Experiments/out_DISFA/
matlab_runners/Action Unit Experiments/out_fera/
matlab_runners/Demos/output_features_seq/
matlab_runners/Demos/output_features_vid/


@@ -324,12 +324,9 @@ int main (int argc, char **argv)
// Used for image masking
cv::Mat_<int> triangulation;//TODO rem?
string tri_loc;
if(boost::filesystem::exists(path("model/tris_68_full.txt")))
{
std::ifstream triangulation_file("model/tris_68_full.txt");
LandmarkDetector::ReadMat(triangulation_file, triangulation);
tri_loc = "model/tris_68_full.txt";
}
else
@@ -337,12 +334,7 @@ int main (int argc, char **argv)
path loc = path(arguments[0]).parent_path() / "model/tris_68_full.txt";
tri_loc = loc.string();
if(exists(loc))
{
std::ifstream triangulation_file(loc.string());
LandmarkDetector::ReadMat(triangulation_file, triangulation);
}
else
if(!exists(loc))
{
cout << "Can't find triangulation files, exiting" << endl;
return 0;
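
Taken together, the two hunks above drop the duplicated read inside each branch: the file location is resolved first (working directory, then relative to the executable), existence is checked once, and the triangulation is presumably read a single time from tri_loc further down. A sketch of the resulting flow, reusing the names and using-declarations visible in the diff (illustrative, not the exact surrounding file):

    // Resolve the triangulation file: try the working directory first,
    // then fall back to the directory of the executable (arguments[0]).
    string tri_loc;
    if (boost::filesystem::exists(path("model/tris_68_full.txt")))
    {
        tri_loc = "model/tris_68_full.txt";
    }
    else
    {
        path loc = path(arguments[0]).parent_path() / "model/tris_68_full.txt";
        if (!exists(loc))
        {
            cout << "Can't find triangulation files, exiting" << endl;
            return 0;
        }
        tri_loc = loc.string();
    }
    // Read the matrix once from the resolved location
    std::ifstream triangulation_file(tri_loc.c_str());
    LandmarkDetector::ReadMat(triangulation_file, triangulation);
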


@@ -16,3 +16,20 @@ svm_combined/AU_25_dynamic.dat AU25
svm_combined/AU_26_dynamic.dat AU26
svm_combined/AU_28_static.dat AU28
svm_combined/AU_45_dynamic.dat AU45
svr_combined/AU_1_dynamic_intensity_comb.dat AU01
svr_combined/AU_2_dynamic_intensity_comb.dat AU02
svr_combined/AU_4_static_intensity_comb.dat AU04
svr_combined/AU_5_dynamic_intensity.dat AU05
svr_combined/AU_6_static_intensity_comb.dat AU06
svr_combined/AU_7_static_intensity_comb.dat AU07
svr_combined/AU_9_dynamic_intensity.dat AU09
svr_combined/AU_10_static_intensity_comb.dat AU10
svr_combined/AU_12_static_intensity_comb.dat AU12
svr_combined/AU_14_static_intensity.dat AU14
svr_combined/AU_15_dynamic_intensity_comb.dat AU15
svr_combined/AU_17_dynamic_intensity_comb.dat AU17
svr_combined/AU_20_dynamic_intensity.dat AU20
svr_combined/AU_23_dynamic_intensity_comb.dat AU23
svr_combined/AU_25_dynamic_intensity_comb.dat AU25
svr_combined/AU_26_dynamic_intensity_comb.dat AU26
svr_combined/AU_45_dynamic_intensity_comb.dat AU45
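
The files above are plain two-column lists: a model file path followed by the AU that model predicts. A minimal parser for this layout might look as follows (hypothetical helper, not the loader OpenFace actually uses; it only assumes whitespace-separated columns):

    #include <fstream>
    #include <string>
    #include <utility>
    #include <vector>

    // Read "model_path AU_name" pairs, one per line, e.g.
    // "svr_combined/AU_1_dynamic_intensity_comb.dat AU01".
    std::vector<std::pair<std::string, std::string>> ReadAUModelList(const std::string& list_file)
    {
        std::vector<std::pair<std::string, std::string>> models;
        std::ifstream in(list_file);
        std::string model_path, au_name;
        while (in >> model_path >> au_name)
        {
            models.emplace_back(model_path, au_name);
        }
        return models;
    }
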


@@ -16,7 +16,7 @@ svm_combined/AU_25_static.dat AU25
svm_combined/AU_26_static.dat AU26
svm_combined/AU_28_static.dat AU28
svm_combined/AU_45_static.dat AU45
svr_combined/AU_1_static_intensity_comb.dat AU01
svr_combined/AU_1_static_intensity.dat AU01
svr_combined/AU_2_static_intensity_comb.dat AU02
svr_combined/AU_4_static_intensity_comb.dat AU04
svr_combined/AU_5_static_intensity.dat AU05


@@ -124,7 +124,7 @@ public:
// Identify if models are static or dynamic (useful for correction and shifting)
std::vector<bool> GetDynamicAUClass() const; // Presence
std::vector<bool> GetDynamicAUReg() const; // Intensity
std::vector<std::pair<string, bool>> FaceAnalyser::GetDynamicAUReg() const; // Intensity
void ExtractAllPredictionsOfflineReg(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps, bool dynamic);


@@ -89,6 +89,11 @@ public:
return AU_names;
}
std::vector<double> GetCutoffs() const
{
return cutoffs;
}
private:
// The names of Action Units this model is responsible for
@@ -101,6 +106,9 @@ private:
cv::Mat_<double> support_vectors;
cv::Mat_<double> biases;
// For AU calibration (see the OpenFace paper)
std::vector<double> cutoffs;
};
//===========================================================================
}


@@ -92,12 +92,11 @@ FaceAnalyser::FaceAnalyser(vector<cv::Vec3d> orientation_bins, double scale, int
align_height = height;
// Initialise the histograms that will represent bins from 0 - 1 (as HoG values are only stored as those)
// Set the number of bins for the histograms
num_bins_hog = 600;
num_bins_hog = 1000;
max_val_hog = 1;
min_val_hog = 0;
min_val_hog = -0.005;
// The geometry histogram ranges from -3 to 3
// The geometry histogram ranges from -60 to 60
num_bins_geom = 10000;
max_val_geom = 60;
min_val_geom = -60;
@@ -186,19 +185,19 @@ std::vector<bool> FaceAnalyser::GetDynamicAUClass() const
return au_dynamic_class;
}
std::vector<bool> FaceAnalyser::GetDynamicAUReg() const
std::vector<std::pair<string, bool>> FaceAnalyser::GetDynamicAUReg() const
{
std::vector<bool> au_dynamic_reg;
std::vector<std::pair<string, bool>> au_dynamic_reg;
std::vector<std::string> au_reg_names_stat = AU_SVR_static_appearance_lin_regressors.GetAUNames();
std::vector<std::string> au_reg_names_dyn = AU_SVR_dynamic_appearance_lin_regressors.GetAUNames();
for (size_t i = 0; i < au_reg_names_stat.size(); ++i)
{
au_dynamic_reg.push_back(false);
au_dynamic_reg.push_back(std::pair<string, bool>(au_reg_names_stat[i], false));
}
for (size_t i = 0; i < au_reg_names_dyn.size(); ++i)
{
au_dynamic_reg.push_back(true);
au_dynamic_reg.push_back(std::pair<string, bool>(au_reg_names_dyn[i], true));
}
return au_dynamic_reg;
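
Returning (AU name, is-dynamic) pairs instead of bare flags lets callers build a lookup without relying on ordering. A short usage sketch, assuming an existing FaceAnalyser instance named face_analyser (hypothetical caller code):

    #include <map>
    #include <string>

    // Map each intensity regressor's AU name to whether its model is dynamic
    // (i.e. normalised against the running person-specific median).
    std::map<std::string, bool> dynamic_reg;
    for (const auto& au : face_analyser.GetDynamicAUReg())
    {
        dynamic_reg[au.first] = au.second;
    }

    // Example query: should AU12's intensity prediction be shifted offline?
    bool shift_au12 = dynamic_reg.count("AU12") > 0 && dynamic_reg["AU12"];
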
@@ -396,11 +395,23 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
bool update_median = true;
// TODO test if this would be useful or not
//if(!this->AU_predictions.empty())
//if(!this->AU_predictions_reg.empty())
//{
// for(size_t i = 0; i < this->AU_predictions.size(); ++i)
// vector<pair<string, bool>> dyns = this->GetDynamicAUReg();
// for(size_t i = 0; i < this->AU_predictions_reg.size(); ++i)
// {
// if(this->AU_predictions[i].second > 1)
// bool stat = false;
// for (size_t n = 0; n < dyns.size(); ++n)
// {
// if (dyns[n].first.compare(AU_predictions_reg[i].first) == 0)
// {
// stat = !dyns[i].second;
// }
// }
// // If static predictor above 1.5 assume it's not a neutral face
// if(this->AU_predictions_reg[i].second > 1.5 && stat)
// {
// update_median = false;
// break;
@@ -417,7 +428,9 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
if(frames_tracking % 2 == 1)
{
UpdateRunningMedian(this->hog_desc_hist[orientation_to_use], this->hog_hist_sum[orientation_to_use], this->hog_desc_median, hog_descriptor, update_median, this->num_bins_hog, this->min_val_hog, this->max_val_hog);
this->hog_desc_median.setTo(0, this->hog_desc_median < 0);
}
// Geom descriptor and its median
geom_descriptor_frame = clnf_model.params_local.t();
@@ -626,6 +639,7 @@ void FaceAnalyser::PostprocessPredictions()
{
// Find the appropriate AU (if not found add it)
AU_predictions_reg_all_hist[AU_predictions_reg[au].first][all_ind] = AU_predictions_reg[au].second;
}
auto AU_predictions_class = PredictCurrentAUsClass(views[success_ind]);
@@ -660,6 +674,9 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
confidences = this->confidences;
successes = this->valid_preds;
vector<string> dyn_au_names = AU_SVR_dynamic_appearance_lin_regressors.GetAUNames();
// Allow these AUs to be person calibrated based on the expected number of neutral frames (learned from the data)
for(auto au_iter = AU_predictions_reg_all_hist.begin(); au_iter != AU_predictions_reg_all_hist.end(); ++au_iter)
{
vector<double> au_good;
@@ -685,8 +702,30 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
else
{
std::sort(au_good.begin(), au_good.end());
offsets.push_back(au_good.at((int)au_good.size() / 4));
// If it is a dynamic AU regressor we can also do some prediction shifting to make it more accurate
// The shifting proportion is learned and is called the cutoff
// Find the current id of the AU and the corresponding cutoff
int au_id = -1;
for (int a = 0; a < dyn_au_names.size(); ++a)
{
if (au_name.compare(dyn_au_names[a]) == 0)
{
au_id = a;
}
}
if (au_id != -1 && AU_SVR_dynamic_appearance_lin_regressors.GetCutoffs()[au_id] != -1)
{
double cutoff = AU_SVR_dynamic_appearance_lin_regressors.GetCutoffs()[au_id];
offsets.push_back(au_good.at((int)au_good.size() * cutoff));
}
else
{
offsets.push_back(0);
}
}
aus_valid.push_back(au_good);
}
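
The shifting step above boils down to: sort the valid predictions for an AU, pick the value at the learned cutoff quantile (the expected proportion of neutral frames), and treat it as the person-specific baseline; AUs without a learned cutoff (cutoff == -1) get no shift, and the offset is presumably subtracted from that AU's predictions outside this hunk. A condensed sketch of just that selection (hypothetical helper mirroring the loop above):

    #include <algorithm>
    #include <vector>

    // Person-specific baseline for one AU: the prediction value at the learned
    // cutoff quantile, or 0 when no cutoff was learned (cutoff == -1).
    double ComputeAUOffset(std::vector<double> au_vals, double cutoff)
    {
        if (au_vals.empty() || cutoff < 0)
        {
            return 0.0;
        }
        std::sort(au_vals.begin(), au_vals.end());
        size_t idx = (size_t)(au_vals.size() * cutoff);
        if (idx >= au_vals.size())
        {
            idx = au_vals.size() - 1;   // guard against cutoff == 1.0
        }
        return au_vals.at(idx);
    }
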
@@ -728,7 +767,7 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
for (size_t i = (window_size - 1) / 2; i < au_iter->second.size() - (window_size - 1) / 2; ++i)
{
double sum = 0;
for (int w = -(window_size - 1) / 2; w < (window_size - 1) / 2; ++w)
for (int w = -(window_size - 1) / 2; w <= (window_size - 1) / 2; ++w)
{
sum += au_vals_tmp[i + w];
}
@@ -739,7 +778,6 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
}
}
void FaceAnalyser::ExtractAllPredictionsOfflineClass(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps, bool dynamic)
@@ -763,7 +801,7 @@ void FaceAnalyser::ExtractAllPredictionsOfflineClass(vector<std::pair<std::strin
for (size_t i = (window_size - 1)/2; i < au_vals.size() - (window_size - 1) / 2; ++i)
{
double sum = 0;
for (int w = -(window_size - 1) / 2; w < (window_size - 1) / 2; ++w)
for (int w = -(window_size - 1) / 2; w <= (window_size - 1) / 2; ++w)
{
sum += au_vals_tmp[i + w];
}
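
Both smoothing loops are a centred moving average; the boundary fix (w <= instead of w <) is what makes the window actually cover window_size samples rather than window_size - 1. A standalone sketch of the same filter, assuming an odd window_size (illustrative helper, not the OpenFace code):

    #include <vector>

    // Centred moving average with an odd window; borders are left untouched,
    // matching the loops above which start at (window_size - 1) / 2.
    std::vector<double> SmoothPredictions(const std::vector<double>& vals, int window_size)
    {
        std::vector<double> smoothed = vals;
        int half = (window_size - 1) / 2;
        for (int i = half; i + half < (int)vals.size(); ++i)
        {
            double sum = 0;
            for (int w = -half; w <= half; ++w)   // inclusive bound -> exactly window_size samples
            {
                sum += vals[i + w];
            }
            smoothed[i] = sum / window_size;
        }
        return smoothed;
    }
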
@@ -856,7 +894,6 @@ void FaceAnalyser::UpdateRunningMedian(cv::Mat_<unsigned int>& histogram, int& h
converted_descriptor.setTo(cv::Scalar(num_bins-1), converted_descriptor > num_bins - 1);
converted_descriptor.setTo(cv::Scalar(0), converted_descriptor < 0);
// Only count the median till a certain number of frame seen?
for(int i = 0; i < histogram.rows; ++i)
{
int index = (int)converted_descriptor.at<double>(i);
@@ -883,9 +920,9 @@ void FaceAnalyser::UpdateRunningMedian(cv::Mat_<unsigned int>& histogram, int& h
for(int j = 0; j < histogram.cols; ++j)
{
cummulative_sum += histogram.at<unsigned int>(i, j);
if(cummulative_sum > cutoff_point)
if(cummulative_sum >= cutoff_point)
{
median.at<double>(i) = min_val + j * (length/num_bins) + (0.5*(length)/num_bins);
median.at<double>(i) = min_val + ((double)j) * (length/((double)num_bins)) + (0.5*(length)/ ((double)num_bins));
break;
}
}
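
The running median here is approximated per descriptor dimension with a fixed-range histogram: each new value is quantised into a bin (clamped to the valid range), and the median is read back as the centre of the first bin whose cumulative count reaches the cutoff (roughly half of the samples seen so far). A single-dimension sketch of the same idea (hypothetical helper; the real code keeps one histogram row per dimension and tracks counts externally):

    #include <vector>

    // Approximate running median over a bounded value range using a histogram.
    struct HistogramMedian
    {
        double min_val, max_val;
        std::vector<unsigned int> bins;
        unsigned int count = 0;

        HistogramMedian(int num_bins, double min_v, double max_v)
            : min_val(min_v), max_val(max_v), bins(num_bins, 0) {}

        void Add(double value)
        {
            double length = max_val - min_val;
            int bin = (int)((value - min_val) / length * bins.size());
            if (bin < 0) bin = 0;                                    // clamp, as in the diff
            if (bin >= (int)bins.size()) bin = (int)bins.size() - 1;
            ++bins[bin];
            ++count;
        }

        double Median() const
        {
            if (count == 0) return min_val;
            double length = max_val - min_val;
            unsigned int cutoff = (count + 1) / 2;                   // rank of the median sample
            unsigned int cumulative = 0;
            for (size_t j = 0; j < bins.size(); ++j)
            {
                cumulative += bins[j];
                if (cumulative >= cutoff)
                {
                    // centre of bin j approximates the median value
                    return min_val + j * (length / bins.size()) + 0.5 * length / bins.size();
                }
            }
            return max_val;
        }
    };
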
@@ -953,7 +990,7 @@ vector<pair<string, double>> FaceAnalyser::PredictCurrentAUs(int view)
vector<string> svr_lin_dyn_aus;
vector<double> svr_lin_dyn_preds;
AU_SVR_dynamic_appearance_lin_regressors.Predict(svr_lin_dyn_preds, svr_lin_dyn_aus, hog_desc_frame, geom_descriptor_frame, this->hog_desc_median, this->geom_descriptor_frame);
AU_SVR_dynamic_appearance_lin_regressors.Predict(svr_lin_dyn_preds, svr_lin_dyn_aus, hog_desc_frame, geom_descriptor_frame, this->hog_desc_median, this->geom_descriptor_median);
for(size_t i = 0; i < svr_lin_dyn_preds.size(); ++i)
{
@@ -996,7 +1033,6 @@ vector<pair<string, double>> FaceAnalyser::CorrectOnlineAUs(std::vector<std::pai
for(size_t i = 0; i < predictions.size(); ++i)
{
// First establish presence (assume it is maximum as we have not seen max)
// TODO this could be more robust by removing some outliers, or by doing it only for certain AUs?
if(predictions[i].second > 1)
{
double scaling_curr = 5.0 / predictions[i].second;


@@ -249,7 +249,7 @@ namespace FaceAnalysis
for(size_t i = 0; i < aligned_face_channels.size(); ++i)
{
aligned_face_channels[i] = aligned_face_channels[i].mul(paw.pixel_mask);
cv::multiply(aligned_face_channels[i], paw.pixel_mask, aligned_face_channels[i], 1.0, CV_8U);
}
if(aligned_face.channels() == 3)
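
Compared with Mat::mul, cv::multiply takes an explicit output depth (CV_8U here) and a scale factor, which makes the intended result type explicit rather than inherited from the operands. A small self-contained example of masking with this call (illustrative only, not the OpenFace data):

    #include <opencv2/core.hpp>

    int main()
    {
        // 4x4 single-channel image and a binary mask (0 or 1 per pixel)
        cv::Mat image = cv::Mat::ones(4, 4, CV_8UC1) * 200;
        cv::Mat mask = cv::Mat::zeros(4, 4, CV_8UC1);
        mask(cv::Rect(0, 0, 2, 2)).setTo(1);

        // Element-wise product with explicit output depth, as in the hunk above
        cv::multiply(image, mask, image, 1.0, CV_8U);
        // image is now 200 inside the top-left 2x2 block and 0 elsewhere
        return 0;
    }
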


@@ -65,6 +65,12 @@ using namespace FaceAnalysis;
void SVR_dynamic_lin_regressors::Read(std::ifstream& stream, const std::vector<std::string>& au_names)
{
// For person specific calibration in a video
double cutoff;
stream.read((char*)&cutoff, 8);
cutoffs.push_back(cutoff);
// The feature normalization using the mean
if(this->means.empty())
{
LandmarkDetector::ReadMatBin(stream, this->means);
@@ -115,7 +121,6 @@ void SVR_dynamic_lin_regressors::Predict(std::vector<double>& predictions, std::
{
if(AU_names.size() > 0)
{
cv::Mat_<double> preds;
if(fhog_descriptor.cols == this->means.cols)
{
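
The Read() hunk above prepends one raw double per regressor, the calibration cutoff, to the binary model format (with -1 apparently meaning "no cutoff learned", judging by the check in ExtractAllPredictionsOfflineReg). A minimal equivalent of that read, using sizeof(double) rather than the literal 8 (hypothetical helper):

    #include <fstream>

    // Read one raw double (the calibration cutoff) from a binary model stream.
    double ReadCutoff(std::ifstream& stream)
    {
        double cutoff = -1;                                        // "no cutoff" sentinel
        stream.read(reinterpret_cast<char*>(&cutoff), sizeof(double));
        return cutoff;
    }
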

View file

@@ -632,12 +632,6 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &
// Do the actual landmark detection
hierarchical_models[part_model].DetectLandmarks(image, depth, hierarchical_params[part_model]);
// Reincorporate the models into main tracker
for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
{
detected_landmarks.at<double>(mappings[mapping_ind].first) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second);
detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
}
}
else
{
@@ -650,9 +644,28 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &
// Recompute main model based on the fit part models
if(parts_used)
{
for (int part_model = 0; part_model < hierarchical_models.size(); ++part_model)
{
vector<pair<int, int>> mappings = this->hierarchical_mapping[part_model];
if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
hierarchical_model_names[part_model].compare("left_eye_28") == 0)
&& !params.track_gaze))
{
// Reincorporate the models into main tracker
for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
{
detected_landmarks.at<double>(mappings[mapping_ind].first) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second);
detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
}
}
}
pdm.CalcParams(params_global, params_local, detected_landmarks);
pdm.CalcShape2D(detected_landmarks, params_local, params_global);
}
}
// Check detection correctness
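
The double negation in the condition above reads more easily when split out: the eye part models (right_eye_28 / left_eye_28) are folded back into the main shape only when gaze tracking is enabled, while every other part model is always folded back. An equivalent restatement of just that guard (names as in the diff, logic only):

    // Equivalent form of the guard in the hunk above
    const std::string& name = hierarchical_model_names[part_model];
    bool is_eye_model = (name.compare("right_eye_28") == 0) || (name.compare("left_eye_28") == 0);
    if (!is_eye_model || params.track_gaze)
    {
        // reincorporate this part model's landmarks into the main tracker
    }
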


@@ -384,7 +384,7 @@ void get_image_input_output_params(vector<string> &input_image_files, vector<str
path image_loc(input_image_files[i]);
path fname = image_loc.filename();
fname = fname.replace_extension("jpg");
fname = fname.replace_extension("bmp");
output_image_files.push_back(out_img_dir + "/" + fname.string());
}


@@ -1,11 +0,0 @@
AU1 class, Precision - 0.470, Recall - 0.527, F1 - 0.497
AU2 class, Precision - 0.371, Recall - 0.376, F1 - 0.373
AU4 class, Precision - 0.422, Recall - 0.570, F1 - 0.485
AU6 class, Precision - 0.845, Recall - 0.698, F1 - 0.765
AU7 class, Precision - 0.719, Recall - 0.766, F1 - 0.742
AU10 class, Precision - 0.811, Recall - 0.801, F1 - 0.806
AU12 class, Precision - 0.902, Recall - 0.780, F1 - 0.837
AU14 class, Precision - 0.513, Recall - 0.874, F1 - 0.647
AU15 class, Precision - 0.406, Recall - 0.431, F1 - 0.418
AU17 class, Precision - 0.638, Recall - 0.615, F1 - 0.626
AU23 class, Precision - 0.357, Recall - 0.507, F1 - 0.419

Some files were not shown because too many files have changed in this diff.