- Fixing gaze not being tracked properly in video and landmark modes.
- Fixing the simscale/simalign bug.
parent a3e66319b5
commit 52c50b4ff3
10 changed files with 226 additions and 256 deletions
.gitignore (vendored): 3 changes

@@ -44,3 +44,6 @@ exe/Recording/Debug/
 lib/3rdParty/dlib/Debug/
 lib/local/FaceAnalyser/Debug/
 lib/local/LandmarkDetector/Debug/
+matlab_runners/Head Pose Experiments/experiments/biwi_out/
+matlab_runners/Head Pose Experiments/experiments/bu_out/
+matlab_runners/Head Pose Experiments/experiments/ict_out/

@@ -75,4 +75,5 @@ script:
 - ../build/bin/FaceLandmarkImg -inroot ../videos -f Obama.jpg -outroot data -of obama.txt -op obama.3d -oi obama.bmp -multi_view 1 -wild -q
 - ../build/bin/FaceLandmarkVidMulti -inroot ../videos -f multi_face.avi -outroot output -ov multi_face.avi -q
 - ../build/bin/FeatureExtraction -f "../videos/1815_01_008_tony_blair.avi" -outroot output_features -ov blair.avi -of "1815_01_008_tony_blair.txt" -simalign aligned -ov feat_test.avi -hogalign hog_test.dat -q
+- ../build/bin/FeatureExtraction -f "../videos/1815_01_008_tony_blair.avi" -outroot output_features -simsize 200 -simscale 0.5 -ov blair.avi -of "1815_01_008_tony_blair.txt" -simalign aligned -ov feat_test.avi -hogalign hog_test.dat -q
 - ../build/bin/FaceLandmarkVid -inroot ../videos -f 1815_01_008_tony_blair.avi -f 0188_03_021_al_pacino.avi -f 0217_03_006_alanis_morissette.avi -outroot output_data -ov 1.avi -ov 2.avi -ov 3.avi -q

@@ -25,4 +25,5 @@ test_script:
 - cmd: if exist "../videos" (FaceLandmarkImg.exe -inroot ../videos -f obama.jpg -outroot out_data -of obama.pts -op obama.3d -oi obama.bmp -q) else (FaceLandmarkImg.exe -inroot ../../videos -f obama.jpg -outroot out_data -of obama.pts -op obama.3d -oi obama.bmp -q)
 - cmd: if exist "../videos" (FaceLandmarkVidMulti.exe -inroot ../videos -f multi_face.avi -ov multi_face.avi -q) else (FaceLandmarkVidMulti.exe -inroot ../../videos -f multi_face.avi -ov multi_face.avi -q)
 - cmd: if exist "../videos" (FeatureExtraction.exe -f "../videos/1815_01_008_tony_blair.avi" -outroot output_features -of "1815_01_008_tony_blair.txt" -simalign aligned -ov feat_track.avi -hogalign hog_test.dat -q) else (FeatureExtraction.exe -f "../../videos/1815_01_008_tony_blair.avi" -outroot output_features -of "1815_01_008_tony_blair.txt" -simalign aligned -ov feat_track.avi -hogalign hog_test.dat -q)
+- cmd: if exist "../videos" (FeatureExtraction.exe -f "../videos/1815_01_008_tony_blair.avi" -outroot output_features -of "1815_01_008_tony_blair.txt" -simalign aligned -simsize 200 -simscale 0.5 -ov feat_track.avi -hogalign hog_test.dat -q) else (FeatureExtraction.exe -f "../../videos/1815_01_008_tony_blair.avi" -outroot output_features -of "1815_01_008_tony_blair.txt" -simalign aligned -simsize 200 -simscale 0.5 -ov feat_track.avi -hogalign hog_test.dat -q)
 - cmd: if exist "../videos" (FaceLandmarkVid.exe -f "../videos/1815_01_008_tony_blair.avi" -ov track.avi -q) else (FaceLandmarkVid.exe -f "../../videos/1815_01_008_tony_blair.avi" -ov track.avi -q)

@@ -308,7 +308,7 @@ int main (int argc, char **argv)
 vector<string> output_similarity_align;
 vector<string> output_hog_align_files;

-double sim_scale = 0.7;
+double sim_scale = -1;
 int sim_size = 112;
 bool grayscale = false;
 bool video_output = false;

@@ -391,7 +391,10 @@ int main (int argc, char **argv)
 }

 // Creating a face analyser that will be used for AU extraction
-FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), 0.7, 112, 112, au_loc, tri_loc);
+// Make sure sim_scale is proportional to sim_size if not set
+if (sim_scale == -1) sim_scale = sim_size * (0.7 / 112.0);
+
+FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), sim_scale, sim_size, sim_size, au_loc, tri_loc);

 while(!done) // this is not a for loop as we might also be reading from a webcam
 {

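For intuition on the hunk above: when -simscale is not supplied, the default is now derived from -simsize so that the old pairing of 112 px at scale 0.7 is preserved proportionally (the CI lines earlier in this diff still pass -simscale 0.5 explicitly, which overrides the default). A standalone sketch of the relationship, with an illustrative helper name that is not part of the codebase:

    #include <cstdio>

    // Mirrors the new default: the similarity-alignment scale grows with the
    // requested output size, so 112 px maps back to the old 0.7 scale.
    static double default_sim_scale(int sim_size)
    {
        return sim_size * (0.7 / 112.0);
    }

    int main()
    {
        std::printf("simsize 112 -> simscale %.2f\n", default_sim_scale(112)); // 0.70
        std::printf("simsize 200 -> simscale %.2f\n", default_sim_scale(200)); // 1.25
        return 0;
    }
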
@@ -593,7 +596,7 @@ int main (int argc, char **argv)
 }
 if(hog_output_file.is_open())
 {
-FaceAnalysis::Extract_FHOG_descriptor(hog_descriptor, sim_warped_img, num_hog_rows, num_hog_cols);
+face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);

 if(visualise_hog && !det_parameters.quiet_mode)
 {

@@ -631,8 +634,8 @@ int main (int argc, char **argv)

 char name[100];

-// output the frame number
-std::sprintf(name, "frame_det_%06d.bmp", frame_count);
+// Filename is based on frame number
+std::sprintf(name, "frame_det_%06d.bmp", frame_count + 1);

 // Construct the output filename
 boost::filesystem::path slash("/");

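The "+ 1" above means detected-face crops are numbered from 1 instead of 0, so the first frame is written as frame_det_000001.bmp. A trivial standalone check of the formatting (not project code):

    #include <cstdio>

    int main()
    {
        char name[100];
        int frame_count = 0; // first frame processed by the loop
        std::sprintf(name, "frame_det_%06d.bmp", frame_count + 1);
        std::printf("%s\n", name); // prints frame_det_000001.bmp
        return 0;
    }
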
@@ -1206,6 +1209,7 @@ void get_output_feature_params(vector<string> &output_similarity_aligned, vector

 }

+
 // Can process images via directories creating a separate output file per directory
 void get_image_input_output_params_feats(vector<vector<string> > &input_image_files, bool& as_video, vector<string> &arguments)
 {

@@ -112,12 +112,8 @@ public:

 cv::Mat_<int> GetTriangulation();

-cv::Mat_<uchar> GetLatestAlignedFaceGrayscale();
-
 void GetGeomDescriptor(cv::Mat_<double>& geom_desc);

-void ExtractCurrentMedians(vector<cv::Mat>& hog_medians, vector<cv::Mat>& face_image_medians, vector<cv::Vec3d>& orientations);
-
 // Grab the names of AUs being predicted
 std::vector<std::string> GetAUClassNames() const; // Presence
 std::vector<std::string> GetAURegNames() const; // Intensity

@@ -130,6 +126,9 @@ public:
 void ExtractAllPredictionsOfflineReg(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps, bool dynamic);
 void ExtractAllPredictionsOfflineClass(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps, bool dynamic);

+// Helper function for post-processing AU output files
+void FaceAnalyser::PostprocessOutputFile(string output_file, bool dynamic);
+
 private:

 // Where the predictions are kept

@@ -148,8 +147,8 @@ private:
 int frames_tracking;

 // Cache of intermediate images
-cv::Mat_<uchar> aligned_face_grayscale;
-cv::Mat aligned_face;
+cv::Mat aligned_face_for_au;
+cv::Mat aligned_face_for_output;
 cv::Mat hog_descriptor_visualisation;

 // Private members to be used for predictions

@@ -226,7 +226,7 @@ void FaceAnalyser::GetLatestHOG(cv::Mat_<double>& hog_descriptor, int& num_rows,

 void FaceAnalyser::GetLatestAlignedFace(cv::Mat& image)
 {
-image = this->aligned_face.clone();
+image = this->aligned_face_for_output.clone();
 }

 void FaceAnalyser::GetLatestNeutralHOG(cv::Mat_<double>& hog_descriptor, int& num_rows, int& num_cols)

@@ -267,50 +267,15 @@ int GetViewId(const vector<cv::Vec3d> orientations_all, const cv::Vec3d& orienta

 }

-void FaceAnalyser::ExtractCurrentMedians(vector<cv::Mat>& hog_medians, vector<cv::Mat>& face_image_medians, vector<cv::Vec3d>& orientations)
-{
-
-orientations = this->head_orientations;
-
-for(size_t i = 0; i < orientations.size(); ++i)
-{
-cv::Mat_<double> median_face(this->face_image_median.rows, this->face_image_median.cols, 0.0);
-cv::Mat_<double> median_hog(this->hog_desc_median.rows, this->hog_desc_median.cols, 0.0);
-
-ExtractMedian(this->face_image_hist[i], this->face_image_hist_sum[i], median_face, 256, 0, 255);
-ExtractMedian(this->hog_desc_hist[i], this->hog_hist_sum[i], median_hog, this->num_bins_hog, 0, 1);
-
-// Add the HOG sample
-hog_medians.push_back(median_hog.clone());
-
-// For the face image need to convert it to suitable format
-cv::Mat_<uchar> aligned_face_cols_uchar;
-median_face.convertTo(aligned_face_cols_uchar, CV_8U);
-
-cv::Mat aligned_face_uchar;
-if(aligned_face.channels() == 1)
-{
-aligned_face_uchar = cv::Mat(aligned_face.rows, aligned_face.cols, CV_8U, aligned_face_cols_uchar.data);
-}
-else
-{
-aligned_face_uchar = cv::Mat(aligned_face.rows, aligned_face.cols, CV_8UC3, aligned_face_cols_uchar.data);
-}
-
-face_image_medians.push_back(aligned_face_uchar.clone());
-
-}
-}
-
 std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const LandmarkDetector::CLNF& clnf, bool visualise)
 {

 // First align the face
-AlignFaceMask(aligned_face, frame, clnf, triangulation, true, align_scale, align_width, align_height);
+AlignFaceMask(aligned_face_for_au, frame, clnf, triangulation, true, 0.7, 112, 112);

 // Extract HOG descriptor from the frame and convert it to a useable format
 cv::Mat_<double> hog_descriptor;
-Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);
+Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);

 // Store the descriptor
 hog_desc_frame = hog_descriptor;

@@ -326,10 +291,10 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,

 cv::hconcat(locs.t(), geom_descriptor_frame.clone(), geom_descriptor_frame);

-// First convert the face image to double representation as a row vector
-cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
-cv::Mat_<double> aligned_face_cols_double;
-aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
+// First convert the face image to double representation as a row vector, TODO rem
+//cv::Mat_<uchar> aligned_face_cols(1, aligned_face_for_au.cols * aligned_face_for_au.rows * aligned_face_for_au.channels(), aligned_face_for_au.data, 1);
+//cv::Mat_<double> aligned_face_cols_double;
+//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);

 // Visualising the median HOG
 if (visualise)

@@ -363,26 +328,31 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 // First align the face if tracking was successfull
 if (clnf_model.detection_success)
 {
-AlignFaceMask(aligned_face, frame, clnf_model, triangulation, true, align_scale, align_width, align_height);
-}
-else
-{
-aligned_face = cv::Mat(align_height, align_width, CV_8UC3);
-aligned_face.setTo(0);
-}
+// The aligned face requirement for AUs
+AlignFaceMask(aligned_face_for_au, frame, clnf_model, triangulation, true, 0.7, 112, 112);

-if(aligned_face.channels() == 3)
+// If the output requirement matches use the already computed one, else compute it again
+if (align_scale == 0.7 && align_width == 112 && align_height == 112)
 {
-cv::cvtColor(aligned_face, aligned_face_grayscale, CV_BGR2GRAY);
+aligned_face_for_output = aligned_face_for_au.clone();
 }
 else
 {
-aligned_face_grayscale = aligned_face.clone();
+AlignFaceMask(aligned_face_for_output, frame, clnf_model, triangulation, true, align_scale, align_width, align_height);
+}
+}
+else
+{
+aligned_face_for_output = cv::Mat(align_height, align_width, CV_8UC3);
+aligned_face_for_au = cv::Mat(112, 112, CV_8UC3);
+aligned_face_for_output.setTo(0);
+aligned_face_for_au.setTo(0);
 }

 // Extract HOG descriptor from the frame and convert it to a useable format
 cv::Mat_<double> hog_descriptor;
-Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);
+Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);

 // Store the descriptor
 hog_desc_frame = hog_descriptor;

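In the restructured block above, AU/HOG features are always computed from a fixed 112x112 face aligned at scale 0.7, while the face written out via -simalign uses whatever -simsize/-simscale the user asked for, reusing the AU crop when the two requirements coincide. A condensed, illustrative sketch of that control flow (the struct, function name and lambda parameter are stand-ins, not the library API):

    #include <functional>
    #include <opencv2/core.hpp>

    struct AlignedFaces
    {
        cv::Mat for_au;      // fixed 112x112 at scale 0.7, feeds HOG/AU extraction
        cv::Mat for_output;  // at the user-requested -simsize/-simscale
    };

    // 'align' stands in for masked face alignment of the current frame at a given scale and size.
    AlignedFaces align_both(bool detection_success, double align_scale, int align_width, int align_height,
                            const std::function<cv::Mat(double, int, int)>& align)
    {
        AlignedFaces faces;
        if (detection_success)
        {
            faces.for_au = align(0.7, 112, 112);
            if (align_scale == 0.7 && align_width == 112 && align_height == 112)
                faces.for_output = faces.for_au.clone();   // requirements match, reuse
            else
                faces.for_output = align(align_scale, align_width, align_height);
        }
        else
        {
            faces.for_output = cv::Mat::zeros(align_height, align_width, CV_8UC3);
            faces.for_au = cv::Mat::zeros(112, 112, CV_8UC3);
        }
        return faces;
    }
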
@@ -450,13 +420,10 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
 }

-// First convert the face image to double representation as a row vector
-cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
-cv::Mat_<double> aligned_face_cols_double;
-aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
+// First convert the face image to double representation as a row vector, TODO rem?
+//cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
+//cv::Mat_<double> aligned_face_cols_double;
+//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);

-// TODO get rid of this completely as it takes too long?
-//UpdateRunningMedian(this->face_image_hist[orientation_to_use], this->face_image_hist_sum[orientation_to_use], this->face_image_median, aligned_face_cols_double, update_median, 256, 0, 255);
-
 // Visualising the median HOG
 if (visualise)

@@ -470,7 +437,7 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 std::vector<std::pair<std::string, double>> AU_predictions_reg_corrected;
 if (online)
 {
-AU_predictions_reg_corrected = CorrectOnlineAUs(AU_predictions_reg, orientation_to_use, true, false, clnf_model.detection_success);
+AU_predictions_reg_corrected = CorrectOnlineAUs(AU_predictions_reg, orientation_to_use, true, false, clnf_model.detection_success, true);
 }

 // Add the reg predictions to the historic data

@@ -531,8 +498,6 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 valid_preds.push_back(success);
 timestamps.push_back(timestamp_seconds);

-
-
 }

 void FaceAnalyser::GetGeomDescriptor(cv::Mat_<double>& geom_desc)

@@ -1101,12 +1066,6 @@ vector<pair<string, double>> FaceAnalyser::PredictCurrentAUsClass(int view)
 return predictions;
 }

-
-cv::Mat_<uchar> FaceAnalyser::GetLatestAlignedFaceGrayscale()
-{
-return aligned_face_grayscale.clone();
-}
-
 cv::Mat FaceAnalyser::GetLatestHOGDescriptorVisualisation()
 {
 return hog_descriptor_visualisation;

@@ -221,19 +221,19 @@ namespace FaceAnalysis
 destination_landmarks.col(1) = destination_landmarks.col(1) + warp_matrix(1,2);

 // Move the eyebrows up to include more of upper face
-destination_landmarks.at<double>(0,1) -= 30;
-destination_landmarks.at<double>(16,1) -= 30;
+destination_landmarks.at<double>(0,1) -= (30/0.7)*sim_scale;
+destination_landmarks.at<double>(16,1) -= (30 / 0.7)*sim_scale;

-destination_landmarks.at<double>(17,1) -= 30;
-destination_landmarks.at<double>(18,1) -= 30;
-destination_landmarks.at<double>(19,1) -= 30;
-destination_landmarks.at<double>(20,1) -= 30;
-destination_landmarks.at<double>(21,1) -= 30;
-destination_landmarks.at<double>(22,1) -= 30;
-destination_landmarks.at<double>(23,1) -= 30;
-destination_landmarks.at<double>(24,1) -= 30;
-destination_landmarks.at<double>(25,1) -= 30;
-destination_landmarks.at<double>(26,1) -= 30;
+destination_landmarks.at<double>(17,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(18,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(19,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(20,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(21,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(22,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(23,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(24,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(25,1) -= (30 / 0.7)*sim_scale;
+destination_landmarks.at<double>(26,1) -= (30 / 0.7)*sim_scale;

 destination_landmarks = cv::Mat(destination_landmarks.t()).reshape(1, 1).t();

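Scaling the 30 px eyebrow offset by sim_scale/0.7 keeps the extra forehead margin proportional to the requested alignment scale instead of being a fixed pixel count tuned for the 0.7 default. A standalone arithmetic check (not project code):

    #include <cstdio>
    #include <initializer_list>

    int main()
    {
        const double base_offset = 30.0; // pixels at the original 0.7 scale
        for (double sim_scale : {0.5, 0.7, 1.25})
        {
            std::printf("sim_scale %.2f -> eyebrow offset %.1f px\n",
                        sim_scale, (base_offset / 0.7) * sim_scale);
        }
        return 0;
    }
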
@@ -366,6 +366,9 @@ void CLNF::Read(string main_location)
 // The other module locations should be defined as relative paths from the main model
 boost::filesystem::path root = boost::filesystem::path(main_location).parent_path();

+// Assume no eye model, unless read-in
+eye_model = false;
+
 // The main file contains the references to other files
 while (!locations.eof())
 {

@@ -387,6 +390,7 @@ void CLNF::Read(string main_location)
 location = location.substr(0, location.size()-1);
 }

+
 // append to root
 location = (root / location).string();
 if (module.compare("LandmarkDetector") == 0)

@@ -536,7 +540,6 @@ void CLNF::Read(string main_location)
 tracking_initialised = false;
 model_likelihood = -10; // very low
 detection_certainty = 1; // very uncertain
-eye_model = false;

 // Initialising default values for the rest of the variables

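The CLNF::Read hunks above move the eye_model default ahead of the module-parsing loop and drop the later reset, so an eye module that was read in is no longer overwritten with false afterwards; this is likely the part of the change behind the gaze-tracking fix in the commit message. A generic, self-contained sketch of the initialise-before-parse pattern (module names and helper are made up, not the OpenFace model file format):

    #include <string>
    #include <vector>

    // Returns whether an eye part model was read in; illustrative only.
    bool read_modules(const std::vector<std::string>& modules)
    {
        bool eye_model = false; // assume no eye model, unless read-in
        for (const std::string& module : modules)
        {
            if (module == "EyePartModel") // hypothetical marker for an eye module
                eye_model = true;
            // ... other modules would be handled here ...
        }
        return eye_model; // later initialisation must not reset this to false
    }
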
@@ -35,7 +35,7 @@ for i=1:numel(in_dirs)

 command = cat(2, command, ['-asvid -fdir "' in_dirs{i} '" -of "' outputFile '" ']);

-command = cat(2, command, [' -simalign "' outputDir_aligned '" -hogalign "' outputHOG_aligned '"']);
+command = cat(2, command, [' -simalign "' outputDir_aligned '" -simsize 200 -hogalign "' outputHOG_aligned '"']);

 end

Binary file not shown.