Fixing recorder and visualizer for FaceLandmarkImg

parent 734c4fd34d
commit dcb9d90eca

4 changed files with 42 additions and 11 deletions
FaceLandmarkImg.cpp

@@ -248,7 +248,7 @@ int main (int argc, char **argv)
             // Perform AU detection and HOG feature extraction, as this can be expensive only compute it if needed by output or visualization
             if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualizer.vis_align || visualizer.vis_hog)
             {
-                face_analyser.PredictStaticAUs(captured_image, face_model.detected_landmarks);
+                face_analyser.PredictStaticAUsAndComputeFeatures(captured_image, face_model.detected_landmarks);
                 face_analyser.GetLatestAlignedFace(sim_warped_img);
                 face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
             }

@@ -271,11 +271,12 @@ int main (int argc, char **argv)
             open_face_rec.SetObservationFaceAlign(sim_warped_img);
             open_face_rec.WriteObservation();

-            // Grabbing the next frame in the sequence
-            captured_image = image_reader.GetNextImage();
         }
         visualizer.ShowObservation();

+        // Grabbing the next frame in the sequence
+        captured_image = image_reader.GetNextImage();
+
     }

     return 0;
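Read together with the loop structure of FaceLandmarkImg, the second hunk moves the frame advance out of the inner per-face block: previously the image reader appears to have been advanced once per detected face rather than once per image, before the visualizer showed the result. A pseudocode-style sketch of the corrected ordering, reusing the identifiers above (the loop conditions and the face_detections name are assumptions for illustration, not part of this diff, and the sketch is not compilable on its own):

// Sketch only: corrected per-image vs. per-face ordering in the main loop.
while (!captured_image.empty())                                    // per-image loop
{
    for (size_t face = 0; face < face_detections.size(); ++face)  // per-face loop
    {
        // ... landmark detection, AU prediction, recorder observations ...
        open_face_rec.WriteObservation();
    }

    // Show and advance once per image, after all faces have been recorded
    visualizer.ShowObservation();
    captured_image = image_reader.GetNextImage();
}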
FaceAnalyser.h

@@ -73,9 +73,8 @@ public:
     std::vector<std::pair<std::string, double>> GetCurrentAUsReg() const; // AU intensity
     std::vector<std::pair<std::string, double>> GetCurrentAUsCombined() const; // Both presense and intensity

-    // A standalone call for predicting AUs from a static image, the first element in the pair represents occurence the second intensity
-    // This call is useful for detecting action units in images
-    std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>> PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks);
+    // A standalone call for predicting AUs and computing face texture features from a static image
+    void PredictStaticAUsAndComputeFeatures(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks);

     void Reset();

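For callers, the old two-element return value is gone: after the renamed call, the AU predictions and the computed features are retrieved through the analyser's getters. A minimal usage sketch, assuming the FaceAnalysis namespace and an illustrative include path, and leaving construction of the FaceAnalyser instance and the landmark detection to the caller (neither is part of this commit):

#include <string>
#include <utility>
#include <vector>
#include <opencv2/core.hpp>
#include "FaceAnalyser.h"   // illustrative include path

// Run static AU prediction plus feature computation on one image and
// fetch the results through the existing getters.
void AnalyseStaticImage(FaceAnalysis::FaceAnalyser& face_analyser,
                        const cv::Mat& captured_image,
                        const cv::Mat_<float>& detected_landmarks)
{
    // One call now predicts AUs and computes the aligned face and HOG features
    face_analyser.PredictStaticAUsAndComputeFeatures(captured_image, detected_landmarks);

    // Aligned (similarity-warped) face, e.g. for the recorder or visualizer
    cv::Mat sim_warped_img;
    face_analyser.GetLatestAlignedFace(sim_warped_img);

    // HOG descriptor of the aligned face
    cv::Mat_<double> hog_descriptor;
    int num_hog_rows = 0, num_hog_cols = 0;
    face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);

    // AU intensities and combined presence/intensity predictions
    std::vector<std::pair<std::string, double>> aus_reg = face_analyser.GetCurrentAUsReg();
    std::vector<std::pair<std::string, double>> aus_combined = face_analyser.GetCurrentAUsCombined();
}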
FaceAnalyser.cpp

@@ -248,7 +248,7 @@ int GetViewId(const vector<cv::Vec3d> orientations_all, const cv::Vec3d& orientation)

 }

-std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks)
+void FaceAnalyser::PredictStaticAUsAndComputeFeatures(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks)
 {

     // Extract shape parameters from the detected landmarks

@@ -259,6 +259,16 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,
     // First align the face
     AlignFaceMask(aligned_face_for_au, frame, detected_landmarks, params_global, pdm, triangulation, true, 0.7, 112, 112);

+    // If the output requirement matches use the already computed one, else compute it again
+    if (align_scale_out == align_scale_au && align_width_out == align_width_au && align_height_out == align_height_au)
+    {
+        aligned_face_for_output = aligned_face_for_au.clone();
+    }
+    else
+    {
+        AlignFaceMask(aligned_face_for_output, frame, detected_landmarks, params_global, pdm, triangulation, true, align_scale_out, align_width_out, align_height_out);
+    }
+
     // Extract HOG descriptor from the frame and convert it to a useable format
     cv::Mat_<double> hog_descriptor;
     Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);

@@ -303,8 +313,6 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,
     AU_predictions_reg = AU_predictions_intensity;
     AU_predictions_class = AU_predictions_occurence;

-    return std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>>(AU_predictions_intensity, AU_predictions_occurence);
-
 }

 void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online)
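A small point about the clone() in the second hunk: when the output alignment parameters match the AU alignment parameters, the already computed face is duplicated with clone() rather than plain assignment, since assigning a cv::Mat only copies the header and shares the underlying pixel buffer. A self-contained illustration in plain OpenCV (the variable names below are illustrative, not from FaceAnalyser):

#include <cassert>
#include <opencv2/core.hpp>

int main()
{
    cv::Mat a(4, 4, CV_8UC1, cv::Scalar(0));

    cv::Mat shared = a;          // copies the header only; pixels are shared
    cv::Mat copied = a.clone();  // deep copy with its own pixel buffer

    a.setTo(cv::Scalar(255));    // modify the original

    assert(shared.at<unsigned char>(0, 0) == 255);  // the shared view sees the change
    assert(copied.at<unsigned char>(0, 0) == 0);    // the clone does not
    return 0;
}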
Visualizer.cpp

@@ -82,6 +82,7 @@ Visualizer::Visualizer(bool vis_track, bool vis_hog, bool vis_align)
     this->vis_align = vis_align;
 }

+// Setting the image on which to draw
 void Visualizer::SetImage(const cv::Mat& canvas, float fx, float fy, float cx, float cy)
 {
     captured_image = canvas.clone();

@@ -89,20 +90,42 @@ void Visualizer::SetImage(const cv::Mat& canvas, float fx, float fy, float cx, f
     this->fy = fy;
     this->cx = cx;
     this->cy = cy;
+
+    // Clearing other images
+    hog_image = cv::Mat();
+    aligned_face_image = cv::Mat();
+
 }


 void Visualizer::SetObservationFaceAlign(const cv::Mat& aligned_face)
 {
-    this->aligned_face_image = aligned_face;
+    if(this->aligned_face_image.empty())
+    {
+        this->aligned_face_image = aligned_face;
+    }
+    else
+    {
+        cv::vconcat(this->aligned_face_image, aligned_face, this->aligned_face_image);
+    }
 }

 void Visualizer::SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows)
 {
     if(vis_hog)
     {
-        Visualise_FHOG(hog_descriptor, num_rows, num_cols, this->hog_image);
+        if (this->hog_image.empty())
+        {
+            Visualise_FHOG(hog_descriptor, num_rows, num_cols, this->hog_image);
+        }
+        else
+        {
+            cv::Mat tmp_hog;
+            Visualise_FHOG(hog_descriptor, num_rows, num_cols, tmp_hog);
+            cv::vconcat(this->hog_image, tmp_hog, this->hog_image);
+        }
     }
+
 }

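Both setters now append to any visualization that is already present instead of overwriting it, and SetImage clears the accumulators, so successive observations from the same image (for example several detected faces) end up stacked vertically. The accumulation pattern in isolation, as a self-contained OpenCV sketch (AppendBelow and the 112x112 size are illustrative; cv::vconcat needs matching widths and types, which holds when every observation is produced at the same fixed size):

#include <opencv2/core.hpp>

// Illustrative helper: append a new observation below the ones already stored,
// writing the stacked result back into the accumulator, mirroring the
// Visualizer change above.
static void AppendBelow(cv::Mat& accumulated, const cv::Mat& next)
{
    if (accumulated.empty())
    {
        accumulated = next.clone();
    }
    else
    {
        cv::vconcat(accumulated, next, accumulated);
    }
}

int main()
{
    cv::Mat stacked;                                   // starts empty, like aligned_face_image
    cv::Mat face1(112, 112, CV_8UC3, cv::Scalar(50, 50, 50));
    cv::Mat face2(112, 112, CV_8UC3, cv::Scalar(200, 200, 200));

    AppendBelow(stacked, face1);   // stacked is now 112 x 112
    AppendBelow(stacked, face2);   // stacked is now 224 x 112, face2 below face1

    return stacked.rows == 224 ? 0 : 1;
}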