Moving shared rotation helpers to utilities so that the rotation conversion code is not duplicated across modules.

Tadas Baltrusaitis 2017-11-11 11:57:57 +00:00
parent 42bcb55d28
commit e636b4ca7c
20 changed files with 626 additions and 481 deletions
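The change is mechanical at the call sites: the per-module copies of the Euler/axis-angle/rotation-matrix conversions are deleted and every caller switches to the single header-only implementation in the Utilities namespace. Below is a minimal sketch of the new call pattern, not taken verbatim from the repository; the angle values are illustrative, and it assumes RotationHelpers.h is on the include path (the project-file changes in this commit add $(SolutionDir)lib\local\Utilities\include for exactly that).

#include <opencv2/core/core.hpp>
#include "RotationHelpers.h" // header-only helper added under lib/local/Utilities/include in this commit

int main()
{
    // Previously each module carried its own copy, e.g. LandmarkDetector::Euler2RotationMatrix
    // or FaceAnalysis::Euler2RotationMatrix; after this commit there is one shared version.
    cv::Vec3d euler(0.1, -0.2, 0.05); // pitch, yaw, roll in radians (illustrative values)
    cv::Matx33d R = Utilities::Euler2RotationMatrix(euler);
    cv::Vec3d recovered = Utilities::RotationMatrix2Euler(R); // round-trips for non-degenerate angles
    return 0;
}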

View file

@@ -59,6 +59,7 @@
#include <RecorderOpenFace.h>
#include <RecorderOpenFaceParameters.h>
#include <SequenceCapture.h>
+#include <Visualizer.h>
#ifndef CONFIG_DIR
#define CONFIG_DIR "~"
@@ -96,8 +97,6 @@ vector<string> get_arguments(int argc, char **argv)
return arguments;
}
-void get_visualization_params(bool& visualize_track, bool& visualize_align, bool& visualize_hog, vector<string> &arguments);
// Some globals for tracking timing information for visualisation (TODO bit ugly)
double fps_tracker = -1.0;
int64 t0 = 0;
@@ -162,12 +161,6 @@ int main (int argc, char **argv)
vector<string> arguments = get_arguments(argc, argv);
-// Deciding what to visualize
-bool visualize_track = false;
-bool visualize_align = false;
-bool visualize_hog = false;
-get_visualization_params(visualize_track, visualize_align, visualize_hog, arguments);
// Load the modules that are being used for tracking and face analysis
// Load face landmark detector
LandmarkDetector::FaceModelParameters det_parameters(arguments);
@@ -181,6 +174,9 @@ int main (int argc, char **argv)
Utilities::SequenceCapture sequence_reader;
+// A utility for visualizing the results
+Utilities::Visualizer visualizer(arguments);
while (true) // this is not a for loop as we might also be reading from a webcam
{
@@ -211,9 +207,7 @@ int main (int argc, char **argv)
bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
// Gaze tracking, absolute gaze direction
-cv::Point3f gazeDirection0(0, 0, -1);
-cv::Point3f gazeDirection1(0, 0, -1);
-cv::Vec2d gazeAngle(0, 0);
+cv::Point3f gazeDirection0(0, 0, -1); cv::Point3f gazeDirection1(0, 0, -1); cv::Vec2d gazeAngle(0, 0);
if (det_parameters.track_gaze && detection_success && face_model.eye_model)
{
@@ -224,44 +218,54 @@ int main (int argc, char **argv)
// Do face alignment
cv::Mat sim_warped_img;
-cv::Mat_<double> hog_descriptor;
-int num_hog_rows = 0, num_hog_cols = 0;
+cv::Mat_<double> hog_descriptor; int num_hog_rows = 0, num_hog_cols = 0;
-// As this can be expensive only compute it if needed by output or visualization
+// Perform AU detection and HOG feature extraction, as this can be expensive only compute it if needed by output or visualization
if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualize_align || visualize_hog)
{
-face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, sequence_reader.time_stamp, false, !det_parameters.quiet_mode);
+face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, sequence_reader.time_stamp, false);
face_analyser.GetLatestAlignedFace(sim_warped_img);
+face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
-if (!det_parameters.quiet_mode && visualize_align)
-{
-cv::imshow("sim_warp", sim_warped_img);
-}
-if (recording_params.outputHOG() || (visualize_hog && !det_parameters.quiet_mode))
-{
-face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
-if (visualize_hog && !det_parameters.quiet_mode)
-{
-cv::Mat_<double> hog_descriptor_vis;
-FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_vis);
-cv::imshow("hog", hog_descriptor_vis);
-}
-}
}
// Work out the pose of the head from the tracked model
cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
// Drawing the visualization on the captured image
-if (recording_params.outputTrackedVideo() || (visualize_track && !det_parameters.quiet_mode))
+visualise_tracking(captured_image, face_model, det_parameters, gazeDirection0, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+// Displaying the tracking visualizations
+visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+visualizer.SetObservationFaceAlign(sim_warped_img);
+visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model));
+visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
+visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, detection_success);
+visualizer.SetObservationPose(pose_estimate);
+visualizer.ShowObservation();
+if(!det_parameters.quiet_mode)
{
-visualise_tracking(captured_image, face_model, det_parameters, gazeDirection0, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+if (recording_params.outputTrackedVideo() || visualize_track)
+{
+cv::namedWindow("tracking_result", 1);
+cv::imshow("tracking_result", captured_image);
+cv::waitKey(1);
+}
+if (visualize_align)
+{
+cv::imshow("sim_warp", sim_warped_img);
+}
+if (visualize_hog)
+{
+cv::Mat_<double> hog_descriptor_vis;
+FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_vis);
+cv::imshow("hog", hog_descriptor_vis);
+}
}
// Setting up the recorder output
open_face_rec.SetObservationHOG(detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
-open_face_rec.SetObservationVisualization(captured_image);
+open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
open_face_rec.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
open_face_rec.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model));
open_face_rec.SetObservationLandmarks(face_model.detected_landmarks, face_model.GetShape(sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy),
@@ -270,18 +274,7 @@ int main (int argc, char **argv)
open_face_rec.SetObservationTimestamp(sequence_reader.time_stamp);
open_face_rec.SetObservationFaceAlign(sim_warped_img);
open_face_rec.WriteObservation();
-// Visualize the image if desired
-if (visualize_track && !det_parameters.quiet_mode)
-{
-cv::namedWindow("tracking_result", 1);
-cv::imshow("tracking_result", captured_image);
-cv::waitKey(1);
-}
-// Grabbing the next frame (todo this should be part of capture)
-captured_image = sequence_reader.GetNextFrame();
// Reporting progress
if(sequence_reader.GetProgress() >= reported_completion / 10.0)
{
@@ -289,13 +282,16 @@ int main (int argc, char **argv)
reported_completion = reported_completion + 1;
}
+// Grabbing the next frame in the sequence
+captured_image = sequence_reader.GetNextFrame();
}
open_face_rec.Close();
if (recording_params.outputAUs())
{
-cout << "Postprocessing the Action Unit predictions" << endl;
+INFO_STREAM("Postprocessing the Action Unit predictions");
face_analyser.PostprocessOutputFile(open_face_rec.GetCSVFile());
}
@@ -307,55 +303,3 @@ int main (int argc, char **argv)
return 0;
}
-void get_visualization_params(bool& visualize_track, bool& visualize_align, bool& visualize_hog,vector<string> &arguments)
-{
-bool* valid = new bool[arguments.size()];
-for (size_t i = 0; i < arguments.size(); ++i)
-{
-valid[i] = true;
-}
-string output_root = "";
-visualize_align = false;
-visualize_hog = false;
-visualize_track = false;
-for (size_t i = 0; i < arguments.size(); ++i)
-{
-if (arguments[i].compare("-verbose") == 0)
-{
-visualize_track = true;
-visualize_align = true;
-visualize_hog = true;
-}
-else if (arguments[i].compare("-vis-align") == 0)
-{
-visualize_align = true;
-valid[i] = false;
-}
-else if (arguments[i].compare("-vis-hog") == 0)
-{
-visualize_hog = true;
-valid[i] = false;
-}
-else if (arguments[i].compare("-vis-track") == 0)
-{
-visualize_track = true;
-valid[i] = false;
-}
-}
-for (int i = arguments.size() - 1; i >= 0; --i)
-{
-if (!valid[i])
-{
-arguments.erase(arguments.begin() + i);
-}
-}
-}
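For reference, the new Utilities::Visualizer used in the diff above follows a set-then-show pattern each frame: give it the raw image plus camera intrinsics, attach whichever observations are available, then render. The following is a minimal sketch using only calls that appear in this diff; the function name and variable names are placeholders, and it only attaches the pose observation.

#include <opencv2/core/core.hpp>
#include <Visualizer.h>

void ShowFrame(Utilities::Visualizer& visualizer, const cv::Mat& captured_image,
    const cv::Vec6d& pose_estimate, float fx, float fy, float cx, float cy)
{
    visualizer.SetImage(captured_image, fx, fy, cx, cy); // start from the raw frame and intrinsics
    visualizer.SetObservationPose(pose_estimate);        // overlay the head pose box
    visualizer.ShowObservation();                        // display everything that was set
    // The recorder can then grab the composited image via visualizer.GetVisImage().
}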

View file

@@ -98,7 +98,7 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>false</SDLCheck>
-<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>
@@ -117,7 +117,7 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>false</SDLCheck>
-<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>
@@ -139,7 +139,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>
</SDLCheck>
-<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>
@@ -164,7 +164,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>
</SDLCheck>
-<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
</ClCompile>

View file

@@ -64,7 +64,7 @@ public:
// Constructor for FaceAnalyser using the parameters structure
FaceAnalyser(const FaceAnalysis::FaceAnalyserParameters& face_analyser_params);
-void AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online = false, bool visualise = true);
+void AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online = false);
cv::Mat GetLatestHOGDescriptorVisualisation();
@@ -77,7 +77,7 @@ public:
// A standalone call for predicting AUs from a static image, the first element in the pair represents occurence the second intensity
// This call is useful for detecting action units in images
-std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>> PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool visualise = true);
+std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>> PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks);
void Reset();

View file

@@ -52,8 +52,6 @@ namespace FaceAnalysis
void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size = 8);
-void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation);
// The following two methods go hand in hand
void ExtractSummaryStatistics(const cv::Mat_<double>& descriptors, cv::Mat_<double>& sum_stats, bool mean, bool stdev, bool max_min);
void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep = 120);
@@ -70,26 +68,10 @@ namespace FaceAnalysis
cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst);
//===========================================================================
-// Visualisation functions
+// Visualisation functions, TODO move
//===========================================================================
void Project(cv::Mat_<float>& dest, const cv::Mat_<float>& mesh, float fx, float fy, float cx, float cy);
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-cv::Matx33f Euler2RotationMatrix(const cv::Vec3f& eulerAngles);
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3f RotationMatrix2Euler(const cv::Matx33f& rotation_matrix);
-cv::Vec3f Euler2AxisAngle(const cv::Vec3f& euler);
-cv::Vec3f AxisAngle2Euler(const cv::Vec3f& axis_angle);
-cv::Matx33f AxisAngle2RotationMatrix(const cv::Vec3f& axis_angle);
-cv::Vec3f RotationMatrix2AxisAngle(const cv::Matx33f& rotation_matrix);
//============================================================================
// Matrix reading functionality
//============================================================================

View file

@@ -248,7 +248,7 @@ int GetViewId(const vector<cv::Vec3d> orientations_all, const cv::Vec3d& orienta
}
-std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool visualise)
+std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks)
{
// Extract shape parameters from the detected landmarks
@@ -285,13 +285,7 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,
//cv::Mat_<uchar> aligned_face_cols(1, aligned_face_for_au.cols * aligned_face_for_au.rows * aligned_face_for_au.channels(), aligned_face_for_au.data, 1);
//cv::Mat_<double> aligned_face_cols_double;
//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
-// Visualising the median HOG
-if (visualise)
-{
-FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_visualisation);
-}
// Perform AU prediction
auto AU_predictions_intensity = PredictCurrentAUs(orientation_to_use);
auto AU_predictions_occurence = PredictCurrentAUsClass(orientation_to_use);
@@ -310,7 +304,7 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,
}
-void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online, bool visualise)
+void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online)
{
frames_tracking++;
@@ -423,13 +417,7 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
{
UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
}
-// Visualising the median HOG
-if(visualise)
-{
-FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_visualisation);
-}
// Perform AU prediction
AU_predictions_reg = PredictCurrentAUs(orientation_to_use);

View file

@@ -242,30 +242,6 @@ namespace FaceAnalysis
}
}
-void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
-{
-// First convert to dlib format
-dlib::array2d<dlib::matrix<float,31,1> > hog(num_rows, num_cols);
-cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
-for(int y = 0; y < num_cols; ++y)
-{
-for(int x = 0; x < num_rows; ++x)
-{
-for(unsigned int o = 0; o < 31; ++o)
-{
-hog[y][x](o) = *descriptor_it++;
-}
-}
-}
-// Draw the FHOG to OpenCV format
-auto fhog_vis = dlib::draw_fhog(hog);
-visualisation = dlib::toMat(fhog_vis).clone();
-}
// Create a row vector Felzenszwalb HOG descriptor from a given image
void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size)
{
@@ -442,7 +418,7 @@ namespace FaceAnalysis
//===========================================================================
-// Visualisation functions
+// Visualisation functions, TODO rem
//===========================================================================
void Project(cv::Mat_<float>& dest, const cv::Mat_<float>& mesh, float fx, float fy, float cx, float cy)
{
@@ -485,81 +461,6 @@ namespace FaceAnalysis
}
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Matx33f Euler2RotationMatrix(const cv::Vec3f& eulerAngles)
-{
-cv::Matx33f rotation_matrix;
-float s1 = sin(eulerAngles[0]);
-float s2 = sin(eulerAngles[1]);
-float s3 = sin(eulerAngles[2]);
-float c1 = cos(eulerAngles[0]);
-float c2 = cos(eulerAngles[1]);
-float c3 = cos(eulerAngles[2]);
-rotation_matrix(0, 0) = c2 * c3;
-rotation_matrix(0, 1) = -c2 *s3;
-rotation_matrix(0, 2) = s2;
-rotation_matrix(1, 0) = c1 * s3 + c3 * s1 * s2;
-rotation_matrix(1, 1) = c1 * c3 - s1 * s2 * s3;
-rotation_matrix(1, 2) = -c2 * s1;
-rotation_matrix(2, 0) = s1 * s3 - c1 * c3 * s2;
-rotation_matrix(2, 1) = c3 * s1 + c1 * s2 * s3;
-rotation_matrix(2, 2) = c1 * c2;
-return rotation_matrix;
-}
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3f RotationMatrix2Euler(const cv::Matx33f& rotation_matrix)
-{
-float q0 = sqrt(1 + rotation_matrix(0, 0) + rotation_matrix(1, 1) + rotation_matrix(2, 2)) / 2.0f;
-float q1 = (rotation_matrix(2, 1) - rotation_matrix(1, 2)) / (4.0f*q0);
-float q2 = (rotation_matrix(0, 2) - rotation_matrix(2, 0)) / (4.0f*q0);
-float q3 = (rotation_matrix(1, 0) - rotation_matrix(0, 1)) / (4.0f*q0);
-float t1 = 2.0f * (q0*q2 + q1*q3);
-float yaw = asin(2.0 * (q0*q2 + q1*q3));
-float pitch = atan2(2.0 * (q0*q1 - q2*q3), q0*q0 - q1*q1 - q2*q2 + q3*q3);
-float roll = atan2(2.0 * (q0*q3 - q1*q2), q0*q0 + q1*q1 - q2*q2 - q3*q3);
-return cv::Vec3f(pitch, yaw, roll);
-}
-cv::Vec3f Euler2AxisAngle(const cv::Vec3f& euler)
-{
-cv::Matx33f rotMatrix = Euler2RotationMatrix(euler);
-cv::Vec3f axis_angle;
-cv::Rodrigues(rotMatrix, axis_angle);
-return axis_angle;
-}
-cv::Vec3f AxisAngle2Euler(const cv::Vec3f& axis_angle)
-{
-cv::Matx33f rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return RotationMatrix2Euler(rotation_matrix);
-}
-cv::Matx33f AxisAngle2RotationMatrix(const cv::Vec3f& axis_angle)
-{
-cv::Matx33f rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return rotation_matrix;
-}
-cv::Vec3f RotationMatrix2AxisAngle(const cv::Matx33f& rotation_matrix)
-{
-cv::Vec3f axis_angle;
-cv::Rodrigues(rotation_matrix, axis_angle);
-return axis_angle;
-}
//============================================================================
// Matrix reading functionality

View file

@@ -48,6 +48,7 @@
#endif
#include <Face_utils.h>
+#include <RotationHelpers.h>
// OpenBLAS
#include <cblas.h>
@@ -107,7 +108,7 @@ void PDM::CalcShape2D(cv::Mat_<float>& out_shape, const cv::Mat_<float>& params_
// get the rotation matrix from the euler angles
cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
// get the 3D shape of the object
cv::Mat_<float> Shape_3D = mean_shape + princ_comp * params_local;
@@ -138,7 +139,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, const cv::Rect_<float>& bound
CalcShape3D(current_shape, params_local);
// rotate the shape
-cv::Matx33f rotation_matrix = Euler2RotationMatrix(rotation);
+cv::Matx33f rotation_matrix = Utilities::Euler2RotationMatrix(rotation);
cv::Mat_<float> reshaped = current_shape.reshape(1, 3);
@@ -213,7 +214,7 @@ void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6f&
// Get the rotation matrix
cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
float r11 = currRot(0, 0);
float r12 = currRot(0, 1);
@@ -306,7 +307,7 @@ void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6f&
this->CalcShape3D(shape_3D, params_local);
cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
float r11 = currRot(0, 0);
float r12 = currRot(0, 1);
@@ -404,7 +405,7 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
// get the original rotation matrix
cv::Vec3f eulerGlobal(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f R1 = Euler2RotationMatrix(eulerGlobal);
+cv::Matx33f R1 = Utilities::Euler2RotationMatrix(eulerGlobal);
// construct R' = [1, -wz, wy
// wz, 1, -wx
@@ -422,8 +423,8 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
cv::Matx33f R3 = R1 *R2;
// Extract euler angle (through axis angle first to make sure it's legal)
-cv::Vec3f axis_angle = RotationMatrix2AxisAngle(R3);
-cv::Vec3f euler = AxisAngle2Euler(axis_angle);
+cv::Vec3f axis_angle = Utilities::RotationMatrix2AxisAngle(R3);
+cv::Vec3f euler = Utilities::AxisAngle2Euler(axis_angle);
params_global[1] = euler[0];
params_global[2] = euler[1];
@@ -466,7 +467,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, cv::Mat_<float>& out_params_l
float scaling = ((width / model_bbox.width) + (height / model_bbox.height)) / 2;
cv::Vec3f rotation_init(rotation[0], rotation[1], rotation[2]);
-cv::Matx33f R = Euler2RotationMatrix(rotation_init);
+cv::Matx33f R = Utilities::Euler2RotationMatrix(rotation_init);
cv::Vec2f translation((min_x + max_x) / 2.0, (min_y + max_y) / 2.0);
cv::Mat_<float> loc_params(this->NumberOfModes(),1, 0.0);
@@ -553,7 +554,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, cv::Mat_<float>& out_params_l
translation[0] = glob_params[4];
translation[1] = glob_params[5];
-R = Euler2RotationMatrix(rotation_init);
+R = Utilities::Euler2RotationMatrix(rotation_init);
R_2D(0,0) = R(0,0);R_2D(0,1) = R(0,1); R_2D(0,2) = R(0,2);
R_2D(1,0) = R(1,0);R_2D(1,1) = R(1,1); R_2D(1,2) = R(1,2);

View file

@@ -93,7 +93,7 @@
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>true</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
@@ -117,7 +117,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -141,7 +141,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
<ClCompile>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>false</FunctionLevelLinking>
@@ -166,7 +166,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
<ClCompile>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<FunctionLevelLinking>false</FunctionLevelLinking>

View file

@@ -94,23 +94,6 @@ namespace LandmarkDetector
void Draw(cv::Mat img, const cv::Mat_<double>& shape2D);
void Draw(cv::Mat img, const CLNF& clnf_model);
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles);
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix);
-cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler);
-cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle);
-cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle);
-cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix);
//============================================================================
// Face detection helpers
//============================================================================

View file

@@ -35,6 +35,7 @@
#include "stdafx.h"
#include <LandmarkDetectorFunc.h>
+#include "RotationHelpers.h"
// OpenCV includes
#include <opencv2/core/core.hpp>
@@ -83,7 +84,7 @@ cv::Vec6d LandmarkDetector::GetPose(const CLNF& clnf_model, float fx, float fy,
cv::solvePnP(landmarks_3D, landmarks_2D, camera_matrix, cv::Mat(), vec_rot, vec_trans, true);
-cv::Vec3d euler = LandmarkDetector::AxisAngle2Euler(vec_rot);
+cv::Vec3d euler = Utilities::AxisAngle2Euler(vec_rot);
return cv::Vec6d(vec_trans[0], vec_trans[1], vec_trans[2], euler[0], euler[1], euler[2]);
}
@@ -136,12 +137,12 @@ cv::Vec6d LandmarkDetector::GetPoseWRTCamera(const CLNF& clnf_model, float fx, f
double z_y = cv::sqrt(vec_trans[1] * vec_trans[1] + vec_trans[2] * vec_trans[2]);
double eul_y = -atan2(vec_trans[0], z_y);
-cv::Matx33d camera_rotation = LandmarkDetector::Euler2RotationMatrix(cv::Vec3d(eul_x, eul_y, 0));
-cv::Matx33d head_rotation = LandmarkDetector::AxisAngle2RotationMatrix(vec_rot);
+cv::Matx33d camera_rotation = Utilities::Euler2RotationMatrix(cv::Vec3d(eul_x, eul_y, 0));
+cv::Matx33d head_rotation = Utilities::AxisAngle2RotationMatrix(vec_rot);
cv::Matx33d corrected_rotation = camera_rotation * head_rotation;
-cv::Vec3d euler_corrected = LandmarkDetector::RotationMatrix2Euler(corrected_rotation);
+cv::Vec3d euler_corrected = Utilities::RotationMatrix2Euler(corrected_rotation);
return cv::Vec6d(vec_trans[0], vec_trans[1], vec_trans[2], euler_corrected[0], euler_corrected[1], euler_corrected[2]);
}

View file

@@ -45,6 +45,7 @@
// Local includes
#include <LandmarkDetectorUtils.h>
+#include <RotationHelpers.h>
using namespace LandmarkDetector;
@@ -1098,7 +1099,7 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) cons
// Need to rotate the shape to get the actual 3D representation
// get the rotation matrix from the euler angles
-cv::Matx33d R = LandmarkDetector::Euler2RotationMatrix(cv::Vec3d(params_global[1], params_global[2], params_global[3]));
+cv::Matx33d R = Utilities::Euler2RotationMatrix(cv::Vec3d(params_global[1], params_global[2], params_global[3]));
shape3d = shape3d.reshape(1, 3);

View file

@@ -39,7 +39,6 @@
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
-#include <opencv2/calib3d.hpp>
// Boost includes
#include <filesystem.hpp>
@@ -970,81 +969,7 @@ void DrawLandmarks(cv::Mat img, vector<cv::Point> landmarks)
}
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles)
-{
-cv::Matx33d rotation_matrix;
-double s1 = sin(eulerAngles[0]);
-double s2 = sin(eulerAngles[1]);
-double s3 = sin(eulerAngles[2]);
-double c1 = cos(eulerAngles[0]);
-double c2 = cos(eulerAngles[1]);
-double c3 = cos(eulerAngles[2]);
-rotation_matrix(0,0) = c2 * c3;
-rotation_matrix(0,1) = -c2 *s3;
-rotation_matrix(0,2) = s2;
-rotation_matrix(1,0) = c1 * s3 + c3 * s1 * s2;
-rotation_matrix(1,1) = c1 * c3 - s1 * s2 * s3;
-rotation_matrix(1,2) = -c2 * s1;
-rotation_matrix(2,0) = s1 * s3 - c1 * c3 * s2;
-rotation_matrix(2,1) = c3 * s1 + c1 * s2 * s3;
-rotation_matrix(2,2) = c1 * c2;
-return rotation_matrix;
-}
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix)
-{
-double q0 = sqrt( 1 + rotation_matrix(0,0) + rotation_matrix(1,1) + rotation_matrix(2,2) ) / 2.0;
-double q1 = (rotation_matrix(2,1) - rotation_matrix(1,2)) / (4.0*q0) ;
-double q2 = (rotation_matrix(0,2) - rotation_matrix(2,0)) / (4.0*q0) ;
-double q3 = (rotation_matrix(1,0) - rotation_matrix(0,1)) / (4.0*q0) ;
-double t1 = 2.0 * (q0*q2 + q1*q3);
-double yaw = asin(2.0 * (q0*q2 + q1*q3));
-double pitch= atan2(2.0 * (q0*q1-q2*q3), q0*q0-q1*q1-q2*q2+q3*q3);
-double roll = atan2(2.0 * (q0*q3-q1*q2), q0*q0+q1*q1-q2*q2-q3*q3);
-return cv::Vec3d(pitch, yaw, roll);
-}
-cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler)
-{
-cv::Matx33d rotMatrix = LandmarkDetector::Euler2RotationMatrix(euler);
-cv::Vec3d axis_angle;
-cv::Rodrigues(rotMatrix, axis_angle);
-return axis_angle;
-}
-cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle)
-{
-cv::Matx33d rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return RotationMatrix2Euler(rotation_matrix);
-}
-cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle)
-{
-cv::Matx33d rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return rotation_matrix;
-}
-cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix)
-{
-cv::Vec3d axis_angle;
-cv::Rodrigues(rotation_matrix, axis_angle);
-return axis_angle;
-}
//===========================================================================

View file

@@ -49,6 +49,7 @@
#endif
#include <LandmarkDetectorUtils.h>
+#include "RotationHelpers.h"
using namespace LandmarkDetector;
//===========================================================================
@@ -154,7 +155,7 @@ void PDM::CalcShape2D(cv::Mat_<double>& out_shape, const cv::Mat_<double>& param
// get the rotation matrix from the euler angles
cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
// get the 3D shape of the object
cv::Mat_<double> Shape_3D = mean_shape + princ_comp * params_local;
@@ -185,7 +186,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, const cv::Rect_<double>& boun
CalcShape3D(current_shape, params_local);
// rotate the shape
-cv::Matx33d rotation_matrix = Euler2RotationMatrix(rotation);
+cv::Matx33d rotation_matrix = Utilities::Euler2RotationMatrix(rotation);
cv::Mat_<double> reshaped = current_shape.reshape(1, 3);
@@ -265,7 +266,7 @@ void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6d&
// Get the rotation matrix
cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
float r11 = (float) currRot(0,0);
float r12 = (float) currRot(0,1);
@@ -363,7 +364,7 @@ void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6d&
shape_3D_d.convertTo(shape_3D, CV_32F);
cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
float r11 = (float) currRot(0,0);
float r12 = (float) currRot(0,1);
@@ -460,7 +461,7 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
// get the original rotation matrix
cv::Vec3d eulerGlobal(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d R1 = Euler2RotationMatrix(eulerGlobal);
+cv::Matx33d R1 = Utilities::Euler2RotationMatrix(eulerGlobal);
// construct R' = [1, -wz, wy
// wz, 1, -wx
@@ -478,8 +479,8 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
cv::Matx33d R3 = R1 *R2;
// Extract euler angle (through axis angle first to make sure it's legal)
-cv::Vec3d axis_angle = RotationMatrix2AxisAngle(R3);
-cv::Vec3d euler = AxisAngle2Euler(axis_angle);
+cv::Vec3d axis_angle = Utilities::RotationMatrix2AxisAngle(R3);
+cv::Vec3d euler = Utilities::AxisAngle2Euler(axis_angle);
params_global[1] = euler[0];
params_global[2] = euler[1];
@@ -569,7 +570,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, const cv::Mat_<double>& out_p
double scaling = ((width / model_bbox.width) + (height / model_bbox.height)) / 2;
cv::Vec3d rotation_init = rotation;
-cv::Matx33d R = Euler2RotationMatrix(rotation_init);
+cv::Matx33d R = Utilities::Euler2RotationMatrix(rotation_init);
cv::Vec2d translation((min_x + max_x) / 2.0, (min_y + max_y) / 2.0);
cv::Mat_<float> loc_params(this->NumberOfModes(),1, 0.0);
@@ -656,7 +657,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, const cv::Mat_<double>& out_p
translation[0] = glob_params[4];
translation[1] = glob_params[5];
-R = Euler2RotationMatrix(rotation_init);
+R = Utilities::Euler2RotationMatrix(rotation_init);
R_2D(0,0) = R(0,0);R_2D(0,1) = R(0,1); R_2D(0,2) = R(0,2);
R_2D(1,0) = R(1,0);R_2D(1,1) = R(1,1); R_2D(1,2) = R(1,2);

View file

@@ -61,21 +61,29 @@
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost_d.props" />
+<Import Project="..\..\3rdParty\dlib\dlib.props" />
+<Import Project="..\..\3rdParty\tbb\tbb_d.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost.props" />
+<Import Project="..\..\3rdParty\dlib\dlib.props" />
+<Import Project="..\..\3rdParty\tbb\tbb.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost_d.props" />
+<Import Project="..\..\3rdParty\dlib\dlib.props" />
+<Import Project="..\..\3rdParty\tbb\tbb_d.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost.props" />
+<Import Project="..\..\3rdParty\dlib\dlib.props" />
+<Import Project="..\..\3rdParty\tbb\tbb.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
@@ -146,14 +154,22 @@
<ClCompile Include="src\RecorderOpenFaceParameters.cpp" />
<ClCompile Include="src\SequenceCapture.cpp" />
<ClCompile Include="src\VisualizationUtils.cpp" />
+<ClCompile Include="src\Visualizer.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="include\RecorderCSV.h" />
<ClInclude Include="include\RecorderHOG.h" />
<ClInclude Include="include\RecorderOpenFace.h" />
<ClInclude Include="include\RecorderOpenFaceParameters.h" />
+<ClInclude Include="include\RotationHelpers.h" />
<ClInclude Include="include\SequenceCapture.h" />
<ClInclude Include="include\VisualizationUtils.h" />
+<ClInclude Include="include\Visualizer.h" />
+</ItemGroup>
+<ItemGroup>
+<ProjectReference Include="..\..\3rdParty\dlib\dlib.vcxproj">
+<Project>{b47a5f12-2567-44e9-ae49-35763ec82149}</Project>
+</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">

View file

@@ -33,6 +33,9 @@
<ClCompile Include="src\VisualizationUtils.cpp">
<Filter>Source Files</Filter>
</ClCompile>
+<ClCompile Include="src\Visualizer.cpp">
+<Filter>Source Files</Filter>
+</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="include\RecorderCSV.h">
@@ -50,8 +53,14 @@
<ClInclude Include="include\SequenceCapture.h">
<Filter>Header Files</Filter>
</ClInclude>
+<ClInclude Include="include\Visualizer.h">
+<Filter>Header Files</Filter>
+</ClInclude>
<ClInclude Include="include\VisualizationUtils.h">
<Filter>Header Files</Filter>
</ClInclude>
+<ClInclude Include="include\RotationHelpers.h">
+<Filter>Header Files</Filter>
+</ClInclude>
</ItemGroup>
</Project>

View file

@@ -0,0 +1,118 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __ROTATION_HELPERS_h_
#define __ROTATION_HELPERS_h_
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d.hpp>
namespace Utilities
{
//===========================================================================
// Angle representation conversion helpers
//===========================================================================
// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
static cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles)
{
cv::Matx33d rotation_matrix;
double s1 = sin(eulerAngles[0]);
double s2 = sin(eulerAngles[1]);
double s3 = sin(eulerAngles[2]);
double c1 = cos(eulerAngles[0]);
double c2 = cos(eulerAngles[1]);
double c3 = cos(eulerAngles[2]);
rotation_matrix(0, 0) = c2 * c3;
rotation_matrix(0, 1) = -c2 *s3;
rotation_matrix(0, 2) = s2;
rotation_matrix(1, 0) = c1 * s3 + c3 * s1 * s2;
rotation_matrix(1, 1) = c1 * c3 - s1 * s2 * s3;
rotation_matrix(1, 2) = -c2 * s1;
rotation_matrix(2, 0) = s1 * s3 - c1 * c3 * s2;
rotation_matrix(2, 1) = c3 * s1 + c1 * s2 * s3;
rotation_matrix(2, 2) = c1 * c2;
return rotation_matrix;
}
// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
static cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix)
{
double q0 = sqrt(1 + rotation_matrix(0, 0) + rotation_matrix(1, 1) + rotation_matrix(2, 2)) / 2.0;
double q1 = (rotation_matrix(2, 1) - rotation_matrix(1, 2)) / (4.0*q0);
double q2 = (rotation_matrix(0, 2) - rotation_matrix(2, 0)) / (4.0*q0);
double q3 = (rotation_matrix(1, 0) - rotation_matrix(0, 1)) / (4.0*q0);
double t1 = 2.0 * (q0*q2 + q1*q3);
double yaw = asin(2.0 * (q0*q2 + q1*q3));
double pitch = atan2(2.0 * (q0*q1 - q2*q3), q0*q0 - q1*q1 - q2*q2 + q3*q3);
double roll = atan2(2.0 * (q0*q3 - q1*q2), q0*q0 + q1*q1 - q2*q2 - q3*q3);
return cv::Vec3d(pitch, yaw, roll);
}
static cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler)
{
cv::Matx33d rotMatrix = Euler2RotationMatrix(euler);
cv::Vec3d axis_angle;
cv::Rodrigues(rotMatrix, axis_angle);
return axis_angle;
}
static cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle)
{
cv::Matx33d rotation_matrix;
cv::Rodrigues(axis_angle, rotation_matrix);
return RotationMatrix2Euler(rotation_matrix);
}
static cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle)
{
cv::Matx33d rotation_matrix;
cv::Rodrigues(axis_angle, rotation_matrix);
return rotation_matrix;
}
static cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix)
{
cv::Vec3d axis_angle;
cv::Rodrigues(rotation_matrix, axis_angle);
return axis_angle;
}
}
#endif
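
A minimal round-trip sketch showing how the helpers above compose (illustrative values, not part of the commit):

    #include <iostream>
    #include "RotationHelpers.h"

    int main()
    {
        // Pitch, yaw, roll in radians (made-up values)
        cv::Vec3d euler(0.1, -0.2, 0.05);

        cv::Matx33d R = Utilities::Euler2RotationMatrix(euler);           // Euler -> rotation matrix
        cv::Vec3d axis_angle = Utilities::RotationMatrix2AxisAngle(R);    // rotation matrix -> axis-angle
        cv::Vec3d recovered = Utilities::AxisAngle2Euler(axis_angle);     // axis-angle -> back to Euler

        // Away from the gimbal-lock singularity this should print roughly (0, 0, 0)
        std::cout << recovered - euler << std::endl;
        return 0;
    }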

View file

@ -41,10 +41,6 @@
namespace Utilities
{
-    void DrawLandmarkDetResults(cv::Mat img, const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
-    void DrawPoseDetResults(cv::Mat image, cv::Vec6d pose, double confidence, float fx, float fy, float cx, float cy);
-    void DrawEyeTrackResults(cv::Mat image, const cv::Mat_<double>& eye_landmarks, cv::Point3f gazeVecAxisLeft, cv::Point3f gazeVecAxisRight, float fx, float fy, float cx, float cy);

    // TODO draw AU results

    // Helper utilities
@ -52,11 +48,12 @@ namespace Utilities
    // Drawing a bounding box around the face in an image
    void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy);
-    void DrawBox(std::vector<std::pair<cv::Point, cv::Point>> lines, cv::Mat image, cv::Scalar color, int thickness);
+    void DrawBox(const std::vector<std::pair<cv::Point2d, cv::Point2d>>& lines, cv::Mat image, cv::Scalar color, int thickness);

    // Computing a bounding box to be drawn
    std::vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy);
+    void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation);
}

View file

@ -0,0 +1,105 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __VISUALIZER_h_
#define __VISUALIZER_h_
// System includes
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for visualizing data processed by OpenFace (facial landmarks, head pose, gaze, facial action units, aligned face, and HOG features) on top of the tracked video
*/
class Visualizer {
public:
// The constructor for the visualizer that specifies what to visualize
Visualizer(std::vector<std::string> arguments);
Visualizer(bool vis_track, bool vis_hog, bool vis_align);
~Visualizer();
// TODO copy, assignment and move operators? Do not allow
// Adding observations to the visualizer
// Setting the image on which to draw, together with the camera calibration parameters
void SetImage(const cv::Mat& canvas, float fx, float fy, float cx, float cy);
// All observations relevant to facial landmarks (optional visibilities parameter to not display all landmarks)
void SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities = cv::Mat_<int>());
// Pose related observations
void SetObservationPose(const cv::Vec6d& pose, double confidence);
// Gaze related observations
void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks);
// Face alignment related observations
void SetObservationFaceAlign(const cv::Mat& aligned_face);
// HOG feature related observations
void SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows);
void ShowObservation();
cv::Mat GetVisImage();
private:
// Keeping track of what we're visualizing
bool vis_track;
bool vis_hog;
bool vis_align;
// Temporary variables for visualization
cv::Mat captured_image; // our drawing canvas
cv::Mat tracked_image;
cv::Mat hog_image;
cv::Mat aligned_face_image;
// Useful for drawing 3d
float fx, fy, cx, cy;
};
}
#endif
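
A rough per-frame usage sketch of the interface above (captured_image, the intrinsics and the observation variables are placeholders that would normally come from the rest of the pipeline, not from this header):

    // Construct once: vis_track, vis_hog, vis_align
    Utilities::Visualizer visualizer(true, false, false);

    // Then, for every processed frame:
    visualizer.SetImage(captured_image, fx, fy, cx, cy);                             // canvas + camera intrinsics
    visualizer.SetObservationLandmarks(landmarks_2D, confidence, detection_success); // 2D landmarks from the detector
    visualizer.SetObservationPose(head_pose, confidence);                            // 6 DOF head pose
    visualizer.ShowObservation();                                                    // display the enabled windows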

View file

@ -32,134 +32,167 @@
///////////////////////////////////////////////////////////////////////////////

#include "VisualizationUtils.h"
#include "RotationHelpers.h"

// For FHOG visualisation
#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>

// For drawing on images
#include <opencv2/imgproc.hpp>

namespace Utilities
{
    void Project(cv::Mat_<double>& dest, const cv::Mat_<double>& mesh, double fx, double fy, double cx, double cy)
    {
        dest = cv::Mat_<double>(mesh.rows, 2, 0.0);

        int num_points = mesh.rows;

        double X, Y, Z;

        cv::Mat_<double>::const_iterator mData = mesh.begin();
        cv::Mat_<double>::iterator projected = dest.begin();

        for (int i = 0; i < num_points; i++)
        {
            // Get the points
            X = *(mData++);
            Y = *(mData++);
            Z = *(mData++);

            double x;
            double y;

            // if depth is 0 the projection is different
            if (Z != 0)
            {
                x = ((X * fx / Z) + cx);
                y = ((Y * fy / Z) + cy);
            }
            else
            {
                x = X;
                y = Y;
            }

            // Project and store in dest matrix
            (*projected++) = x;
            (*projected++) = y;
        }
    }

    void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy)
    {
        auto edge_lines = CalculateBox(pose, fx, fy, cx, cy);
        DrawBox(edge_lines, image, color, thickness);
    }

    std::vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy)
    {
        double boxVerts[] = { -1, 1, -1,
            1, 1, -1,
            1, 1, 1,
            -1, 1, 1,
            1, -1, 1,
            1, -1, -1,
            -1, -1, -1,
            -1, -1, 1 };

        std::vector<std::pair<int, int>> edges;
        edges.push_back(std::pair<int, int>(0, 1));
        edges.push_back(std::pair<int, int>(1, 2));
        edges.push_back(std::pair<int, int>(2, 3));
        edges.push_back(std::pair<int, int>(0, 3));
        edges.push_back(std::pair<int, int>(2, 4));
        edges.push_back(std::pair<int, int>(1, 5));
        edges.push_back(std::pair<int, int>(0, 6));
        edges.push_back(std::pair<int, int>(3, 7));
        edges.push_back(std::pair<int, int>(6, 5));
        edges.push_back(std::pair<int, int>(5, 4));
        edges.push_back(std::pair<int, int>(4, 7));
        edges.push_back(std::pair<int, int>(7, 6));

        // The size of the head is roughly 200mm x 200mm x 200mm
        cv::Mat_<double> box = cv::Mat(8, 3, CV_64F, boxVerts).clone() * 100;

        cv::Matx33d rot = Euler2RotationMatrix(cv::Vec3d(pose[3], pose[4], pose[5]));
        cv::Mat_<double> rotBox;

        // Rotate the box
        rotBox = cv::Mat(rot) * box.t();
        rotBox = rotBox.t();

        // Move the bounding box to head position
        rotBox.col(0) = rotBox.col(0) + pose[0];
        rotBox.col(1) = rotBox.col(1) + pose[1];
        rotBox.col(2) = rotBox.col(2) + pose[2];

        // draw the lines
        cv::Mat_<double> rotBoxProj;
        Project(rotBoxProj, rotBox, fx, fy, cx, cy);

        std::vector<std::pair<cv::Point2d, cv::Point2d>> lines;

        for (size_t i = 0; i < edges.size(); ++i)
        {
            cv::Mat_<double> begin;
            cv::Mat_<double> end;

            rotBoxProj.row(edges[i].first).copyTo(begin);
            rotBoxProj.row(edges[i].second).copyTo(end);

            cv::Point2d p1(begin.at<double>(0), begin.at<double>(1));
            cv::Point2d p2(end.at<double>(0), end.at<double>(1));

            lines.push_back(std::pair<cv::Point2d, cv::Point2d>(p1, p2));
        }

        return lines;
    }

    void DrawBox(const std::vector<std::pair<cv::Point2d, cv::Point2d>>& lines, cv::Mat image, cv::Scalar color, int thickness)
    {
        cv::Rect image_rect(0, 0, image.cols, image.rows);

        for (size_t i = 0; i < lines.size(); ++i)
        {
            cv::Point2d p1 = lines.at(i).first;
            cv::Point2d p2 = lines.at(i).second;

            // Only draw the line if one of the points is inside the image
            if (p1.inside(image_rect) || p2.inside(image_rect))
            {
                cv::line(image, p1, p2, color, thickness, CV_AA);
            }
        }
    }

    void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
    {
        // First convert to dlib format
        dlib::array2d<dlib::matrix<float, 31, 1> > hog(num_rows, num_cols);

        cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
        for (int y = 0; y < num_cols; ++y)
        {
            for (int x = 0; x < num_rows; ++x)
            {
                for (unsigned int o = 0; o < 31; ++o)
                {
                    hog[y][x](o) = *descriptor_it++;
                }
            }
        }

        // Draw the FHOG to OpenCV format
        auto fhog_vis = dlib::draw_fhog(hog);
        visualisation = dlib::toMat(fhog_vis).clone();
    }
}
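
A small sketch of how these free functions can be used on their own, outside the Visualizer (the pose and camera intrinsics are made-up values):

    cv::Mat frame = cv::Mat::zeros(480, 640, CV_8UC3);

    // Head pose: translation in millimetres, then pitch, yaw, roll in radians (illustrative)
    cv::Vec6d pose(0.0, 0.0, 500.0, 0.0, 0.3, 0.0);
    float fx = 500.0f, fy = 500.0f, cx = 320.0f, cy = 240.0f;

    // Draw the head-pose cube directly ...
    Utilities::DrawBox(frame, pose, cv::Scalar(255, 0, 0), 2, fx, fy, cx, cy);

    // ... or compute the projected edges first and draw them in a second step
    auto lines = Utilities::CalculateBox(pose, fx, fy, cx, cy);
    Utilities::DrawBox(lines, frame, cv::Scalar(0, 0, 255), 1);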

View file

@ -0,0 +1,140 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "Visualizer.h"
#include "VisualizationUtils.h"
using namespace Utilities;
Visualizer::Visualizer(std::vector<std::string> arguments)
{
// By default not visualizing anything
this->vis_track = false;
this->vis_hog = false;
this->vis_align = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-verbose") == 0)
{
vis_track = true;
vis_align = true;
vis_hog = true;
}
else if (arguments[i].compare("-vis-align") == 0)
{
vis_align = true;
}
else if (arguments[i].compare("-vis-hog") == 0)
{
vis_hog = true;
}
else if (arguments[i].compare("-vis-track") == 0)
{
vis_track = true;
}
}
}
Visualizer::Visualizer(bool vis_track, bool vis_hog, bool vis_align)
{
this->vis_track = vis_track;
this->vis_hog = vis_hog;
this->vis_align = vis_align;
}
void Visualizer::SetImage(const cv::Mat& canvas, float fx, float fy, float cx, float cy)
{
captured_image = canvas.clone();
this->fx = fx;
this->fy = fy;
this->cx = cx;
this->cy = cy;
}
void Visualizer::SetObservationFaceAlign(const cv::Mat& aligned_face)
{
this->aligned_face_image = aligned_face;
}
void Visualizer::SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows)
{
Visualise_FHOG(hog_descriptor, num_rows, num_cols, this->hog_image);
}
void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities)
{
DrawLandmarkDetResults(captured_image, landmarks_2D, visibilities);
}
void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
{
double visualisation_boundary = 0.4;
// Only draw if the reliability is reasonable, the value is slightly ad-hoc
if (confidence > visualisation_boundary)
{
double vis_certainty = confidence;
if (vis_certainty > 1)
vis_certainty = 1;
// Rescale from 0 to 1, so that the colour can indicate how confident we are in the tracking
vis_certainty = (vis_certainty - visualisation_boundary) / (1 - visualisation_boundary);
// A rough heuristic for the thickness of the box around the face, scaled with the image width
int thickness = (int)std::ceil(2.0 * ((double)captured_image.cols) / 640.0);
// Draw it in reddish if uncertain, blueish if certain
DrawBox(captured_image, pose, cv::Scalar(vis_certainty*255.0, 0, (1 - vis_certainty) * 255), thickness, fx, fy, cx, cy);
}
}
void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1,
const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks)
{
// TODO: actual drawing of the gaze vectors and the eye landmarks
// The previous drawing call is kept below, commented out, as det_parameters, face_model and
// detection_success are not visible from the visualizer:
// if (det_parameters.track_gaze && detection_success && face_model.eye_model)
// {
//     GazeAnalysis::DrawGaze(captured_image, face_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
// }
}
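
Given the argument parsing above, the two constructions below should be equivalent (a usage sketch, not taken from the tools' documentation): -vis-track enables the tracked-video window, -vis-align and -vis-hog enable the aligned-face and HOG views, and -verbose enables all three.

    // From a FeatureExtraction-style argument list
    std::vector<std::string> args = { "-vis-track" };
    Utilities::Visualizer vis_from_args(args);

    // Or explicitly: vis_track, vis_hog, vis_align
    Utilities::Visualizer vis_explicit(true, false, false);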