Continuing towards visualizer integration and inclusion of 3D eye landmarks in the output. Cleanup of the rotation conversion in utilities
parent e636b4ca7c
commit e1c260eee6
10 changed files with 195 additions and 61 deletions
@@ -106,37 +106,6 @@ int frame_count = 0;
 void visualise_tracking(cv::Mat& captured_image, const LandmarkDetector::CLNF& face_model, const LandmarkDetector::FaceModelParameters& det_parameters, cv::Point3f gazeDirection0, cv::Point3f gazeDirection1, double fx, double fy, double cx, double cy)
 {
-	// Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
-	double detection_certainty = face_model.detection_certainty;
-	bool detection_success = face_model.detection_success;
-
-	double visualisation_boundary = 0.4;
-
-	// Only draw if the reliability is reasonable, the value is slightly ad-hoc
-	if (detection_certainty > visualisation_boundary)
-	{
-		LandmarkDetector::Draw(captured_image, face_model);
-
-		double vis_certainty = detection_certainty;
-		if (vis_certainty > 1)
-			vis_certainty = 1;
-
-		// Scale from 0 to 1, to allow the tracking confidence to be indicated by colour
-		vis_certainty = (vis_certainty - visualisation_boundary) / (1 - visualisation_boundary);
-
-		// A rough heuristic for box around the face width
-		int thickness = (int)std::ceil(2.0 * ((double)captured_image.cols) / 640.0);
-
-		cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
-
-		// Draw it in reddish if uncertain, blueish if certain
-		LandmarkDetector::DrawBox(captured_image, pose_estimate_to_draw, cv::Scalar(vis_certainty * 255.0, 0, (1 - vis_certainty) * 255), thickness, fx, fy, cx, cy);
-
-		if (det_parameters.track_gaze && detection_success && face_model.eye_model)
-		{
-			GazeAnalysis::DrawGaze(captured_image, face_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
-		}
-	}
-
 	// Work out the framerate TODO
 	if (frame_count % 10 == 0)
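
For reference (not part of this changeset): the block removed above maps the tracking certainty to a box colour. It clamps the certainty to at most 1, rescales the range [visualisation_boundary, 1] to [0, 1], and blends between blue (certain) and red (uncertain) in OpenCV's BGR channel order. A minimal self-contained sketch of that mapping:

#include <opencv2/core.hpp>
#include <algorithm>

// Sketch only: blueish when certain, reddish near the 0.4 boundary, mirroring the
// cv::Scalar(vis_certainty*255, 0, (1-vis_certainty)*255) call removed above.
cv::Scalar CertaintyColour(double certainty, double visualisation_boundary = 0.4)
{
	double c = std::min(certainty, 1.0);
	c = (c - visualisation_boundary) / (1.0 - visualisation_boundary);
	c = std::max(c, 0.0);
	return cv::Scalar(c * 255.0, 0.0, (1.0 - c) * 255.0); // B, G, R
}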
@@ -221,7 +190,7 @@ int main (int argc, char **argv)
 	cv::Mat_<double> hog_descriptor; int num_hog_rows = 0, num_hog_cols = 0;
 
 	// Perform AU detection and HOG feature extraction, as this can be expensive only compute it if needed by output or visualization
-	if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualize_align || visualize_hog)
+	if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualizer.vis_align || visualizer.vis_hog)
 	{
 		face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, sequence_reader.time_stamp, false);
 		face_analyser.GetLatestAlignedFace(sim_warped_img);
@@ -91,7 +91,7 @@
     <WarningLevel>Level3</WarningLevel>
     <Optimization>Disabled</Optimization>
     <SDLCheck>true</SDLCheck>
-    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
   </ClCompile>
 </ItemDefinitionGroup>
@@ -100,7 +100,7 @@
     <WarningLevel>Level3</WarningLevel>
     <Optimization>Disabled</Optimization>
     <SDLCheck>true</SDLCheck>
-    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     <PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
   </ClCompile>
@@ -113,7 +113,7 @@
     <IntrinsicFunctions>true</IntrinsicFunctions>
     <SDLCheck>
     </SDLCheck>
-    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     <MultiProcessorCompilation>true</MultiProcessorCompilation>
     <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
@@ -131,7 +131,7 @@
     <IntrinsicFunctions>true</IntrinsicFunctions>
     <SDLCheck>
     </SDLCheck>
-    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    <AdditionalIncludeDirectories>./include;../LandmarkDetector/include;../Utilities/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     <MultiProcessorCompilation>true</MultiProcessorCompilation>
     <PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
@@ -42,6 +42,7 @@
 
 #include "LandmarkDetectorUtils.h"
 #include "LandmarkDetectorFunc.h"
+#include "Utilities.h"
 
 using namespace std;
 
@@ -93,7 +94,7 @@ void GazeAnalysis::EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Po
 {
 	cv::Vec6d headPose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
 	cv::Vec3d eulerAngles(headPose(3), headPose(4), headPose(5));
-	cv::Matx33d rotMat = LandmarkDetector::Euler2RotationMatrix(eulerAngles);
+	cv::Matx33d rotMat = Utilities::Euler2RotationMatrix(eulerAngles);
 
 	int part = -1;
 	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
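
For reference (not part of this changeset): the rotation conversion now comes from the Utilities module rather than LandmarkDetector. A minimal sketch of what an Euler-angle-to-rotation-matrix helper of this kind typically does, assuming per-axis rotations composed as Rx*Ry*Rz; the axis order actually used by Utilities::Euler2RotationMatrix lives in its own source, which this diff does not show:

#include <opencv2/core.hpp>
#include <cmath>

cv::Matx33d Euler2RotationMatrixSketch(const cv::Vec3d& euler)
{
	double s1 = std::sin(euler[0]), c1 = std::cos(euler[0]); // rotation about X
	double s2 = std::sin(euler[1]), c2 = std::cos(euler[1]); // rotation about Y
	double s3 = std::sin(euler[2]), c3 = std::cos(euler[2]); // rotation about Z

	cv::Matx33d Rx(1, 0, 0, 0, c1, -s1, 0, s1, c1);
	cv::Matx33d Ry(c2, 0, s2, 0, 1, 0, -s2, 0, c2);
	cv::Matx33d Rz(c3, -s3, 0, s3, c3, 0, 0, 0, 1);

	return Rx * Ry * Rz; // the composition order here is an assumption, not taken from the diff
}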
@@ -51,10 +51,6 @@ using namespace std;
 namespace LandmarkDetector
 {
 
-	// For subpixel accuracy drawing
-	const int draw_shiftbits = 4;
-	const int draw_multiplier = 1 << 4;
-
 	// Useful utility for creating directories for storing the output files
 	void create_directory_from_file(string output_path)
@@ -802,6 +798,32 @@ vector<cv::Point2d> CalculateVisibleEyeLandmarks(const CLNF& clnf_model)
 	return to_return;
 }
 
+// Computing the 3D eye landmarks
+vector<cv::Point3d> Calculate3DEyeLandmarks(const CLNF& clnf_model, double fx, double fy, double cx, double cy)
+{
+
+	vector<cv::Point3d> to_return;
+	// If the model has hierarchical updates draw those too
+	for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
+	{
+
+		if (clnf_model.hierarchical_model_names[i].compare("left_eye_28") == 0 ||
+			clnf_model.hierarchical_model_names[i].compare("right_eye_28") == 0)
+		{
+
+			auto lmks = clnf_model.hierarchical_models[i].GetShape(fx, fy, cx, cy);
+
+			int num_landmarks = lmks.rows / 3;
+
+			for (int lmk = 0; lmk < num_landmarks; ++lmk)
+			{
+				cv::Point3d curr_lmk(lmks.at<double>(lmk), lmks.at<double>(lmk + num_landmarks), lmks.at<double>(lmk + 2 * num_landmarks));
+				to_return.push_back(curr_lmk);
+			}
+		}
+	}
+	return to_return;
+}
 // Computing eye landmarks (to be drawn later or in different interfaces)
 vector<cv::Point2d> CalculateAllEyeLandmarks(const CLNF& clnf_model)
 {
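
For reference (not part of this changeset): the indexing in the new Calculate3DEyeLandmarks assumes GetShape() returns a single flattened column with all x values first, then all y values, then all z values, so landmark k is read as (m(k), m(k+n), m(k+2n)) with n = rows/3. A small standalone sketch of that unpacking, using a hypothetical helper name:

#include <opencv2/core.hpp>
#include <vector>

// Unpack a (3*n) x 1 matrix laid out as [x_0..x_{n-1}, y_0..y_{n-1}, z_0..z_{n-1}]
// into 3D points, exactly as the loop in the diff above does.
std::vector<cv::Point3d> UnpackStackedShape(const cv::Mat& m)
{
	int n = m.rows / 3;
	std::vector<cv::Point3d> pts;
	pts.reserve(n);
	for (int k = 0; k < n; ++k)
		pts.emplace_back(m.at<double>(k), m.at<double>(k + n), m.at<double>(k + 2 * n));
	return pts;
}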
@@ -65,7 +65,7 @@ namespace Utilities
 
 		void WriteLine(int observation_count, double time_stamp, bool landmark_detection_success, double landmark_confidence,
 			const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D, const cv::Mat_<double>& pdm_model_params, const cv::Vec6d& rigid_shape_params, cv::Vec6d& pose_estimate,
-			const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks,
+			const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d,
 			const std::vector<std::pair<std::string, double> >& au_intensities, const std::vector<std::pair<std::string, double> >& au_occurences);
 
 		// TODO have set functions?
@@ -84,7 +84,7 @@ namespace Utilities
 
 		// Gaze related observations
 		void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1,
-			const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks);
+			const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2D, const std::vector<cv::Point3d>& eye_landmarks3D);
 
 		// Face alignment related observations
 		void SetObservationFaceAlign(const cv::Mat& aligned_face);
@@ -135,7 +135,8 @@ namespace Utilities
 		cv::Point3f gaze_direction0;
 		cv::Point3f gaze_direction1;
 		cv::Vec2d gaze_angle;
-		std::vector<cv::Point2d> eye_landmarks;
+		std::vector<cv::Point2d> eye_landmarks2D;
+		std::vector<cv::Point3d> eye_landmarks3D;
 
 		int observation_count;
 
@@ -72,7 +72,7 @@ namespace Utilities
 		void SetObservationPose(const cv::Vec6d& pose, double confidence);
 
 		// Gaze related observations
-		void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks);
+		void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d);
 
 		// Face alignment related observations
 		void SetObservationFaceAlign(const cv::Mat& aligned_face);
@@ -84,13 +84,13 @@ namespace Utilities
 
 		cv::Mat GetVisImage();
 
-	private:
-
 		// Keeping track of what we're visualizing
 		bool vis_track;
 		bool vis_hog;
 		bool vis_align;
 
+	private:
+
 		// Temporary variables for visualization
 		cv::Mat captured_image; // out canvas
 		cv::Mat tracked_image;
@@ -91,6 +91,19 @@ bool RecorderCSV::Open(std::string output_file_name, bool is_sequence, bool outp
 		{
 			output_file << ", eye_lmk_y_" << i;
 		}
+
+		for (int i = 0; i < num_eye_landmarks; ++i)
+		{
+			output_file << ", eye_lmk_X_" << i;
+		}
+		for (int i = 0; i < num_eye_landmarks; ++i)
+		{
+			output_file << ", eye_lmk_Y_" << i;
+		}
+		for (int i = 0; i < num_eye_landmarks; ++i)
+		{
+			output_file << ", eye_lmk_Z_" << i;
+		}
 	}
 
 	if (output_pose)
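
For reference: these loops add eye_lmk_X_i, eye_lmk_Y_i and eye_lmk_Z_i blocks to the CSV header after the existing 2D eye_lmk_x_i / eye_lmk_y_i columns. Assuming, purely for illustration, 56 eye landmarks (28 per eye model), the relevant stretch of the header would read roughly: ..., eye_lmk_x_0, ..., eye_lmk_y_55, eye_lmk_X_0, ..., eye_lmk_X_55, eye_lmk_Y_0, ..., eye_lmk_Y_55, eye_lmk_Z_0, ..., eye_lmk_Z_55, ... The WriteLine change further down emits the values in the same block order, so columns and values stay aligned.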
@@ -160,7 +173,7 @@ bool RecorderCSV::Open(std::string output_file_name, bool is_sequence, bool outp
 // TODO check if the stream is open
 void RecorderCSV::WriteLine(int observation_count, double time_stamp, bool landmark_detection_success, double landmark_confidence,
 	const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D, const cv::Mat_<double>& pdm_model_params, const cv::Vec6d& rigid_shape_params, cv::Vec6d& pose_estimate,
-	const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks,
+	const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d,
 	const std::vector<std::pair<std::string, double> >& au_intensities, const std::vector<std::pair<std::string, double> >& au_occurences)
 {
 
@@ -188,15 +201,31 @@ void RecorderCSV::WriteLine(int observation_count, double time_stamp, bool landm
 		output_file << ", " << gaze_angle[0] << ", " << gaze_angle[1];
 
 		// Output the 2D eye landmarks
-		for (auto eye_lmk : eye_landmarks)
+		for (auto eye_lmk : eye_landmarks2d)
 		{
 			output_file << ", " << eye_lmk.x;
 		}
 
-		for (auto eye_lmk : eye_landmarks)
+		for (auto eye_lmk : eye_landmarks2d)
 		{
 			output_file << ", " << eye_lmk.y;
 		}
+
+		// Output the 3D eye landmarks
+		for (auto eye_lmk : eye_landmarks3d)
+		{
+			output_file << ", " << eye_lmk.x;
+		}
+
+		for (auto eye_lmk : eye_landmarks3d)
+		{
+			output_file << ", " << eye_lmk.y;
+		}
+
+		for (auto eye_lmk : eye_landmarks3d)
+		{
+			output_file << ", " << eye_lmk.z;
+		}
 	}
 
 	// Output the estimated head pose
@@ -259,12 +259,13 @@ void RecorderOpenFace::SetObservationActionUnits(const std::vector<std::pair<std
 }
 
 void RecorderOpenFace::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1,
-	const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks)
+	const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2D, const std::vector<cv::Point3d>& eye_landmarks3D)
 {
 	this->gaze_direction0 = gaze_direction0;
 	this->gaze_direction1 = gaze_direction1;
 	this->gaze_angle = gaze_angle;
-	this->eye_landmarks = eye_landmarks;
+	this->eye_landmarks2D = eye_landmarks2D;
+	this->eye_landmarks3D = eye_landmarks3D;
 }
 
 RecorderOpenFace::~RecorderOpenFace()
@@ -34,8 +34,15 @@
 #include "Visualizer.h"
 #include "VisualizationUtils.h"
 
+// For drawing on images
+#include <opencv2/imgproc.hpp>
+
 using namespace Utilities;
 
+// For subpixel accuracy drawing
+const int draw_shiftbits = 4;
+const int draw_multiplier = 1 << 4;
+
 Visualizer::Visualizer(std::vector<std::string> arguments)
 {
 	// By default not visualizing anything
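
For reference (not part of this changeset): draw_shiftbits and draw_multiplier enable OpenCV's fixed-point sub-pixel drawing. Coordinates are pre-multiplied by 2^4 = 16 and the same bit count is passed as the shift argument, so cv::circle and cv::line treat them as values with 4 fractional bits. A minimal sketch (the function name and the dot it draws are made up for the example):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

void DrawSubpixelDot(cv::Mat& canvas, cv::Point2d p)
{
	const int shiftbits = 4;                // number of fractional bits
	const int multiplier = 1 << shiftbits;  // 16
	// Scale to fixed point; the radius is scaled the same way (2 * 16 => a 2 px radius).
	cv::Point fp(cvRound(p.x * multiplier), cvRound(p.y * multiplier));
	cv::circle(canvas, fp, 2 * multiplier, cv::Scalar(0, 0, 255), 1, cv::LINE_AA, shiftbits);
}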
@@ -97,7 +104,26 @@ void Visualizer::SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int n
 
 void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities)
 {
-	DrawLandmarkDetResults(captured_image, landmarks_2D, visibilities);
+	// Draw 2D landmarks on the image
+	int n = landmarks_2D.rows / 2;
+
+	// Drawing feature points
+	for (int i = 0; i < n; ++i)
+	{
+		if (visibilities.empty() || visibilities.at<int>(i))
+		{
+			cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
+
+			// A rough heuristic for drawn point size
+			int thickness = (int)std::ceil(3.0 * ((double)captured_image.cols) / 640.0);
+			int thickness_2 = (int)std::ceil(1.0 * ((double)captured_image.cols) / 640.0);
+
+			cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
+			cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+		}
+	}
 }
 
 void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
@@ -123,18 +149,103 @@ void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
 	}
 }
 
-void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1,
-	const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks)
+// TODO add 3D eye landmark locations
+void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d)
 {
-	// TODO actual drawing
-	if (det_parameters.track_gaze && detection_success && face_model.eye_model)
+	// TODO actual drawing, first of eye landmarks then of gaze
+
+	if (eye_landmarks.size() > 0)
 	{
-		GazeAnalysis::DrawGaze(captured_image, face_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
+		// First draw the eye region landmarks
+		for (size_t i = 0; i < eye_landmarks.size(); ++i)
+		{
+			cv::Point featurePoint(cvRound(eye_landmarks[i].x * (double)draw_multiplier), cvRound(eye_landmarks[i].y * (double)draw_multiplier));
+
+			// A rough heuristic for drawn point size
+			int thickness = 1.0;
+			int thickness_2 = 1.0;
+
+			int next_point = i + 1;
+			if (i == 7)
+				next_point = 0;
+			if (i == 19)
+				next_point = 8;
+			if (i == 27)
+				next_point = 20;
+
+			cv::Point nextFeaturePoint(cvRound(eye_landmarks[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks[next_point].y * (double)draw_multiplier));
+			if (i < 8 || i > 19)
+				cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+			else
+				cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
+		}
+
+		// Now draw the gaze lines themselves
+		cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
+
+		int part_left = -1;
+		int part_right = -1;
+		for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
+		{
+			if (clnf_model.hierarchical_model_names[i].compare("left_eye_28") == 0)
+			{
+				part_left = i;
+			}
+			if (clnf_model.hierarchical_model_names[i].compare("right_eye_28") == 0)
+			{
+				part_right = i;
+			}
+		}
+
+		cv::Mat eyeLdmks3d_left = clnf_model.hierarchical_models[part_left].GetShape(fx, fy, cx, cy);
+		cv::Point3f pupil_left = GetPupilPosition(eyeLdmks3d_left);
+
+		cv::Mat eyeLdmks3d_right = clnf_model.hierarchical_models[part_right].GetShape(fx, fy, cx, cy);
+		cv::Point3f pupil_right = GetPupilPosition(eyeLdmks3d_right);
+
+		std::vector<cv::Point3d> points_left;
+		points_left.push_back(cv::Point3d(pupil_left));
+		points_left.push_back(cv::Point3d(pupil_left + gaze_direction0 * 50.0));
+
+		std::vector<cv::Point3d> points_right;
+		points_right.push_back(cv::Point3d(pupil_right));
+		points_right.push_back(cv::Point3d(pupil_right + gaze_direction1 * 50.0));
+
+		cv::Mat_<double> proj_points;
+		cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
+		Project(proj_points, mesh_0, fx, fy, cx, cy);
+		cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
+			cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
+
+		cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
+		Project(proj_points, mesh_1, fx, fy, cx, cy);
+		cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
+			cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
+	}
+}
+
+void Visualizer::ShowObservation()
+{
+	if (vis_track)
+	{
+		cv::namedWindow("tracking_result", 1);
+		cv::imshow("tracking_result", captured_image);
+		cv::waitKey(1);
+	}
+	if (vis_align)
+	{
+		cv::imshow("sim_warp", aligned_face_image);
+	}
+	if (vis_hog)
+	{
+		cv::imshow("hog", hog_image);
 	}
 }
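
For reference (not part of this changeset): the gaze lines above are built as 3D segments (pupil centre plus 50 units along the gaze direction) and pushed through a Project helper before drawing. Project itself is not shown in this diff; a hedged sketch of the standard pinhole projection it presumably performs, x = fx*X/Z + cx and y = fy*Y/Z + cy, under a hypothetical name:

#include <opencv2/core.hpp>

// Project camera-space points (one point per row: X, Y, Z) to pixel coordinates.
// Sketch only; the Project function used in the diff may differ in details.
cv::Mat_<double> ProjectSketch(const cv::Mat_<double>& points3d, double fx, double fy, double cx, double cy)
{
	cv::Mat_<double> out(points3d.rows, 2);
	for (int r = 0; r < points3d.rows; ++r)
	{
		double X = points3d(r, 0), Y = points3d(r, 1), Z = points3d(r, 2);
		out(r, 0) = fx * X / Z + cx;
		out(r, 1) = fy * Y / Z + cy;
	}
	return out;
}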