Finishing HOG recording, starting to integrate a visualizer, so that output videos/images are consistent in C++ and C#

Tadas Baltrusaitis 2018-01-19 08:58:37 +00:00
parent d6654dec55
commit a84eea57c8
6 changed files with 176 additions and 9 deletions

View file

@@ -108,7 +108,10 @@ namespace OpenFaceOffline
         FaceAnalyserManaged face_analyser;
         GazeAnalyserManaged gaze_analyser;
 
-        // Recording parameters (default values)
+        // For visualization of results
+        Visualizer visualizer_of;
+
+        // For output recording
         Recorder recorder;
 
         public bool RecordAligned { get; set; } = false; // Aligned face images
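The new visualizer_of field is only declared in this hunk; its construction is not part of the diff. A minimal, hypothetical sketch of how it might be initialized alongside the recorder (the flag values are illustrative, not taken from this commit):

    // Hypothetical setup code; the three flags map to (vis_track, vis_hog, vis_aligned)
    // in the managed Visualizer wrapper introduced later in this commit.
    visualizer_of = new Visualizer(true, true, true);
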
@@ -300,12 +303,12 @@ namespace OpenFaceOffline
                     // Predict eye gaze
                     gaze_analyser.AddNextFrame(clnf_model, detectionSucceeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
 
-                    // Only the final face will contain the details
-                    VisualizeFeatures(frame, landmarks, i == 0, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
-
                     // Record an observation
                     RecordObservation(recorder, detectionSucceeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
+
+                    // Only the final face will contain the details
+                    VisualizeFeatures(frame, landmarks, i == 0, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
                 }
 
                 latest_img = null;
@@ -468,6 +471,7 @@
         private void VisualizeFeatures(RawImage frame, List<Tuple<double, double>> landmarks, bool new_image, double fx, double fy, double cx, double cy, double progress)
         {
             List<Tuple<Point, Point>> lines = null;
             List<Tuple<double, double>> eye_landmarks = null;
             List<Tuple<Point, Point>> gaze_lines = null;
@@ -486,6 +490,14 @@
             double scale = 0;
 
+            // Helps with recording and showing the visualizations
+            //visualizer.SetObservationFaceAlign(sim_warped_img);
+            //visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
+            //visualizer.SetObservationLandmarks(face_model.detected_landmarks, 1.0, face_model.detection_success); // Set confidence to high to make sure we always visualize
+            //visualizer.SetObservationPose(pose_estimate, 1.0);
+            //visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy), face_model.detection_certainty);
+
             if (detectionSucceeding)
             {
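The commented-out calls above are copied from the C++ FeatureExtraction pipeline (note the C++ :: syntax), so they cannot run in this C# file as-is. A hedged sketch of how they might eventually be wired up through the managed wrapper, with placeholder variable names rather than the actual fields of MainWindow.xaml.cs:

    // Hypothetical C# wiring; landmarks, pose, gaze directions and eye landmarks are assumed
    // to have been pulled from the existing CLNF and gaze wrappers earlier in the frame loop.
    visualizer_of.SetObservationLandmarks(landmarks, 1.0, detectionSucceeding); // confidence forced high so landmarks always draw
    visualizer_of.SetObservationPose(pose, 1.0);
    visualizer_of.SetObservationGaze(gazeDirection0, gazeDirection1, eyeLandmarks2D, eyeLandmarks3D, confidence);
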

View file

@@ -186,6 +186,7 @@
     <ClInclude Include="FaceDetectorInterop.h" />
     <ClInclude Include="OpenCVWrappers.h" />
     <ClInclude Include="RecorderInterop.h" />
+    <ClInclude Include="VisualizerInterop.h" />
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="..\..\3rdParty\dlib\dlib.vcxproj">

View file

@@ -47,5 +47,8 @@
     <ClInclude Include="RecorderInterop.h">
       <Filter>Header Files</Filter>
     </ClInclude>
+    <ClInclude Include="VisualizerInterop.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
   </ItemGroup>
 </Project>

View file

@@ -40,4 +40,5 @@
 #include "CameraInterop.h"
 #include "ImageReader.h"
 #include "FaceDetectorInterop.h"
 #include "RecorderInterop.h"
+#include "VisualizerInterop.h"

View file

@@ -335,14 +335,28 @@ public:
         return HOG_vis_image;
     }
 
-    OpenCVWrappers::RawImage^ GetLatestHOGFeature(System::Int32^ num_rows, System::Int32^ num_cols, System::Int32^ num_channels) {
-        num_rows = gcnew System::Int32(*this->num_rows);
-        num_cols = gcnew System::Int32(*this->num_cols);
-        num_channels = gcnew System::Int32(31);
+    OpenCVWrappers::RawImage^ GetLatestHOGFeature() {
         OpenCVWrappers::RawImage^ HOG_feature = gcnew OpenCVWrappers::RawImage(*hog_features);
         return HOG_feature;
     }
 
+    // As the number of HOG rows and columns might not be known in advance, have methods for querying them
+    int GetHOGRows()
+    {
+        return *num_rows;
+    }
+
+    int GetHOGCols()
+    {
+        return *num_cols;
+    }
+
+    // The number of channels is always the same (31, the depth of the FHOG descriptor)
+    int GetHOGChannels()
+    {
+        return 31;
+    }
+
     void Reset()
     {
         face_analyser->Reset();
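With the parameter-less GetLatestHOGFeature(), a C# caller now queries the HOG dimensions through the new getters and can pass them on to the visualizer. A rough sketch of the intended call pattern, assuming these methods live on the FaceAnalyserManaged wrapper (variable names are illustrative):

    // Hypothetical C# usage of the reworked HOG accessors.
    RawImage hogFeature = face_analyser.GetLatestHOGFeature();
    int hogRows = face_analyser.GetHOGRows();
    int hogCols = face_analyser.GetHOGCols();   // channel count is fixed at 31 (FHOG)
    visualizer_of.SetObservationHOG(detectionSucceeding, hogFeature, hogCols, hogRows);
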

View file

@@ -0,0 +1,136 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#pragma once

#pragma unmanaged

// Include all the unmanaged things we need.
#include "Visualizer.h"

#pragma managed

// Needed for the OpenCVWrappers::RawImage arguments used below
#include "OpenCVWrappers.h"

using System::Collections::Generic::List;
namespace UtilitiesOF {

    public ref class Visualizer
    {
    private:

        // The underlying unmanaged visualizer that does the actual drawing
        Utilities::Visualizer* m_visualizer;

    public:

        Visualizer(bool vis_track, bool vis_hog, bool vis_aligned)
        {
            m_visualizer = new Utilities::Visualizer(vis_track, vis_hog, vis_aligned);
        }
        void SetObservationGaze(System::Tuple<double, double, double>^ gaze_direction0, System::Tuple<double, double, double>^ gaze_direction1,
            List<System::Tuple<double, double>^>^ landmarks_2D, List<System::Tuple<double, double, double>^>^ landmarks_3D,
            double confidence)
        {
            cv::Point3f gaze_direction0_cv(gaze_direction0->Item1, gaze_direction0->Item2, gaze_direction0->Item3);
            cv::Point3f gaze_direction1_cv(gaze_direction1->Item1, gaze_direction1->Item2, gaze_direction1->Item3);

            // Convert the managed 2D eye landmarks to a vector of OpenCV points
            std::vector<cv::Point2d> landmarks_2D_cv;
            for (int i = 0; i < landmarks_2D->Count; ++i)
            {
                landmarks_2D_cv.push_back(cv::Point2d(landmarks_2D[i]->Item1, landmarks_2D[i]->Item2));
            }

            // Convert the managed 3D eye landmarks to a vector of OpenCV points
            std::vector<cv::Point3d> landmarks_3D_cv;
            for (int i = 0; i < landmarks_3D->Count; ++i)
            {
                landmarks_3D_cv.push_back(cv::Point3d(landmarks_3D[i]->Item1, landmarks_3D[i]->Item2, landmarks_3D[i]->Item3));
            }

            m_visualizer->SetObservationGaze(gaze_direction0_cv, gaze_direction1_cv, landmarks_2D_cv, landmarks_3D_cv, confidence);
        }
        // Setting the observations

        void SetObservationPose(List<double>^ pose, double confidence)
        {
            cv::Vec6d pose_vec(pose[0], pose[1], pose[2], pose[3], pose[4], pose[5]);
            m_visualizer->SetObservationPose(pose_vec, confidence);
        }

        void SetObservationFaceAlign(OpenCVWrappers::RawImage^ aligned_face_image)
        {
            m_visualizer->SetObservationFaceAlign(aligned_face_image->Mat);
        }

        void SetObservationHOG(bool success, OpenCVWrappers::RawImage^ observation_HOG, int num_cols, int num_rows)
        {
            // Note: the success flag is accepted for interface symmetry but is not currently
            // forwarded to the unmanaged visualizer.
            m_visualizer->SetObservationHOG(observation_HOG->Mat, num_cols, num_rows);
        }
        void SetObservationLandmarks(List<System::Tuple<double, double>^>^ landmarks_2D, double confidence, bool success)
        {
            // Pack the landmarks into the (2n x 1) column matrix layout used by OpenFace:
            // all x coordinates first, followed by all y coordinates.
            cv::Mat_<double> landmarks_2D_mat(landmarks_2D->Count * 2, 1, 0.0);
            for (int i = 0; i < landmarks_2D->Count; ++i)
            {
                landmarks_2D_mat.at<double>(i, 0) = landmarks_2D[i]->Item1;
                landmarks_2D_mat.at<double>(i + landmarks_2D->Count, 0) = landmarks_2D[i]->Item2;
            }

            // TODO add visibilities
            m_visualizer->SetObservationLandmarks(landmarks_2D_mat, confidence, success);
        }
        // Finalizer. Definitely called before Garbage Collection,
        // but not automatically called on explicit Dispose().
        // May be called multiple times.
        !Visualizer()
        {
            // Free the unmanaged visualizer (guarded and nulled so repeated calls are safe)
            if (m_visualizer != nullptr)
            {
                delete m_visualizer;
                m_visualizer = nullptr;
            }
        }

        // Destructor. Called on explicit Dispose() only.
        ~Visualizer()
        {
            this->!Visualizer();
        }

    };

}
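On the C# side this ref class surfaces as IDisposable: the destructor maps to Dispose(), which in turn runs the finalizer and frees the native Utilities::Visualizer. A minimal, hypothetical usage sketch using only the methods defined above:

    // Hypothetical C# usage; the observation variables are placeholders for data
    // produced by the tracking pipeline, not actual fields of the OpenFaceOffline app.
    using (var vis = new UtilitiesOF.Visualizer(true, true, true))
    {
        vis.SetObservationLandmarks(landmarks2D, confidence, success);
        vis.SetObservationPose(pose, confidence);
        vis.SetObservationFaceAlign(alignedFace);
    } // Dispose() releases the unmanaged visualizer deterministically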