///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2016, Carnegie Mellon University and University of Cambridge,
// all rights reserved.
//
// THIS SOFTWARE IS PROVIDED "AS IS" FOR ACADEMIC USE ONLY AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
// OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Notwithstanding the license granted herein, Licensee acknowledges that certain components
// of the Software may be covered by so-called "open source" software licenses ("Open Source
// Components"), which means any software licenses approved as open source licenses by the
// Open Source Initiative or any substantially similar licenses, including without limitation any
// license that, as a condition of distribution of the software licensed under such license,
// requires that the distributor make the software available in source code format. Licensor shall
// provide a list of Open Source Components for a particular version of the Software upon
// Licensee's request. Licensee will comply with the applicable terms of such licenses and to
// the extent required by the licenses covering Open Source Components, the terms of such
// licenses will apply in lieu of the terms of this Agreement. To the extent the terms of the
// licenses applicable to Open Source Components prohibit any of the restrictions in this
// License Agreement with respect to such Open Source Component, such restrictions will not
// apply to such Open Source Component. To the extent the terms of the licenses applicable to
// Open Source Components require Licensor to make an offer to provide source code or
// related information in connection with the Software, such offer is hereby made. Any request
// for source code or related information should be directed to cl-face-tracker-distribution@lists.cam.ac.uk
// Licensee acknowledges receipt of notices for the Open Source Components for the initial
// delivery of the Software.
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
// Header for all external CLNF/CLM-Z/CLM methods of interest to the user
#ifndef __LANDMARK_DETECTOR_UTILS_h_
#define __LANDMARK_DETECTOR_UTILS_h_
// OpenCV includes
#include <opencv2/core/core.hpp>
#include "LandmarkDetectorModel.h"
using namespace std;
namespace LandmarkDetector
{
//===========================================================================
// Defining a set of useful utility functions to be used within CLNF
//=============================================================================================
// Helper functions for parsing the inputs
//=============================================================================================
void get_video_input_output_params(vector<string> &input_video_file, vector<string> &depth_dir, vector<string> &output_files,
vector<string> &output_video_files, string &output_codec, vector<string> &arguments);
void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy, vector<string> &arguments);
void get_image_input_output_params(vector<string> &input_image_files, vector<string> &input_depth_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
vector<cv::Rect_<double>> &input_bounding_boxes, vector<string> &arguments);
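
// Illustrative usage sketch: the helpers above consume a vector of command-line tokens.
// The argv-to-vector conversion and the default values shown here are assumptions for
// illustration, not part of this header.
//
//   std::vector<std::string> arguments(argv, argv + argc);
//   int device = 0;
//   float fx = 500, fy = 500, cx = 0, cy = 0;
//   LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);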
//===========================================================================
// Fast patch expert response computation (linear model across a ROI) using normalised cross-correlation
//===========================================================================
// This is a modified version of OpenCV code that allows for precomputed DFTs of templates and for precomputed DFTs of an image
// input_img is the input image, img_dft its DFT (optional), _integral_img the image's integral image (optional), _integral_img_sq its squared integral image (optional),
// templ is the template we are convolving with, templ_dfts its DFTs at varying window sizes (optional), result is the output response map, method the matching method
void matchTemplate_m( const cv::Mat_<float>& input_img, cv::Mat_<double>& img_dft, cv::Mat& _integral_img, cv::Mat& _integral_img_sq, const cv::Mat_<float>& templ, map<int, cv::Mat_<double> >& templ_dfts, cv::Mat_<float>& result, int method );
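
// Illustrative usage sketch (the cache matrices are passed in empty and filled as needed;
// the variable names and the CV_TM_CCOEFF_NORMED method constant are assumptions for illustration):
//
//   cv::Mat_<float> area_of_interest;         // image patch around a landmark
//   cv::Mat_<float> patch_expert_template;    // linear patch expert weights
//   cv::Mat_<double> img_dft;
//   cv::Mat integral_img, integral_img_sq;
//   std::map<int, cv::Mat_<double> > templ_dfts;
//   cv::Mat_<float> response;
//   LandmarkDetector::matchTemplate_m(area_of_interest, img_dft, integral_img, integral_img_sq,
//       patch_expert_template, templ_dfts, response, CV_TM_CCOEFF_NORMED);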
//===========================================================================
// Point set and landmark manipulation functions
//===========================================================================
// Using Kabsch's algorithm for aligning shapes
// This assumes that align_from and align_to are already mean normalised
cv::Matx22d AlignShapesKabsch2D(const cv::Mat_<double>& align_from, const cv::Mat_<double>& align_to );
//=============================================================================
// Essentially Kabsch's algorithm, but it also allows the two point collections to differ in scale
cv::Matx22d AlignShapesWithScale(cv::Mat_<double>& src, cv::Mat_<double> dst);
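
// Illustrative usage sketch (the n x 2, one-landmark-per-row layout is an assumption
// based on the 2x2 return type and the comments above):
//
//   cv::Mat_<double> src_pts, dst_pts;   // n x 2 matrices of 2D points, already mean-normalised
//   cv::Matx22d A = LandmarkDetector::AlignShapesWithScale(src_pts, dst_pts);
//   // A maps source points into the destination frame: p_dst ~ A * p_src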
//===========================================================================
// Visualisation functions
//===========================================================================
void Project(cv::Mat_<double>& dest, const cv::Mat_<double>& mesh, double fx, double fy, double cx, double cy);
void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy);
// Drawing face bounding box
vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy);
void DrawBox(vector<pair<cv::Point, cv::Point>> lines, cv::Mat image, cv::Scalar color, int thickness);
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, cv::Mat_<int>& visibilities);
vector<cv::Point2d> CalculateLandmarks(CLNF& clnf_model);
vector<cv::Point2d> CalculateEyeLandmarks(CLNF& clnf_model);
void DrawLandmarks(cv::Mat img, vector<cv::Point> landmarks);
void Draw(cv::Mat img, const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
void Draw(cv::Mat img, const cv::Mat_<double>& shape2D);
void Draw(cv::Mat img, const CLNF& clnf_model);
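
// Illustrative usage sketch for overlaying a fitted model on a frame (frame, clnf_model,
// head_pose and the intrinsics fx, fy, cx, cy are hypothetical names; the pose vector is
// assumed to come from the pose estimation functions elsewhere in the library):
//
//   cv::Mat display = frame.clone();
//   LandmarkDetector::Draw(display, clnf_model);
//   LandmarkDetector::DrawBox(display, head_pose, cv::Scalar(255, 0, 0), 2, fx, fy, cx, cy);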
//===========================================================================
// Angle representation conversion helpers
//===========================================================================
cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles);
// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix);
cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler);
cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle);
cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle);
cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix);
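
// Illustrative round-trip sketch (angles assumed to be in radians):
//
//   cv::Vec3d euler(0.1, -0.2, 0.05);
//   cv::Matx33d R = LandmarkDetector::Euler2RotationMatrix(euler);
//   cv::Vec3d recovered = LandmarkDetector::RotationMatrix2Euler(R);   // ~= euler
//   cv::Vec3d axis_angle = LandmarkDetector::Euler2AxisAngle(euler);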
//============================================================================
// Face detection helpers
//============================================================================
// Face detection using Haar cascade classifier
bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
// The preference point allows for disambiguation if multiple faces are present (the closest one is picked); if it is not set, the biggest face is chosen
bool DetectSingleFace(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier, const cv::Point preference = cv::Point(-1,-1), double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
// Face detection using HOG-SVM classifier
bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, std::vector<double>& confidences, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, std::vector<double>& confidences, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
// The preference point allows for disambiguation if multiple faces are present (the closest one is picked); if it is not set, the biggest face is chosen
bool DetectSingleFaceHOG(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, double& confidence, const cv::Point preference = cv::Point(-1,-1), double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0,0.0,1.0,1.0));
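
// Illustrative usage sketch (captured_frame is a hypothetical name; the grayscale conversion
// and the use of dlib::get_frontal_face_detector() to build the detector are assumptions for
// illustration; cv::cvtColor requires the OpenCV imgproc module):
//
//   cv::Mat_<uchar> grayscale_image;
//   cv::cvtColor(captured_frame, grayscale_image, CV_BGR2GRAY);
//   dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();
//   cv::Rect_<double> face_region;
//   double detection_confidence;
//   bool face_found = LandmarkDetector::DetectSingleFaceHOG(face_region, grayscale_image,
//       face_detector_hog, detection_confidence);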
//============================================================================
// Matrix reading functionality
//============================================================================
// Reading a matrix written in a binary format
void ReadMatBin(std::ifstream& stream, cv::Mat &output_mat);
// Reading in a matrix from a stream
void ReadMat(std::ifstream& stream, cv::Mat& output_matrix);
// Skipping comments (lines starting with # symbol)
void SkipComments(std::ifstream& stream);
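
// Illustrative usage sketch (the file name is hypothetical; binary model files are read
// with ReadMatBin, while text streams can use SkipComments followed by ReadMat):
//
//   std::ifstream model_stream("model_part.dat", std::ios::in | std::ios::binary);
//   cv::Mat weights;
//   LandmarkDetector::ReadMatBin(model_stream, weights);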
}
#endif