///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Carnegie Mellon University and University of Cambridge,
// all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
//   not limited to academic journal and conference publications, technical
//   reports and manuals, must cite at least one of the following works:
//
//       OpenFace: an open source facial behavior analysis toolkit
//       Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
//       in IEEE Winter Conference on Applications of Computer Vision, 2016
//
//       Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
//       Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
//       in IEEE International Conference on Computer Vision (ICCV), 2015
//
//       Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
//       Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
//       in Facial Expression Recognition and Analysis Challenge,
//       IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
//       Constrained Local Neural Fields for robust facial landmark detection in the wild.
//       Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
//       in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
// FaceLandmarkImg.cpp : Defines the entry point for the console application for detecting landmarks in images.
# include "LandmarkCoreIncludes.h"
// System includes
# include <fstream>
// OpenCV includes
# include <opencv2/core/core.hpp>
# include <opencv2/highgui/highgui.hpp>
# include <opencv2/imgproc.hpp>
// Boost includes
# include <filesystem.hpp>
# include <filesystem/fstream.hpp>
# include <dlib/image_processing/frontal_face_detector.h>
# include <tbb/tbb.h>
# include <FaceAnalyser.h>
# include <GazeEstimation.h>
# include <ImageCapture.h>
# include <Visualizer.h>
# include <VisualizationUtils.h>
# include <RecorderOpenFace.h>
# include <RecorderOpenFaceParameters.h>
# ifndef CONFIG_DIR
# define CONFIG_DIR "~"
# endif
using namespace std;

// Collect the raw command line arguments into a vector of strings for
// easier downstream parsing. The result includes argv[0] (the program name).
vector<string> get_arguments(int argc, char **argv)
{
	// Construct directly from the [argv, argv + argc) pointer range;
	// each char* is converted to a std::string.
	vector<string> arguments(argv, argv + argc);
	return arguments;
}
int main ( int argc , char * * argv )
{
//Convert arguments to more convenient vector form
vector < string > arguments = get_arguments ( argc , argv ) ;
2018-02-23 08:25:19 +00:00
// no arguments: output usage
if ( arguments . size ( ) = = 1 )
{
cout < < " For command line arguments see: " < < endl ;
cout < < " https://github.com/TadasBaltrusaitis/OpenFace/wiki/Command-line-arguments " ;
return 0 ;
}
2017-11-14 19:59:08 +00:00
// Prepare for image reading
Utilities : : ImageCapture image_reader ;
2016-04-28 19:40:36 +00:00
2017-11-14 19:59:08 +00:00
// The sequence reader chooses what to open based on command line arguments provided
if ( ! image_reader . Open ( arguments ) )
2016-04-28 19:40:36 +00:00
{
2017-11-14 19:59:08 +00:00
cout < < " Could not open any images " < < endl ;
return 1 ;
2016-04-28 19:40:36 +00:00
}
2017-11-14 19:59:08 +00:00
// Load the models if images found
LandmarkDetector : : FaceModelParameters det_parameters ( arguments ) ;
2016-04-28 19:40:36 +00:00
// The modules that are being used for tracking
cout < < " Loading the model " < < endl ;
2017-11-14 19:59:08 +00:00
LandmarkDetector : : CLNF face_model ( det_parameters . model_location ) ;
2016-04-28 19:40:36 +00:00
cout < < " Model loaded " < < endl ;
2017-10-23 16:58:35 +00:00
// Load facial feature extractor and AU analyser (make sure it is static)
FaceAnalysis : : FaceAnalyserParameters face_analysis_params ( arguments ) ;
face_analysis_params . OptimizeForImages ( ) ;
FaceAnalysis : : FaceAnalyser face_analyser ( face_analysis_params ) ;
2016-06-14 21:55:16 +00:00
2017-11-14 19:59:08 +00:00
// If bounding boxes not provided, use a face detector
cv : : CascadeClassifier classifier ( det_parameters . face_detector_location ) ;
dlib : : frontal_face_detector face_detector_hog = dlib : : get_frontal_face_detector ( ) ;
2016-04-28 19:40:36 +00:00
2017-11-15 19:55:16 +00:00
// A utility for visualizing the results
Utilities : : Visualizer visualizer ( arguments ) ;
2017-11-14 19:59:08 +00:00
cv : : Mat captured_image ;
2016-04-28 19:40:36 +00:00
2017-11-15 20:20:17 +00:00
captured_image = image_reader . GetNextImage ( ) ;
2016-04-28 19:40:36 +00:00
2017-11-14 19:59:08 +00:00
cout < < " Starting tracking " < < endl ;
while ( ! captured_image . empty ( ) )
{
2016-04-28 19:40:36 +00:00
2017-12-12 17:48:46 +00:00
Utilities : : RecorderOpenFaceParameters recording_params ( arguments , false , false ,
2017-12-12 08:55:23 +00:00
image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) ;
2017-11-14 19:59:08 +00:00
Utilities : : RecorderOpenFace open_face_rec ( image_reader . name , recording_params , arguments ) ;
2016-04-28 19:40:36 +00:00
2017-11-16 09:00:47 +00:00
visualizer . SetImage ( captured_image , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) ;
if ( recording_params . outputGaze ( ) & & ! face_model . eye_model )
cout < < " WARNING: no eye model defined, but outputting gaze " < < endl ;
2017-11-14 19:59:08 +00:00
// Making sure the image is in uchar grayscale
cv : : Mat_ < uchar > grayscale_image = image_reader . GetGrayFrame ( ) ;
2016-04-28 19:40:36 +00:00
2017-11-15 19:57:58 +00:00
// Detect faces in an image
vector < cv : : Rect_ < double > > face_detections ;
if ( image_reader . has_bounding_boxes )
{
face_detections = image_reader . GetBoundingBoxes ( ) ;
}
else
{
if ( det_parameters . curr_face_detector = = LandmarkDetector : : FaceModelParameters : : HOG_SVM_DETECTOR )
2016-04-28 19:40:36 +00:00
{
2017-11-15 19:57:58 +00:00
vector < double > confidences ;
LandmarkDetector : : DetectFacesHOG ( face_detections , grayscale_image , face_detector_hog , confidences ) ;
2016-04-28 19:40:36 +00:00
}
else
{
2017-11-15 19:57:58 +00:00
LandmarkDetector : : DetectFaces ( face_detections , grayscale_image , classifier ) ;
2016-04-28 19:40:36 +00:00
}
2017-11-15 19:57:58 +00:00
}
// Detect landmarks around detected faces
int face_det = 0 ;
// perform landmark detection for every face detected
for ( size_t face = 0 ; face < face_detections . size ( ) ; + + face )
{
// if there are multiple detections go through them
bool success = LandmarkDetector : : DetectLandmarksInImage ( grayscale_image , face_detections [ face ] , face_model , det_parameters ) ;
// Estimate head pose and eye gaze
cv : : Vec6d pose_estimate = LandmarkDetector : : GetPose ( face_model , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) ;
2016-04-28 19:40:36 +00:00
2017-11-15 19:57:58 +00:00
// Gaze tracking, absolute gaze direction
cv : : Point3f gaze_direction0 ( 0 , 0 , - 1 ) ;
cv : : Point3f gaze_direction1 ( 0 , 0 , - 1 ) ;
cv : : Vec2d gaze_angle ( 0 , 0 ) ;
2017-12-16 13:14:02 +00:00
if ( face_model . eye_model )
2016-04-28 19:40:36 +00:00
{
2017-11-15 19:57:58 +00:00
GazeAnalysis : : EstimateGaze ( face_model , gaze_direction0 , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy , true ) ;
GazeAnalysis : : EstimateGaze ( face_model , gaze_direction1 , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy , false ) ;
gaze_angle = GazeAnalysis : : GetGazeAngle ( gaze_direction0 , gaze_direction1 ) ;
2016-04-28 19:40:36 +00:00
}
2017-11-15 20:06:23 +00:00
cv : : Mat sim_warped_img ;
cv : : Mat_ < double > hog_descriptor ; int num_hog_rows = 0 , num_hog_cols = 0 ;
// Perform AU detection and HOG feature extraction, as this can be expensive only compute it if needed by output or visualization
if ( recording_params . outputAlignedFaces ( ) | | recording_params . outputHOG ( ) | | recording_params . outputAUs ( ) | | visualizer . vis_align | | visualizer . vis_hog )
{
2017-11-16 17:46:34 +00:00
face_analyser . PredictStaticAUsAndComputeFeatures ( captured_image , face_model . detected_landmarks ) ;
2017-11-15 20:06:23 +00:00
face_analyser . GetLatestAlignedFace ( sim_warped_img ) ;
face_analyser . GetLatestHOG ( hog_descriptor , num_hog_rows , num_hog_cols ) ;
}
2017-11-15 19:57:58 +00:00
// Displaying the tracking visualizations
2017-11-15 20:06:23 +00:00
visualizer . SetObservationFaceAlign ( sim_warped_img ) ;
visualizer . SetObservationHOG ( hog_descriptor , num_hog_rows , num_hog_cols ) ;
2018-01-31 08:23:48 +00:00
visualizer . SetObservationLandmarks ( face_model . detected_landmarks , 1.0 , face_model . GetVisibilities ( ) ) ; // Set confidence to high to make sure we always visualize
2017-11-19 20:04:00 +00:00
visualizer . SetObservationPose ( pose_estimate , 1.0 ) ;
2017-11-15 19:57:58 +00:00
visualizer . SetObservationGaze ( gaze_direction0 , gaze_direction1 , LandmarkDetector : : CalculateAllEyeLandmarks ( face_model ) , LandmarkDetector : : Calculate3DEyeLandmarks ( face_model , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) , face_model . detection_certainty ) ;
// Setting up the recorder output
2017-11-15 20:06:23 +00:00
open_face_rec . SetObservationHOG ( face_model . detection_success , hog_descriptor , num_hog_rows , num_hog_cols , 31 ) ; // The number of channels in HOG is fixed at the moment, as using FHOG
2017-11-15 19:57:58 +00:00
open_face_rec . SetObservationVisualization ( visualizer . GetVisImage ( ) ) ;
open_face_rec . SetObservationActionUnits ( face_analyser . GetCurrentAUsReg ( ) , face_analyser . GetCurrentAUsClass ( ) ) ;
open_face_rec . SetObservationLandmarks ( face_model . detected_landmarks , face_model . GetShape ( image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) ,
face_model . params_global , face_model . params_local , face_model . detection_certainty , face_model . detection_success ) ;
open_face_rec . SetObservationPose ( pose_estimate ) ;
open_face_rec . SetObservationGaze ( gaze_direction0 , gaze_direction1 , gaze_angle , LandmarkDetector : : CalculateAllEyeLandmarks ( face_model ) , LandmarkDetector : : Calculate3DEyeLandmarks ( face_model , image_reader . fx , image_reader . fy , image_reader . cx , image_reader . cy ) ) ;
2017-11-15 20:06:23 +00:00
open_face_rec . SetObservationFaceAlign ( sim_warped_img ) ;
2018-02-23 08:25:19 +00:00
open_face_rec . SetObservationFaceID ( face ) ;
2017-11-15 19:57:58 +00:00
open_face_rec . WriteObservation ( ) ;
}
2017-12-15 19:56:58 +00:00
if ( face_detections . size ( ) > 0 )
{
visualizer . ShowObservation ( ) ;
}
2017-11-15 19:57:58 +00:00
2017-11-16 17:46:34 +00:00
// Grabbing the next frame in the sequence
captured_image = image_reader . GetNextImage ( ) ;
2016-04-28 19:40:36 +00:00
}
return 0 ;
}