Merge branch 'master' of https://github.com/TadasBaltrusaitis/OpenFace into feature/Windows-GUI

# Conflicts:
#	.gitignore
#	exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.cpp
#	lib/local/FaceAnalyser/FaceAnalyser.vcxproj
#	lib/local/LandmarkDetector/include/LandmarkDetectorUtils.h
#	lib/local/LandmarkDetector/src/LandmarkDetectorUtils.cpp
#	matlab_runners/Demos/feature_extraction_demo_vid.m
#	matlab_runners/Demos/gaze_extraction_demo_vid.m
Tadas Baltrusaitis 2017-12-29 16:27:00 +00:00
commit b224fcdfc9
204 changed files with 5369 additions and 4141 deletions

.gitignore (vendored, 9 lines changed)

@@ -77,3 +77,12 @@ matlab_runners/Head Pose Experiments/experiments/ict_out/
 OpenFace\.VC\.db
 matlab_version/face_validation/vlfeat-0.9.20/
 matlab_version/face_validation/trained/intermediate/
+lib/local/GazeAnalyser/x64/
+lib/local/Utilities/x64/
+exe/FeatureExtraction/processed/
+exe/FaceLandmarkImg/processed/
+exe/FeatureExtraction/processed_features/
+matlab_runners/Demos/processed_features/
+*.db
+exe/releases/OpenFace_0.3.0_win_x64/
+exe/releases/OpenFace_0.3.0_win_x86/

.travis.yml

@@ -1,5 +1,8 @@
 language: cpp
+dist: trusty
+sudo: required
 branches:
   only:
   - master
@@ -9,32 +12,29 @@ compiler:
 - gcc
 os:
-- osx
 - linux
 before_install:
  # OpenCV dependencies and boost
  - if [ ${TRAVIS_OS_NAME} = linux ]; then
+     sudo apt-get update;
+     sudo apt-get install libopenblas-dev;
      sudo apt-get install git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev;
      sudo apt-get install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev checkinstall;
-     sudo add-apt-repository -y ppa:boost-latest/ppa;
-     sudo apt-get update;
+     sudo apt-get install cmake;
      sudo apt-get install libboost1.55-all-dev;
      wget https://github.com/Itseez/opencv/archive/3.1.0.zip;
-     sudo unzip 3.1.0.zip;
+     unzip -q 3.1.0.zip;
      cd opencv-3.1.0;
-     sudo mkdir build;
+     mkdir build;
      cd build;
    fi
  # g++4.8.1
  - if [ "$CXX" = "g++" ]; then
      if [ ${TRAVIS_OS_NAME} = linux ]; then
-       sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test;
-       sudo apt-get update -qq;
-       sudo apt-get install -qq g++-4.8;
-       export CXX="g++-4.8";
+       $CXX --version;
        sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D WITH_V4L=ON -D WITH_OPENCL=OFF -D INSTALL_C_EXAMPLES=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_EXAMPLES=OFF -D INSTALL_PYTHON_EXAMPLES=OFF ..;
      fi
    fi
@@ -43,7 +43,6 @@ before_install:
  - if [ "$CXX" = "clang++" ]; then
      if [ ${TRAVIS_OS_NAME} = linux ]; then
        $CXX --version;
-       sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers;
        sudo cmake -D CMAKE_BUILD_TYPE=RELEASE -D WITH_V4L=ON -D WITH_OPENCL=OFF -D INSTALL_C_EXAMPLES=OFF -D BUILD_EXAMPLES=OFF -D BUILD_TESTS=OFF -D BUILD_PERF_TESTS=OFF -D BUILD_EXAMPLES=OFF -D INSTALL_PYTHON_EXAMPLES=OFF -D WITH_TBB=ON ..;
      fi
    fi
@@ -51,8 +50,10 @@ before_install:
  - if [ ${TRAVIS_OS_NAME} = osx ]; then
      brew update;
      brew install tbb;
+     brew tap homebrew/science;
+     brew install vtk;
      wget https://github.com/Itseez/opencv/archive/3.1.0.zip;
-     sudo unzip 3.1.0.zip;
+     unzip -q 3.1.0.zip;
      cd opencv-3.1.0;
      sudo mkdir build;
      cd build;
@@ -70,9 +71,7 @@ script:
 - cd build
 - cmake -D CMAKE_BUILD_TYPE=RELEASE CMAKE_CXX_FLAGS="-std=c++11" -D CMAKE_EXE_LINKER_FLAGS="-std=c++11" ..
 - make -j2
-- ../build/bin/FaceLandmarkImg -fdir "../samples/" -ofdir "./demo_img/" -oidir "./demo_img/" -wild -q
-- ../build/bin/FaceLandmarkImg -inroot ../samples -f sample1.jpg -outroot data -of sample1.txt -op sample1.3d -oi sample1.bmp -multi_view 1 -wild -q
-- ../build/bin/FaceLandmarkVidMulti -inroot ../samples -f multi_face.avi -outroot output -ov multi_face.avi -q
-- ../build/bin/FeatureExtraction -f "../samples/2015-10-15-15-14.avi" -outroot output_features -ov default.avi -of "default.txt" -simalign aligned -ov feat_test.avi -hogalign hog_test.dat -q
-- ../build/bin/FeatureExtraction -f "../samples/2015-10-15-15-14.avi" -outroot output_features -simsize 200 -simscale 0.5 -ov default.avi -of "default.txt" -simalign aligned -ov feat_test.avi -hogalign hog_test.dat -q
-- ../build/bin/FaceLandmarkVid -inroot ../samples -f 2015-10-15-15-14.avi -f multi_face.avi -outroot output_data -ov 1.avi -ov 2.avi -q
+- ../build/bin/FaceLandmarkImg -fdir "../samples/" -out_dir "./demo_img" -wild -q
+- ../build/bin/FaceLandmarkImg -inroot ../samples -f sample1.jpg -out_dir data -of sample1.txt -multi_view 1 -wild -q
+- ../build/bin/FeatureExtraction -f "../samples/2015-10-15-15-14.avi" -out_dir output -q
+- ../build/bin/FeatureExtraction -f "../samples/2015-10-15-15-14.avi" -simsize 200 -simscale 0.5 -q

CMakeLists.txt

@@ -13,7 +13,7 @@ find_package( BLAS REQUIRED )
 include_directories( ${BLAS_INCLUDE_DIRS} )
 LINK_DIRECTORIES(${BLAS_LIBRARY_DIRS})
-find_package( OpenCV REQUIRED )
+find_package( OpenCV 3.1 REQUIRED )
 MESSAGE("OpenCV information:")
 MESSAGE("  OpenCV_INCLUDE_DIRS: ${OpenCV_INCLUDE_DIRS}")
@@ -191,6 +191,8 @@ add_subdirectory(lib/local/LandmarkDetector)
 add_subdirectory(lib/local/FaceAnalyser)
 # Gaze library
 add_subdirectory(lib/local/GazeAnalyser)
+# Utilities library
+add_subdirectory(lib/local/Utilities)
 # executables
 add_subdirectory(exe/FaceLandmarkImg)
 add_subdirectory(exe/FaceLandmarkVid)

OpenFace.sln

@@ -39,6 +39,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "OpenFaceDemo", "gui\OpenFac
 EndProject
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "HeadPoseLive", "gui\HeadPose-live\HeadPoseLive.csproj", "{F396362D-821E-4EA6-9BBF-1F6050844118}"
 EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Utilities", "lib\local\Utilities\Utilities.vcxproj", "{8E741EA2-9386-4CF2-815E-6F9B08991EAC}"
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Win32 = Debug|Win32
@@ -158,6 +160,14 @@ Global
 		{5F915541-F531-434F-9C81-79F5DB58012B}.Release|Win32.Build.0 = Release|Win32
 		{5F915541-F531-434F-9C81-79F5DB58012B}.Release|x64.ActiveCfg = Release|x64
 		{5F915541-F531-434F-9C81-79F5DB58012B}.Release|x64.Build.0 = Release|x64
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Debug|Win32.ActiveCfg = Debug|Win32
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Debug|Win32.Build.0 = Debug|Win32
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Debug|x64.ActiveCfg = Debug|x64
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Debug|x64.Build.0 = Debug|x64
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Release|Win32.ActiveCfg = Release|Win32
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Release|Win32.Build.0 = Release|Win32
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Release|x64.ActiveCfg = Release|x64
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC}.Release|x64.Build.0 = Release|x64
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
@@ -177,5 +187,6 @@ Global
 		{E143A2AA-312E-4DFE-B61D-9A87CCBC8E90} = {C9D8D0B0-11EC-41CB-8524-2DDB5BE94297}
 		{F396362D-821E-4EA6-9BBF-1F6050844118} = {C9D8D0B0-11EC-41CB-8524-2DDB5BE94297}
 		{5F915541-F531-434F-9C81-79F5DB58012B} = {99FEBA13-BDDF-4076-B57E-D8EF4076E20D}
+		{8E741EA2-9386-4CF2-815E-6F9B08991EAC} = {99FEBA13-BDDF-4076-B57E-D8EF4076E20D}
 	EndGlobalSection
 EndGlobal

README.md

@@ -1,19 +1,21 @@
-# OpenFace: an open source facial behavior analysis toolkit
+# OpenFace 0.3.0: an open source facial behavior analysis toolkit
 [![Build Status](https://travis-ci.org/TadasBaltrusaitis/OpenFace.svg?branch=master)](https://travis-ci.org/TadasBaltrusaitis/OpenFace)
 [![Build status](https://ci.appveyor.com/api/projects/status/8msiklxfbhlnsmxp/branch/master?svg=true)](https://ci.appveyor.com/project/TadasBaltrusaitis/openface/branch/master)
-Over the past few years, there has been an increased interest in automatic facial behavior analysis and understanding. We present OpenFace an open source tool intended for computer vision and machine learning researchers, affective computing community and people interested in building interactive applications based on facial behavior analysis. OpenFace is the first open source tool capable of facial landmark detection, head pose estimation, facial action unit recognition, and eye-gaze estimation. The computer vision algorithms which represent the core of OpenFace demonstrate state-of-the-art results in all of the above mentioned tasks. Furthermore, our tool is capable of real-time performance and is able to run from a simple webcam without any specialist hardware.
+Over the past few years, there has been an increased interest in automatic facial behavior analysis and understanding. We present OpenFace a tool intended for computer vision and machine learning researchers, affective computing community and people interested in building interactive applications based on facial behavior analysis. OpenFace is the first toolkit capable of facial landmark detection, head pose estimation, facial action unit recognition, and eye-gaze estimation with available source code. The computer vision algorithms which represent the core of OpenFace demonstrate state-of-the-art results in all of the above mentioned tasks. Furthermore, our tool is capable of real-time performance and is able to run from a simple webcam without any specialist hardware.
-The code was written mainly by Tadas Baltrusaitis during his time at the Language Technologies Institute at the Carnegie Mellon University; Computer Laboratory, University of Cambridge; and Institute for Creative Technologies, University of Southern California.
-Special thanks goes to Louis-Philippe Morency and his MultiComp Lab at Institute for Creative Technologies for help in writing and testing the code, and Erroll Wood for the gaze estimation work.
+![Multicomp logo](https://github.com/TadasBaltrusaitis/OpenFace/blob/master/imgs/muticomp_logo_black.png)
+![Rainbow logo](https://github.com/TadasBaltrusaitis/OpenFace/blob/master/imgs/rainbow-logo.gif)
+OpenFace is an implementation of a number of research papers from the Multicomp group, Language Technologies Institute at the Carnegie Mellon University and Rainbow Group, Computer Laboratory, University of Cambridge. The founder of the project and main developer is Tadas Baltrušaitis.
+Special thanks goes to Louis-Philippe Morency and his MultiComp Lab at Carnegie Mellon University for help in writing and testing the code, and Erroll Wood for the gaze estimation work.
 ## WIKI
 **For instructions of how to install/compile/use the project please see [WIKI](https://github.com/TadasBaltrusaitis/OpenFace/wiki)**
+More details about the project - http://www.cl.cam.ac.uk/research/rainbow/projects/openface/
 ## Functionality
 The system is capable of performing a number of facial analysis tasks:
@@ -68,17 +70,18 @@ Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
 in *Facial Expression Recognition and Analysis Challenge*,
 *IEEE International Conference on Automatic Face and Gesture Recognition*, 2015
-# Copyright
-Copyright can be found in the Copyright.txt
-You have to respect boost, TBB, dlib, OpenBLAS, and OpenCV licenses.
 # Commercial license
 For inquiries about the commercial licensing of the OpenFace toolkit please contact innovation@cmu.edu
 # Final remarks
-I did my best to make sure that the code runs out of the box but there are always issues and I would be grateful for your understanding that this is research code and not full fledged product. However, if you encounter any problems/bugs/issues please contact me on github or by emailing me at Tadas.Baltrusaitis@cl.cam.ac.uk for any bug reports/questions/suggestions.
+I did my best to make sure that the code runs out of the box but there are always issues and I would be grateful for your understanding that this is research code and not full fledged product. However, if you encounter any problems/bugs/issues please contact me on github or by emailing me at Tadas.Baltrusaitis@cl.cam.ac.uk for any bug reports/questions/suggestions. I prefer questions and bug reports on github as that provides visibility to others who might be encountering same issues or who have the same questions.
+# Copyright
+Copyright can be found in the Copyright.txt
+You have to respect boost, TBB, dlib, OpenBLAS, and OpenCV licenses.
+Furthermore you have to respect the licenses of the datasets used for model training - https://github.com/TadasBaltrusaitis/OpenFace/wiki/Datasets

appveyor.yml

@@ -6,7 +6,6 @@ branches:
   - /^feature-.*$/
 max_jobs: 4
 configuration:
-  - Debug
   - Release
 platform:
   - x64
@@ -21,9 +20,9 @@ test_script:
 - cmd: if exist Debug (cd Debug)
 - cmd: if exist Release (cd Release)
 - cmd: dir
-- cmd: if exist "../samples" (FaceLandmarkImg.exe -fdir "../samples/" -ofdir "./demo_img/" -oidir "./demo_img/" -wild -q) else (FaceLandmarkImg.exe -fdir "../../samples/" -ofdir "./demo_img/" -oidir "./demo_img/" -wild -q)
+- cmd: if exist "../samples" (FaceLandmarkImg.exe -fdir "../samples/" -out_dir "./demo_img/" -wild -q) else (FaceLandmarkImg.exe -fdir "../../samples/" -out_dir "./demo_img/" -wild -q)
-- cmd: if exist "../samples" (FaceLandmarkImg.exe -inroot ../samples -f sample1.jpg -outroot out_data -of sample1.pts -op sample1.3d -oi sample1.bmp -q) else (FaceLandmarkImg.exe -inroot ../../samples -f sample1.jpg -outroot out_data -of sample1.pts -op sample1.3d -oi sample1.bmp -q)
+- cmd: if exist "../samples" (FaceLandmarkImg.exe -inroot ../samples -f sample1.jpg -out_dir out_data -of sample1.pts -q) else (FaceLandmarkImg.exe -inroot ../../samples -f sample1.jpg -out_dir out_data -q)
-- cmd: if exist "../samples" (FaceLandmarkVidMulti.exe -inroot ../samples -f multi_face.avi -ov multi_face.avi -q) else (FaceLandmarkVidMulti.exe -inroot ../../samples -f multi_face.avi -ov multi_face.avi -q)
+- cmd: if exist "../samples" (FaceLandmarkVidMulti.exe -inroot ../samples -f multi_face.avi -q) else (FaceLandmarkVidMulti.exe -inroot ../../samples -f multi_face.avi -q)
-- cmd: if exist "../samples" (FeatureExtraction.exe -f "../samples/default.wmv" -outroot output_features -of "default.txt" -simalign aligned -ov feat_track.avi -hogalign hog_test.dat -q) else (FeatureExtraction.exe -f "../../samples/default.wmv" -outroot output_features -of "default.txt" -simalign aligned -ov feat_track.avi -hogalign hog_test.dat -q)
+- cmd: if exist "../samples" (FeatureExtraction.exe -f "../samples/default.wmv" -q) else (FeatureExtraction.exe -f "../../samples/default.wmv" -q)
-- cmd: if exist "../samples" (FeatureExtraction.exe -f "../samples/default.wmv" -outroot output_features -of "default.txt" -simalign aligned -simsize 200 -simscale 0.5 -ov feat_track.avi -hogalign hog_test.dat -q) else (FeatureExtraction.exe -f "../../samples/default.wmv" -outroot output_features -of "default.txt" -simalign aligned -simsize 200 -simscale 0.5 -ov feat_track.avi -hogalign hog_test.dat -q)
+- cmd: if exist "../samples" (FeatureExtraction.exe -f "../samples/default.wmv" -simsize 200 -simscale 0.5 -q) else (FeatureExtraction.exe -f "../../samples/default.wmv" -simsize 200 -simscale 0.5 -ov feat_track.avi -q)
-- cmd: if exist "../samples" (FaceLandmarkVid.exe -f "../samples/default.wmv" -ov track.avi -q) else (FaceLandmarkVid.exe -f "../../samples/default.wmv" -ov track.avi -q)
+- cmd: if exist "../samples" (FaceLandmarkVid.exe -f "../samples/default.wmv" -q) else (FaceLandmarkVid.exe -f "../../samples/default.wmv" -q)
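The simplified test commands above reflect the output-handling rework running through this commit: the old per-output flags (-ofdir, -oidir, -outroot, and most uses of -ov/-oi/-op/-simalign/-hogalign) give way to a single -out_dir output directory, with the new recorder deciding which files to write there.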

exe/FaceLandmarkImg/CMakeLists.txt

@@ -7,11 +7,13 @@ include_directories(${LandmarkDetector_SOURCE_DIR}/include)
 include_directories(../../lib/local/LandmarkDetector/include)
 include_directories(../../lib/local/FaceAnalyser/include)
 include_directories(../../lib/local/GazeAnalyser/include)
+include_directories(../../lib/local/Utilities/include)
 add_executable(FaceLandmarkImg FaceLandmarkImg.cpp)
 target_link_libraries(FaceLandmarkImg LandmarkDetector)
 target_link_libraries(FaceLandmarkImg FaceAnalyser)
 target_link_libraries(FaceLandmarkImg GazeAnalyser)
+target_link_libraries(FaceLandmarkImg Utilities)
 target_link_libraries(FaceLandmarkImg dlib)
 target_link_libraries(FaceLandmarkImg ${OpenCV_LIBS} ${Boost_LIBRARIES} ${TBB_LIBRARIES} ${BLAS_LIBRARIES})

exe/FaceLandmarkImg/FaceLandmarkImg.cpp

@@ -54,6 +54,13 @@
 #include <FaceAnalyser.h>
 #include <GazeEstimation.h>
+#include <ImageCapture.h>
+#include <Visualizer.h>
+#include <VisualizationUtils.h>
+#include <RecorderOpenFace.h>
+#include <RecorderOpenFaceParameters.h>
 #ifndef CONFIG_DIR
 #define CONFIG_DIR "~"
 #endif
@@ -72,337 +79,72 @@ vector<string> get_arguments(int argc, char **argv)
 	return arguments;
 }
-void convert_to_grayscale(const cv::Mat& in, cv::Mat& out)
-{
-	if(in.channels() == 3)
-	{
-		// Make sure it's in a correct format
-		if(in.depth() != CV_8U)
-		{
-			if(in.depth() == CV_16U)
-			{
-				cv::Mat tmp = in / 256;
-				tmp.convertTo(tmp, CV_8U);
-				cv::cvtColor(tmp, out, CV_BGR2GRAY);
-			}
-		}
-		else
-		{
-			cv::cvtColor(in, out, CV_BGR2GRAY);
-		}
-	}
-	else if(in.channels() == 4)
-	{
-		cv::cvtColor(in, out, CV_BGRA2GRAY);
-	}
-	else
-	{
-		if(in.depth() == CV_16U)
-		{
-			cv::Mat tmp = in / 256;
-			out = tmp.clone();
-		}
-		else if(in.depth() != CV_8U)
-		{
-			in.convertTo(out, CV_8U);
-		}
-		else
-		{
-			out = in.clone();
-		}
-	}
-}
-// Useful utility for creating directories for storing the output files
-void create_directory_from_file(string output_path)
-{
-	// Creating the right directory structure
-	// First get rid of the file
-	auto p = boost::filesystem::path(boost::filesystem::path(output_path).parent_path());
-	if (!p.empty() && !boost::filesystem::exists(p))
-	{
-		bool success = boost::filesystem::create_directories(p);
-		if (!success)
-		{
-			cout << "Failed to create a directory... " << p.string() << endl;
-		}
-	}
-}
-// This will only be accurate when camera parameters are accurate, useful for work on 3D data
-void write_out_pose_landmarks(const string& outfeatures, const cv::Mat_<double>& shape3D, const cv::Vec6d& pose, const cv::Point3f& gaze0, const cv::Point3f& gaze1)
-{
-	create_directory_from_file(outfeatures);
-	std::ofstream featuresFile;
-	featuresFile.open(outfeatures);
-	if (featuresFile.is_open())
-	{
-		int n = shape3D.cols;
-		featuresFile << "version: 1" << endl;
-		featuresFile << "npoints: " << n << endl;
-		featuresFile << "{" << endl;
-		for (int i = 0; i < n; ++i)
-		{
-			// Use matlab format, so + 1
-			featuresFile << shape3D.at<double>(i) << " " << shape3D.at<double>(i + n) << " " << shape3D.at<double>(i + 2*n) << endl;
-		}
-		featuresFile << "}" << endl;
-		// Do the pose and eye gaze if present as well
-		featuresFile << "pose: eul_x, eul_y, eul_z: " << endl;
-		featuresFile << "{" << endl;
-		featuresFile << pose[3] << " " << pose[4] << " " << pose[5] << endl;
-		featuresFile << "}" << endl;
-		// Do the pose and eye gaze if present as well
-		featuresFile << "gaze_vec: dir_x_1, dir_y_1, dir_z_1, dir_x_2, dir_y_2, dir_z_2: " << endl;
-		featuresFile << "{" << endl;
-		featuresFile << gaze0.x << " " << gaze0.y << " " << gaze0.z << " " << gaze1.x << " " << gaze1.y << " " << gaze1.z << endl;
-		featuresFile << "}" << endl;
-		featuresFile.close();
-	}
-}
-void write_out_landmarks(const string& outfeatures, const LandmarkDetector::CLNF& clnf_model, const cv::Vec6d& pose, const cv::Point3f& gaze0, const cv::Point3f& gaze1, const cv::Vec2d gaze_angle, std::vector<std::pair<std::string, double>> au_intensities, std::vector<std::pair<std::string, double>> au_occurences, bool output_gaze)
-{
-	create_directory_from_file(outfeatures);
-	std::ofstream featuresFile;
-	featuresFile.open(outfeatures);
-	if (featuresFile.is_open())
-	{
-		int n = clnf_model.patch_experts.visibilities[0][0].rows;
-		featuresFile << "version: 2" << endl;
-		featuresFile << "npoints: " << n << endl;
-		featuresFile << "{" << endl;
-		for (int i = 0; i < n; ++i)
-		{
-			// Use matlab format, so + 1
-			featuresFile << clnf_model.detected_landmarks.at<double>(i) + 1 << " " << clnf_model.detected_landmarks.at<double>(i + n) + 1 << endl;
-		}
-		featuresFile << "}" << endl;
-		// Do the pose and eye gaze if present as well
-		featuresFile << "pose: eul_x, eul_y, eul_z: " << endl;
-		featuresFile << "{" << endl;
-		featuresFile << pose[3] << " " << pose[4] << " " << pose[5] << endl;
-		featuresFile << "}" << endl;
-		if(output_gaze)
-		{
-			featuresFile << "gaze: dir_x_1, dir_y_1, dir_z_1, dir_x_2, dir_y_2, dir_z_2: " << endl;
-			featuresFile << "{" << endl;
-			featuresFile << gaze0.x << " " << gaze0.y << " " << gaze0.z << " " << gaze1.x << " " << gaze1.y << " " << gaze1.z << endl;
-			featuresFile << "}" << endl;
-			featuresFile << "gaze: angle_x, angle_y: " << endl;
-			featuresFile << "{" << endl;
-			featuresFile << gaze_angle[0] << " " << gaze_angle[1] << endl;
-			featuresFile << "}" << endl;
-			std::vector<cv::Point2d> eye_landmark_points = LandmarkDetector::CalculateAllEyeLandmarks(clnf_model);
-			featuresFile << "eye_lmks: " << eye_landmark_points.size() << endl;
-			featuresFile << "{" << endl;
-			for (int i = 0; i < eye_landmark_points.size(); ++i)
-			{
-				// Use matlab format, so + 1
-				featuresFile << (eye_landmark_points[i].x + 1) << " " << (eye_landmark_points[i].y + 1) << endl;
-			}
-			featuresFile << "}" << endl;
-		}
-		// Do the au intensities
-		featuresFile << "au intensities: " << au_intensities.size() << endl;
-		featuresFile << "{" << endl;
-		for (size_t i = 0; i < au_intensities.size(); ++i)
-		{
-			// Use matlab format, so + 1
-			featuresFile << au_intensities[i].first << " " << au_intensities[i].second << endl;
-		}
-		featuresFile << "}" << endl;
-		// Do the au occurences
-		featuresFile << "au occurences: " << au_occurences.size() << endl;
-		featuresFile << "{" << endl;
-		for (size_t i = 0; i < au_occurences.size(); ++i)
-		{
-			// Use matlab format, so + 1
-			featuresFile << au_occurences[i].first << " " << au_occurences[i].second << endl;
-		}
-		featuresFile << "}" << endl;
-		featuresFile.close();
-	}
-}
-void create_display_image(const cv::Mat& orig, cv::Mat& display_image, LandmarkDetector::CLNF& clnf_model)
-{
-	// Draw head pose if present and draw eye gaze as well
-	// preparing the visualisation image
-	display_image = orig.clone();
-	// Creating a display image
-	cv::Mat xs = clnf_model.detected_landmarks(cv::Rect(0, 0, 1, clnf_model.detected_landmarks.rows/2));
-	cv::Mat ys = clnf_model.detected_landmarks(cv::Rect(0, clnf_model.detected_landmarks.rows/2, 1, clnf_model.detected_landmarks.rows/2));
-	double min_x, max_x, min_y, max_y;
-	cv::minMaxLoc(xs, &min_x, &max_x);
-	cv::minMaxLoc(ys, &min_y, &max_y);
-	double width = max_x - min_x;
-	double height = max_y - min_y;
-	int minCropX = max((int)(min_x-width/3.0),0);
-	int minCropY = max((int)(min_y-height/3.0),0);
-	int widthCrop = min((int)(width*5.0/3.0), display_image.cols - minCropX - 1);
-	int heightCrop = min((int)(height*5.0/3.0), display_image.rows - minCropY - 1);
-	double scaling = 350.0/widthCrop;
-	// first crop the image
-	display_image = display_image(cv::Rect((int)(minCropX), (int)(minCropY), (int)(widthCrop), (int)(heightCrop)));
-	// now scale it
-	cv::resize(display_image.clone(), display_image, cv::Size(), scaling, scaling);
-	// Make the adjustments to points
-	xs = (xs - minCropX)*scaling;
-	ys = (ys - minCropY)*scaling;
-	cv::Mat shape = clnf_model.detected_landmarks.clone();
-	xs.copyTo(shape(cv::Rect(0, 0, 1, clnf_model.detected_landmarks.rows/2)));
-	ys.copyTo(shape(cv::Rect(0, clnf_model.detected_landmarks.rows/2, 1, clnf_model.detected_landmarks.rows/2)));
-	// Do the shifting for the hierarchical models as well
-	for (size_t part = 0; part < clnf_model.hierarchical_models.size(); ++part)
-	{
-		cv::Mat xs = clnf_model.hierarchical_models[part].detected_landmarks(cv::Rect(0, 0, 1, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2));
-		cv::Mat ys = clnf_model.hierarchical_models[part].detected_landmarks(cv::Rect(0, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2, 1, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2));
-		xs = (xs - minCropX)*scaling;
-		ys = (ys - minCropY)*scaling;
-		cv::Mat shape = clnf_model.hierarchical_models[part].detected_landmarks.clone();
-		xs.copyTo(shape(cv::Rect(0, 0, 1, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2)));
-		ys.copyTo(shape(cv::Rect(0, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2, 1, clnf_model.hierarchical_models[part].detected_landmarks.rows / 2)));
-	}
-	LandmarkDetector::Draw(display_image, clnf_model);
-}
 int main (int argc, char **argv)
 {
 	//Convert arguments to more convenient vector form
 	vector<string> arguments = get_arguments(argc, argv);
-	// Some initial parameters that can be overriden from command line
-	vector<string> files, output_images, output_landmark_locations, output_pose_locations;
-	// Bounding boxes for a face in each image (optional)
-	vector<cv::Rect_<double> > bounding_boxes;
-	LandmarkDetector::get_image_input_output_params(files, output_landmark_locations, output_pose_locations, output_images, bounding_boxes, arguments);
+	// Prepare for image reading
+	Utilities::ImageCapture image_reader;
+	// The sequence reader chooses what to open based on command line arguments provided
+	if (!image_reader.Open(arguments))
+	{
+		cout << "Could not open any images" << endl;
+		return 1;
+	}
+	// Load the models if images found
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
-	// No need to validate detections, as we're not doing tracking
-	det_parameters.validate_detections = false;
-	// Grab camera parameters if provided (only used for pose and eye gaze and are quite important for accurate estimates)
-	float fx = 0, fy = 0, cx = 0, cy = 0;
-	int device = -1;
-	LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
-	// If cx (optical axis centre) is undefined will use the image size/2 as an estimate
-	bool cx_undefined = false;
-	bool fx_undefined = false;
-	if (cx == 0 || cy == 0)
-	{
-		cx_undefined = true;
-	}
-	if (fx == 0 || fy == 0)
-	{
-		fx_undefined = true;
-	}
 	// The modules that are being used for tracking
 	cout << "Loading the model" << endl;
-	LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
+	LandmarkDetector::CLNF face_model(det_parameters.model_location);
 	cout << "Model loaded" << endl;
-	cv::CascadeClassifier classifier(det_parameters.face_detector_location);
-	dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();
 	// Load facial feature extractor and AU analyser (make sure it is static)
 	FaceAnalysis::FaceAnalyserParameters face_analysis_params(arguments);
 	face_analysis_params.OptimizeForImages();
 	FaceAnalysis::FaceAnalyser face_analyser(face_analysis_params);
-	bool visualise = !det_parameters.quiet_mode;
-	// Do some image loading
-	for(size_t i = 0; i < files.size(); i++)
-	{
-		string file = files.at(i);
-		// Loading image
-		cv::Mat read_image = cv::imread(file, -1);
-		if (read_image.empty())
-		{
-			cout << "Could not read the input image" << endl;
-			return 1;
-		}
+	// If bounding boxes not provided, use a face detector
+	cv::CascadeClassifier classifier(det_parameters.face_detector_location);
+	dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();
+	// A utility for visualizing the results
+	Utilities::Visualizer visualizer(arguments);
+	cv::Mat captured_image;
+	captured_image = image_reader.GetNextImage();
+	cout << "Starting tracking" << endl;
+	while (!captured_image.empty())
+	{
+		Utilities::RecorderOpenFaceParameters recording_params(arguments, false, false,
+			image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
+		Utilities::RecorderOpenFace open_face_rec(image_reader.name, recording_params, arguments);
+		visualizer.SetImage(captured_image, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
+		if (recording_params.outputGaze() && !face_model.eye_model)
+			cout << "WARNING: no eye model defined, but outputting gaze" << endl;
 		// Making sure the image is in uchar grayscale
-		cv::Mat_<uchar> grayscale_image;
-		convert_to_grayscale(read_image, grayscale_image);
-		// If optical centers are not defined just use center of image
-		if (cx_undefined)
-		{
-			cx = grayscale_image.cols / 2.0f;
-			cy = grayscale_image.rows / 2.0f;
-		}
-		// Use a rough guess-timate of focal length
-		if (fx_undefined)
-		{
-			fx = 500 * (grayscale_image.cols / 640.0);
-			fy = 500 * (grayscale_image.rows / 480.0);
-			fx = (fx + fy) / 2.0;
-			fy = fx;
-		}
-		// if no pose defined we just use a face detector
-		if(bounding_boxes.empty())
-		{
+		cv::Mat_<uchar> grayscale_image = image_reader.GetGrayFrame();
 		// Detect faces in an image
 		vector<cv::Rect_<double> > face_detections;
-			if(det_parameters.curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
+		if (image_reader.has_bounding_boxes)
+		{
+			face_detections = image_reader.GetBoundingBoxes();
+		}
+		else
+		{
+			if (det_parameters.curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
 			{
 				vector<double> confidences;
 				LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, face_detector_hog, confidences);
@@ -411,199 +153,68 @@ int main (int argc, char **argv)
 			{
 				LandmarkDetector::DetectFaces(face_detections, grayscale_image, classifier);
 			}
+		}
 		// Detect landmarks around detected faces
 		int face_det = 0;
 		// perform landmark detection for every face detected
-			for(size_t face=0; face < face_detections.size(); ++face)
+		for (size_t face = 0; face < face_detections.size(); ++face)
 		{
 			// if there are multiple detections go through them
-				bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], clnf_model, det_parameters);
+			bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], face_model, det_parameters);
 			// Estimate head pose and eye gaze
-				cv::Vec6d headPose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
+			cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
 			// Gaze tracking, absolute gaze direction
-				cv::Point3f gazeDirection0(0, 0, -1);
-				cv::Point3f gazeDirection1(0, 0, -1);
-				cv::Vec2d gazeAngle(0, 0);
-				if (success && det_parameters.track_gaze)
+			cv::Point3f gaze_direction0(0, 0, -1);
+			cv::Point3f gaze_direction1(0, 0, -1);
+			cv::Vec2d gaze_angle(0, 0);
+			if (face_model.eye_model)
 			{
-					GazeAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
-					GazeAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
-					gazeAngle = GazeAnalysis::GetGazeAngle(gazeDirection0, gazeDirection1);
+				GazeAnalysis::EstimateGaze(face_model, gaze_direction0, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, true);
+				GazeAnalysis::EstimateGaze(face_model, gaze_direction1, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy, false);
+				gaze_angle = GazeAnalysis::GetGazeAngle(gaze_direction0, gaze_direction1);
 			}
-				auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model.detected_landmarks, false);
-				// Writing out the detected landmarks (in an OS independent manner)
-				if(!output_landmark_locations.empty())
-				{
-					char name[100];
-					// append detection number (in case multiple faces are detected)
-					sprintf(name, "_det_%d", face_det);
-					// Construct the output filename
-					boost::filesystem::path slash("/");
-					std::string preferredSlash = slash.make_preferred().string();
-					boost::filesystem::path out_feat_path(output_landmark_locations.at(i));
-					boost::filesystem::path dir = out_feat_path.parent_path();
-					boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
-					boost::filesystem::path ext = out_feat_path.extension();
-					string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
-					write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, gazeAngle, ActionUnits.first, ActionUnits.second, det_parameters.track_gaze);
-				}
-				if (!output_pose_locations.empty())
-				{
-					char name[100];
-					// append detection number (in case multiple faces are detected)
-					sprintf(name, "_det_%d", face_det);
-					// Construct the output filename
-					boost::filesystem::path slash("/");
-					std::string preferredSlash = slash.make_preferred().string();
-					boost::filesystem::path out_pose_path(output_pose_locations.at(i));
-					boost::filesystem::path dir = out_pose_path.parent_path();
-					boost::filesystem::path fname = out_pose_path.filename().replace_extension("");
-					boost::filesystem::path ext = out_pose_path.extension();
-					string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
-					write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);
-				}
-				if (det_parameters.track_gaze)
-				{
-					cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
-					// Draw it in reddish if uncertain, blueish if certain
-					LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
-					GazeAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
-				}
-				// displaying detected landmarks
-				cv::Mat display_image;
-				create_display_image(read_image, display_image, clnf_model);
-				if(visualise && success)
-				{
-					imshow("colour", display_image);
-					cv::waitKey(1);
-				}
-				// Saving the display images (in an OS independent manner)
-				if(!output_images.empty() && success)
-				{
-					string outimage = output_images.at(i);
-					if(!outimage.empty())
-					{
-						char name[100];
-						sprintf(name, "_det_%d", face_det);
-						boost::filesystem::path slash("/");
-						std::string preferredSlash = slash.make_preferred().string();
-						// append detection number
-						boost::filesystem::path out_feat_path(outimage);
-						boost::filesystem::path dir = out_feat_path.parent_path();
-						boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
-						boost::filesystem::path ext = out_feat_path.extension();
-						outimage = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
-						create_directory_from_file(outimage);
-						bool write_success = cv::imwrite(outimage, display_image);
-						if (!write_success)
-						{
-							cout << "Could not output a processed image" << endl;
-							return 1;
-						}
-					}
-				}
-				if(success)
-				{
-					face_det++;
-				}
-			}
-		}
-		else
-		{
-			// Have provided bounding boxes
-			LandmarkDetector::DetectLandmarksInImage(grayscale_image, bounding_boxes[i], clnf_model, det_parameters);
-			// Estimate head pose and eye gaze
-			cv::Vec6d headPose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
-			// Gaze tracking, absolute gaze direction
-			cv::Point3f gazeDirection0(0, 0, -1);
-			cv::Point3f gazeDirection1(0, 0, -1);
-			cv::Vec2d gazeAngle(0, 0);
-			if (det_parameters.track_gaze)
-			{
-				GazeAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
-				GazeAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
-				gazeAngle = GazeAnalysis::GetGazeAngle(gazeDirection0, gazeDirection1);
-			}
-			auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model.detected_landmarks, false);
-			// Writing out the detected landmarks
-			if(!output_landmark_locations.empty())
-			{
-				string outfeatures = output_landmark_locations.at(i);
-				write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, gazeAngle, ActionUnits.first, ActionUnits.second, det_parameters.track_gaze);
-			}
-			// Writing out the detected landmarks
-			if (!output_pose_locations.empty())
-			{
-				string outfeatures = output_pose_locations.at(i);
-				write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);
-			}
-			// displaying detected stuff
-			cv::Mat display_image;
-			if (det_parameters.track_gaze)
-			{
-				cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
-				// Draw it in reddish if uncertain, blueish if certain
-				LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
-				GazeAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
-			}
-			create_display_image(read_image, display_image, clnf_model);
-			if(visualise)
-			{
-				imshow("colour", display_image);
-				cv::waitKey(1);
-			}
-			if(!output_images.empty())
-			{
-				string outimage = output_images.at(i);
-				if(!outimage.empty())
-				{
-					create_directory_from_file(outimage);
-					bool write_success = imwrite(outimage, display_image);
-					if (!write_success)
-					{
-						cout << "Could not output a processed image" << endl;
-						return 1;
-					}
-				}
-			}
-		}
+			cv::Mat sim_warped_img;
+			cv::Mat_<double> hog_descriptor; int num_hog_rows = 0, num_hog_cols = 0;
+			// Perform AU detection and HOG feature extraction, as this can be expensive only compute it if needed by output or visualization
+			if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualizer.vis_align || visualizer.vis_hog)
+			{
+				face_analyser.PredictStaticAUsAndComputeFeatures(captured_image, face_model.detected_landmarks);
+				face_analyser.GetLatestAlignedFace(sim_warped_img);
+				face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
+			}
+			// Displaying the tracking visualizations
+			visualizer.SetObservationFaceAlign(sim_warped_img);
+			visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
+			visualizer.SetObservationLandmarks(face_model.detected_landmarks, 1.0, face_model.detection_success); // Set confidence to high to make sure we always visualize
+			visualizer.SetObservationPose(pose_estimate, 1.0);
+			visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy), face_model.detection_certainty);
+			// Setting up the recorder output
+			open_face_rec.SetObservationHOG(face_model.detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
+			open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
+			open_face_rec.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
+			open_face_rec.SetObservationLandmarks(face_model.detected_landmarks, face_model.GetShape(image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy),
+				face_model.params_global, face_model.params_local, face_model.detection_certainty, face_model.detection_success);
+			open_face_rec.SetObservationPose(pose_estimate);
+			open_face_rec.SetObservationGaze(gaze_direction0, gaze_direction1, gaze_angle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy));
+			open_face_rec.SetObservationFaceAlign(sim_warped_img);
+			open_face_rec.WriteObservation();
+		}
+		if(face_detections.size() > 0)
+		{
+			visualizer.ShowObservation();
+		}
+		// Grabbing the next frame in the sequence
+		captured_image = image_reader.GetNextImage();
 	}
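Taken together, this rewrite drops several hundred lines of hand-rolled I/O (grayscale conversion, directory creation, ad-hoc landmark and pose writers, manual drawing) in favour of the new Utilities classes. Below is a minimal sketch of the resulting image-processing loop, condensed from the additions above; the include names not visible in this diff (LandmarkCoreIncludes.h and the dlib detector header) are assumptions, and AU extraction, visualization, and error handling are omitted:

#include <LandmarkCoreIncludes.h>  // assumption: umbrella header for the LandmarkDetector API
#include <dlib/image_processing/frontal_face_detector.h>

#include <ImageCapture.h>
#include <RecorderOpenFace.h>
#include <RecorderOpenFaceParameters.h>

#include <string>
#include <vector>

int main(int argc, char **argv)
{
    std::vector<std::string> arguments(argv, argv + argc);

    // The reader decides what to open based on the command line arguments
    Utilities::ImageCapture image_reader;
    if (!image_reader.Open(arguments))
        return 1;

    LandmarkDetector::FaceModelParameters det_parameters(arguments);
    LandmarkDetector::CLNF face_model(det_parameters.model_location);
    dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();

    for (cv::Mat img = image_reader.GetNextImage(); !img.empty(); img = image_reader.GetNextImage())
    {
        // One recorder per input image; camera intrinsics (fx, fy, cx, cy) come from the reader
        Utilities::RecorderOpenFaceParameters rec_params(arguments, false, false,
            image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);
        Utilities::RecorderOpenFace recorder(image_reader.name, rec_params, arguments);

        cv::Mat_<uchar> grayscale_image = image_reader.GetGrayFrame();

        // Detect faces, then fit landmarks and estimate pose for each detection
        std::vector<cv::Rect_<double> > face_detections;
        std::vector<double> confidences;
        LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, face_detector_hog, confidences);

        for (size_t face = 0; face < face_detections.size(); ++face)
        {
            bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], face_model, det_parameters);
            cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model,
                image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy);

            // The recorder writes one set of output files per observation
            recorder.SetObservationLandmarks(face_model.detected_landmarks,
                face_model.GetShape(image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy),
                face_model.params_global, face_model.params_local,
                face_model.detection_certainty, success);
            recorder.SetObservationPose(pose_estimate);
            recorder.WriteObservation();
        }
    }
    return 0;
}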

exe/FaceLandmarkImg/FaceLandmarkImg.vcxproj

@@ -112,7 +112,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -127,7 +127,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN64;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -144,7 +144,7 @@
 <FunctionLevelLinking>true</FunctionLevelLinking>
 <IntrinsicFunctions>true</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -162,12 +162,14 @@
 <PrecompiledHeader>NotUsing</PrecompiledHeader>
 <Optimization>Full</Optimization>
 <FunctionLevelLinking>true</FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN64;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
+<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 </ClCompile>
 <Link>
 <SubSystem>Console</SubSystem>
@@ -189,6 +191,9 @@
 <ProjectReference Include="..\..\lib\local\LandmarkDetector\LandmarkDetector.vcxproj">
 <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
 </ProjectReference>
+<ProjectReference Include="..\..\lib\local\Utilities\Utilities.vcxproj">
+<Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+</ProjectReference>
 </ItemGroup>
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">

exe/FaceLandmarkVid/CMakeLists.txt

@@ -9,10 +9,12 @@ include_directories(${LandmarkDetector_SOURCE_DIR}/include)
 include_directories(../../lib/local/LandmarkDetector/include)
 include_directories(../../lib/local/FaceAnalyser/include)
 include_directories(../../lib/local/GazeAnalyser/include)
+include_directories(../../lib/local/Utilities/include)
 target_link_libraries(FaceLandmarkVid LandmarkDetector)
 target_link_libraries(FaceLandmarkVid FaceAnalyser)
 target_link_libraries(FaceLandmarkVid GazeAnalyser)
+target_link_libraries(FaceLandmarkVid Utilities)
 target_link_libraries(FaceLandmarkVid dlib)

exe/FaceLandmarkVid/FaceLandmarkVid.cpp

@@ -46,6 +46,10 @@
 #include <opencv2/imgproc.hpp>
 #include <opencv2/highgui/highgui.hpp>
+#include <SequenceCapture.h>
+#include <Visualizer.h>
+#include <VisualizationUtils.h>
 // Boost includes
 #include <filesystem.hpp>
 #include <filesystem/fstream.hpp>
@@ -82,276 +86,113 @@ vector<string> get_arguments(int argc, char **argv)
 	return arguments;
 }
-// Some globals for tracking timing information for visualisation
-double fps_tracker = -1.0;
-int64 t0 = 0;
-// Visualising the results
-void visualise_tracking(cv::Mat& captured_image, const LandmarkDetector::CLNF& face_model, const LandmarkDetector::FaceModelParameters& det_parameters, cv::Point3f gazeDirection0, cv::Point3f gazeDirection1, int frame_count, double fx, double fy, double cx, double cy)
-{
-	// Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
-	double detection_certainty = face_model.detection_certainty;
-	bool detection_success = face_model.detection_success;
-	double visualisation_boundary = 0.2;
-	// Only draw if the reliability is reasonable, the value is slightly ad-hoc
-	if (detection_certainty < visualisation_boundary)
-	{
-		LandmarkDetector::Draw(captured_image, face_model);
-		double vis_certainty = detection_certainty;
-		if (vis_certainty > 1)
-			vis_certainty = 1;
-		if (vis_certainty < -1)
-			vis_certainty = -1;
-		vis_certainty = (vis_certainty + 1) / (visualisation_boundary + 1);
-		// A rough heuristic for box around the face width
-		int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
-		cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
-		// Draw it in reddish if uncertain, blueish if certain
-		LandmarkDetector::DrawBox(captured_image, pose_estimate_to_draw, cv::Scalar((1 - vis_certainty)*255.0, 0, vis_certainty * 255), thickness, fx, fy, cx, cy);
-		if (det_parameters.track_gaze && detection_success && face_model.eye_model)
-		{
-			GazeAnalysis::DrawGaze(captured_image, face_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
-		}
-	}
-	// Work out the framerate
-	if (frame_count % 10 == 0)
-	{
-		double t1 = cv::getTickCount();
-		fps_tracker = 10.0 / (double(t1 - t0) / cv::getTickFrequency());
-		t0 = t1;
-	}
-	// Write out the framerate on the image before displaying it
-	char fpsC[255];
-	std::sprintf(fpsC, "%d", (int)fps_tracker);
-	string fpsSt("FPS:");
-	fpsSt += fpsC;
-	cv::putText(captured_image, fpsSt, cv::Point(10, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255, 0, 0));
-	if (!det_parameters.quiet_mode)
-	{
-		cv::namedWindow("tracking_result", 1);
-		cv::imshow("tracking_result", captured_image);
-	}
-}
 int main (int argc, char **argv)
 {
 	vector<string> arguments = get_arguments(argc, argv);
-	// Some initial parameters that can be overriden from command line
-	vector<string> files, output_video_files, out_dummy;
-	// By default try webcam 0
-	int device = 0;
 	LandmarkDetector::FaceModelParameters det_parameters(arguments);
-	// Get the input output file parameters
-	// Indicates that rotation should be with respect to world or camera coordinates
-	string output_codec;
-	LandmarkDetector::get_video_input_output_params(files, out_dummy, output_video_files, output_codec, arguments);
 	// The modules that are being used for tracking
-	LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
-	// Grab camera parameters, if they are not defined (approximate values will be used)
-	float fx = 0, fy = 0, cx = 0, cy = 0;
-	// Get camera parameters
-	LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
-	// If cx (optical axis centre) is undefined will use the image size/2 as an estimate
-	bool cx_undefined = false;
-	bool fx_undefined = false;
-	if (cx == 0 || cy == 0)
-	{
-		cx_undefined = true;
-	}
-	if (fx == 0 || fy == 0)
-	{
-		fx_undefined = true;
-	}
-	// If multiple video files are tracked, use this to indicate if we are done
-	bool done = false;
-	int f_n = -1;
-	det_parameters.track_gaze = true;
-	while(!done) // this is not a for loop as we might also be reading from a webcam
+	LandmarkDetector::CLNF face_model(det_parameters.model_location);
+	// Open a sequence
+	Utilities::SequenceCapture sequence_reader;
+	// A utility for visualizing the results (show just the tracks)
+	Utilities::Visualizer visualizer(true, false, false);
+	// Tracking FPS for visualization
+	Utilities::FpsTracker fps_tracker;
+	fps_tracker.AddFrame();
+	int sequence_number = 0;
+	while (true) // this is not a for loop as we might also be reading from a webcam
 	{
-		string current_file;
-		// We might specify multiple video files as arguments
-		if(files.size() > 0)
-		{
-			f_n++;
-			current_file = files[f_n];
-		}
-		else
-		{
-			// If we want to write out from webcam
-			f_n = 0;
-		}
-		// Do some grabbing
-		cv::VideoCapture video_capture;
-		if( current_file.size() > 0 )
-		{
-			if (!boost::filesystem::exists(current_file))
-			{
-				FATAL_STREAM("File does not exist");
-				return 1;
-			}
-			current_file = boost::filesystem::path(current_file).generic_string();
-			INFO_STREAM( "Attempting to read from file: " << current_file );
-			video_capture = cv::VideoCapture( current_file );
-		}
-		else
-		{
-			INFO_STREAM( "Attempting to capture from device: " << device );
-			video_capture = cv::VideoCapture( device );
-			// Read a first frame often empty in camera
-			cv::Mat captured_image;
-			video_capture >> captured_image;
-		}
-		if (!video_capture.isOpened())
-		{
-			FATAL_STREAM("Failed to open video source");
-			return 1;
-		}
-		else INFO_STREAM( "Device or file opened");
-		cv::Mat captured_image;
-		video_capture >> captured_image;
-		// If optical centers are not defined just use center of image
-		if (cx_undefined)
-		{
-			cx = captured_image.cols / 2.0f;
-			cy = captured_image.rows / 2.0f;
-		}
-		// Use a rough guess-timate of focal length
-		if (fx_undefined)
-		{
-			fx = 500 * (captured_image.cols / 640.0);
-			fy = 500 * (captured_image.rows / 480.0);
-			fx = (fx + fy) / 2.0;
-			fy = fx;
-		}
-		int frame_count = 0;
-		// saving the videos
-		cv::VideoWriter writerFace;
-		if (!output_video_files.empty())
-		{
-			try
-			{
-				writerFace = cv::VideoWriter(output_video_files[f_n], CV_FOURCC(output_codec[0], output_codec[1], output_codec[2], output_codec[3]), 30, captured_image.size(), true);
-			}
-			catch(cv::Exception e)
-			{
-				WARN_STREAM( "Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN. Currently using codec " << output_codec << ", try using an other one (-oc option)");
-			}
-		}
-		// Use for timestamping if using a webcam
-		int64 t_initial = cv::getTickCount();
-		INFO_STREAM( "Starting tracking");
-		while(!captured_image.empty())
+		// The sequence reader chooses what to open based on command line arguments provided
+		if(!sequence_reader.Open(arguments))
+		{
+			// If failed to open because no input files specified, attempt to open a webcam
+			if (sequence_reader.no_input_specified && sequence_number == 0)
+			{
+				// If that fails, revert to webcam
+				INFO_STREAM("No input specified, attempting to open a webcam 0");
+				if (!sequence_reader.OpenWebcam(0))
+				{
+					ERROR_STREAM("Failed to open the webcam");
+					break;
+				}
+			}
+			else
+			{
+				ERROR_STREAM("Failed to open a sequence");
+				break;
+			}
+		}
+		INFO_STREAM("Device or file opened");
+		cv::Mat captured_image = sequence_reader.GetNextFrame();
+		INFO_STREAM("Starting tracking");
+		while (!captured_image.empty()) // this is not a for loop as we might also be reading from a webcam
 		{
 			// Reading the images
-			cv::Mat_<uchar> grayscale_image;
-			if(captured_image.channels() == 3)
-			{
-				cv::cvtColor(captured_image, grayscale_image, CV_BGR2GRAY);
-			}
-			else
-			{
-				grayscale_image = captured_image.clone();
-			}
+			cv::Mat_<uchar> grayscale_image = sequence_reader.GetGrayFrame();
 			// The actual facial landmark detection / tracking
-			bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_model, det_parameters);
-			// Visualising the results
-			// Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
-			double detection_certainty = clnf_model.detection_certainty;
+			bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_model, det_parameters);
 			// Gaze tracking, absolute gaze direction
 			cv::Point3f gazeDirection0(0, 0, -1);
 			cv::Point3f gazeDirection1(0, 0, -1);
-			if (det_parameters.track_gaze && detection_success && clnf_model.eye_model)
+			// If tracking succeeded and we have an eye model, estimate gaze
+			if (detection_success && face_model.eye_model)
 			{
-				GazeAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
-				GazeAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
+				GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true);
+				GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false);
 			}
-			visualise_tracking(captured_image, clnf_model, det_parameters, gazeDirection0, gazeDirection1, frame_count, fx, fy, cx, cy);
-			// output the tracked video
-			if (!output_video_files.empty())
-			{
-				writerFace << captured_image;
-			}
-			video_capture >> captured_image;
-			// detect key presses
-			char character_press = cv::waitKey(1);
+			// Work out the pose of the head from the tracked model
+			cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+			// Keeping track of FPS
+			fps_tracker.AddFrame();
+			// Displaying the tracking visualizations
+			visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
+			visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, detection_success);
+			visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
+			visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
visualizer.SetFps(fps_tracker.GetFPS());
// detect key presses (due to pecularities of OpenCV, you can get it when displaying images)
char character_press = visualizer.ShowObservation();
// restart the tracker // restart the tracker
if(character_press == 'r') if (character_press == 'r')
{ {
clnf_model.Reset(); face_model.Reset();
} }
// quit the application // quit the application
else if(character_press=='q') else if (character_press == 'q')
{ {
return(0); return(0);
} }
// Update the frame count // Grabbing the next frame in the sequence
frame_count++; captured_image = sequence_reader.GetNextFrame();
} }
frame_count = 0;
// Reset the model, for the next video // Reset the model, for the next video
clnf_model.Reset(); face_model.Reset();
// break out of the loop if done with all the files (or using a webcam) sequence_number++;
if(f_n == files.size() -1 || files.empty())
{
done = true;
}
}
}
return 0; return 0;
} }
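The rewritten tool above swaps the manual cv::VideoCapture, camera-parameter and VideoWriter plumbing for the new Utilities::SequenceCapture reader and Utilities::Visualizer. A minimal sketch of that capture pattern follows, using only the calls visible in this diff (Open, OpenWebcam, GetNextFrame, GetGrayFrame and the fx/fy/cx/cy fields); the wrapper function and the assumption that the reader fills in intrinsics itself are illustrative, not part of the commit.

// Sketch only: exercises the new Utilities capture API as used in this diff.
#include <string>
#include <vector>
#include <opencv2/core.hpp>
#include "SequenceCapture.h"

int RunCaptureSketch(std::vector<std::string>& arguments)
{
    Utilities::SequenceCapture sequence_reader;

    // The command line decides between a video file, an image sequence or a device;
    // fall back to webcam 0 the same way the tool above does.
    if (!sequence_reader.Open(arguments) && !sequence_reader.OpenWebcam(0))
        return 1;

    for (cv::Mat frame = sequence_reader.GetNextFrame(); !frame.empty();
         frame = sequence_reader.GetNextFrame())
    {
        // Grayscale view of the same frame, used by the landmark detector
        cv::Mat_<uchar> grayscale = sequence_reader.GetGrayFrame();

        // Camera intrinsics now live on the reader (assumed estimated when not given)
        float fx = sequence_reader.fx, fy = sequence_reader.fy;
        float cx = sequence_reader.cx, cy = sequence_reader.cy;
        (void)grayscale; (void)fx; (void)fy; (void)cx; (void)cy; // feed into tracking here
    }
    return 0;
}

Keeping the per-sequence state behind one object is what lets the tool loop uniformly over files and webcams.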


@@ -112,7 +112,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -128,7 +128,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN64;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -147,7 +147,7 @@
 </FunctionLevelLinking>
 <IntrinsicFunctions>true</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
@@ -164,16 +164,17 @@
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
 <PrecompiledHeader>NotUsing</PrecompiledHeader>
-<Optimization>MaxSpeed</Optimization>
+<Optimization>Full</Optimization>
 <FunctionLevelLinking>
 </FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN64;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
 </ClCompile>
 <Link>
 <SubSystem>Console</SubSystem>
@@ -195,6 +196,9 @@
 <ProjectReference Include="..\..\lib\local\LandmarkDetector\LandmarkDetector.vcxproj">
 <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
 </ProjectReference>
+<ProjectReference Include="..\..\lib\local\Utilities\Utilities.vcxproj">
+<Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+</ProjectReference>
 </ItemGroup>
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">


@@ -5,9 +5,11 @@ include_directories(${TBB_ROOT_DIR}/include)
 include_directories(${LandmarkDetector_SOURCE_DIR}/include)
 include_directories(../../lib/local/LandmarkDetector/include)
+include_directories(../../lib/local/Utilities/include)
 add_executable(FaceLandmarkVidMulti FaceLandmarkVidMulti.cpp)
 target_link_libraries(FaceLandmarkVidMulti LandmarkDetector)
+target_link_libraries(FaceLandmarkVidMulti Utilities)
 target_link_libraries(FaceLandmarkVidMulti dlib)
 target_link_libraries(FaceLandmarkVidMulti ${OpenCV_LIBS} ${Boost_LIBRARIES} ${TBB_LIBRARIES} ${BLAS_LIBRARIES})


@@ -36,6 +36,10 @@
 // FaceTrackingVidMulti.cpp : Defines the entry point for the multiple face tracking console application.
 #include "LandmarkCoreIncludes.h"
+#include "VisualizationUtils.h"
+#include "Visualizer.h"
+#include "SequenceCapture.h"
 #include <fstream>
 #include <sstream>
@@ -106,17 +110,8 @@ int main (int argc, char **argv)
     vector<string> arguments = get_arguments(argc, argv);
-    // Some initial parameters that can be overriden from command line
-    vector<string> files, tracked_videos_output, dummy_out;
-    // By default try webcam 0
-    int device = 0;
-    // cx and cy aren't necessarilly in the image center, so need to be able to override it (start with unit vals and init them if none specified)
-    float fx = 600, fy = 600, cx = 0, cy = 0;
     LandmarkDetector::FaceModelParameters det_params(arguments);
-    det_params.use_face_template = true;
     // This is so that the model would not try re-initialising itself
     det_params.reinit_video_every = -1;
@@ -125,114 +120,69 @@ int main (int argc, char **argv)
     vector<LandmarkDetector::FaceModelParameters> det_parameters;
     det_parameters.push_back(det_params);
-    // Get the input output file parameters
-    string output_codec;
-    LandmarkDetector::get_video_input_output_params(files, dummy_out, tracked_videos_output, output_codec, arguments);
-    // Get camera parameters
-    LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
     // The modules that are being used for tracking
-    vector<LandmarkDetector::CLNF> clnf_models;
+    vector<LandmarkDetector::CLNF> face_models;
     vector<bool> active_models;
     int num_faces_max = 4;
-    LandmarkDetector::CLNF clnf_model(det_parameters[0].model_location);
-    clnf_model.face_detector_HAAR.load(det_parameters[0].face_detector_location);
-    clnf_model.face_detector_location = det_parameters[0].face_detector_location;
-    clnf_models.reserve(num_faces_max);
-    clnf_models.push_back(clnf_model);
+    LandmarkDetector::CLNF face_model(det_parameters[0].model_location);
+    face_model.face_detector_HAAR.load(det_parameters[0].face_detector_location);
+    face_model.face_detector_location = det_parameters[0].face_detector_location;
+    face_models.reserve(num_faces_max);
+    face_models.push_back(face_model);
     active_models.push_back(false);
     for (int i = 1; i < num_faces_max; ++i)
     {
-        clnf_models.push_back(clnf_model);
+        face_models.push_back(face_model);
         active_models.push_back(false);
         det_parameters.push_back(det_params);
     }
-    // If multiple video files are tracked, use this to indicate if we are done
-    bool done = false;
-    int f_n = -1;
-    // If cx (optical axis centre) is undefined will use the image size/2 as an estimate
-    bool cx_undefined = false;
-    if(cx == 0 || cy == 0)
-    {
-        cx_undefined = true;
-    }
+    // Open a sequence
+    Utilities::SequenceCapture sequence_reader;
+    // A utility for visualizing the results (show just the tracks)
+    Utilities::Visualizer visualizer(true, false, false);
+    // Tracking FPS for visualization
+    Utilities::FpsTracker fps_tracker;
+    fps_tracker.AddFrame();
+    int sequence_number = 0;
-    while(!done) // this is not a for loop as we might also be reading from a webcam
+    while(true) // this is not a for loop as we might also be reading from a webcam
     {
-        string current_file;
-        // We might specify multiple video files as arguments
-        if(files.size() > 0)
-        {
-            f_n++;
-            current_file = files[f_n];
-        }
-        // Do some grabbing
-        cv::VideoCapture video_capture;
-        if( current_file.size() > 0 )
-        {
-            INFO_STREAM( "Attempting to read from file: " << current_file );
-            video_capture = cv::VideoCapture( current_file );
-        }
-        else
-        {
-            INFO_STREAM( "Attempting to capture from device: " << device );
-            video_capture = cv::VideoCapture( device );
-            // Read a first frame often empty in camera
-            cv::Mat captured_image;
-            video_capture >> captured_image;
-        }
-        if (!video_capture.isOpened())
-        {
-            FATAL_STREAM("Failed to open video source");
-            return 1;
-        }
-        else INFO_STREAM( "Device or file opened");
-        cv::Mat captured_image;
-        video_capture >> captured_image;
-        // If optical centers are not defined just use center of image
-        if(cx_undefined)
-        {
-            cx = captured_image.cols / 2.0f;
-            cy = captured_image.rows / 2.0f;
-        }
+        // The sequence reader chooses what to open based on command line arguments provided
+        if (!sequence_reader.Open(arguments))
+        {
+            // If failed to open because no input files specified, attempt to open a webcam
+            if (sequence_reader.no_input_specified && sequence_number == 0)
+            {
+                // If that fails, revert to webcam
+                INFO_STREAM("No input specified, attempting to open a webcam 0");
+                if (!sequence_reader.OpenWebcam(0))
+                {
+                    ERROR_STREAM("Failed to open the webcam");
+                    break;
+                }
+            }
+            else
+            {
+                ERROR_STREAM("Failed to open a sequence");
+                break;
+            }
+        }
+        INFO_STREAM("Device or file opened");
+        cv::Mat captured_image = sequence_reader.GetNextFrame();
         int frame_count = 0;
-        // saving the videos
-        cv::VideoWriter writerFace;
-        if(!tracked_videos_output.empty())
-        {
-            try
-            {
-                writerFace = cv::VideoWriter(tracked_videos_output[f_n], CV_FOURCC(output_codec[0],output_codec[1],output_codec[2],output_codec[3]), 30, captured_image.size(), true);
-            }
-            catch(cv::Exception e)
-            {
-                WARN_STREAM( "Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN. Currently using codec " << output_codec << ", try using an other one (-oc option)");
-            }
-        }
-        // For measuring the timings
-        int64 t1,t0 = cv::getTickCount();
-        double fps = 10;
         INFO_STREAM( "Starting tracking");
         while(!captured_image.empty())
         {
@@ -254,7 +204,7 @@ int main (int argc, char **argv)
             vector<cv::Rect_<double> > face_detections;
             bool all_models_active = true;
-            for(unsigned int model = 0; model < clnf_models.size(); ++model)
+            for(unsigned int model = 0; model < face_models.size(); ++model)
             {
                 if(!active_models[model])
                 {
@@ -268,33 +218,32 @@
                 if(det_parameters[0].curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
                 {
                     vector<double> confidences;
-                    LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, clnf_models[0].face_detector_HOG, confidences);
+                    LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, face_models[0].face_detector_HOG, confidences);
                 }
                 else
                 {
-                    LandmarkDetector::DetectFaces(face_detections, grayscale_image, clnf_models[0].face_detector_HAAR);
+                    LandmarkDetector::DetectFaces(face_detections, grayscale_image, face_models[0].face_detector_HAAR);
                 }
             }
             // Keep only non overlapping detections (also convert to a concurrent vector
-            NonOverlapingDetections(clnf_models, face_detections);
+            NonOverlapingDetections(face_models, face_detections);
             vector<tbb::atomic<bool> > face_detections_used(face_detections.size());
             // Go through every model and update the tracking
-            tbb::parallel_for(0, (int)clnf_models.size(), [&](int model){
+            tbb::parallel_for(0, (int)face_models.size(), [&](int model){
             //for(unsigned int model = 0; model < clnf_models.size(); ++model)
             //{
                 bool detection_success = false;
                 // If the current model has failed more than 4 times in a row, remove it
-                if(clnf_models[model].failures_in_a_row > 4)
+                if(face_models[model].failures_in_a_row > 4)
                 {
                     active_models[model] = false;
-                    clnf_models[model].Reset();
+                    face_models[model].Reset();
                 }
                 // If the model is inactive reactivate it with new detections
@@ -308,11 +257,11 @@ int main (int argc, char **argv)
                 {
                     // Reinitialise the model
-                    clnf_models[model].Reset();
+                    face_models[model].Reset();
                     // This ensures that a wider window is used for the initial landmark localisation
-                    clnf_models[model].detection_success = false;
+                    face_models[model].detection_success = false;
-                    detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_detections[detection_ind], clnf_models[model], det_parameters[model]);
+                    detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_detections[detection_ind], face_models[model], det_parameters[model]);
                     // This activates the model
                     active_models[model] = true;
@@ -326,123 +275,64 @@ int main (int argc, char **argv)
                 else
                 {
                     // The actual facial landmark detection / tracking
-                    detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_models[model], det_parameters[model]);
+                    detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_models[model], det_parameters[model]);
                 }
             });
+            // Keeping track of FPS
+            fps_tracker.AddFrame();
+            visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
             // Go through every model and visualise the results
-            for(size_t model = 0; model < clnf_models.size(); ++model)
+            for(size_t model = 0; model < face_models.size(); ++model)
             {
                 // Visualising the results
-                // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
-                double detection_certainty = clnf_models[model].detection_certainty;
-                double visualisation_boundary = -0.1;
-                // Only draw if the reliability is reasonable, the value is slightly ad-hoc
-                if(detection_certainty < visualisation_boundary)
+                if(active_models[model])
                 {
-                    LandmarkDetector::Draw(disp_image, clnf_models[model]);
-                    if(detection_certainty > 1)
-                        detection_certainty = 1;
-                    if(detection_certainty < -1)
-                        detection_certainty = -1;
-                    detection_certainty = (detection_certainty + 1)/(visualisation_boundary +1);
-                    // A rough heuristic for box around the face width
-                    int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
-                    // Work out the pose of the head from the tracked model
-                    cv::Vec6d pose_estimate = LandmarkDetector::GetPose(clnf_models[model], fx, fy, cx, cy);
-                    // Draw it in reddish if uncertain, blueish if certain
-                    LandmarkDetector::DrawBox(disp_image, pose_estimate, cv::Scalar((1-detection_certainty)*255.0,0, detection_certainty*255), thickness, fx, fy, cx, cy);
+                    visualizer.SetObservationLandmarks(face_models[model].detected_landmarks, face_models[model].detection_certainty, face_models[model].detection_success);
+                    visualizer.SetObservationPose(LandmarkDetector::GetPose(face_models[model], sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_models[model].detection_certainty);
                 }
             }
+            visualizer.SetFps(fps_tracker.GetFPS());
-            // Work out the framerate
-            if(frame_count % 10 == 0)
-            {
-                t1 = cv::getTickCount();
-                fps = 10.0 / (double(t1-t0)/cv::getTickFrequency());
-                t0 = t1;
-            }
-            // Write out the framerate on the image before displaying it
-            char fpsC[255];
-            sprintf(fpsC, "%d", (int)fps);
-            string fpsSt("FPS:");
-            fpsSt += fpsC;
-            cv::putText(disp_image, fpsSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0), 1, CV_AA);
-            int num_active_models = 0;
-            for( size_t active_model = 0; active_model < active_models.size(); active_model++)
-            {
-                if(active_models[active_model])
-                {
-                    num_active_models++;
-                }
-            }
-            char active_m_C[255];
-            sprintf(active_m_C, "%d", num_active_models);
-            string active_models_st("Active models:");
-            active_models_st += active_m_C;
-            cv::putText(disp_image, active_models_st, cv::Point(10,60), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0), 1, CV_AA);
-            if(!det_parameters[0].quiet_mode)
-            {
-                cv::namedWindow("tracking_result",1);
-                cv::imshow("tracking_result", disp_image);
-            }
-            // output the tracked video
-            if(!tracked_videos_output.empty())
-            {
-                writerFace << disp_image;
-            }
-            video_capture >> captured_image;
-            // detect key presses
-            char character_press = cv::waitKey(1);
+            // show visualization and detect key presses
+            char character_press = visualizer.ShowObservation();
            // restart the trackers
            if(character_press == 'r')
            {
-                for(size_t i=0; i < clnf_models.size(); ++i)
+                for(size_t i=0; i < face_models.size(); ++i)
                {
-                    clnf_models[i].Reset();
+                    face_models[i].Reset();
                    active_models[i] = false;
                }
            }
            // quit the application
            else if(character_press=='q')
            {
-                return(0);
+                return 0;
            }
            // Update the frame count
            frame_count++;
+            // Grabbing the next frame in the sequence
+            captured_image = sequence_reader.GetNextFrame();
        }
        frame_count = 0;
        // Reset the model, for the next video
-        for(size_t model=0; model < clnf_models.size(); ++model)
+        for(size_t model=0; model < face_models.size(); ++model)
        {
-            clnf_models[model].Reset();
+            face_models[model].Reset();
            active_models[model] = false;
        }
-        // break out of the loop if done with all the files
-        if(f_n == files.size() -1)
-        {
-            done = true;
-        }
+        sequence_number++;
    }
    return 0;
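The loop above relies on NonOverlapingDetections(face_models, face_detections) to drop fresh detections that fall on faces already being tracked; its body is not part of this diff. A sketch of what a filter with that shape can look like, assuming an intersection-over-union test against each active model's bounding box (the function name, threshold and inputs here are hypothetical stand-ins, not the repository's implementation):

// Hypothetical overlap filter: removes detections whose IoU with any
// currently tracked face rectangle exceeds a threshold.
#include <vector>
#include <opencv2/core.hpp>

void RemoveOverlappingDetections(const std::vector<cv::Rect_<double>>& tracked_rects,
                                 std::vector<cv::Rect_<double>>& detections)
{
    std::vector<cv::Rect_<double>> kept;
    for (const auto& det : detections)
    {
        bool overlaps = false;
        for (const auto& trk : tracked_rects)
        {
            double inter = (det & trk).area();            // intersection area
            double uni = det.area() + trk.area() - inter; // union area
            if (uni > 0 && inter / uni > 0.5)             // IoU threshold (assumed)
            {
                overlaps = true;
                break;
            }
        }
        if (!overlaps)
            kept.push_back(det);
    }
    detections.swap(kept);
}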


@@ -105,7 +105,7 @@
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -117,7 +117,7 @@
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -131,7 +131,7 @@
 <Optimization>MaxSpeed</Optimization>
 <FunctionLevelLinking>true</FunctionLevelLinking>
 <IntrinsicFunctions>true</IntrinsicFunctions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -145,13 +145,15 @@
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
-<Optimization>MaxSpeed</Optimization>
+<Optimization>Full</Optimization>
 <FunctionLevelLinking>true</FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
+<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>
@@ -166,6 +168,9 @@
 <ProjectReference Include="..\..\lib\local\LandmarkDetector\LandmarkDetector.vcxproj">
 <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
 </ProjectReference>
+<ProjectReference Include="..\..\lib\local\Utilities\Utilities.vcxproj">
+<Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+</ProjectReference>
 </ItemGroup>
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">


@@ -9,10 +9,12 @@ include_directories(${LandmarkDetector_SOURCE_DIR}/include)
 include_directories(../../lib/local/LandmarkDetector/include)
 include_directories(../../lib/local/FaceAnalyser/include)
 include_directories(../../lib/local/GazeAnalyser/include)
+include_directories(../../lib/local/Utilities/include)
 target_link_libraries(FeatureExtraction LandmarkDetector)
 target_link_libraries(FeatureExtraction FaceAnalyser)
 target_link_libraries(FeatureExtraction GazeAnalyser)
+target_link_libraries(FeatureExtraction Utilities)
 target_link_libraries(FeatureExtraction dlib)
 target_link_libraries(FeatureExtraction ${OpenCV_LIBS} ${Boost_LIBRARIES} ${TBB_LIBRARIES} ${BLAS_LIBRARIES})

File diff suppressed because it is too large


@@ -111,7 +111,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -127,7 +127,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <PreprocessorDefinitions>WIN64;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
@@ -145,7 +145,7 @@
 <FunctionLevelLinking>false</FunctionLevelLinking>
 <IntrinsicFunctions>true</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
@@ -163,16 +163,17 @@
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
 <PrecompiledHeader>NotUsing</PrecompiledHeader>
-<Optimization>MaxSpeed</Optimization>
+<Optimization>Full</Optimization>
 <FunctionLevelLinking>false</FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
 <PreprocessorDefinitions>WIN64;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
-<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>$(SolutionDir)\lib\local\FaceAnalyser\include;$(SolutionDir)\lib\local\LandmarkDetector\include;$(SolutionDir)\lib\local\GazeAnalyser\include;$(SolutionDir)\lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <OpenMPSupport>false</OpenMPSupport>
 <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
 </ClCompile>
 <Link>
 <SubSystem>Console</SubSystem>
@@ -194,6 +195,9 @@
 <ProjectReference Include="..\..\lib\local\LandmarkDetector\LandmarkDetector.vcxproj">
 <Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
 </ProjectReference>
+<ProjectReference Include="..\..\lib\local\Utilities\Utilities.vcxproj">
+<Project>{8e741ea2-9386-4cf2-815e-6f9b08991eac}</Project>
+</ProjectReference>
 </ItemGroup>
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 <ImportGroup Label="ExtensionTargets">


@@ -86,6 +86,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>
@@ -110,6 +111,7 @@
 <OpenMPSupport>false</OpenMPSupport>
 <CallingConvention>Cdecl</CallingConvention>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>
@@ -128,6 +130,7 @@
 <OpenMPSupport>false</OpenMPSupport>
 <CallingConvention>Cdecl</CallingConvention>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>


@@ -0,0 +1,78 @@
+clear;
+version = '0.3.0';
+out_x86 = sprintf('OpenFace_%s_win_x86', version);
+out_x64 = sprintf('OpenFace_%s_win_x64', version);
+mkdir(out_x86);
+mkdir(out_x64);
+in_x86 = '../../Release/';
+in_x64 = '../../x64/Release/';
+% Copy models
+copyfile([in_x86, 'AU_predictors'], [out_x86, '/AU_predictors'])
+copyfile([in_x86, 'classifiers'], [out_x86, '/classifiers'])
+copyfile([in_x86, 'model'], [out_x86, '/model'])
+copyfile([in_x64, 'AU_predictors'], [out_x64, '/AU_predictors'])
+copyfile([in_x64, 'classifiers'], [out_x64, '/classifiers'])
+copyfile([in_x64, 'model'], [out_x64, '/model'])
+%% Copy libraries
+libs_x86 = dir([in_x86, '*.lib'])';
+for lib = libs_x86
+    copyfile([in_x86, '/', lib.name], [out_x86, '/', lib.name])
+end
+libs_x64 = dir([in_x64, '*.lib'])';
+for lib = libs_x64
+    copyfile([in_x64, '/', lib.name], [out_x64, '/', lib.name])
+end
+%% Copy dlls
+dlls_x86 = dir([in_x86, '*.dll'])';
+for dll = dlls_x86
+    copyfile([in_x86, '/', dll.name], [out_x86, '/', dll.name])
+end
+dlls_x64 = dir([in_x64, '*.dll'])';
+for dll = dlls_x64
+    copyfile([in_x64, '/', dll.name], [out_x64, '/', dll.name])
+end
+%% Copy exe's
+exes_x86 = dir([in_x86, '*.exe'])';
+for exe = exes_x86
+    copyfile([in_x86, '/', exe.name], [out_x86, '/', exe.name])
+end
+exes_x64 = dir([in_x64, '*.exe'])';
+for exe = exes_x64
+    copyfile([in_x64, '/', exe.name], [out_x64, '/', exe.name])
+end
+%% Copy license and copyright
+copyfile('../../Copyright.txt', [out_x86, '/Copyright.txt']);
+copyfile('../../OpenFace-license.txt', [out_x86, '/OpenFace-license.txt']);
+copyfile('../../Copyright.txt', [out_x64, '/Copyright.txt']);
+copyfile('../../OpenFace-license.txt', [out_x64, '/OpenFace-license.txt']);

BIN: imgs/landmark_scheme_68.png (new file, 483 KiB; binary file not shown)
BIN: two further new image files (6.5 KiB each; binary files not shown)
BIN: imgs/rainbow-logo.gif (new file, 4.4 KiB; binary file not shown)


@@ -187,7 +187,7 @@ public:
     face_analyser->PostprocessOutputFile(msclr::interop::marshal_as<std::string>(file));
 }
-void AddNextFrame(OpenCVWrappers::RawImage^ frame, List<System::Tuple<double, double>^>^ landmarks, bool success, bool online, bool vis_hog) {
+void AddNextFrame(OpenCVWrappers::RawImage^ frame, List<System::Tuple<double, double>^>^ landmarks, bool success, bool online) {
     // Construct an OpenCV matric from the landmarks
     cv::Mat_<double> landmarks_mat(landmarks->Count * 2, 1, 0.0);
@@ -197,7 +197,9 @@ public:
         landmarks_mat.at<double>(i + landmarks->Count, 0) = landmarks[i]->Item2;
     }
-    face_analyser->AddNextFrame(frame->Mat, landmarks_mat, success, 0, online, vis_hog);
+    //(captured_image, face_model.detected_landmarks, face_model.detection_success, sequence_reader.time_stamp, sequence_reader.IsWebcam());
+    face_analyser->AddNextFrame(frame->Mat, landmarks_mat, success, 0, online);
     face_analyser->GetLatestHOG(*hog_features, *num_rows, *num_cols);


@@ -97,7 +97,6 @@ namespace CppInterop {
         params->model_location = "model/main_clnf_demos.txt";
     }
-    params->track_gaze = true;
 }
 // TODO this could have optimize for demo mode (also could appropriately update sigma, reg_factor as well)


@@ -10,6 +10,9 @@ include_directories(../../3rdParty/OpenBLAS/include)
 #LandmarkDetector library
 include_directories(../../local/LandmarkDetector/include)
+#Utilities library
+include_directories(../../local/Utilities/include)
 SET(SOURCE
     src/Face_utils.cpp
     src/FaceAnalyser.cpp


@@ -98,7 +98,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <SDLCheck>false</SDLCheck>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -117,10 +117,9 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <SDLCheck>false</SDLCheck>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
-<PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>
@@ -140,7 +139,7 @@
 <IntrinsicFunctions>true</IntrinsicFunctions>
 <SDLCheck>
 </SDLCheck>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 </ClCompile>
@@ -160,15 +159,16 @@
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
-<Optimization>MaxSpeed</Optimization>
+<Optimization>Full</Optimization>
 <FunctionLevelLinking>true</FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
 <SDLCheck>
 </SDLCheck>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\LandmarkDetector\include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
-<PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
+<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 </ClCompile>
 <Link>
 <GenerateDebugInformation>true</GenerateDebugInformation>
@@ -187,6 +187,9 @@
 <ProjectReference Include="..\..\3rdParty\dlib\dlib.vcxproj">
 <Project>{b47a5f12-2567-44e9-ae49-35763ec82149}</Project>
 </ProjectReference>
+<ProjectReference Include="..\LandmarkDetector\LandmarkDetector.vcxproj">
+<Project>{bdc1d107-de17-4705-8e7b-cdde8bfb2bf8}</Project>
+</ProjectReference>
 </ItemGroup>
 <ItemGroup>
 <ClCompile Include="src\FaceAnalyserParameters.cpp" />


@@ -64,9 +64,7 @@ public:
     // Constructor for FaceAnalyser using the parameters structure
     FaceAnalyser(const FaceAnalysis::FaceAnalyserParameters& face_analyser_params);
-    void AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online = false, bool visualise = true);
+    void AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online = false);
-    cv::Mat GetLatestHOGDescriptorVisualisation();
     double GetCurrentTimeSeconds();
@@ -75,9 +73,8 @@ public:
     std::vector<std::pair<std::string, double>> GetCurrentAUsReg() const; // AU intensity
     std::vector<std::pair<std::string, double>> GetCurrentAUsCombined() const; // Both presense and intensity
-    // A standalone call for predicting AUs from a static image, the first element in the pair represents occurence the second intensity
-    // This call is useful for detecting action units in images
-    std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>> PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool visualise = true);
+    // A standalone call for predicting AUs and computing face texture features from a static image
+    void PredictStaticAUsAndComputeFeatures(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks);
     void Reset();
@@ -132,7 +129,6 @@ private:
     // Cache of intermediate images
     cv::Mat aligned_face_for_au;
     cv::Mat aligned_face_for_output;
-    cv::Mat hog_descriptor_visualisation;
     bool out_grayscale;
     // Private members to be used for predictions


@@ -52,8 +52,6 @@ namespace FaceAnalysis
     void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size = 8);
-    void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation);
     // The following two methods go hand in hand
     void ExtractSummaryStatistics(const cv::Mat_<double>& descriptors, cv::Mat_<double>& sum_stats, bool mean, bool stdev, bool max_min);
     void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep = 120);
@@ -70,26 +68,10 @@ namespace FaceAnalysis
     cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst);
     //===========================================================================
-    // Visualisation functions
+    // Visualisation functions, TODO move
     //===========================================================================
     void Project(cv::Mat_<float>& dest, const cv::Mat_<float>& mesh, float fx, float fy, float cx, float cy);
-    //===========================================================================
-    // Angle representation conversion helpers
-    //===========================================================================
-    cv::Matx33f Euler2RotationMatrix(const cv::Vec3f& eulerAngles);
-    // Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-    cv::Vec3f RotationMatrix2Euler(const cv::Matx33f& rotation_matrix);
-    cv::Vec3f Euler2AxisAngle(const cv::Vec3f& euler);
-    cv::Vec3f AxisAngle2Euler(const cv::Vec3f& axis_angle);
-    cv::Matx33f AxisAngle2RotationMatrix(const cv::Vec3f& axis_angle);
-    cv::Vec3f RotationMatrix2AxisAngle(const cv::Matx33f& rotation_matrix);
     //============================================================================
     // Matrix reading functionality
     //============================================================================
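The angle helpers removed above carried their convention in the header comment: R = Rx * Ry * Rz under the XYZ convention. As a worked reference, a sketch of Euler2RotationMatrix composed from the three standard axis rotations; this follows only the documented composition order, and the sign conventions of the relocated implementation may differ.

// Sketch of an Euler-to-rotation-matrix conversion under the documented
// XYZ convention (R = Rx * Ry * Rz); the signs here are the textbook ones.
#include <cmath>
#include <opencv2/core.hpp>

cv::Matx33f Euler2RotationMatrixSketch(const cv::Vec3f& euler)
{
    const float s1 = std::sin(euler[0]), c1 = std::cos(euler[0]);
    const float s2 = std::sin(euler[1]), c2 = std::cos(euler[1]);
    const float s3 = std::sin(euler[2]), c3 = std::cos(euler[2]);

    const cv::Matx33f Rx(1,  0,   0,
                         0,  c1, -s1,
                         0,  s1,  c1);
    const cv::Matx33f Ry( c2, 0,  s2,
                          0,  1,  0,
                         -s2, 0,  c2);
    const cv::Matx33f Rz(c3, -s3, 0,
                         s3,  c3, 0,
                         0,   0,  1);

    return Rx * Ry * Rz; // composition order as documented in the removed header
}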


@@ -248,7 +248,7 @@ int GetViewId(const vector<cv::Vec3d> orientations_all, const cv::Vec3d& orienta
 }
-std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool visualise)
+void FaceAnalyser::PredictStaticAUsAndComputeFeatures(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks)
 {
     // Extract shape parameters from the detected landmarks
@@ -259,6 +259,16 @@ std::pair<std::vector<std::pair<string,
     // First align the face
     AlignFaceMask(aligned_face_for_au, frame, detected_landmarks, params_global, pdm, triangulation, true, 0.7, 112, 112);
+    // If the output requirement matches use the already computed one, else compute it again
+    if (align_scale_out == align_scale_au && align_width_out == align_width_au && align_height_out == align_height_au)
+    {
+        aligned_face_for_output = aligned_face_for_au.clone();
+    }
+    else
+    {
+        AlignFaceMask(aligned_face_for_output, frame, detected_landmarks, params_global, pdm, triangulation, true, align_scale_out, align_width_out, align_height_out);
+    }
     // Extract HOG descriptor from the frame and convert it to a useable format
     cv::Mat_<double> hog_descriptor;
     Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);
@@ -286,12 +296,6 @@ std::pair<std::vector<std::pair<string,
     //cv::Mat_<double> aligned_face_cols_double;
     //aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
-    // Visualising the median HOG
-    if (visualise)
-    {
-        FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_visualisation);
-    }
     // Perform AU prediction
     auto AU_predictions_intensity = PredictCurrentAUs(orientation_to_use);
     auto AU_predictions_occurence = PredictCurrentAUsClass(orientation_to_use);
@@ -306,11 +310,12 @@ std::pair<std::vector<std::pair<string,
         AU_predictions_intensity[au].second = 5;
     }
-    return std::pair<std::vector<std::pair<std::string, double>>, std::vector<std::pair<std::string, double>>>(AU_predictions_intensity, AU_predictions_occurence);
+    AU_predictions_reg = AU_predictions_intensity;
+    AU_predictions_class = AU_predictions_occurence;
 }
void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online, bool visualise) void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, bool success, double timestamp_seconds, bool online)
{ {
frames_tracking++; frames_tracking++;
@ -318,12 +323,13 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
// Extract shape parameters from the detected landmarks // Extract shape parameters from the detected landmarks
cv::Vec6f params_global; cv::Vec6f params_global;
cv::Mat_<float> params_local; cv::Mat_<float> params_local;
pdm.CalcParams(params_global, params_local, detected_landmarks);
// First align the face if tracking was successfull // First align the face if tracking was successfull
if(success) if(success)
{ {
pdm.CalcParams(params_global, params_local, detected_landmarks);
// The aligned face requirement for AUs // The aligned face requirement for AUs
AlignFaceMask(aligned_face_for_au, frame, detected_landmarks, params_global, pdm, triangulation, true, align_scale_au, align_width_au, align_height_au); AlignFaceMask(aligned_face_for_au, frame, detected_landmarks, params_global, pdm, triangulation, true, align_scale_au, align_width_au, align_height_au);
@ -343,6 +349,7 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
aligned_face_for_au = cv::Mat(align_height_au, align_width_au, CV_8UC3); aligned_face_for_au = cv::Mat(align_height_au, align_width_au, CV_8UC3);
aligned_face_for_output.setTo(0); aligned_face_for_output.setTo(0);
aligned_face_for_au.setTo(0); aligned_face_for_au.setTo(0);
params_local = cv::Mat_<float>(pdm.NumberOfModes(), 1, 0.0f);
} }
if (aligned_face_for_output.channels() == 3 && out_grayscale) if (aligned_face_for_output.channels() == 3 && out_grayscale)
@ -424,21 +431,9 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom); UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
} }
// Visualising the median HOG
if(visualise)
{
FaceAnalysis::Visualise_FHOG(hog_descriptor, num_hog_rows, num_hog_cols, hog_descriptor_visualisation);
}
// Perform AU prediction // Perform AU prediction
AU_predictions_reg = PredictCurrentAUs(orientation_to_use); AU_predictions_reg = PredictCurrentAUs(orientation_to_use);
std::vector<std::pair<std::string, double>> AU_predictions_reg_corrected;
if(online)
{
AU_predictions_reg_corrected = CorrectOnlineAUs(AU_predictions_reg, orientation_to_use, true, false, success, true);
}
// Add the reg predictions to the historic data // Add the reg predictions to the historic data
for (size_t au = 0; au < AU_predictions_reg.size(); ++au) for (size_t au = 0; au < AU_predictions_reg.size(); ++au)
{ {
@ -452,6 +447,9 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
else else
{ {
AU_predictions_reg_all_hist[AU_predictions_reg[au].first].push_back(0); AU_predictions_reg_all_hist[AU_predictions_reg[au].first].push_back(0);
// Also invalidate AU if not successful
AU_predictions_reg[au].second = 0;
} }
} }
@ -469,23 +467,27 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const cv::Mat_<float>& det
else else
{ {
AU_predictions_class_all_hist[AU_predictions_class[au].first].push_back(0); AU_predictions_class_all_hist[AU_predictions_class[au].first].push_back(0);
// Also invalidate AU if not successful
AU_predictions_class[au].second = 0;
} }
} }
// A workaround for online predictions to make them a bit more accurate
if(online) std::vector<std::pair<std::string, double>> AU_predictions_reg_corrected;
if (online)
{ {
AU_predictions_reg_corrected = CorrectOnlineAUs(AU_predictions_reg, orientation_to_use, true, false, success, true);
AU_predictions_reg = AU_predictions_reg_corrected; AU_predictions_reg = AU_predictions_reg_corrected;
} }
else
{ // Useful for prediction corrections (calibration after the whole video is processed)
if (success && frames_tracking_succ - 1 < max_init_frames) if (success && frames_tracking_succ - 1 < max_init_frames)
{ {
hog_desc_frames_init.push_back(hog_descriptor); hog_desc_frames_init.push_back(hog_descriptor);
geom_descriptor_frames_init.push_back(geom_descriptor_frame); geom_descriptor_frames_init.push_back(geom_descriptor_frame);
views.push_back(orientation_to_use); views.push_back(orientation_to_use);
} }
}
this->current_time_seconds = timestamp_seconds; this->current_time_seconds = timestamp_seconds;
@ -982,11 +984,6 @@ vector<pair<string, double>> FaceAnalyser::PredictCurrentAUsClass(int view)
return predictions; return predictions;
} }
cv::Mat FaceAnalyser::GetLatestHOGDescriptorVisualisation()
{
return hog_descriptor_visualisation;
}
vector<pair<string, double>> FaceAnalyser::GetCurrentAUsClass() const vector<pair<string, double>> FaceAnalyser::GetCurrentAUsClass() const
{ {
return AU_predictions_class; return AU_predictions_class;
@ -1324,7 +1321,10 @@ void FaceAnalyser::PostprocessOutputFile(string output_file)
// Now overwrite the whole file // Now overwrite the whole file
std::ofstream outfile(output_file, ios_base::out); std::ofstream outfile(output_file, ios_base::out);
// Write the header // Write the header
outfile << std::setprecision(4); outfile << std::setprecision(2);
outfile << std::fixed;
outfile << std::noshowpoint;
outfile << output_file_contents[0].c_str() << endl; outfile << output_file_contents[0].c_str() << endl;
// Write the contents // Write the contents
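The precision switch above changes every numeric cell that the postprocessing step rewrites. A standalone illustration of the difference (the sample values are made up):

#include <iostream>
#include <iomanip>

int main()
{
	double au_intensity = 1.73642;
	double timestamp = 0.0333333;

	// Old behaviour: 4 significant digits in the default float notation
	std::cout << std::setprecision(4) << au_intensity << ", " << timestamp << "\n";
	// prints: 1.736, 0.03333

	// New behaviour: fixed-point, 2 digits after the decimal point
	std::cout << std::setprecision(2) << std::fixed << std::noshowpoint;
	std::cout << au_intensity << ", " << timestamp << "\n";
	// prints: 1.74, 0.03

	return 0;
}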
@@ -242,30 +242,6 @@ namespace FaceAnalysis
 }
 }
-void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
-{
-// First convert to dlib format
-dlib::array2d<dlib::matrix<float,31,1> > hog(num_rows, num_cols);
-cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
-for(int y = 0; y < num_cols; ++y)
-{
-for(int x = 0; x < num_rows; ++x)
-{
-for(unsigned int o = 0; o < 31; ++o)
-{
-hog[y][x](o) = *descriptor_it++;
-}
-}
-}
-// Draw the FHOG to OpenCV format
-auto fhog_vis = dlib::draw_fhog(hog);
-visualisation = dlib::toMat(fhog_vis).clone();
-}
 // Create a row vector Felzenszwalb HOG descriptor from a given image
 void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size)
 {
@@ -442,7 +418,7 @@ namespace FaceAnalysis
 //===========================================================================
-// Visualisation functions
+// Visualisation functions, TODO rem
 //===========================================================================
 void Project(cv::Mat_<float>& dest, const cv::Mat_<float>& mesh, float fx, float fy, float cx, float cy)
 {
@@ -485,81 +461,6 @@ namespace FaceAnalysis
 }
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Matx33f Euler2RotationMatrix(const cv::Vec3f& eulerAngles)
-{
-cv::Matx33f rotation_matrix;
-float s1 = sin(eulerAngles[0]);
-float s2 = sin(eulerAngles[1]);
-float s3 = sin(eulerAngles[2]);
-float c1 = cos(eulerAngles[0]);
-float c2 = cos(eulerAngles[1]);
-float c3 = cos(eulerAngles[2]);
-rotation_matrix(0, 0) = c2 * c3;
-rotation_matrix(0, 1) = -c2 *s3;
-rotation_matrix(0, 2) = s2;
-rotation_matrix(1, 0) = c1 * s3 + c3 * s1 * s2;
-rotation_matrix(1, 1) = c1 * c3 - s1 * s2 * s3;
-rotation_matrix(1, 2) = -c2 * s1;
-rotation_matrix(2, 0) = s1 * s3 - c1 * c3 * s2;
-rotation_matrix(2, 1) = c3 * s1 + c1 * s2 * s3;
-rotation_matrix(2, 2) = c1 * c2;
-return rotation_matrix;
-}
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3f RotationMatrix2Euler(const cv::Matx33f& rotation_matrix)
-{
-float q0 = sqrt(1 + rotation_matrix(0, 0) + rotation_matrix(1, 1) + rotation_matrix(2, 2)) / 2.0f;
-float q1 = (rotation_matrix(2, 1) - rotation_matrix(1, 2)) / (4.0f*q0);
-float q2 = (rotation_matrix(0, 2) - rotation_matrix(2, 0)) / (4.0f*q0);
-float q3 = (rotation_matrix(1, 0) - rotation_matrix(0, 1)) / (4.0f*q0);
-float t1 = 2.0f * (q0*q2 + q1*q3);
-float yaw = asin(2.0 * (q0*q2 + q1*q3));
-float pitch = atan2(2.0 * (q0*q1 - q2*q3), q0*q0 - q1*q1 - q2*q2 + q3*q3);
-float roll = atan2(2.0 * (q0*q3 - q1*q2), q0*q0 + q1*q1 - q2*q2 - q3*q3);
-return cv::Vec3f(pitch, yaw, roll);
-}
-cv::Vec3f Euler2AxisAngle(const cv::Vec3f& euler)
-{
-cv::Matx33f rotMatrix = Euler2RotationMatrix(euler);
-cv::Vec3f axis_angle;
-cv::Rodrigues(rotMatrix, axis_angle);
-return axis_angle;
-}
-cv::Vec3f AxisAngle2Euler(const cv::Vec3f& axis_angle)
-{
-cv::Matx33f rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return RotationMatrix2Euler(rotation_matrix);
-}
-cv::Matx33f AxisAngle2RotationMatrix(const cv::Vec3f& axis_angle)
-{
-cv::Matx33f rotation_matrix;
-cv::Rodrigues(axis_angle, rotation_matrix);
-return rotation_matrix;
-}
-cv::Vec3f RotationMatrix2AxisAngle(const cv::Matx33f& rotation_matrix)
-{
-cv::Vec3f axis_angle;
-cv::Rodrigues(rotation_matrix, axis_angle);
-return axis_angle;
-}
 //============================================================================
 // Matrix reading functionality
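With the dlib-based visualiser gone from this file, Extract_FHOG_descriptor is the remaining FHOG entry point. A hypothetical call site (the helper name and the explicit cell size of 8 are illustrative; the 112x112 aligned face matches the AU alignment used earlier in this diff):

#include <opencv2/core/core.hpp>
#include <Face_utils.h>

cv::Mat_<double> DescribeAlignedFace(const cv::Mat& aligned_face)
{
	// The descriptor comes back as a single row:
	// num_rows * num_cols cells with 31 orientation channels each
	cv::Mat_<double> hog_descriptor;
	int num_rows, num_cols;
	FaceAnalysis::Extract_FHOG_descriptor(hog_descriptor, aligned_face, num_rows, num_cols, 8);
	return hog_descriptor;
}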
@@ -48,6 +48,7 @@
 #endif
 #include <Face_utils.h>
+#include <RotationHelpers.h>
 // OpenBLAS
 #include <cblas.h>
@@ -107,7 +108,7 @@ void PDM::CalcShape2D(cv::Mat_<float>& out_shape, const cv::Mat_<float>& params_
 // get the rotation matrix from the euler angles
 cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
 // get the 3D shape of the object
 cv::Mat_<float> Shape_3D = mean_shape + princ_comp * params_local;
@@ -138,7 +139,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, const cv::Rect_<float>& bound
 CalcShape3D(current_shape, params_local);
 // rotate the shape
-cv::Matx33f rotation_matrix = Euler2RotationMatrix(rotation);
+cv::Matx33f rotation_matrix = Utilities::Euler2RotationMatrix(rotation);
 cv::Mat_<float> reshaped = current_shape.reshape(1, 3);
@@ -213,7 +214,7 @@ void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6f&
 // Get the rotation matrix
 cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
 float r11 = currRot(0, 0);
 float r12 = currRot(0, 1);
@@ -306,7 +307,7 @@ void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6f&
 this->CalcShape3D(shape_3D, params_local);
 cv::Vec3f euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f currRot = Euler2RotationMatrix(euler);
+cv::Matx33f currRot = Utilities::Euler2RotationMatrix(euler);
 float r11 = currRot(0, 0);
 float r12 = currRot(0, 1);
@@ -404,7 +405,7 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
 // get the original rotation matrix
 cv::Vec3f eulerGlobal(params_global[1], params_global[2], params_global[3]);
-cv::Matx33f R1 = Euler2RotationMatrix(eulerGlobal);
+cv::Matx33f R1 = Utilities::Euler2RotationMatrix(eulerGlobal);
 // construct R' = [1, -wz, wy
 // wz, 1, -wx
@@ -422,8 +423,8 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
 cv::Matx33f R3 = R1 *R2;
 // Extract euler angle (through axis angle first to make sure it's legal)
-cv::Vec3f axis_angle = RotationMatrix2AxisAngle(R3);
+cv::Vec3f axis_angle = Utilities::RotationMatrix2AxisAngle(R3);
-cv::Vec3f euler = AxisAngle2Euler(axis_angle);
+cv::Vec3f euler = Utilities::AxisAngle2Euler(axis_angle);
 params_global[1] = euler[0];
 params_global[2] = euler[1];
@@ -466,7 +467,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, cv::Mat_<float>& out_params_l
 float scaling = ((width / model_bbox.width) + (height / model_bbox.height)) / 2;
 cv::Vec3f rotation_init(rotation[0], rotation[1], rotation[2]);
-cv::Matx33f R = Euler2RotationMatrix(rotation_init);
+cv::Matx33f R = Utilities::Euler2RotationMatrix(rotation_init);
 cv::Vec2f translation((min_x + max_x) / 2.0, (min_y + max_y) / 2.0);
 cv::Mat_<float> loc_params(this->NumberOfModes(),1, 0.0);
@@ -553,7 +554,7 @@ void PDM::CalcParams(cv::Vec6f& out_params_global, cv::Mat_<float>& out_params_l
 translation[0] = glob_params[4];
 translation[1] = glob_params[5];
-R = Euler2RotationMatrix(rotation_init);
+R = Utilities::Euler2RotationMatrix(rotation_init);
 R_2D(0,0) = R(0,0);R_2D(0,1) = R(0,1); R_2D(0,2) = R(0,2);
 R_2D(1,0) = R(1,0);R_2D(1,1) = R(1,1); R_2D(1,2) = R(1,2);
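The UpdateModelParameters hunk applies a first-order rotation update R2 ≈ I + [w]x and then legalises the product by a round trip through axis-angle. That step in isolation, as a sketch built on the relocated helpers:

#include <opencv2/core/core.hpp>
#include <RotationHelpers.h>

cv::Matx33f ComposeSmallRotation(const cv::Matx33f& R1, const cv::Vec3f& w)
{
	// First-order approximation of a rotation by small angles (wx, wy, wz)
	cv::Matx33f R2( 1.0f, -w[2],  w[1],
	                w[2],  1.0f, -w[0],
	               -w[1],  w[0],  1.0f);

	// The product drifts slightly off a true rotation because R2 is approximate
	cv::Matx33f R3 = R1 * R2;

	// A round trip through axis-angle (cv::Rodrigues inside) restores a legal rotation
	cv::Vec3f axis_angle = Utilities::RotationMatrix2AxisAngle(R3);
	return Utilities::AxisAngle2RotationMatrix(axis_angle);
}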
@@ -10,6 +10,9 @@ include_directories(../../3rdParty/OpenBLAS/include)
 #LandmarkDetector library
 include_directories(../../local/LandmarkDetector/include)
+#Utilities library
+include_directories(../../local/Utilities/include)
 SET(SOURCE
 src/GazeEstimation.cpp
 )
@@ -91,7 +91,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
 </ClCompile>
 </ItemDefinitionGroup>
@@ -100,7 +100,7 @@
 <WarningLevel>Level3</WarningLevel>
 <Optimization>Disabled</Optimization>
 <SDLCheck>true</SDLCheck>
-<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
 </ClCompile>
@@ -113,7 +113,7 @@
 <IntrinsicFunctions>true</IntrinsicFunctions>
 <SDLCheck>
 </SDLCheck>
-<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
@@ -126,15 +126,17 @@
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
 <ClCompile>
 <WarningLevel>Level3</WarningLevel>
-<Optimization>MaxSpeed</Optimization>
+<Optimization>Full</Optimization>
 <FunctionLevelLinking>true</FunctionLevelLinking>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
 <SDLCheck>
 </SDLCheck>
-<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;../LandmarkDetector/include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
 <PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <EnableEnhancedInstructionSet>AdvancedVectorExtensions</EnableEnhancedInstructionSet>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
+<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
 </ClCompile>
 <Link>
 <EnableCOMDATFolding>true</EnableCOMDATFolding>
@@ -43,7 +43,6 @@ namespace GazeAnalysis
 {
 void EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Point3f& gaze_absolute, float fx, float fy, float cx, float cy, bool left_eye);
-void DrawGaze(cv::Mat img, const LandmarkDetector::CLNF& clnf_model, cv::Point3f gazeVecAxisLeft, cv::Point3f gazeVecAxisRight, float fx, float fy, float cx, float cy);
 // Getting the gaze angle in radians with respect to the world coordinates (camera plane), when looking ahead straight at camera plane the gaze angle will be (0,0)
 cv::Vec2d GetGazeAngle(cv::Point3f& gaze_vector_1, cv::Point3f& gaze_vector_2);
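After this removal, estimation and angle extraction are the public gaze API. A hypothetical call site (the header name and the source of the camera intrinsics fx, fy, cx, cy are assumptions):

#include <GazeEstimation.h>

cv::Vec2d EstimateGazeAngle(const LandmarkDetector::CLNF& clnf_model,
	float fx, float fy, float cx, float cy)
{
	// One 3D gaze direction per eye, then the combined angle in radians
	cv::Point3f gaze_left, gaze_right;
	GazeAnalysis::EstimateGaze(clnf_model, gaze_left, fx, fy, cx, cy, true);
	GazeAnalysis::EstimateGaze(clnf_model, gaze_right, fx, fy, cx, cy, false);
	return GazeAnalysis::GetGazeAngle(gaze_left, gaze_right);
}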
@@ -42,15 +42,12 @@
 #include "LandmarkDetectorUtils.h"
 #include "LandmarkDetectorFunc.h"
+#include "RotationHelpers.h"
 using namespace std;
 using namespace GazeAnalysis;
-// For subpixel accuracy drawing
-const int gaze_draw_shiftbits = 4;
-const int gaze_draw_multiplier = 1 << 4;
 cv::Point3f RaySphereIntersect(cv::Point3f rayOrigin, cv::Point3f rayDir, cv::Point3f sphereOrigin, float sphereRadius){
 float dx = rayDir.x;
@@ -93,7 +90,7 @@ void GazeAnalysis::EstimateGaze(const LandmarkDetector::CLNF& clnf_model, cv::Po
 {
 cv::Vec6d headPose = LandmarkDetector::GetPose(clnf_model, fx, fy, cx, cy);
 cv::Vec3d eulerAngles(headPose(3), headPose(4), headPose(5));
-cv::Matx33d rotMat = LandmarkDetector::Euler2RotationMatrix(eulerAngles);
+cv::Matx33d rotMat = Utilities::Euler2RotationMatrix(eulerAngles);
 int part = -1;
 for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
@@ -149,47 +146,3 @@ cv::Vec2d GazeAnalysis::GetGazeAngle(cv::Point3f& gaze_vector_1, cv::Point3f& ga
 return cv::Vec2d(x_angle, y_angle);
 }
-void GazeAnalysis::DrawGaze(cv::Mat img, const LandmarkDetector::CLNF& clnf_model, cv::Point3f gazeVecAxisLeft, cv::Point3f gazeVecAxisRight, float fx, float fy, float cx, float cy)
-{
-cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
-int part_left = -1;
-int part_right = -1;
-for (size_t i = 0; i < clnf_model.hierarchical_models.size(); ++i)
-{
-if (clnf_model.hierarchical_model_names[i].compare("left_eye_28") == 0)
-{
-part_left = i;
-}
-if (clnf_model.hierarchical_model_names[i].compare("right_eye_28") == 0)
-{
-part_right = i;
-}
-}
-cv::Mat eyeLdmks3d_left = clnf_model.hierarchical_models[part_left].GetShape(fx, fy, cx, cy);
-cv::Point3f pupil_left = GetPupilPosition(eyeLdmks3d_left);
-cv::Mat eyeLdmks3d_right = clnf_model.hierarchical_models[part_right].GetShape(fx, fy, cx, cy);
-cv::Point3f pupil_right = GetPupilPosition(eyeLdmks3d_right);
-vector<cv::Point3d> points_left;
-points_left.push_back(cv::Point3d(pupil_left));
-points_left.push_back(cv::Point3d(pupil_left + gazeVecAxisLeft*50.0));
-vector<cv::Point3d> points_right;
-points_right.push_back(cv::Point3d(pupil_right));
-points_right.push_back(cv::Point3d(pupil_right + gazeVecAxisRight*50.0));
-cv::Mat_<double> proj_points;
-cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
-LandmarkDetector::Project(proj_points, mesh_0, fx, fy, cx, cy);
-cv::line(img, cv::Point(cvRound(proj_points.at<double>(0,0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)gaze_draw_multiplier)),
-cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)gaze_draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, gaze_draw_shiftbits);
-cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
-LandmarkDetector::Project(proj_points, mesh_1, fx, fy, cx, cy);
-cv::line(img, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)gaze_draw_multiplier)),
-cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)gaze_draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)gaze_draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, gaze_draw_shiftbits);
-}
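The removed drawing code used cv::line's fixed-point shift parameter for sub-pixel accuracy: endpoints are scaled by 2^shift and passed as integers. The trick in isolation, for reference (cv::LINE_AA stands in for the legacy CV_AA constant):

#include <opencv2/imgproc/imgproc.hpp>

void DrawSubpixelLine(cv::Mat& img, cv::Point2d a, cv::Point2d b)
{
	const int shiftbits = 4;        // 4 fractional bits, as in the removed constants
	const int multiplier = 1 << shiftbits;
	cv::line(img,
		cv::Point(cvRound(a.x * multiplier), cvRound(a.y * multiplier)),
		cv::Point(cvRound(b.x * multiplier), cvRound(b.y * multiplier)),
		cv::Scalar(110, 220, 0), 2, cv::LINE_AA, shiftbits);
}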
@@ -6,6 +6,9 @@ include_directories(${BOOST_INCLUDE_DIR})
 #OpenBlas library
 include_directories(../../3rdParty/OpenBLAS/include)
+#Utilities library
+include_directories(../../local/Utilities/include)
 SET(SOURCE
 src/CCNF_patch_expert.cpp
 src/LandmarkDetectionValidator.cpp
@@ -93,7 +93,7 @@
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
 <ClCompile>
 <Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <MinimalRebuild>true</MinimalRebuild>
 <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
@@ -117,7 +117,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
 <ClCompile>
 <Optimization>Disabled</Optimization>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <PreprocessorDefinitions>WIN64;_DEBUG;_LIB;EIGEN_MPL2_ONLY;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
 <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -141,7 +141,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
 <ClCompile>
 <Optimization>Full</Optimization>
 <IntrinsicFunctions>true</IntrinsicFunctions>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
 <FunctionLevelLinking>false</FunctionLevelLinking>
@@ -165,8 +165,8 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
 <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
 <ClCompile>
 <Optimization>Full</Optimization>
-<IntrinsicFunctions>true</IntrinsicFunctions>
+<IntrinsicFunctions>false</IntrinsicFunctions>
-<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+<AdditionalIncludeDirectories>./include;$(SolutionDir)lib\local\Utilities\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 <PreprocessorDefinitions>WIN64;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
 <FunctionLevelLinking>false</FunctionLevelLinking>
@@ -178,6 +178,7 @@ xcopy /I /E /Y /D "$(SolutionDir)lib\3rdParty\OpenCV3.1\classifiers" "$(OutDir)c
 <OpenMPSupport>false</OpenMPSupport>
 <AdditionalOptions>/Zm300 %(AdditionalOptions)</AdditionalOptions>
 <MultiProcessorCompilation>true</MultiProcessorCompilation>
+<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
 </ClCompile>
 <Lib>
 <OutputFile>$(OutDir)$(TargetName)$(TargetExt)</OutputFile>
@@ -93,16 +93,13 @@ public:
 dlib::frontal_face_detector face_detector_HOG;
-// Validate if the detected landmarks are correct using an SVR regressor
+// Validate if the detected landmarks are correct using a predictor on detected landmarks
 DetectionValidator landmark_validator;
-// Indicating if landmark detection succeeded (based on SVR validator)
+// Indicating if landmark detection succeeded (based on detection validator)
 bool detection_success;
-// Indicating if the tracking has been initialised (for video based tracking)
-bool tracking_initialised;
-// The actual output of the regressor (-1 is perfect detection 1 is worst detection)
+// Representing how confident we are that tracking succeeds (0 - complete failure, 1 - perfect success)
 double detection_certainty;
 // Indicator if eye model is there for eye detection
@@ -131,10 +128,6 @@ public:
 // Useful when resetting or initialising the model closer to a specific location (when multiple faces are present)
 cv::Point_<double> preference_det;
-// Useful to control where face detections should not occur (for live demos etc.)
-double detect_Z_max = -1; // Do not detect faces further than this in mm. (-1) refers to detecting all faces
-cv::Rect_<double> detect_ROI = cv::Rect(0, 0, 1, 1); // the face detection bounds (0,0,1,1) means full image (0.25,0.25,0.5,0.5) would imply region of the face only
 // A default constructor
 CLNF();
@@ -178,8 +171,17 @@ public:
 // Helper reading function
 void Read_CLNF(string clnf_location);
+// Allows to set initialization across hierarchical models as well
+bool IsInitialized() const { return tracking_initialised; }
+void SetInitialized(bool initialized);
+void SetDetectionSuccess(bool detection_success);
 private:
+// Indicating if the tracking has been initialised (for video based tracking)
+bool tracking_initialised;
 // the speedup of RLMS using precalculated KDE responses (described in Saragih 2011 RLMS paper)
 map<int, cv::Mat_<float> > kde_resp_precalc;
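External code now reaches the tracking flag through the accessors, as the tracking loop below does. A hypothetical reset helper built on the new API (the header name is an assumption):

#include <LandmarkDetectorModel.h>

void ResetIfTrackingLost(LandmarkDetector::CLNF& clnf_model)
{
	// tracking_initialised is private now; SetInitialized also propagates
	// the flag across the hierarchical models (per the comment above)
	if (clnf_model.IsInitialized() && clnf_model.failures_in_a_row > 100)
	{
		clnf_model.SetInitialized(false); // forces a face redetection
	}
}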
@@ -102,9 +102,6 @@ struct FaceModelParameters
 // Should the parameters be refined for different scales
 bool refine_parameters;
-// Using the brand new and experimental gaze tracker
-bool track_gaze;
 FaceModelParameters();
 FaceModelParameters(vector<string> &arguments);
@@ -48,18 +48,6 @@ namespace LandmarkDetector
 //===========================================================================
 // Defining a set of useful utility functions to be used within CLNF
-//=============================================================================================
-// Helper functions for parsing the inputs
-//=============================================================================================
-void get_video_input_output_params(vector<string> &input_video_file, vector<string> &output_files,
-vector<string> &output_video_files, string &output_codec, vector<string> &arguments);
-void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy, vector<string> &arguments);
-void get_image_input_output_params(vector<string> &input_image_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
-vector<cv::Rect_<double>> &input_bounding_boxes, vector<string> &arguments);
 //===========================================================================
 // Fast patch expert response computation (linear model across a ROI) using normalised cross-correlation
 //===========================================================================
@@ -82,13 +70,6 @@ namespace LandmarkDetector
 //===========================================================================
 // Visualisation functions
 //===========================================================================
-void Project(cv::Mat_<double>& dest, const cv::Mat_<double>& mesh, double fx, double fy, double cx, double cy);
-void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy);
-// Drawing face bounding box
-vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy);
-void DrawBox(vector<pair<cv::Point, cv::Point>> lines, cv::Mat image, cv::Scalar color, int thickness);
 vector<cv::Point2d> CalculateVisibleLandmarks(const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
 vector<cv::Point2d> CalculateVisibleLandmarks(const CLNF& clnf_model);
 vector<cv::Point2d> CalculateVisibleEyeLandmarks(const CLNF& clnf_model);
@@ -96,45 +77,23 @@ namespace LandmarkDetector
 vector<cv::Point2d> CalculateAllLandmarks(const cv::Mat_<double>& shape2D);
 vector<cv::Point2d> CalculateAllLandmarks(const CLNF& clnf_model);
 vector<cv::Point2d> CalculateAllEyeLandmarks(const CLNF& clnf_model);
-vector<cv::Point3d> Calculate3DEyeLandmarks(const CLNF& clnf_model, double fx, double fy, double cx, double cy);
-void DrawLandmarks(cv::Mat img, vector<cv::Point> landmarks);
-void Draw(cv::Mat img, const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
-void Draw(cv::Mat img, const cv::Mat_<double>& shape2D);
-void Draw(cv::Mat img, const CLNF& clnf_model);
-//===========================================================================
-// Angle representation conversion helpers
-//===========================================================================
-cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles);
-// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
-cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix);
-cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler);
-cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle);
-cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle);
-cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix);
 //============================================================================
 // Face detection helpers
 //============================================================================
 // Face detection using Haar cascade classifier
-bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
+bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity);
-bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
+bool DetectFaces(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier);
 // The preference point allows for disambiguation if multiple faces are present (pick the closest one), if it is not set the biggest face is chosen
-bool DetectSingleFace(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier, const cv::Point preference = cv::Point(-1,-1), double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
+bool DetectSingleFace(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, cv::CascadeClassifier& classifier, const cv::Point preference = cv::Point(-1,-1));
 // Face detection using HOG-SVM classifier
-bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, std::vector<double>& confidences, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
+bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, std::vector<double>& confidences);
-bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, std::vector<double>& confidences, double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0, 0.0, 1.0, 1.0));
+bool DetectFacesHOG(vector<cv::Rect_<double> >& o_regions, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, std::vector<double>& confidences);
 // The preference point allows for disambiguation if multiple faces are present (pick the closest one), if it is not set the biggest face is chosen
-bool DetectSingleFaceHOG(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, double& confidence, const cv::Point preference = cv::Point(-1,-1), double min_width = -1, cv::Rect_<double> roi = cv::Rect_<double>(0.0,0.0,1.0,1.0));
+bool DetectSingleFaceHOG(cv::Rect_<double>& o_region, const cv::Mat_<uchar>& intensity, dlib::frontal_face_detector& classifier, double& confidence, const cv::Point preference = cv::Point(-1,-1));
 //============================================================================
 // Matrix reading functionality
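Call sites drop the pruning arguments accordingly; a hypothetical updated caller:

#include <LandmarkDetectorUtils.h>
#include <dlib/image_processing/frontal_face_detector.h>

bool FindFace(const cv::Mat_<uchar>& grayscale_image, cv::Rect_<double>& face)
{
	// min_width and the ROI parameter were removed from these signatures;
	// the preference point still defaults to (-1,-1), i.e. pick the biggest face
	dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
	double confidence;
	return LandmarkDetector::DetectSingleFaceHOG(face, grayscale_image, detector, confidence);
}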
@@ -488,6 +488,10 @@ double DetectionValidator::Check(const cv::Vec3d& orientation, const cv::Mat_<uc
 //dec = CheckCNN(warped, id);
 dec = CheckCNN_tbb(warped, id);
 }
+// Convert it to a more interpretable signal (0 low confidence, 1 high confidence)
+dec = 0.5 * (1 - dec);
 return dec;
 }
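The added remap turns the raw validator score in [-1, 1] (-1 best, 1 worst) into the 0-to-1 confidence now documented on detection_certainty in the CLNF header. Spelled out at the endpoints:

// dec = 0.5 * (1 - dec) maps:
//   raw -1 (perfect detection)  -> 1.0 (full confidence)
//   raw  0 (undecided)          -> 0.5
//   raw +1 (complete failure)   -> 0.0
double ToConfidence(double raw_validator_output)
{
	return 0.5 * (1.0 - raw_validator_output);
}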
@@ -35,6 +35,7 @@
 #include "stdafx.h"
 #include <LandmarkDetectorFunc.h>
+#include "RotationHelpers.h"
 // OpenCV includes
 #include <opencv2/core/core.hpp>
@@ -83,7 +84,7 @@ cv::Vec6d LandmarkDetector::GetPose(const CLNF& clnf_model, float fx, float fy,
 cv::solvePnP(landmarks_3D, landmarks_2D, camera_matrix, cv::Mat(), vec_rot, vec_trans, true);
-cv::Vec3d euler = LandmarkDetector::AxisAngle2Euler(vec_rot);
+cv::Vec3d euler = Utilities::AxisAngle2Euler(vec_rot);
 return cv::Vec6d(vec_trans[0], vec_trans[1], vec_trans[2], euler[0], euler[1], euler[2]);
 }
@@ -136,12 +137,12 @@ cv::Vec6d LandmarkDetector::GetPoseWRTCamera(const CLNF& clnf_model, float fx, f
 double z_y = cv::sqrt(vec_trans[1] * vec_trans[1] + vec_trans[2] * vec_trans[2]);
 double eul_y = -atan2(vec_trans[0], z_y);
-cv::Matx33d camera_rotation = LandmarkDetector::Euler2RotationMatrix(cv::Vec3d(eul_x, eul_y, 0));
+cv::Matx33d camera_rotation = Utilities::Euler2RotationMatrix(cv::Vec3d(eul_x, eul_y, 0));
-cv::Matx33d head_rotation = LandmarkDetector::AxisAngle2RotationMatrix(vec_rot);
+cv::Matx33d head_rotation = Utilities::AxisAngle2RotationMatrix(vec_rot);
 cv::Matx33d corrected_rotation = camera_rotation * head_rotation;
-cv::Vec3d euler_corrected = LandmarkDetector::RotationMatrix2Euler(corrected_rotation);
+cv::Vec3d euler_corrected = Utilities::RotationMatrix2Euler(corrected_rotation);
 return cv::Vec6d(vec_trans[0], vec_trans[1], vec_trans[2], euler_corrected[0], euler_corrected[1], euler_corrected[2]);
 }
@@ -214,10 +215,10 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 // and using a smaller search area
 // Indicating that this is a first detection in video sequence or after restart
-bool initial_detection = !clnf_model.tracking_initialised;
+bool initial_detection = !clnf_model.IsInitialized();
 // Only do it if there was a face detection at all
-if(clnf_model.tracking_initialised)
+if(clnf_model.IsInitialized())
 {
 // The area of interest search size will depend if the previous track was successful
@@ -253,8 +254,8 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 // This is used both for detection (if the tracking has not been initialised yet) and if the tracking failed (however we do this every n frames, for speed)
 // This also has the effect of an attempt to reinitialise just after the tracking has failed, which is useful during large motions
-if((!clnf_model.tracking_initialised && (clnf_model.failures_in_a_row + 1) % (params.reinit_video_every * 6) == 0)
+if((!clnf_model.IsInitialized() && (clnf_model.failures_in_a_row + 1) % (params.reinit_video_every * 6) == 0)
-|| (clnf_model.tracking_initialised && !clnf_model.detection_success && params.reinit_video_every > 0 && clnf_model.failures_in_a_row % params.reinit_video_every == 0))
+|| (clnf_model.IsInitialized() && !clnf_model.detection_success && params.reinit_video_every > 0 && clnf_model.failures_in_a_row % params.reinit_video_every == 0))
 {
 cv::Rect_<double> bounding_box;
@@ -276,33 +277,21 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 bool face_detection_success;
-// For pruning face detections
-double min_size;
-if (clnf_model.detect_Z_max == -1)
-{
-min_size = -1;
-}
-else
-{
-double fx_est = 500.0 * (grayscale_image.cols / 640.0);
-min_size = fx_est * 150.0 / clnf_model.detect_Z_max;
-}
 if(params.curr_face_detector == FaceModelParameters::HOG_SVM_DETECTOR)
 {
 double confidence;
-face_detection_success = LandmarkDetector::DetectSingleFaceHOG(bounding_box, grayscale_image, clnf_model.face_detector_HOG, confidence, preference_det, min_size, clnf_model.detect_ROI);
+face_detection_success = LandmarkDetector::DetectSingleFaceHOG(bounding_box, grayscale_image, clnf_model.face_detector_HOG, confidence, preference_det);
 }
 else if(params.curr_face_detector == FaceModelParameters::HAAR_DETECTOR)
 {
-face_detection_success = LandmarkDetector::DetectSingleFace(bounding_box, grayscale_image, clnf_model.face_detector_HAAR, preference_det, min_size, clnf_model.detect_ROI);
+face_detection_success = LandmarkDetector::DetectSingleFace(bounding_box, grayscale_image, clnf_model.face_detector_HAAR, preference_det);
 }
 // Attempt to detect landmarks using the detected face (if unsuccessful the detection will be ignored)
 if(face_detection_success)
 {
 // Indicate that tracking has started as a face was detected
-clnf_model.tracking_initialised = true;
+clnf_model.SetInitialized(true);
 // Keep track of old model values so that they can be restored if redetection fails
 cv::Vec6d params_global_init = clnf_model.params_global;
@@ -347,7 +336,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 }
 // if the model has not been initialised yet class it as a failure
-if(!clnf_model.tracking_initialised)
+if(!clnf_model.IsInitialized())
 {
 clnf_model.failures_in_a_row++;
 }
@@ -355,7 +344,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 // un-initialise the tracking
 if( clnf_model.failures_in_a_row > 100)
 {
-clnf_model.tracking_initialised = false;
+clnf_model.SetInitialized(false);
 }
 return clnf_model.detection_success;
@@ -371,7 +360,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
 clnf_model.pdm.CalcParams(clnf_model.params_global, bounding_box, clnf_model.params_local);
 // indicate that face was detected so initialisation is not necessary
-clnf_model.tracking_initialised = true;
+clnf_model.SetInitialized(true);
 }
 return DetectLandmarksInVideo(grayscale_image, clnf_model, params);
@@ -475,6 +464,9 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
 clnf_model.hierarchical_models[part].landmark_likelihoods = best_landmark_likelihoods_h[part].clone();
 }
+// To indicate that tracking/detection started and the values are valid, we assume that there is a face in the bounding box
+clnf_model.SetInitialized(true);
 return best_success;
 }
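For reference, the GetPoseWRTCamera correction composes a camera-ray rotation with the head rotation, so a face away from the image centre that looks straight into the camera reads as zero rotation. A sketch of that correction (the eul_x lines mirror the eul_y computation shown in the hunk above, and are an assumption here):

#include <opencv2/core/core.hpp>
#include <RotationHelpers.h>
#include <cmath>

cv::Vec3d CorrectRotationForCameraRay(const cv::Vec3d& vec_trans, const cv::Vec3d& vec_rot)
{
	// Orientation of the ray from the camera to the head
	double z_x = std::sqrt(vec_trans[0] * vec_trans[0] + vec_trans[2] * vec_trans[2]);
	double eul_x = std::atan2(vec_trans[1], z_x);
	double z_y = std::sqrt(vec_trans[1] * vec_trans[1] + vec_trans[2] * vec_trans[2]);
	double eul_y = -std::atan2(vec_trans[0], z_y);

	// Re-express the head rotation relative to that ray
	cv::Matx33d camera_rotation = Utilities::Euler2RotationMatrix(cv::Vec3d(eul_x, eul_y, 0));
	cv::Matx33d head_rotation = Utilities::AxisAngle2RotationMatrix(vec_rot);
	return Utilities::RotationMatrix2Euler(camera_rotation * head_rotation);
}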
@ -45,6 +45,7 @@
// Local includes // Local includes
#include <LandmarkDetectorUtils.h> #include <LandmarkDetectorUtils.h>
#include <RotationHelpers.h>
using namespace LandmarkDetector; using namespace LandmarkDetector;
@ -66,10 +67,10 @@ CLNF::CLNF(string fname)
} }
// Copy constructor (makes a deep copy of CLNF) // Copy constructor (makes a deep copy of CLNF)
CLNF::CLNF(const CLNF& other): pdm(other.pdm), params_local(other.params_local.clone()), params_global(other.params_global), detected_landmarks(other.detected_landmarks.clone()), CLNF::CLNF(const CLNF& other) : pdm(other.pdm), params_local(other.params_local.clone()), params_global(other.params_global), detected_landmarks(other.detected_landmarks.clone()),
landmark_likelihoods(other.landmark_likelihoods.clone()), patch_experts(other.patch_experts), landmark_validator(other.landmark_validator), face_detector_location(other.face_detector_location), landmark_likelihoods(other.landmark_likelihoods.clone()), patch_experts(other.patch_experts), landmark_validator(other.landmark_validator), face_detector_location(other.face_detector_location),
hierarchical_mapping(other.hierarchical_mapping), hierarchical_models(other.hierarchical_models), hierarchical_model_names(other.hierarchical_model_names), hierarchical_mapping(other.hierarchical_mapping), hierarchical_models(other.hierarchical_models), hierarchical_model_names(other.hierarchical_model_names),
hierarchical_params(other.hierarchical_params), eye_model(other.eye_model), detect_Z_max(other.detect_Z_max), detect_ROI(other.detect_ROI) hierarchical_params(other.hierarchical_params), eye_model(other.eye_model)
{ {
this->detection_success = other.detection_success; this->detection_success = other.detection_success;
this->tracking_initialised = other.tracking_initialised; this->tracking_initialised = other.tracking_initialised;
@ -78,20 +79,20 @@ CLNF::CLNF(const CLNF& other): pdm(other.pdm), params_local(other.params_local.c
this->failures_in_a_row = other.failures_in_a_row; this->failures_in_a_row = other.failures_in_a_row;
// Load the CascadeClassifier (as it does not have a proper copy constructor) // Load the CascadeClassifier (as it does not have a proper copy constructor)
if(!face_detector_location.empty()) if (!face_detector_location.empty())
{ {
this->face_detector_HAAR.load(face_detector_location); this->face_detector_HAAR.load(face_detector_location);
} }
// Make sure the matrices are allocated properly // Make sure the matrices are allocated properly
this->triangulations.resize(other.triangulations.size()); this->triangulations.resize(other.triangulations.size());
for(size_t i = 0; i < other.triangulations.size(); ++i) for (size_t i = 0; i < other.triangulations.size(); ++i)
{ {
// Make sure the matrix is copied. // Make sure the matrix is copied.
this->triangulations[i] = other.triangulations[i].clone(); this->triangulations[i] = other.triangulations[i].clone();
} }
// Make sure the matrices are allocated properly // Make sure the matrices are allocated properly
for(std::map<int, cv::Mat_<float>>::const_iterator it = other.kde_resp_precalc.begin(); it!= other.kde_resp_precalc.end(); it++) for (std::map<int, cv::Mat_<float>>::const_iterator it = other.kde_resp_precalc.begin(); it != other.kde_resp_precalc.end(); it++)
{ {
// Make sure the matrix is copied. // Make sure the matrix is copied.
 this->kde_resp_precalc.insert(std::pair<int, cv::Mat_<float>>(it->first, it->second.clone()));
@@ -111,7 +112,7 @@ CLNF & CLNF::operator= (const CLNF& other)
 params_global = other.params_global;
 detected_landmarks = other.detected_landmarks.clone();
-landmark_likelihoods =other.landmark_likelihoods.clone();
+landmark_likelihoods = other.landmark_likelihoods.clone();
 patch_experts = Patch_experts(other.patch_experts);
 landmark_validator = DetectionValidator(other.landmark_validator);
 face_detector_location = other.face_detector_location;
@@ -125,20 +126,20 @@ CLNF & CLNF::operator= (const CLNF& other)
 this->eye_model = other.eye_model;
 // Load the CascadeClassifier (as it does not have a proper copy constructor)
-if(!face_detector_location.empty())
+if (!face_detector_location.empty())
 {
 this->face_detector_HAAR.load(face_detector_location);
 }
 // Make sure the matrices are allocated properly
 this->triangulations.resize(other.triangulations.size());
-for(size_t i = 0; i < other.triangulations.size(); ++i)
+for (size_t i = 0; i < other.triangulations.size(); ++i)
 {
 // Make sure the matrix is copied.
 this->triangulations[i] = other.triangulations[i].clone();
 }
 // Make sure the matrices are allocated properly
-for(std::map<int, cv::Mat_<float>>::const_iterator it = other.kde_resp_precalc.begin(); it!= other.kde_resp_precalc.end(); it++)
+for (std::map<int, cv::Mat_<float>>::const_iterator it = other.kde_resp_precalc.begin(); it != other.kde_resp_precalc.end(); it++)
 {
 // Make sure the matrix is copied.
 this->kde_resp_precalc.insert(std::pair<int, cv::Mat_<float>>(it->first, it->second.clone()));
@@ -149,9 +150,6 @@ CLNF & CLNF::operator= (const CLNF& other)
 this->hierarchical_models = other.hierarchical_models;
 this->hierarchical_model_names = other.hierarchical_model_names;
 this->hierarchical_params = other.hierarchical_params;
-this->detect_Z_max = other.detect_Z_max;
-this->detect_ROI = detect_ROI;
 }
 face_detector_HOG = dlib::get_frontal_face_detector();
@@ -192,8 +190,6 @@ CLNF::CLNF(const CLNF&& other)
 this->eye_model = other.eye_model;
-this->detect_Z_max = other.detect_Z_max;
-this->detect_ROI = detect_ROI;
 }
 // Assignment operator for rvalues
@@ -229,9 +225,6 @@ CLNF & CLNF::operator= (const CLNF&& other)
 this->eye_model = other.eye_model;
-this->detect_Z_max = other.detect_Z_max;
-this->detect_ROI = detect_ROI;
 return *this;
 }
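
The repeated clone() calls in these operators matter because cv::Mat copies share pixel data by default; assigning a Mat copies only the header, not the buffer. A minimal standalone sketch of the distinction (plain OpenCV, names are illustrative only):

#include <opencv2/core/core.hpp>
#include <cassert>

int main()
{
	cv::Mat_<float> a = cv::Mat_<float>::zeros(2, 2);
	cv::Mat_<float> shallow = a;       // shares the same buffer as a
	cv::Mat_<float> deep = a.clone();  // owns its own buffer
	a(0, 0) = 1.0f;
	assert(shallow(0, 0) == 1.0f);     // aliasing: the "copy" changed too
	assert(deep(0, 0) == 0.0f);        // the clone is unaffected
	return 0;
}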
@@ -241,7 +234,7 @@ void CLNF::Read_CLNF(string clnf_location)
 // Location of modules
 ifstream locations(clnf_location.c_str(), ios_base::in);
-if(!locations.is_open())
+if (!locations.is_open())
 {
 cout << "Couldn't open the CLNF model file, aborting" << endl;
 cout.flush();
@@ -272,13 +265,13 @@ void CLNF::Read_CLNF(string clnf_location)
 getline(lineStream, location);
-if(location.size() > 0)
+if (location.size() > 0)
 location.erase(location.begin()); // remove the first space
 // remove carriage return at the end for compatibility with unix systems
-if(location.size() > 0 && location.at(location.size()-1) == '\r')
+if (location.size() > 0 && location.at(location.size() - 1) == '\r')
 {
-location = location.substr(0, location.size()-1);
+location = location.substr(0, location.size() - 1);
 }
 // append the location to the root location (boost syntax)
@@ -304,18 +297,18 @@ void CLNF::Read_CLNF(string clnf_location)
 // read in the triangulations
 triangulations.resize(numViews);
-for(int i = 0; i < numViews; ++i)
+for (int i = 0; i < numViews; ++i)
 {
 LandmarkDetector::SkipComments(triangulationFile);
 LandmarkDetector::ReadMat(triangulationFile, triangulations[i]);
 }
 cout << "Done" << endl;
 }
-else if(module.compare("PatchesIntensity") == 0)
+else if (module.compare("PatchesIntensity") == 0)
 {
 intensity_expert_locations.push_back(location);
 }
-else if(module.compare("PatchesCCNF") == 0)
+else if (module.compare("PatchesCCNF") == 0)
 {
 ccnf_expert_locations.push_back(location);
 }
@@ -335,7 +328,7 @@ void CLNF::Read(string main_location)
 cout << "Reading the CLNF landmark detector/tracker from: " << main_location << endl;
 ifstream locations(main_location.c_str(), ios_base::in);
-if(!locations.is_open())
+if (!locations.is_open())
 {
 cout << "Couldn't open the model file, aborting" << endl;
 return;
@@ -345,10 +338,6 @@ void CLNF::Read(string main_location)
 // The other module locations should be defined as relative paths from the main model
 boost::filesystem::path root = boost::filesystem::path(main_location).parent_path();
-// Initializing defaults that could be overwriten
-detect_Z_max = -1;
-detect_ROI = cv::Rect_<double>(0, 0, 1, 1);
 // Assume no eye model, unless read-in
 eye_model = false;
@@ -368,9 +357,9 @@ void CLNF::Read(string main_location)
 lineStream >> location;
 // remove carriage return at the end for compatibility with unix systems
-if(location.size() > 0 && location.at(location.size()-1) == '\r')
+if (location.size() > 0 && location.at(location.size() - 1) == '\r')
 {
-location = location.substr(0, location.size()-1);
+location = location.substr(0, location.size() - 1);
 }
@@ -383,14 +372,14 @@ void CLNF::Read(string main_location)
 // The CLNF module includes the PDM and the patch experts
 Read_CLNF(location);
 }
-else if(module.compare("LandmarkDetector_part") == 0)
+else if (module.compare("LandmarkDetector_part") == 0)
 {
 string part_name;
 lineStream >> part_name;
 cout << "Reading part based module...." << part_name << endl;
 vector<pair<int, int>> mappings;
-while(!lineStream.eof())
+while (!lineStream.eof())
 {
 int ind_in_main;
 lineStream >> ind_in_main;
@@ -413,7 +402,7 @@ void CLNF::Read(string main_location)
 params.refine_hierarchical = false;
 params.refine_parameters = false;
-if(part_name.compare("left_eye") == 0 || part_name.compare("right_eye") == 0)
+if (part_name.compare("left_eye") == 0 || part_name.compare("right_eye") == 0)
 {
 vector<int> windows_large;
@@ -431,7 +420,7 @@ void CLNF::Read(string main_location)
 params.reg_factor = 0.1;
 params.sigma = 2;
 }
-else if(part_name.compare("left_eye_28") == 0 || part_name.compare("right_eye_28") == 0)
+else if (part_name.compare("left_eye_28") == 0 || part_name.compare("right_eye_28") == 0)
 {
 vector<int> windows_large;
 windows_large.push_back(3);
@@ -453,7 +442,7 @@ void CLNF::Read(string main_location)
 eye_model = true;
 }
-else if(part_name.compare("mouth") == 0)
+else if (part_name.compare("mouth") == 0)
 {
 vector<int> windows_large;
 windows_large.push_back(7);
@@ -470,7 +459,7 @@ void CLNF::Read(string main_location)
 params.reg_factor = 1.0;
 params.sigma = 2.0;
 }
-else if(part_name.compare("brow") == 0)
+else if (part_name.compare("brow") == 0)
 {
 vector<int> windows_large;
 windows_large.push_back(11);
@@ -487,7 +476,7 @@ void CLNF::Read(string main_location)
 params.reg_factor = 10.0;
 params.sigma = 3.5;
 }
-else if(part_name.compare("inner") == 0)
+else if (part_name.compare("inner") == 0)
 {
 vector<int> windows_large;
 windows_large.push_back(9);
@@ -514,25 +503,15 @@ void CLNF::Read(string main_location)
 landmark_validator.Read(location);
 cout << "Done" << endl;
 }
-else if (module.compare("DetectorConstaints") == 0)
-{
-cout << "Reading detectir constraints...";
-lineStream >> detect_Z_max;
-lineStream >> detect_ROI.x;
-lineStream >> detect_ROI.y;
-lineStream >> detect_ROI.width;
-lineStream >> detect_ROI.height;
-cout << "Done" << endl;
-}
 }
 detected_landmarks.create(2 * pdm.NumberOfPoints(), 1);
 detected_landmarks.setTo(0);
-detection_success = false;
+SetDetectionSuccess(false);
-tracking_initialised = false;
+SetInitialized(false);
 model_likelihood = -10; // very low
-detection_certainty = 1; // very uncertain
+detection_certainty = 0; // very uncertain
 // Initialising default values for the rest of the variables
@@ -541,13 +520,31 @@ void CLNF::Read(string main_location)
 params_local.setTo(0.0);
 // global parameters (pose) [scale, euler_x, euler_y, euler_z, tx, ty]
-params_global = cv::Vec6d(1, 0, 0, 0, 0, 0);
+params_global = cv::Vec6d(0, 0, 0, 0, 0, 0);
 failures_in_a_row = -1;
-preference_det.x = -1;
-preference_det.y = -1;
+}
+
+void CLNF::SetDetectionSuccess(bool success)
+{
+this->detection_success = success;
+for (size_t i = 0; i < hierarchical_models.size(); ++i)
+{
+hierarchical_models[i].SetDetectionSuccess(success);
+}
+}
+
+void CLNF::SetInitialized(bool initialized)
+{
+this->tracking_initialised = initialized;
+for (size_t i = 0; i < hierarchical_models.size(); ++i)
+{
+hierarchical_models[i].SetInitialized(initialized);
+}
 }
@@ -556,10 +553,10 @@ void CLNF::Reset()
 {
 detected_landmarks.setTo(0);
-detection_success = false;
+SetDetectionSuccess(false);
-tracking_initialised = false;
+SetInitialized(false);
 model_likelihood = -10; // very low
-detection_certainty = 1; // very uncertain
+detection_certainty = 0; // very uncertain
 // local parameters (shape)
 params_local.setTo(0.0);
@@ -594,17 +591,12 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 // Store the landmarks converged on in detected_landmarks
 pdm.CalcShape2D(detected_landmarks, params_local, params_global);
-if(params.refine_hierarchical && hierarchical_models.size() > 0)
+if (params.refine_hierarchical && hierarchical_models.size() > 0)
 {
 bool parts_used = false;
 // Do the hierarchical models in parallel
-tbb::parallel_for(0, (int)hierarchical_models.size(), [&](int part_model){
+tbb::parallel_for(0, (int)hierarchical_models.size(), [&](int part_model) {
-{
-// Only do the synthetic eye models if we're doing gaze
-if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-&& !params.track_gaze))
 {
 int n_part_points = hierarchical_models[part_model].pdm.NumberOfPoints();
@@ -639,21 +631,16 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 hierarchical_models[part_model].pdm.CalcShape2D(hierarchical_models[part_model].detected_landmarks, hierarchical_models[part_model].params_local, hierarchical_models[part_model].params_global);
 }
 }
-}
 });
 // Recompute main model based on the fit part models
-if(parts_used)
+if (parts_used)
 {
 for (size_t part_model = 0; part_model < hierarchical_models.size(); ++part_model)
 {
 vector<pair<int, int>> mappings = this->hierarchical_mapping[part_model];
-if (!((hierarchical_model_names[part_model].compare("right_eye_28") == 0 ||
-hierarchical_model_names[part_model].compare("left_eye_28") == 0)
-&& !params.track_gaze))
-{
 // Reincorporate the models into main tracker
 for (size_t mapping_ind = 0; mapping_ind < mappings.size(); ++mapping_ind)
 {
@@ -661,7 +648,6 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 detected_landmarks.at<double>(mappings[mapping_ind].first + pdm.NumberOfPoints()) = hierarchical_models[part_model].detected_landmarks.at<double>(mappings[mapping_ind].second + hierarchical_models[part_model].pdm.NumberOfPoints());
 }
 }
-}
 pdm.CalcParams(params_global, params_local, detected_landmarks);
 pdm.CalcShape2D(detected_landmarks, params_local, params_global);
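
The part-model refinement fans out over the hierarchical models with tbb::parallel_for, one lambda invocation per part. A minimal self-contained sketch of that pattern (the vector of parts is illustrative, not the OpenFace types):

#include <tbb/tbb.h>
#include <vector>
#include <cstdio>

int main()
{
	std::vector<double> part_likelihoods(4, 0.0);
	// One lambda invocation per part model, scheduled across worker threads
	tbb::parallel_for(0, (int)part_likelihoods.size(), [&](int part_model) {
		// Each index touches only its own slot, so no locking is needed
		part_likelihoods[part_model] = -0.5 * part_model;
	});
	for (double l : part_likelihoods)
		std::printf("%f\n", l);
	return 0;
}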
@@ -670,24 +656,24 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& pa
 }
 // Check detection correctness
-if(params.validate_detections && fit_success)
+if (params.validate_detections && fit_success)
 {
 cv::Vec3d orientation(params_global[1], params_global[2], params_global[3]);
 detection_certainty = landmark_validator.Check(orientation, image, detected_landmarks);
-detection_success = detection_certainty < params.validation_boundary;
+detection_success = detection_certainty > params.validation_boundary;
 }
 else
 {
 detection_success = fit_success;
-if(fit_success)
+if (fit_success)
 {
-detection_certainty = -1;
+detection_certainty = 1;
 }
 else
 {
-detection_certainty = 1;
+detection_certainty = 0;
 }
 }
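
Together with the validation_boundary change further down in this diff (-0.45 to 0.725), this hunk flips the meaning of detection_certainty: it used to be an error-like score where lower was better and success meant falling below the boundary; after this commit it reads as a confidence in [0, 1] where higher is better (1 = certain, 0 = very uncertain). A hedged sketch of the new check, with the boundary value taken from this diff:

// After this commit: certainty is a confidence, 1 = certain, 0 = uncertain.
bool IsReliableDetection(double detection_certainty, double validation_boundary = 0.725)
{
	// Success now means the validator's confidence clears the boundary from above.
	return detection_certainty > validation_boundary;
}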
@@ -702,7 +688,7 @@ bool CLNF::Fit(const cv::Mat_<uchar>& im, const std::vector<int>& window_sizes,
 assert(im.channels() == 1);
 // Placeholder for the landmarks
-cv::Mat_<double> current_shape(2 * pdm.NumberOfPoints() , 1, 0.0);
+cv::Mat_<double> current_shape(2 * pdm.NumberOfPoints(), 1, 0.0);
 int n = pdm.NumberOfPoints();
@@ -718,27 +704,27 @@ bool CLNF::Fit(const cv::Mat_<uchar>& im, const std::vector<int>& window_sizes,
 FaceModelParameters tmp_parameters = parameters;
 // Optimise the model across a number of areas of interest (usually in descending window size and ascending scale size)
-for(int scale = 0; scale < num_scales; scale++)
+for (int scale = 0; scale < num_scales; scale++)
 {
 int window_size = window_sizes[scale];
-if(window_size == 0 || 0.9 * patch_experts.patch_scaling[scale] > params_global[0])
+if (window_size == 0 || 0.9 * patch_experts.patch_scaling[scale] > params_global[0])
 continue;
 // The patch expert response computation
 patch_experts.Response(patch_expert_responses, sim_ref_to_img, sim_img_to_ref, im, pdm, params_global, params_local, window_size, scale);
-if(parameters.refine_parameters == true)
+if (parameters.refine_parameters == true)
 {
 // Adapt the parameters based on scale (want to reduce regularisation as scale increases, but increase sigma and Tikhonov)
-tmp_parameters.reg_factor = parameters.reg_factor - 15 * log(patch_experts.patch_scaling[scale]/0.25)/log(2);
+tmp_parameters.reg_factor = parameters.reg_factor - 15 * log(patch_experts.patch_scaling[scale] / 0.25) / log(2);
-if(tmp_parameters.reg_factor <= 0)
+if (tmp_parameters.reg_factor <= 0)
 tmp_parameters.reg_factor = 0.001;
-tmp_parameters.sigma = parameters.sigma + 0.25 * log(patch_experts.patch_scaling[scale]/0.25)/log(2);
+tmp_parameters.sigma = parameters.sigma + 0.25 * log(patch_experts.patch_scaling[scale] / 0.25) / log(2);
-tmp_parameters.weight_factor = parameters.weight_factor + 2 * parameters.weight_factor * log(patch_experts.patch_scaling[scale]/0.25)/log(2);
+tmp_parameters.weight_factor = parameters.weight_factor + 2 * parameters.weight_factor * log(patch_experts.patch_scaling[scale] / 0.25) / log(2);
 }
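
The adaptation is a log2 ramp anchored at the base patch scale 0.25: log(s / 0.25) / log(2) is 0 at s = 0.25, 1 at s = 0.5, and 2 at s = 1.0. A plain-arithmetic sketch with the default reg_factor = 25 from this diff (the base sigma value is illustrative, not taken from the source):

#include <cmath>
#include <cstdio>
#include <initializer_list>

int main()
{
	double base_reg = 25.0, base_sigma = 1.5; // base_sigma is illustrative
	for (double s : {0.25, 0.5, 1.0})
	{
		double ramp = std::log(s / 0.25) / std::log(2.0); // 0, 1, 2
		double reg = base_reg - 15.0 * ramp;              // 25, 10, -5 -> clamped
		if (reg <= 0) reg = 0.001;
		double sigma = base_sigma + 0.25 * ramp;          // 1.5, 1.75, 2.0
		std::printf("scale %.2f: reg %.3f sigma %.2f\n", s, reg, sigma);
	}
	return 0;
}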
 // Get the current landmark locations
@@ -754,7 +740,7 @@ bool CLNF::Fit(const cv::Mat_<uchar>& im, const std::vector<int>& window_sizes,
 this->model_likelihood = this->NU_RLMS(params_global, params_local, patch_expert_responses, cv::Vec6d(params_global), params_local.clone(), current_shape, sim_img_to_ref, sim_ref_to_img, window_size, view_id, false, scale, this->landmark_likelihoods, tmp_parameters);
 // Can't track very small images reliably (less than ~30px across)
-if(params_global[0] < 0.25)
+if (params_global[0] < 0.25)
 {
 cout << "Face too small for landmark detection" << endl;
 return false;
@@ -773,30 +759,30 @@ void CLNF::NonVectorisedMeanShift_precalc_kde(cv::Mat_<float>& out_mean_shifts,
 float step_size = 0.1;
 // if this has not been precomputed, precompute it, otherwise use it
-if(kde_resp_precalc.find(resp_size) == kde_resp_precalc.end())
+if (kde_resp_precalc.find(resp_size) == kde_resp_precalc.end())
 {
-kde_resp = cv::Mat_<float>((int)((resp_size / step_size)*(resp_size/step_size)), resp_size * resp_size);
+kde_resp = cv::Mat_<float>((int)((resp_size / step_size)*(resp_size / step_size)), resp_size * resp_size);
 cv::MatIterator_<float> kde_it = kde_resp.begin();
-for(int x = 0; x < resp_size/step_size; x++)
+for (int x = 0; x < resp_size / step_size; x++)
 {
 float dx = x * step_size;
-for(int y = 0; y < resp_size/step_size; y++)
+for (int y = 0; y < resp_size / step_size; y++)
 {
 float dy = y * step_size;
-int ii,jj;
+int ii, jj;
-float v,vx,vy;
+float v, vx, vy;
-for(ii = 0; ii < resp_size; ii++)
+for (ii = 0; ii < resp_size; ii++)
 {
-vx = (dy-ii)*(dy-ii);
+vx = (dy - ii)*(dy - ii);
-for(jj = 0; jj < resp_size; jj++)
+for (jj = 0; jj < resp_size; jj++)
 {
-vy = (dx-jj)*(dx-jj);
+vy = (dx - jj)*(dx - jj);
 // the KDE evaluation of that point
-v = exp(a*(vx+vy));
+v = exp(a*(vx + vy));
 *kde_it++ = v;
 }
@@ -813,12 +799,12 @@ void CLNF::NonVectorisedMeanShift_precalc_kde(cv::Mat_<float>& out_mean_shifts,
 }
 // for every point (patch) calculating mean-shift
-for(int i = 0; i < n; i++)
+for (int i = 0; i < n; i++)
 {
-if(patch_experts.visibilities[scale][view_id].at<int>(i,0) == 0)
+if (patch_experts.visibilities[scale][view_id].at<int>(i, 0) == 0)
 {
-out_mean_shifts.at<float>(i,0) = 0;
+out_mean_shifts.at<float>(i, 0) = 0;
-out_mean_shifts.at<float>(i+n,0) = 0;
+out_mean_shifts.at<float>(i + n, 0) = 0;
 continue;
 }
@@ -827,33 +813,33 @@ void CLNF::NonVectorisedMeanShift_precalc_kde(cv::Mat_<float>& out_mean_shifts,
 float dy = dys.at<float>(i);
 // Ensure that we are within bounds (important for precalculation)
-if(dx < 0)
+if (dx < 0)
 dx = 0;
-if(dy < 0)
+if (dy < 0)
 dy = 0;
-if(dx > resp_size - step_size)
+if (dx > resp_size - step_size)
 dx = resp_size - step_size;
-if(dy > resp_size - step_size)
+if (dy > resp_size - step_size)
 dy = resp_size - step_size;
 // Pick the row from precalculated kde that approximates the current dx, dy best
-int closest_col = (int)(dy /step_size + 0.5); // Plus 0.5 is there, as C++ rounds down with int cast
+int closest_col = (int)(dy / step_size + 0.5); // Plus 0.5 is there, as C++ rounds down with int cast
-int closest_row = (int)(dx /step_size + 0.5); // Plus 0.5 is there, as C++ rounds down with int cast
+int closest_row = (int)(dx / step_size + 0.5); // Plus 0.5 is there, as C++ rounds down with int cast
-int idx = closest_row * ((int)(resp_size/step_size + 0.5)) + closest_col; // Plus 0.5 is there, as C++ rounds down with int cast
+int idx = closest_row * ((int)(resp_size / step_size + 0.5)) + closest_col; // Plus 0.5 is there, as C++ rounds down with int cast
 cv::MatIterator_<float> kde_it = kde_resp.begin() + kde_resp.cols*idx;
-float mx=0.0;
+float mx = 0.0;
-float my=0.0;
+float my = 0.0;
-float sum=0.0;
+float sum = 0.0;
 // Iterate over the patch responses here
 cv::MatConstIterator_<float> p = patch_expert_responses[i].begin();
-for(int ii = 0; ii < resp_size; ii++)
+for (int ii = 0; ii < resp_size; ii++)
 {
-for(int jj = 0; jj < resp_size; jj++)
+for (int jj = 0; jj < resp_size; jj++)
 {
 // the KDE evaluation of that point multiplied by the probability at the current, xi, yi
@@ -868,11 +854,11 @@ void CLNF::NonVectorisedMeanShift_precalc_kde(cv::Mat_<float>& out_mean_shifts,
 }
 }
-float msx = (mx/sum - dx);
+float msx = (mx / sum - dx);
-float msy = (my/sum - dy);
+float msy = (my / sum - dy);
-out_mean_shifts.at<float>(i,0) = msx;
+out_mean_shifts.at<float>(i, 0) = msx;
-out_mean_shifts.at<float>(i+n,0) = msy;
+out_mean_shifts.at<float>(i + n, 0) = msy;
 }
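
The update computed here is a kernel-density-weighted mean shift over each patch response map: every response pixel votes for its coordinates with weight response × exp(a · dist²), and the shift is the weighted mean minus the current offset; the precalculated kde_resp table just caches the Gaussian factors on a 0.1-pixel grid. A minimal sketch of one step in the non-precalculated form (standalone, illustrative names):

#include <opencv2/core/core.hpp>
#include <cmath>

// One mean-shift step over a resp_size x resp_size response map,
// given the current offset (dx, dy) and kernel constant a = -0.5 / sigma^2.
static void MeanShiftStep(const cv::Mat_<float>& response, float dx, float dy,
                          float a, float& msx, float& msy)
{
	float mx = 0.0f, my = 0.0f, sum = 0.0f;
	for (int ii = 0; ii < response.rows; ii++)
	{
		for (int jj = 0; jj < response.cols; jj++)
		{
			// Gaussian kernel centred on (dx, dy), weighted by the patch response
			float w = response(ii, jj) * std::exp(a * ((dy - ii)*(dy - ii) + (dx - jj)*(dx - jj)));
			sum += w;
			mx += w * jj; // x votes come from the column index
			my += w * ii; // y votes come from the row index
		}
	}
	msx = mx / sum - dx;
	msy = my / sum - dy;
}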
@@ -883,39 +869,39 @@ void CLNF::GetWeightMatrix(cv::Mat_<float>& WeightMatrix, int scale, int view_id
 int n = pdm.NumberOfPoints();
 // Is the weight matrix needed at all
-if(parameters.weight_factor > 0)
+if (parameters.weight_factor > 0)
 {
-WeightMatrix = cv::Mat_<float>::zeros(n*2, n*2);
+WeightMatrix = cv::Mat_<float>::zeros(n * 2, n * 2);
-for (int p=0; p < n; p++)
+for (int p = 0; p < n; p++)
 {
-if(!patch_experts.ccnf_expert_intensity.empty())
+if (!patch_experts.ccnf_expert_intensity.empty())
 {
 // for the x dimension
-WeightMatrix.at<float>(p,p) = WeightMatrix.at<float>(p,p) + patch_experts.ccnf_expert_intensity[scale][view_id][p].patch_confidence;
+WeightMatrix.at<float>(p, p) = WeightMatrix.at<float>(p, p) + patch_experts.ccnf_expert_intensity[scale][view_id][p].patch_confidence;
 // for the y dimension
-WeightMatrix.at<float>(p+n,p+n) = WeightMatrix.at<float>(p,p);
+WeightMatrix.at<float>(p + n, p + n) = WeightMatrix.at<float>(p, p);
 }
 else
 {
 // Across the modalities add the confidences
-for(size_t pc=0; pc < patch_experts.svr_expert_intensity[scale][view_id][p].svr_patch_experts.size(); pc++)
+for (size_t pc = 0; pc < patch_experts.svr_expert_intensity[scale][view_id][p].svr_patch_experts.size(); pc++)
 {
 // for the x dimension
-WeightMatrix.at<float>(p,p) = WeightMatrix.at<float>(p,p) + patch_experts.svr_expert_intensity[scale][view_id][p].svr_patch_experts.at(pc).confidence;
+WeightMatrix.at<float>(p, p) = WeightMatrix.at<float>(p, p) + patch_experts.svr_expert_intensity[scale][view_id][p].svr_patch_experts.at(pc).confidence;
 }
 // for the y dimension
-WeightMatrix.at<float>(p+n,p+n) = WeightMatrix.at<float>(p,p);
+WeightMatrix.at<float>(p + n, p + n) = WeightMatrix.at<float>(p, p);
 }
 }
 WeightMatrix = parameters.weight_factor * WeightMatrix;
 }
 else
 {
-WeightMatrix = cv::Mat_<float>::eye(n*2, n*2);
+WeightMatrix = cv::Mat_<float>::eye(n * 2, n * 2);
 }
 }
@@ -946,9 +932,9 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 // Pre-calculate the regularisation term
 cv::Mat_<float> regTerm;
-if(rigid)
+if (rigid)
 {
-regTerm = cv::Mat_<float>::zeros(6,6);
+regTerm = cv::Mat_<float>::zeros(6, 6);
 }
 else
 {
@@ -969,15 +955,15 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 cv::Mat_<float> mean_shifts(2 * pdm.NumberOfPoints(), 1, 0.0);
 // Number of iterations
-for(int iter = 0; iter < parameters.num_optimisation_iteration; iter++)
+for (int iter = 0; iter < parameters.num_optimisation_iteration; iter++)
 {
 // get the current estimates of x
 pdm.CalcShape2D(current_shape, current_local, current_global);
-if(iter > 0)
+if (iter > 0)
 {
 // if the shape hasn't changed terminate
-if(norm(current_shape, previous_shape) < 0.01)
+if (norm(current_shape, previous_shape) < 0.01)
 {
 break;
 }
@@ -989,7 +975,7 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 cv::Mat_<float> J, J_w_t;
 // calculate the appropriate Jacobians in 2D, even though the actual behaviour is in 3D, using small angle approximation and oriented shape
-if(rigid)
+if (rigid)
 {
 pdm.ComputeRigidJacobian(current_local, current_global, J, WeightMatrix, J_w_t);
 }
@@ -999,7 +985,7 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 }
 // useful for mean shift calculation
-float a = -0.5/(parameters.sigma * parameters.sigma);
+float a = -0.5 / (parameters.sigma * parameters.sigma);
 cv::Mat_<double> current_shape_2D = current_shape.reshape(1, 2).t();
 cv::Mat_<double> base_shape_2D = base_shape.reshape(1, 2).t();
@@ -1007,8 +993,8 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 cv::Mat_<float> offsets;
 cv::Mat((current_shape_2D - base_shape_2D) * cv::Mat(sim_img_to_ref).t()).convertTo(offsets, CV_32F);
-dxs = offsets.col(0) + (resp_size-1)/2;
+dxs = offsets.col(0) + (resp_size - 1) / 2;
-dys = offsets.col(1) + (resp_size-1)/2;
+dys = offsets.col(1) + (resp_size - 1) / 2;
 NonVectorisedMeanShift_precalc_kde(mean_shifts, patch_expert_responses, dxs, dys, resp_size, a, scale, view_id, kde_resp_precalc);
@@ -1016,20 +1002,20 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 cv::Mat_<float> mean_shifts_2D = (mean_shifts.reshape(1, 2)).t();
 mean_shifts_2D = mean_shifts_2D * cv::Mat(sim_ref_to_img).t();
-mean_shifts = cv::Mat(mean_shifts_2D.t()).reshape(1, n*2);
+mean_shifts = cv::Mat(mean_shifts_2D.t()).reshape(1, n * 2);
 // remove non-visible observations
-for(int i = 0; i < n; ++i)
+for (int i = 0; i < n; ++i)
 {
 // if patch unavailable for current index
-if(patch_experts.visibilities[scale][view_id].at<int>(i,0) == 0)
+if (patch_experts.visibilities[scale][view_id].at<int>(i, 0) == 0)
 {
 cv::Mat Jx = J.row(i);
 Jx = cvScalar(0);
-cv::Mat Jy = J.row(i+n);
+cv::Mat Jy = J.row(i + n);
 Jy = cvScalar(0);
-mean_shifts.at<float>(i,0) = 0.0f;
+mean_shifts.at<float>(i, 0) = 0.0f;
-mean_shifts.at<float>(i+n,0) = 0.0f;
+mean_shifts.at<float>(i + n, 0) = 0.0f;
 }
 }
@@ -1037,9 +1023,9 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 cv::Mat_<float> J_w_t_m = J_w_t * mean_shifts;
 // Add the regularisation term
-if(!rigid)
+if (!rigid)
 {
-J_w_t_m(cv::Rect(0,6,1, m)) = J_w_t_m(cv::Rect(0,6,1, m)) - regTerm(cv::Rect(6,6, m, m)) * current_local;
+J_w_t_m(cv::Rect(0, 6, 1, m)) = J_w_t_m(cv::Rect(0, 6, 1, m)) - regTerm(cv::Rect(6, 6, m, m)) * current_local;
 }
 // Calculating the Hessian approximation
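
For reference, the statements around this point assemble the right-hand side and the system matrix of the Tikhonov-regularised Gauss-Newton step that NU_RLMS solves each iteration. Sketched from the surrounding code, with J the Jacobian, W the weight matrix from GetWeightMatrix, v the mean-shift vector, \Lambda the regularisation matrix regTerm, and p the current parameters:

\Delta p = \left( J^{\top} W J + \Lambda \right)^{-1} \left( J^{\top} W v - \Lambda p \right)

The \Lambda p correction is applied only to the non-rigid rows of the parameter vector, which is what the cv::Rect slicing above does.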
@@ -1065,45 +1051,45 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 landmark_lhoods = cv::Mat_<double>(n, 1, -1e8);
-for(int i = 0; i < n; i++)
+for (int i = 0; i < n; i++)
 {
-if(patch_experts.visibilities[scale][view_id].at<int>(i,0) == 0 )
+if (patch_experts.visibilities[scale][view_id].at<int>(i, 0) == 0)
 {
 continue;
 }
 float dx = dxs.at<float>(i);
 float dy = dys.at<float>(i);
-int ii,jj;
+int ii, jj;
-float v,vx,vy,sum=0.0;
+float v, vx, vy, sum = 0.0;
 // Iterate over the patch responses here
 cv::MatConstIterator_<float> p = patch_expert_responses[i].begin();
-for(ii = 0; ii < resp_size; ii++)
+for (ii = 0; ii < resp_size; ii++)
 {
-vx = (dy-ii)*(dy-ii);
+vx = (dy - ii)*(dy - ii);
-for(jj = 0; jj < resp_size; jj++)
+for (jj = 0; jj < resp_size; jj++)
 {
-vy = (dx-jj)*(dx-jj);
+vy = (dx - jj)*(dx - jj);
 // the probability at the current, xi, yi
 v = *p++;
 // the KDE evaluation of that point
-v *= exp(-0.5*(vx+vy)/(parameters.sigma * parameters.sigma));
+v *= exp(-0.5*(vx + vy) / (parameters.sigma * parameters.sigma));
 sum += v;
 }
 }
-landmark_lhoods.at<double>(i,0) = (double)sum;
+landmark_lhoods.at<double>(i, 0) = (double)sum;
 // the offset is there for numerical stability
 loglhood += log(sum + 1e-8);
 }
-loglhood = loglhood/sum(patch_experts.visibilities[scale][view_id])[0];
+loglhood = loglhood / sum(patch_experts.visibilities[scale][view_id])[0];
 final_global = current_global;
 final_local = current_local;
@@ -1115,16 +1101,22 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
 // Getting a 3D shape model from the current detected landmarks (in camera space)
 cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 {
-int n = this->detected_landmarks.rows/2;
+int n = this->detected_landmarks.rows / 2;
-cv::Mat_<double> shape3d(n*3, 1);
+cv::Mat_<double> outShape(n, 3, 0.0);
+
+// If the tracking started (otherwise no point reporting 3D shape)
+if (this->IsInitialized())
+{
+cv::Mat_<double> shape3d(n * 3, 1);
 this->pdm.CalcShape3D(shape3d, this->params_local);
 // Need to rotate the shape to get the actual 3D representation
 // get the rotation matrix from the euler angles
-cv::Matx33d R = LandmarkDetector::Euler2RotationMatrix(cv::Vec3d(params_global[1], params_global[2], params_global[3]));
+cv::Matx33d R = Utilities::Euler2RotationMatrix(cv::Vec3d(params_global[1], params_global[2], params_global[3]));
 shape3d = shape3d.reshape(1, 3);
@@ -1133,21 +1125,21 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 // from the weak perspective model can determine the average depth of the object
 double Zavg = fx / params_global[0];
-cv::Mat_<double> outShape(n,3,0.0);
 // this is described in the paper in section 3.4 (equation 10) (of the CLM-Z paper)
-for(int i = 0; i < n; i++)
+for (int i = 0; i < n; i++)
 {
-double Z = Zavg + shape3d.at<double>(i,2);
+double Z = Zavg + shape3d.at<double>(i, 2);
-double X = Z * ((this->detected_landmarks.at<double>(i) - cx)/fx);
+double X = Z * ((this->detected_landmarks.at<double>(i) - cx) / fx);
-double Y = Z * ((this->detected_landmarks.at<double>(i + n) - cy)/fy);
+double Y = Z * ((this->detected_landmarks.at<double>(i + n) - cy) / fy);
-outShape.at<double>(i,0) = (double)X;
+outShape.at<double>(i, 0) = (double)X;
-outShape.at<double>(i,1) = (double)Y;
+outShape.at<double>(i, 1) = (double)Y;
-outShape.at<double>(i,2) = (double)Z;
+outShape.at<double>(i, 2) = (double)Z;
 }
+}
 // The format is 3 rows - n cols
 return outShape.t();
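
The back-projection rests on the weak-perspective relation: the model scale params_global[0] fixes the average depth Zavg = fx / scale, and each landmark is then unprojected through the pinhole model. A standalone numeric sketch (all camera values are illustrative, not taken from the diff):

#include <cstdio>

int main()
{
	// Illustrative intrinsics and model scale
	double fx = 500.0, cx = 320.0, scale = 2.0;
	double Zavg = fx / scale;          // average depth: 250 units
	double u = 420.0, z_offset = 10.0; // pixel x and per-point model depth offset
	double Z = Zavg + z_offset;        // 260
	double X = Z * (u - cx) / fx;      // 260 * 100 / 500 = 52
	std::printf("X = %f, Z = %f\n", X, Z);
	return 0;
}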
@@ -1157,8 +1149,8 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
 // A utility bounding box function
 cv::Rect_<double> CLNF::GetBoundingBox() const
 {
-cv::Mat_<double> xs = this->detected_landmarks(cv::Rect(0,0,1,this->detected_landmarks.rows/2));
+cv::Mat_<double> xs = this->detected_landmarks(cv::Rect(0, 0, 1, this->detected_landmarks.rows / 2));
-cv::Mat_<double> ys = this->detected_landmarks(cv::Rect(0,this->detected_landmarks.rows/2, 1, this->detected_landmarks.rows/2));
+cv::Mat_<double> ys = this->detected_landmarks(cv::Rect(0, this->detected_landmarks.rows / 2, 1, this->detected_landmarks.rows / 2));
 double min_x, max_x;
 double min_y, max_y;
@@ -1176,13 +1168,13 @@ void CLNF::NonVectorisedMeanShift(cv::Mat_<double>& out_mean_shifts, const vecto
 int n = dxs.rows;
-for(int i = 0; i < n; i++)
+for (int i = 0; i < n; i++)
 {
-if(patch_experts.visibilities[scale][view_id].at<int>(i,0) == 0 || sum(patch_expert_responses[i])[0] == 0)
+if (patch_experts.visibilities[scale][view_id].at<int>(i, 0) == 0 || sum(patch_expert_responses[i])[0] == 0)
 {
-out_mean_shifts.at<double>(i,0) = 0;
+out_mean_shifts.at<double>(i, 0) = 0;
-out_mean_shifts.at<double>(i+n,0) = 0;
+out_mean_shifts.at<double>(i + n, 0) = 0;
 continue;
 }
@@ -1190,24 +1182,24 @@ void CLNF::NonVectorisedMeanShift(cv::Mat_<double>& out_mean_shifts, const vecto
 double dx = dxs.at<double>(i);
 double dy = dys.at<double>(i);
-int ii,jj;
+int ii, jj;
-double v,vx,vy,mx=0.0,my=0.0,sum=0.0;
+double v, vx, vy, mx = 0.0, my = 0.0, sum = 0.0;
 // Iterate over the patch responses here
 cv::MatConstIterator_<float> p = patch_expert_responses[i].begin();
-for(ii = 0; ii < resp_size; ii++)
+for (ii = 0; ii < resp_size; ii++)
 {
-vx = (dy-ii)*(dy-ii);
+vx = (dy - ii)*(dy - ii);
-for(jj = 0; jj < resp_size; jj++)
+for (jj = 0; jj < resp_size; jj++)
 {
-vy = (dx-jj)*(dx-jj);
+vy = (dx - jj)*(dx - jj);
 // the probability at the current, xi, yi
 v = *p++;
 // the KDE evaluation of that point
-double kd = exp(a*(vx+vy));
+double kd = exp(a*(vx + vy));
 v *= kd;
 sum += v;
@@ -1220,8 +1212,8 @@ void CLNF::NonVectorisedMeanShift(cv::Mat_<double>& out_mean_shifts, const vecto
 }
 // setting the actual mean shift update
-double msx = (mx/sum - dx);
+double msx = (mx / sum - dx);
-double msy = (my/sum - dy);
+double msy = (my / sum - dy);
 out_mean_shifts.at<double>(i, 0) = msx;
 out_mean_shifts.at<double>(i + n, 0) = msy;

View file

@@ -148,12 +148,6 @@ FaceModelParameters::FaceModelParameters(vector<string> &arguments)
 valid[i + 1] = false;
 i++;
 }
-else if (arguments[i].compare("-gaze") == 0)
-{
-track_gaze = true;
-valid[i] = false;
-}
 else if (arguments[i].compare("-q") == 0)
 {
@@ -253,7 +247,7 @@ void FaceModelParameters::init()
 reg_factor = 25;
 weight_factor = 0; // By default do not use NU-RLMS for videos as it does not work as well for them
-validation_boundary = -0.45;
+validation_boundary = 0.725;
 limit_pose = true;
 multi_view = false;
@@ -267,7 +261,5 @@ void FaceModelParameters::init()
 // By default use HOG SVM
 curr_face_detector = HOG_SVM_DETECTOR;
-// The gaze tracking has to be explicitly initialised
-track_gaze = false;
 }

File diff suppressed because it is too large

View file

@@ -49,6 +49,7 @@
 #endif
 #include <LandmarkDetectorUtils.h>
+#include "RotationHelpers.h"
 using namespace LandmarkDetector;
 //===========================================================================
@@ -154,7 +155,7 @@ void PDM::CalcShape2D(cv::Mat_<double>& out_shape, const cv::Mat_<double>& param
 // get the rotation matrix from the euler angles
 cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
 // get the 3D shape of the object
 cv::Mat_<double> Shape_3D = mean_shape + princ_comp * params_local;
@@ -185,7 +186,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, const cv::Rect_<double>& boun
 CalcShape3D(current_shape, params_local);
 // rotate the shape
-cv::Matx33d rotation_matrix = Euler2RotationMatrix(rotation);
+cv::Matx33d rotation_matrix = Utilities::Euler2RotationMatrix(rotation);
 cv::Mat_<double> reshaped = current_shape.reshape(1, 3);
@@ -265,7 +266,7 @@ void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6d&
 // Get the rotation matrix
 cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
 float r11 = (float) currRot(0,0);
 float r12 = (float) currRot(0,1);
@@ -363,7 +364,7 @@ void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6d&
 shape_3D_d.convertTo(shape_3D, CV_32F);
 cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d currRot = Euler2RotationMatrix(euler);
+cv::Matx33d currRot = Utilities::Euler2RotationMatrix(euler);
 float r11 = (float) currRot(0,0);
 float r12 = (float) currRot(0,1);
@@ -460,7 +461,7 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
 // get the original rotation matrix
 cv::Vec3d eulerGlobal(params_global[1], params_global[2], params_global[3]);
-cv::Matx33d R1 = Euler2RotationMatrix(eulerGlobal);
+cv::Matx33d R1 = Utilities::Euler2RotationMatrix(eulerGlobal);
 // construct R' = [1, -wz, wy
 // wz, 1, -wx
@@ -478,8 +479,8 @@ void PDM::UpdateModelParameters(const cv::Mat_<float>& delta_p, cv::Mat_<float>&
 cv::Matx33d R3 = R1 *R2;
 // Extract euler angle (through axis angle first to make sure it's legal)
-cv::Vec3d axis_angle = RotationMatrix2AxisAngle(R3);
+cv::Vec3d axis_angle = Utilities::RotationMatrix2AxisAngle(R3);
-cv::Vec3d euler = AxisAngle2Euler(axis_angle);
+cv::Vec3d euler = Utilities::AxisAngle2Euler(axis_angle);
 params_global[1] = euler[0];
 params_global[2] = euler[1];
@@ -569,7 +570,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, cv::Mat_<double>& out_params_
 double scaling = ((width / model_bbox.width) + (height / model_bbox.height)) / 2;
 cv::Vec3d rotation_init = rotation;
-cv::Matx33d R = Euler2RotationMatrix(rotation_init);
+cv::Matx33d R = Utilities::Euler2RotationMatrix(rotation_init);
 cv::Vec2d translation((min_x + max_x) / 2.0, (min_y + max_y) / 2.0);
 cv::Mat_<float> loc_params(this->NumberOfModes(),1, 0.0);
@@ -656,7 +657,7 @@ void PDM::CalcParams(cv::Vec6d& out_params_global, cv::Mat_<double>& out_params_
 translation[0] = glob_params[4];
 translation[1] = glob_params[5];
-R = Euler2RotationMatrix(rotation_init);
+R = Utilities::Euler2RotationMatrix(rotation_init);
 R_2D(0,0) = R(0,0);R_2D(0,1) = R(0,1); R_2D(0,2) = R(0,2);
 R_2D(1,0) = R(1,0);R_2D(1,1) = R(1,1); R_2D(1,2) = R(1,2);
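
All of these rotation helpers now come from the shared Utilities headers rather than LandmarkDetector. For orientation, a minimal sketch of an Euler-to-rotation-matrix composition; the axis order shown (R = Rx * Ry * Rz) is an assumption for illustration, as OpenFace's exact convention lives in RotationHelpers.h:

#include <opencv2/core/core.hpp>
#include <cmath>

// Compose a rotation matrix from Euler angles, here as R = Rx * Ry * Rz.
// The actual axis order used by OpenFace is defined in RotationHelpers.h.
static cv::Matx33d EulerToRotation(const cv::Vec3d& e)
{
	double s1 = std::sin(e[0]), c1 = std::cos(e[0]); // pitch (x)
	double s2 = std::sin(e[1]), c2 = std::cos(e[1]); // yaw (y)
	double s3 = std::sin(e[2]), c3 = std::cos(e[2]); // roll (z)
	cv::Matx33d Rx(1, 0, 0, 0, c1, -s1, 0, s1, c1);
	cv::Matx33d Ry(c2, 0, s2, 0, 1, 0, -s2, 0, c2);
	cv::Matx33d Rz(c3, -s3, 0, s3, c3, 0, 0, 0, 1);
	return Rx * Ry * Rz;
}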

View file

@@ -0,0 +1,31 @@
include_directories(${BOOST_INCLUDE_DIR})
SET(SOURCE
src/ImageCapture.cpp
src/RecorderCSV.cpp
src/RecorderHOG.cpp
src/RecorderOpenFace.cpp
src/RecorderOpenFaceParameters.cpp
src/SequenceCapture.cpp
src/VisualizationUtils.cpp
src/Visualizer.cpp
)
SET(HEADERS
include/ImageCapture.h
include/RecorderCSV.h
include/RecorderHOG.h
include/RecorderOpenFace.h
include/RecorderOpenFaceParameters.h
include/SequenceCapture.h
include/VisualizationUtils.h
include/Visualizer.h
)
include_directories(./include)
include_directories(${UTILITIES_SOURCE_DIR}/include)
add_library( Utilities ${SOURCE} ${HEADERS})
install (TARGETS Utilities DESTINATION lib)
install (FILES ${HEADERS} DESTINATION include/OpenFace)

View file

@@ -0,0 +1,183 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8E741EA2-9386-4CF2-815E-6F9B08991EAC}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>Utilities</RootNamespace>
<WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
<ProjectName>Utilities</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost_d.props" />
<Import Project="..\..\3rdParty\dlib\dlib.props" />
<Import Project="..\..\3rdParty\tbb\tbb_d.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost.props" />
<Import Project="..\..\3rdParty\dlib\dlib.props" />
<Import Project="..\..\3rdParty\tbb\tbb.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost_d.props" />
<Import Project="..\..\3rdParty\dlib\dlib.props" />
<Import Project="..\..\3rdParty\tbb\tbb_d.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\3rdParty\OpenCV3.1\openCV3.1.props" />
<Import Project="..\..\3rdParty\boost\boost.props" />
<Import Project="..\..\3rdParty\dlib\dlib.props" />
<Import Project="..\..\3rdParty\tbb\tbb.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup />
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>Full</Optimization>
<FunctionLevelLinking>
</FunctionLevelLinking>
<IntrinsicFunctions>false</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>./include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="src\ImageCapture.cpp" />
<ClCompile Include="src\RecorderCSV.cpp" />
<ClCompile Include="src\RecorderHOG.cpp" />
<ClCompile Include="src\RecorderOpenFace.cpp" />
<ClCompile Include="src\RecorderOpenFaceParameters.cpp" />
<ClCompile Include="src\SequenceCapture.cpp" />
<ClCompile Include="src\VisualizationUtils.cpp" />
<ClCompile Include="src\Visualizer.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="include\ImageCapture.h" />
<ClInclude Include="include\ImageManipulationHelpers.h" />
<ClInclude Include="include\RecorderCSV.h" />
<ClInclude Include="include\RecorderHOG.h" />
<ClInclude Include="include\RecorderOpenFace.h" />
<ClInclude Include="include\RecorderOpenFaceParameters.h" />
<ClInclude Include="include\RotationHelpers.h" />
<ClInclude Include="include\SequenceCapture.h" />
<ClInclude Include="include\VisualizationUtils.h" />
<ClInclude Include="include\Visualizer.h" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\3rdParty\dlib\dlib.vcxproj">
<Project>{b47a5f12-2567-44e9-ae49-35763ec82149}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View file

@@ -0,0 +1,75 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="src\RecorderCSV.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\RecorderHOG.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\RecorderOpenFace.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\RecorderOpenFaceParameters.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\SequenceCapture.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\VisualizationUtils.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\Visualizer.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="src\ImageCapture.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="include\RecorderCSV.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\RecorderHOG.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\RecorderOpenFace.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\RecorderOpenFaceParameters.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\SequenceCapture.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\Visualizer.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\VisualizationUtils.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\RotationHelpers.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\ImageCapture.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="include\ImageManipulationHelpers.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>

View file

@@ -0,0 +1,121 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International. Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __IMAGE_CAPTURE_h_
#define __IMAGE_CAPTURE_h_
// System includes
#include <fstream>
#include <sstream>
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for capturing images from image files or directories of images
*/
class ImageCapture {
public:
// Default constructor
ImageCapture() {};
// Opening based on command line arguments
bool Open(std::vector<std::string>& arguments);
// Direct opening
// Image sequence in the directory
bool OpenDirectory(std::string directory, std::string bbox_directory="", float fx = -1, float fy = -1, float cx = -1, float cy = -1);
// List of image files
bool OpenImageFiles(const std::vector<std::string>& image_files, float fx = -1, float fy = -1, float cx = -1, float cy = -1);
// Getting the next frame
cv::Mat GetNextImage();
// Getting the most recent grayscale frame (need to call GetNextImage first)
cv::Mat_<uchar> GetGrayFrame();
// Return bounding boxes associated with the image (if defined)
std::vector<cv::Rect_<double> > GetBoundingBoxes();
// Parameters describing the sequence and its progress (what proportion of the images has been opened)
double GetProgress();
int image_width;
int image_height;
float fx, fy, cx, cy;
// Name of the video file, image directory, or the webcam
std::string name;
bool has_bounding_boxes;
private:
// Blocking copy and move, as it doesn't make sense to have several readers pointed at the same source
ImageCapture & operator= (const ImageCapture& other);
ImageCapture & operator= (const ImageCapture&& other);
ImageCapture(const ImageCapture&& other);
ImageCapture(const ImageCapture& other);
// Storing the latest captures
cv::Mat latest_frame;
cv::Mat_<uchar> latest_gray_frame;
// Keeping track of how many files are read and the filenames
size_t frame_num;
std::vector<std::string> image_files;
// Could optionally read the bounding box locations from files (each image could have multiple bounding boxes)
std::vector<std::vector<cv::Rect_<double> > > bounding_boxes;
void SetCameraIntrinsics(float fx, float fy, float cx, float cy);
bool image_focal_length_set;
bool image_optical_center_set;
bool no_input_specified;
};
}
#endif
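
A minimal usage sketch for ImageCapture, assuming a hypothetical directory "my_images" of .jpg/.png files; GetNextImage returns an empty Mat once the sequence is exhausted:

Utilities::ImageCapture capture;
if (capture.OpenDirectory("my_images"))
{
    while (true)
    {
        cv::Mat frame = capture.GetNextImage();
        if (frame.empty())
            break; // end of the image sequence
        cv::Mat_<uchar> gray = capture.GetGrayFrame();
        // Bounding boxes are only present when a -bboxdir was supplied
        if (capture.has_bounding_boxes)
        {
            std::vector<cv::Rect_<double> > face_boxes = capture.GetBoundingBoxes();
        }
    }
}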

ImageManipulationHelpers.h
@@ -0,0 +1,91 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __IMAGE_MANIPULATION_HELPERS_h_
#define __IMAGE_MANIPULATION_HELPERS_h_
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
namespace Utilities
{
//===========================================================================
// Converting between color spaces and bit depths
//===========================================================================
static void ConvertToGrayscale_8bit(const cv::Mat& in, cv::Mat& out)
{
if (in.channels() == 3)
{
// Make sure it's in the correct format first
if (in.depth() == CV_16U)
{
cv::Mat tmp = in / 256;
tmp.convertTo(out, CV_8U);
cv::cvtColor(out, out, CV_BGR2GRAY);
}
else
{
cv::cvtColor(in, out, CV_BGR2GRAY);
}
}
else if (in.channels() == 4)
{
if (in.depth() == CV_16U)
{
cv::Mat tmp = in / 256;
tmp.convertTo(out, CV_8U);
cv::cvtColor(out, out, CV_BGRA2GRAY);
}
else
{
cv::cvtColor(in, out, CV_BGRA2GRAY);
}
}
else
{
if (in.depth() == CV_16U)
{
cv::Mat tmp = in / 256;
tmp.convertTo(out, CV_8U);
}
else if (in.depth() == CV_8U)
{
out = in.clone();
}
}
}
}
#endif
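
A small sketch of ConvertToGrayscale_8bit in use, assuming OpenCV is linked; the input here is a synthetic 16-bit BGR image with illustrative values:

cv::Mat bgr_16(480, 640, CV_16UC3, cv::Scalar(0, 32768, 65535));
cv::Mat gray;
Utilities::ConvertToGrayscale_8bit(bgr_16, gray);
// gray is now CV_8UC1: 16-bit inputs are scaled down by 256 before conversion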

RecorderCSV.h
@@ -0,0 +1,98 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_CSV_h_
#define __RECORDER_CSV_h_
// System includes
#include <fstream>
#include <sstream>
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for recording OpenFace output to a CSV file
*/
class RecorderCSV {
public:
// The default constructor; whether we are recording a sequence is specified when calling Open
RecorderCSV();
// Opening the file and preparing the header for it
bool Open(std::string output_file_name, bool is_sequence, bool output_2D_landmarks, bool output_3D_landmarks, bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze,
int num_face_landmarks, int num_model_modes, int num_eye_landmarks, const std::vector<std::string>& au_names_class, const std::vector<std::string>& au_names_reg);
// Closing the file and cleaning up
void Close();
void WriteLine(int observation_count, double time_stamp, bool landmark_detection_success, double landmark_confidence,
const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D, const cv::Mat_<double>& pdm_model_params, const cv::Vec6d& rigid_shape_params, const cv::Vec6d& pose_estimate,
const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d,
const std::vector<std::pair<std::string, double> >& au_intensities, const std::vector<std::pair<std::string, double> >& au_occurences);
private:
// Blocking copy and move, as it doesn't make sense to have multiple recorders writing to the same file
RecorderCSV & operator= (const RecorderCSV& other);
RecorderCSV & operator= (const RecorderCSV&& other);
RecorderCSV(const RecorderCSV&& other);
RecorderCSV(const RecorderCSV& other);
// The actual output file stream that will be written
std::ofstream output_file;
// If we are recording results from a sequence each row refers to a frame, if we are recording an image each row is a face
bool is_sequence;
// Keep track of what we are recording
bool output_2D_landmarks;
bool output_3D_landmarks;
bool output_model_params;
bool output_pose;
bool output_AUs;
bool output_gaze;
std::vector<std::string> au_names_class;
std::vector<std::string> au_names_reg;
};
}
#endif
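
A hedged sketch of opening the recorder for a video sequence with only pose and gaze columns enabled; the counts (68 face landmarks, 34 model modes, 56 eye landmarks) are illustrative values rather than requirements of the class:

Utilities::RecorderCSV recorder;
std::vector<std::string> no_aus; // no AU outputs requested
bool opened = recorder.Open("out.csv", true /* sequence */,
    false /* 2D landmarks */, false /* 3D landmarks */, false /* model params */,
    true /* pose */, false /* AUs */, true /* gaze */,
    68, 34, 56, no_aus, no_aus);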

RecorderHOG.h
@@ -0,0 +1,88 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_HOG_h_
#define __RECORDER_HOG_h_
// System includes
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <iostream>
#include <fstream>
namespace Utilities
{
//===========================================================================
/**
A class for recording HOG features extracted by OpenFace to a binary file
*/
class RecorderHOG {
public:
// The constructor for the recorder, by default does not do anything
RecorderHOG();
// Adding observations to the recorder
void SetObservationHOG(bool success, const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows, int num_channels);
void Write();
bool Open(std::string filename);
void Close();
private:
// Blocking copy and move, as it doesn't make sense to have multiple recorders writing to the same file
RecorderHOG & operator= (const RecorderHOG& other);
RecorderHOG & operator= (const RecorderHOG&& other);
RecorderHOG(const RecorderHOG&& other);
RecorderHOG(const RecorderHOG& other);
std::ofstream hog_file;
// Internals for recording
int num_cols;
int num_rows;
int num_channels;
cv::Mat_<double> hog_descriptor;
bool good_frame;
};
}
#endif
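
A minimal sketch of the intended call order; the descriptor and its 14x14x31 dimensions are assumed to come from the face analysis pipeline:

Utilities::RecorderHOG hog_recorder;
cv::Mat_<double> hog_descriptor(14 * 14 * 31, 1, 0.0); // illustrative contents
if (hog_recorder.Open("face.hog"))
{
    hog_recorder.SetObservationHOG(true, hog_descriptor, 14, 14, 31);
    hog_recorder.Write();
    hog_recorder.Close();
}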

RecorderOpenFace.h
@@ -0,0 +1,160 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __RECORDER_OPENFACE_h_
#define __RECORDER_OPENFACE_h_
#include "RecorderCSV.h"
#include "RecorderHOG.h"
#include "RecorderOpenFaceParameters.h"
// System includes
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for recording data processed by OpenFace (facial landmarks, head pose, facial action units, aligned face, HOG features, and tracked video)
*/
class RecorderOpenFace {
public:
// The constructor for the recorder; whether we are recording a sequence is specified in parameters, and in_filename should be just the name without an extension
RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::vector<std::string>& arguments);
~RecorderOpenFace();
// Closing and cleaning up the recorder
void Close();
// Adding observations to the recorder
// Required observations for video/image-sequence
void SetObservationTimestamp(double timestamp);
// All observations relevant to facial landmarks
void SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D,
const cv::Vec6d& params_global, const cv::Mat_<double>& params_local, double confidence, bool success);
// Pose related observations
void SetObservationPose(const cv::Vec6d& pose);
// AU related observations
void SetObservationActionUnits(const std::vector<std::pair<std::string, double> >& au_intensities,
const std::vector<std::pair<std::string, double> >& au_occurences);
// Gaze related observations
void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1,
const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2D, const std::vector<cv::Point3d>& eye_landmarks3D);
// Face alignment related observations
void SetObservationFaceAlign(const cv::Mat& aligned_face);
// HOG feature related observations
void SetObservationHOG(bool good_frame, const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows, int num_channels);
void SetObservationVisualization(const cv::Mat &vis_track);
void WriteObservation();
std::string GetCSVFile() { return csv_filename; }
private:
// Blocking copy, assignment and move operators, as it does not make sense to save to the same location
RecorderOpenFace & operator= (const RecorderOpenFace& other);
RecorderOpenFace & operator= (const RecorderOpenFace&& other);
RecorderOpenFace(const RecorderOpenFace&& other);
RecorderOpenFace(const RecorderOpenFace& other);
// Keeping track of what to output and how to output it
const RecorderOpenFaceParameters params;
// Keep track of the file and output root location
std::string record_root;
std::string default_record_directory = "processed"; // By default we write to the "processed" directory inside the working directory if no output location is provided
std::string of_filename;
std::string filename;
std::string csv_filename;
std::string aligned_output_directory;
std::ofstream metadata_file;
// The actual output file stream that will be written
RecorderCSV csv_recorder;
RecorderHOG hog_recorder;
// The actual temporary storage for the observations
double timestamp;
// Facial landmark related observations
cv::Mat_<double> landmarks_2D;
cv::Mat_<double> landmarks_3D;
cv::Vec6d pdm_params_global;
cv::Mat_<double> pdm_params_local;
double landmark_detection_confidence;
bool landmark_detection_success;
// Head pose related observations
cv::Vec6d head_pose;
// Action Unit related observations
std::vector<std::pair<std::string, double> > au_intensities;
std::vector<std::pair<std::string, double> > au_occurences;
// Gaze related observations
cv::Point3f gaze_direction0;
cv::Point3f gaze_direction1;
cv::Vec2d gaze_angle;
std::vector<cv::Point2d> eye_landmarks2D;
std::vector<cv::Point3d> eye_landmarks3D;
int observation_count;
// For video writing
cv::VideoWriter video_writer;
std::string media_filename;
cv::Mat vis_to_out;
// For aligned face writing
cv::Mat aligned_face;
};
}
#endif
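
A hedged sketch of the intended recording flow; parameters, arguments, and the observation values (time_stamp, head_pose, landmark results) are assumed to come from the surrounding pipeline:

Utilities::RecorderOpenFace recorder("my_video", parameters, arguments);
recorder.SetObservationTimestamp(time_stamp);
recorder.SetObservationPose(head_pose);
recorder.SetObservationLandmarks(landmarks_2D, landmarks_3D, params_global, params_local, confidence, success);
recorder.WriteObservation();
recorder.Close();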

RecorderOpenFaceParameters.h
@@ -0,0 +1,106 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
// Parameters of the OpenFace recorder
#ifndef __RECORDER_OPENFACE_PARAM_H
#define __RECORDER_OPENFACE_PARAM_H
#include <vector>
#include <opencv2/core/core.hpp>
// Boost includes
#include <filesystem.hpp>
#include <filesystem/fstream.hpp>
namespace Utilities
{
class RecorderOpenFaceParameters
{
public:
// Constructors
RecorderOpenFaceParameters(std::vector<std::string> &arguments, bool sequence, bool is_from_webcam, float fx = -1, float fy = -1, float cx = -1, float cy = -1, double fps_vid_out = 30);
bool isSequence() const { return is_sequence; }
bool isFromWebcam() const { return is_from_webcam; }
bool output2DLandmarks() const { return output_2D_landmarks; }
bool output3DLandmarks() const { return output_3D_landmarks; }
bool outputPDMParams() const { return output_model_params; }
bool outputPose() const { return output_pose; }
bool outputAUs() const { return output_AUs; }
bool outputGaze() const { return output_gaze; }
bool outputHOG() const { return output_hog; }
bool outputTracked() const { return output_tracked; }
bool outputAlignedFaces() const { return output_aligned_faces; }
std::string outputCodec() const { return output_codec; }
double outputFps() const { return fps_vid_out; }
float getFx() const { return fx; }
float getFy() const { return fy; }
float getCx() const { return cx; }
float getCy() const { return cy; }
private:
// If we are recording results from a sequence each row refers to a frame, if we are recording an image each row is a face
bool is_sequence;
// If the data is coming from a webcam
bool is_from_webcam;
// Keep track of what we are recording
bool output_2D_landmarks;
bool output_3D_landmarks;
bool output_model_params;
bool output_pose;
bool output_AUs;
bool output_gaze;
bool output_hog;
bool output_tracked;
bool output_aligned_faces;
// Some video recording parameters
std::string output_codec;
double fps_vid_out;
// Camera parameters for recording in the meta file
float fx, fy, cx, cy;
};
}
#endif // __RECORDER_OPENFACE_PARAM_H
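
A short sketch constructing the parameters from command-line arguments for a webcam sequence, leaving the camera intrinsics at their -1 defaults (argv and argc are assumed to come from main):

std::vector<std::string> arguments(argv, argv + argc);
Utilities::RecorderOpenFaceParameters params(arguments, true /* sequence */, true /* webcam */);
double fps = params.outputFps(); // 30 unless overridden via arguments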

RotationHelpers.h
@@ -0,0 +1,160 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __ROTATION_HELPERS_h_
#define __ROTATION_HELPERS_h_
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d.hpp>
namespace Utilities
{
//===========================================================================
// Angle representation conversion helpers
//===========================================================================
// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
static cv::Matx33d Euler2RotationMatrix(const cv::Vec3d& eulerAngles)
{
cv::Matx33d rotation_matrix;
double s1 = sin(eulerAngles[0]);
double s2 = sin(eulerAngles[1]);
double s3 = sin(eulerAngles[2]);
double c1 = cos(eulerAngles[0]);
double c2 = cos(eulerAngles[1]);
double c3 = cos(eulerAngles[2]);
rotation_matrix(0, 0) = c2 * c3;
rotation_matrix(0, 1) = -c2 * s3;
rotation_matrix(0, 2) = s2;
rotation_matrix(1, 0) = c1 * s3 + c3 * s1 * s2;
rotation_matrix(1, 1) = c1 * c3 - s1 * s2 * s3;
rotation_matrix(1, 2) = -c2 * s1;
rotation_matrix(2, 0) = s1 * s3 - c1 * c3 * s2;
rotation_matrix(2, 1) = c3 * s1 + c1 * s2 * s3;
rotation_matrix(2, 2) = c1 * c2;
return rotation_matrix;
}
// Using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
static cv::Vec3d RotationMatrix2Euler(const cv::Matx33d& rotation_matrix)
{
double q0 = sqrt(1 + rotation_matrix(0, 0) + rotation_matrix(1, 1) + rotation_matrix(2, 2)) / 2.0;
double q1 = (rotation_matrix(2, 1) - rotation_matrix(1, 2)) / (4.0*q0);
double q2 = (rotation_matrix(0, 2) - rotation_matrix(2, 0)) / (4.0*q0);
double q3 = (rotation_matrix(1, 0) - rotation_matrix(0, 1)) / (4.0*q0);
double t1 = 2.0 * (q0*q2 + q1*q3);
double yaw = asin(t1);
double pitch = atan2(2.0 * (q0*q1 - q2*q3), q0*q0 - q1*q1 - q2*q2 + q3*q3);
double roll = atan2(2.0 * (q0*q3 - q1*q2), q0*q0 + q1*q1 - q2*q2 - q3*q3);
return cv::Vec3d(pitch, yaw, roll);
}
static cv::Vec3d Euler2AxisAngle(const cv::Vec3d& euler)
{
cv::Matx33d rotMatrix = Euler2RotationMatrix(euler);
cv::Vec3d axis_angle;
cv::Rodrigues(rotMatrix, axis_angle);
return axis_angle;
}
static cv::Vec3d AxisAngle2Euler(const cv::Vec3d& axis_angle)
{
cv::Matx33d rotation_matrix;
cv::Rodrigues(axis_angle, rotation_matrix);
return RotationMatrix2Euler(rotation_matrix);
}
static cv::Matx33d AxisAngle2RotationMatrix(const cv::Vec3d& axis_angle)
{
cv::Matx33d rotation_matrix;
cv::Rodrigues(axis_angle, rotation_matrix);
return rotation_matrix;
}
static cv::Vec3d RotationMatrix2AxisAngle(const cv::Matx33d& rotation_matrix)
{
cv::Vec3d axis_angle;
cv::Rodrigues(rotation_matrix, axis_angle);
return axis_angle;
}
// Generally useful 3D functions
static void Project(cv::Mat_<double>& dest, const cv::Mat_<double>& mesh, double fx, double fy, double cx, double cy)
{
dest = cv::Mat_<double>(mesh.rows, 2, 0.0);
int num_points = mesh.rows;
double X, Y, Z;
cv::Mat_<double>::const_iterator mData = mesh.begin();
cv::Mat_<double>::iterator projected = dest.begin();
for (int i = 0; i < num_points; i++)
{
// Get the points
X = *(mData++);
Y = *(mData++);
Z = *(mData++);
double x;
double y;
// if depth is 0 the projection is different
if (Z != 0)
{
x = ((X * fx / Z) + cx);
y = ((Y * fy / Z) + cy);
}
else
{
x = X;
y = Y;
}
// Project and store in dest matrix
(*projected++) = x;
(*projected++) = y;
}
}
}
#endif
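
A quick sanity sketch: within the range of asin/atan2 used above, the conversions round-trip up to numeric precision:

cv::Vec3d euler(0.1, -0.2, 0.3); // pitch, yaw, roll in radians
cv::Matx33d R = Utilities::Euler2RotationMatrix(euler);
cv::Vec3d back = Utilities::RotationMatrix2Euler(R); // ~(0.1, -0.2, 0.3)
cv::Vec3d axis_angle = Utilities::RotationMatrix2AxisAngle(R); // same rotation, axis-angle form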

SequenceCapture.h
@@ -0,0 +1,140 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __SEQUENCE_CAPTURE_h_
#define __SEQUENCE_CAPTURE_h_
// System includes
#include <fstream>
#include <sstream>
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for capturing sequences from video, webcam, and image directories
*/
class SequenceCapture {
public:
// Default constructor
SequenceCapture() {};
// Destructor
~SequenceCapture();
// Opening based on command line arguments
bool Open(std::vector<std::string>& arguments);
// Direct opening
// Webcam
bool OpenWebcam(int device_id, int image_width = 640, int image_height = 480, float fx = -1, float fy = -1, float cx = -1, float cy = -1);
// Image sequence in the directory
bool OpenImageSequence(std::string directory, float fx = -1, float fy = -1, float cx = -1, float cy = -1);
// Video file
bool OpenVideoFile(std::string video_file, float fx = -1, float fy = -1, float cx = -1, float cy = -1);
bool IsWebcam() { return is_webcam; }
// Getting the next frame
cv::Mat GetNextFrame();
// Getting the most recent grayscale frame (need to call GetNextFrame first)
cv::Mat_<uchar> GetGrayFrame();
// Parameters describing the sequence and its progress
double GetProgress();
bool IsOpened();
int frame_width;
int frame_height;
float fx, fy, cx, cy;
double fps;
double time_stamp;
// Name of the video file, image directory, or the webcam
std::string name;
// Allows differentiating between a failure because no input was specified and a failure to open the specified input
bool no_input_specified;
private:
// Blocking copy and move, as it doesn't make sense to have several readers pointed at the same source, and this would cause issues, especially with webcams
SequenceCapture & operator= (const SequenceCapture& other);
SequenceCapture & operator= (const SequenceCapture&& other);
SequenceCapture(const SequenceCapture&& other);
SequenceCapture(const SequenceCapture& other);
// Used for capturing webcam and video
cv::VideoCapture capture;
// Storing the latest captures
cv::Mat latest_frame;
cv::Mat_<uchar> latest_gray_frame;
// Keeping track of frame number and the files in the image sequence
size_t frame_num;
std::vector<std::string> image_files;
// Length of video allowing to assess progress
size_t vid_length;
// If using a webcam, helps to keep track of time
int64 start_time;
// Keeping track if we are opening a video, webcam or image sequence
bool is_webcam;
bool is_image_seq;
void SetCameraIntrinsics(float fx, float fy, float cx, float cy);
};
}
#endif
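
A minimal usage sketch, assuming a webcam at device id 0; for video files and image sequences the loop ends once an empty frame is returned:

Utilities::SequenceCapture sequence;
if (sequence.OpenWebcam(0, 640, 480))
{
    while (true)
    {
        cv::Mat frame = sequence.GetNextFrame();
        if (frame.empty())
            break;
        // sequence.time_stamp and sequence.fps are refreshed as frames arrive
    }
}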

VisualizationUtils.h
@@ -0,0 +1,76 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __VISUALIZATION_UTILS_h_
#define __VISUALIZATION_UTILS_h_
#include <opencv2/core/core.hpp>
#include <vector>
#include <queue>
namespace Utilities
{
// TODO draw AU results
// Drawing a bounding box around the face in an image
void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy);
void DrawBox(const std::vector<std::pair<cv::Point2d, cv::Point2d>>& lines, cv::Mat image, cv::Scalar color, int thickness);
// Computing a bounding box to be drawn
std::vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy);
void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation);
class FpsTracker
{
public:
double history_length;
void AddFrame();
double GetFPS();
FpsTracker();
private:
std::queue<double> frame_times;
void DiscardOldFrames();
};
}
#endif
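
A small sketch of the FpsTracker in use:

Utilities::FpsTracker fps_tracker;
fps_tracker.AddFrame(); // call once per processed frame
double fps = fps_tracker.GetFPS(); // rate over the recent history window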

Visualizer.h
@@ -0,0 +1,107 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef __VISUALIZER_h_
#define __VISUALIZER_h_
// System includes
#include <vector>
// OpenCV includes
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
namespace Utilities
{
//===========================================================================
/**
A class for visualizing data processed by OpenFace (facial landmarks, head pose, gaze, aligned faces, and HOG features)
*/
class Visualizer {
public:
// The constructor for the visualizer that specifies what to visualize
Visualizer(std::vector<std::string> arguments);
Visualizer(bool vis_track, bool vis_hog, bool vis_align);
// Adding observations to the visualizer
// Pose related observations
void SetImage(const cv::Mat& canvas, float fx, float fy, float cx, float cy);
// All observations relevant to facial landmarks (optional visibilities parameter to not display all landmarks)
void SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities = cv::Mat_<int>());
// Pose related observations
void SetObservationPose(const cv::Vec6d& pose, double confidence);
// Gaze related observations
void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence);
// Face alignment related observations
void SetObservationFaceAlign(const cv::Mat& aligned_face);
// HOG feature related observations
void SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows);
void SetFps(double fps);
// Returns the key press (if any) registered while the visualization windows were shown
char ShowObservation();
cv::Mat GetVisImage();
// Keeping track of what we're visualizing
bool vis_track;
bool vis_hog;
bool vis_align;
// Can be adjusted to show less confident frames
double visualisation_boundary = 0.4;
private:
// Temporary variables for visualization
cv::Mat captured_image; // our canvas
cv::Mat tracked_image;
cv::Mat hog_image;
cv::Mat aligned_face_image;
// Useful for drawing 3d
float fx, fy, cx, cy;
};
}
#endif
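
A hedged usage sketch; the frame, intrinsics, and tracking results are assumed to come from the capture and landmark detection steps:

Utilities::Visualizer visualizer(true /* track */, false /* hog */, false /* align */);
visualizer.SetImage(frame, fx, fy, cx, cy);
visualizer.SetObservationLandmarks(landmarks_2D, confidence, success);
visualizer.SetObservationPose(pose, confidence);
char key = visualizer.ShowObservation(); // displays the windows and returns any key press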

ImageCapture.cpp
@@ -0,0 +1,421 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "ImageCapture.h"
#include "ImageManipulationHelpers.h"
#include <iostream>
// Boost includes
#include <filesystem.hpp>
#include <filesystem/fstream.hpp>
#include <boost/algorithm/string.hpp>
// OpenCV includes
#include <opencv2/imgproc.hpp>
using namespace Utilities;
#define INFO_STREAM( stream ) \
std::cout << stream << std::endl
#define WARN_STREAM( stream ) \
std::cout << "Warning: " << stream << std::endl
#define ERROR_STREAM( stream ) \
std::cout << "Error: " << stream << std::endl
bool ImageCapture::Open(std::vector<std::string>& arguments)
{
// Consuming the input arguments
std::vector<bool> valid(arguments.size(), true);
// Some default values
std::string input_root = "";
fx = -1; fy = -1; cx = -1; cy = -1;
frame_num = 0;
std::string separator = std::string(1, boost::filesystem::path::preferred_separator);
// First check if there is a root argument (so that videos and input directories could be defined more easily)
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-root") == 0)
{
input_root = arguments[i + 1] + separator;
i++;
}
if (arguments[i].compare("-inroot") == 0)
{
input_root = arguments[i + 1] + separator;
i++;
}
}
std::string input_directory;
std::string bbox_directory;
bool directory_found = false;
has_bounding_boxes = false;
std::vector<std::string> input_image_files;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-f") == 0)
{
input_image_files.push_back(input_root + arguments[i + 1]);
valid[i] = false;
valid[i + 1] = false;
i++;
}
else if (arguments[i].compare("-fdir") == 0)
{
if (directory_found)
{
WARN_STREAM("Input directory already found, using the first one: " + input_directory);
}
else
{
input_directory = (input_root + arguments[i + 1]);
valid[i] = false;
valid[i + 1] = false;
i++;
directory_found = true;
}
}
else if (arguments[i].compare("-bboxdir") == 0)
{
bbox_directory = (input_root + arguments[i + 1]);
valid[i] = false;
valid[i + 1] = false;
has_bounding_boxes = true;
i++;
}
else if (arguments[i].compare("-fx") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> fx;
i++;
}
else if (arguments[i].compare("-fy") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> fy;
i++;
}
else if (arguments[i].compare("-cx") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cx;
i++;
}
else if (arguments[i].compare("-cy") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cy;
i++;
}
}
for (int i = (int)arguments.size() - 1; i >= 0; --i)
{
if (!valid[i])
{
arguments.erase(arguments.begin() + i);
}
}
// Based on what was read in open the sequence
if (!input_image_files.empty())
{
return OpenImageFiles(input_image_files, fx, fy, cx, cy);
}
if (!input_directory.empty())
{
return OpenDirectory(input_directory, bbox_directory, fx, fy, cx, cy);
}
// If no input found return false and set a flag for it
no_input_specified = true;
return false;
}
bool ImageCapture::OpenImageFiles(const std::vector<std::string>& image_files, float fx, float fy, float cx, float cy)
{
no_input_specified = false;
latest_frame = cv::Mat();
latest_gray_frame = cv::Mat();
this->image_files = image_files;
// Allow for setting the camera intrinsics, but have to be the same ones for every image
if (fx != -1 && fy != -1)
{
image_focal_length_set = true;
this->fx = fx;
this->fy = fy;
}
else
{
image_focal_length_set = false;
}
if (cx != -1 && cy != -1)
{
this->cx = cx;
this->cy = cy;
image_optical_center_set = true;
}
else
{
image_optical_center_set = false;
}
return true;
}
bool ImageCapture::OpenDirectory(std::string directory, std::string bbox_directory, float fx, float fy, float cx, float cy)
{
INFO_STREAM("Attempting to read from directory: " << directory);
no_input_specified = false;
image_files.clear();
boost::filesystem::path image_directory(directory);
std::vector<boost::filesystem::path> file_in_directory;
copy(boost::filesystem::directory_iterator(image_directory), boost::filesystem::directory_iterator(), back_inserter(file_in_directory));
// Sort the images in the directory first
sort(file_in_directory.begin(), file_in_directory.end());
std::vector<std::string> curr_dir_files;
for (std::vector<boost::filesystem::path>::const_iterator file_iterator(file_in_directory.begin()); file_iterator != file_in_directory.end(); ++file_iterator)
{
// Accepted image extensions: .jpg, .jpeg, .png, and .bmp
if (file_iterator->extension().string().compare(".jpg") == 0 || file_iterator->extension().string().compare(".jpeg") == 0 || file_iterator->extension().string().compare(".png") == 0 || file_iterator->extension().string().compare(".bmp") == 0)
{
curr_dir_files.push_back(file_iterator->string());
// If bounding box directory is specified, read the bounding boxes from it
if (!bbox_directory.empty())
{
boost::filesystem::path current_file = *file_iterator;
boost::filesystem::path bbox_file = current_file.replace_extension("txt");
// If there is a bounding box file push it to the list of bounding boxes
if (boost::filesystem::exists(bbox_file))
{
std::ifstream in_bbox(bbox_file.string().c_str(), std::ios_base::in);
std::vector<cv::Rect_<double> > bboxes_image;
// Keep reading bounding boxes from the file until the end, skipping empty lines
while (!in_bbox.eof())
{
std::string bbox_string;
std::getline(in_bbox, bbox_string);
if (bbox_string.empty())
continue;
std::stringstream ss(bbox_string);
double min_x, min_y, max_x, max_y;
ss >> min_x >> min_y >> max_x >> max_y;
bboxes_image.push_back(cv::Rect_<double>(min_x, min_y, max_x - min_x, max_y - min_y));
}
in_bbox.close();
bounding_boxes.push_back(bboxes_image);
}
else
{
ERROR_STREAM("Could not find the corresponding bounding box for file: " + file_iterator->string());
exit(1);
}
}
}
}
image_files = curr_dir_files;
if (image_files.empty())
{
std::cout << "No images found in the directory: " << directory << std::endl;
return false;
}
// Allow for setting the camera intrinsics, but have to be the same ones for every image
if (fx != -1 && fy != -1)
{
image_focal_length_set = true;
this->fx = fx;
this->fy = fy;
}
else
{
image_focal_length_set = false;
}
if (cx != -1 && cy != -1)
{
this->cx = cx;
this->cy = cy;
image_optical_center_set = true;
}
else
{
image_optical_center_set = false;
}
return true;
}
void ImageCapture::SetCameraIntrinsics(float fx, float fy, float cx, float cy)
{
// If the optical center is not defined, use the center of the image
if (cx == -1)
{
this->cx = this->image_width / 2.0f;
this->cy = this->image_height / 2.0f;
}
else
{
this->cx = cx;
this->cy = cy;
}
// Use a rough guess-timate of focal length
if (fx == -1)
{
this->fx = 500.0f * (this->image_width / 640.0f);
this->fy = 500.0f * (this->image_height / 480.0f);
this->fx = (this->fx + this->fy) / 2.0f;
this->fy = this->fx;
}
else
{
this->fx = fx;
this->fy = fy;
}
}
// Returns the read image in 3-channel BGR format, and also prepares the corresponding grayscale frame
cv::Mat ImageCapture::GetNextImage()
{
if (image_files.empty() || frame_num >= image_files.size())
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
return latest_frame;
}
// Load the image as an 8 bit RGB
latest_frame = cv::imread(image_files[frame_num], CV_LOAD_IMAGE_COLOR);
if (latest_frame.empty())
{
ERROR_STREAM("Could not open the image: " + image_files[frame_num]);
exit(1);
}
image_height = latest_frame.size().height;
image_width = latest_frame.size().width;
// Reset the intrinsics for every image if they are not set globally
float _fx = -1;
float _fy = -1;
if (image_focal_length_set)
{
_fx = fx;
_fy = fy;
}
float _cx = -1;
float _cy = -1;
if (image_optical_center_set)
{
_cx = cx;
_cy = cy;
}
SetCameraIntrinsics(_fx, _fy, _cx, _cy);
// Set the grayscale frame
ConvertToGrayscale_8bit(latest_frame, latest_gray_frame);
this->name = image_files[frame_num];
frame_num++;
return latest_frame;
}
std::vector<cv::Rect_<double> > ImageCapture::GetBoundingBoxes()
{
if (!bounding_boxes.empty())
{
return bounding_boxes[frame_num - 1];
}
else
{
return std::vector<cv::Rect_<double> >();
}
}
double ImageCapture::GetProgress()
{
return (double)frame_num / (double)image_files.size();
}
cv::Mat_<uchar> ImageCapture::GetGrayFrame()
{
return latest_gray_frame;
}
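
For reference, the bounding-box reader above expects a .txt file next to each image with one "min_x min_y max_x max_y" box per line; a hypothetical two-face file could contain:

57.0 102.5 212.0 260.0
301.2 95.0 450.8 248.3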

RecorderCSV.cpp
@@ -0,0 +1,347 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "RecorderCSV.h"
// For sorting
#include <algorithm>
// For standard out
#include <iostream>
#include <iomanip>
#include <locale>
using namespace Utilities;
// Default constructor initializes the variables
RecorderCSV::RecorderCSV() : output_file() {}
// Making sure full stop is used for decimal point separation
struct fullstop : std::numpunct<char> {
char do_decimal_point() const { return '.'; }
};
// Opening the file and preparing the header for it
bool RecorderCSV::Open(std::string output_file_name, bool is_sequence, bool output_2D_landmarks, bool output_3D_landmarks, bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze,
int num_face_landmarks, int num_model_modes, int num_eye_landmarks, const std::vector<std::string>& au_names_class, const std::vector<std::string>& au_names_reg)
{
output_file.open(output_file_name, std::ios_base::out);
output_file.imbue(std::locale(output_file.getloc(), new fullstop));
if (!output_file.is_open())
return false;
this->is_sequence = is_sequence;
// Set up what we are recording
this->output_2D_landmarks = output_2D_landmarks;
this->output_3D_landmarks = output_3D_landmarks;
this->output_AUs = output_AUs;
this->output_gaze = output_gaze;
this->output_model_params = output_model_params;
this->output_pose = output_pose;
this->au_names_class = au_names_class;
this->au_names_reg = au_names_reg;
// Different headers if we are writing out the results on a sequence or an individual image
if(this->is_sequence)
{
output_file << "frame, timestamp, confidence, success";
}
else
{
output_file << "face, confidence";
}
if (output_gaze)
{
output_file << ", gaze_0_x, gaze_0_y, gaze_0_z, gaze_1_x, gaze_1_y, gaze_1_z, gaze_angle_x, gaze_angle_y";
for (int i = 0; i < num_eye_landmarks; ++i)
{
output_file << ", eye_lmk_x_" << i;
}
for (int i = 0; i < num_eye_landmarks; ++i)
{
output_file << ", eye_lmk_y_" << i;
}
for (int i = 0; i < num_eye_landmarks; ++i)
{
output_file << ", eye_lmk_X_" << i;
}
for (int i = 0; i < num_eye_landmarks; ++i)
{
output_file << ", eye_lmk_Y_" << i;
}
for (int i = 0; i < num_eye_landmarks; ++i)
{
output_file << ", eye_lmk_Z_" << i;
}
}
if (output_pose)
{
output_file << ", pose_Tx, pose_Ty, pose_Tz, pose_Rx, pose_Ry, pose_Rz";
}
if (output_2D_landmarks)
{
for (int i = 0; i < num_face_landmarks; ++i)
{
output_file << ", x_" << i;
}
for (int i = 0; i < num_face_landmarks; ++i)
{
output_file << ", y_" << i;
}
}
if (output_3D_landmarks)
{
for (int i = 0; i < num_face_landmarks; ++i)
{
output_file << ", X_" << i;
}
for (int i = 0; i < num_face_landmarks; ++i)
{
output_file << ", Y_" << i;
}
for (int i = 0; i < num_face_landmarks; ++i)
{
output_file << ", Z_" << i;
}
}
// Output the model parameters (rigid and non-rigid): the first 6 are the rigid shape parameters, followed by the non-rigid shape parameters
if (output_model_params)
{
output_file << ", p_scale, p_rx, p_ry, p_rz, p_tx, p_ty";
for (int i = 0; i < num_model_modes; ++i)
{
output_file << ", p_" << i;
}
}
if (output_AUs)
{
std::sort(this->au_names_reg.begin(), this->au_names_reg.end());
for (std::string reg_name : this->au_names_reg)
{
output_file << ", " << reg_name << "_r";
}
std::sort(this->au_names_class.begin(), this->au_names_class.end());
for (std::string class_name : this->au_names_class)
{
output_file << ", " << class_name << "_c";
}
}
output_file << std::endl;
return true;
}
void RecorderCSV::WriteLine(int observation_count, double time_stamp, bool landmark_detection_success, double landmark_confidence,
const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D, const cv::Mat_<double>& pdm_model_params, const cv::Vec6d& rigid_shape_params, const cv::Vec6d& pose_estimate,
const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d,
const std::vector<std::pair<std::string, double> >& au_intensities, const std::vector<std::pair<std::string, double> >& au_occurences)
{
if (!output_file.is_open())
{
std::cout << "The output CSV file is not open, exiting" << std::endl;
exit(1);
}
// Making sure fixed and not scientific notation is used
output_file << std::fixed;
output_file << std::noshowpoint;
if(is_sequence)
{
output_file << std::setprecision(3);
output_file << observation_count << ", " << time_stamp;
output_file << std::setprecision(2);
output_file << ", " << landmark_confidence;
output_file << std::setprecision(0);
output_file << ", " << landmark_detection_success;
}
else
{
output_file << std::setprecision(3);
output_file << observation_count << ", " << landmark_confidence;
}
// Output the estimated gaze
if (output_gaze)
{
output_file << std::setprecision(6);
output_file << ", " << gazeDirection0.x << ", " << gazeDirection0.y << ", " << gazeDirection0.z
<< ", " << gazeDirection1.x << ", " << gazeDirection1.y << ", " << gazeDirection1.z;
// Output gaze angle (same format as head pose angle)
output_file << std::setprecision(3);
output_file << ", " << gaze_angle[0] << ", " << gaze_angle[1];
// Output the 2D eye landmarks
output_file << std::setprecision(1);
for (auto eye_lmk : eye_landmarks2d)
{
output_file << ", " << eye_lmk.x;
}
for (auto eye_lmk : eye_landmarks2d)
{
output_file << ", " << eye_lmk.y;
}
// Output the 3D eye landmarks
for (auto eye_lmk : eye_landmarks3d)
{
output_file << ", " << eye_lmk.x;
}
for (auto eye_lmk : eye_landmarks3d)
{
output_file << ", " << eye_lmk.y;
}
for (auto eye_lmk : eye_landmarks3d)
{
output_file << ", " << eye_lmk.z;
}
}
// Output the estimated head pose
if (output_pose)
{
output_file << std::setprecision(1);
output_file << ", " << pose_estimate[0] << ", " << pose_estimate[1] << ", " << pose_estimate[2];
output_file << std::setprecision(3);
output_file << ", " << pose_estimate[3] << ", " << pose_estimate[4] << ", " << pose_estimate[5];
}
// Output the detected 2D facial landmarks
if (output_2D_landmarks)
{
output_file.precision(1);
// Output the 2D facial landmarks
for (auto lmk : landmarks_2D)
{
output_file << ", " << lmk;
}
}
// Output the detected 3D facial landmarks
if (output_3D_landmarks)
{
output_file.precision(1);
// Output the 3D facial landmarks
for (auto lmk : landmarks_3D)
{
output_file << ", " << lmk;
}
}
if (output_model_params)
{
output_file.precision(3);
for (int i = 0; i < 6; ++i)
{
output_file << ", " << rigid_shape_params[i];
}
// Output the non_rigid shape parameters
for (auto lmk : pdm_model_params)
{
output_file << ", " << lmk;
}
}
if (output_AUs)
{
// Write out AU intensities at the correct index (AU names are sorted)
output_file.precision(2);
for (std::string au_name : au_names_reg)
{
for (auto au_reg : au_intensities)
{
if (au_name.compare(au_reg.first) == 0)
{
output_file << ", " << au_reg.second;
break;
}
}
}
if (au_intensities.size() == 0)
{
for (size_t p = 0; p < au_names_reg.size(); ++p)
{
output_file << ", 0";
}
}
output_file.precision(1);
// Write out AU occurrences at the correct index (AU names are sorted)
for (std::string au_name : au_names_class)
{
for (auto au_class : au_occurences)
{
if (au_name.compare(au_class.first) == 0)
{
output_file << ", " << au_class.second;
break;
}
}
}
if (au_occurences.size() == 0)
{
for (size_t p = 0; p < au_names_class.size(); ++p)
{
output_file << ", 0";
}
}
}
output_file << std::endl;
}
// Closing the file and cleaning up
void RecorderCSV::Close()
{
output_file.close();
}
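
As a concrete example of the header logic above, opening a non-sequence recorder with only pose output enabled produces the first line:

face, confidence, pose_Tx, pose_Ty, pose_Tz, pose_Rx, pose_Ry, pose_Rz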

RecorderHOG.cpp
@@ -0,0 +1,95 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "RecorderHOG.h"
#include <fstream>
using namespace Utilities;
// Default constructor initializes the variables
RecorderHOG::RecorderHOG() : hog_file() {}
// Opening the file and preparing the header for it
bool RecorderHOG::Open(std::string output_file_name)
{
hog_file.open(output_file_name, std::ios_base::out | std::ios_base::binary);
return hog_file.is_open();
}
void RecorderHOG::Close()
{
hog_file.close();
}
void RecorderHOG::Write()
{
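// Record layout: num_cols, num_rows, num_channels as int32, a float validity flag
// (1 = good frame, -1 = bad), then num_cols * num_rows * 31 float32 descriptor values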
hog_file.write((char*)(&num_cols), 4);
hog_file.write((char*)(&num_rows), 4);
hog_file.write((char*)(&num_channels), 4);
// Not the best way to store a bool, but it makes the file much easier to read back
float good_frame_float;
if (good_frame)
good_frame_float = 1;
else
good_frame_float = -1;
hog_file.write((char*)(&good_frame_float), 4);
cv::MatConstIterator_<double> descriptor_it = hog_descriptor.begin();
for (int y = 0; y < num_cols; ++y)
{
for (int x = 0; x < num_rows; ++x)
{
for (unsigned int o = 0; o < 31; ++o)
{
float hog_data = (float)(*descriptor_it++);
hog_file.write((char*)&hog_data, 4);
}
}
}
}
// Storing the HOG observation so that Write() can output it
void RecorderHOG::SetObservationHOG(bool good_frame, const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows, int num_channels)
{
this->num_cols = num_cols;
this->num_rows = num_rows;
this->num_channels = num_channels;
this->hog_descriptor = hog_descriptor;
this->good_frame = good_frame;
}
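For reference, a reader matching this byte layout could look like the following sketch (not part of the commit; ReadHOGRecord is a hypothetical helper, and the fixed 31-orientation cell size is taken from Write() above):
#include <fstream>
#include <vector>
// Reads one record as produced by RecorderHOG::Write: three int32 headers,
// a float validity flag, then num_cols * num_rows * 31 float values
bool ReadHOGRecord(std::ifstream& in, int& num_cols, int& num_rows, int& num_channels,
	bool& good_frame, std::vector<float>& descriptor)
{
	if (!in.read((char*)&num_cols, 4)) return false;
	in.read((char*)&num_rows, 4);
	in.read((char*)&num_channels, 4);
	float good_frame_float;
	in.read((char*)&good_frame_float, 4);
	good_frame = good_frame_float > 0;
	descriptor.resize((size_t)num_cols * num_rows * 31);
	in.read((char*)descriptor.data(), descriptor.size() * sizeof(float));
	return in.good();
}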

View file

@ -0,0 +1,408 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "RecorderOpenFace.h"
// For sorting
#include <algorithm>
// File manipulation
#include <fstream>
#include <sstream>
#include <iostream>
// Boost includes for file system manipulation
#include <filesystem.hpp>
#include <filesystem/fstream.hpp>
#include <boost/algorithm/string.hpp>
using namespace boost::filesystem;
using namespace Utilities;
#define WARN_STREAM( stream ) \
std::cout << "Warning: " << stream << std::endl
void CreateDirectory(std::string output_path)
{
// Creating the right directory structure
auto p = path(output_path);
if (!boost::filesystem::exists(p))
{
bool success = boost::filesystem::create_directories(p);
if (!success)
{
std::cout << "ERROR: failed to create output directory:" << p.string() << ", do you have permission to create directory" << std::endl;
exit(1);
}
}
}
RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::vector<std::string>& arguments):video_writer(), params(parameters)
{
// From the filename, strip out the name without directory and extension
if (boost::filesystem::is_directory(in_filename))
{
filename = boost::filesystem::canonical(boost::filesystem::path(in_filename)).filename().string();
}
else
{
filename = boost::filesystem::path(in_filename).filename().replace_extension("").string();
}
// Consuming the input arguments
// Track which arguments have been consumed (a std::vector avoids leaking a raw new[] array)
std::vector<bool> valid(arguments.size(), true);
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-out_dir") == 0)
{
record_root = arguments[i + 1];
}
}
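// Note: unlike -of below, -out_dir is not flagged as consumed here, so it stays in the argument list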
// Determine output directory
bool output_found = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (!output_found && arguments[i].compare("-of") == 0)
{
record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).remove_filename().string();
filename = path(boost::filesystem::path(arguments[i + 1])).replace_extension("").filename().string();
valid[i] = false;
valid[i + 1] = false;
i++;
output_found = true;
}
}
// If recording directory not set, record to default location
if (record_root.empty())
record_root = default_record_directory;
for (int i = (int)arguments.size() - 1; i >= 0; --i)
{
if (!valid[i])
{
arguments.erase(arguments.begin() + i);
}
}
// Construct the directories required for the output
CreateDirectory(record_root);
// Create the filename for the general output file that contains all of the meta information about the recording
path of_det_name(filename);
of_det_name = path(record_root) / path(filename + "_of_details.txt");
// Write to the metadata file what we are outputting, what the input is, etc.
metadata_file.open(of_det_name.string(), std::ios_base::out);
if (!metadata_file.is_open())
{
cout << "ERROR: could not open the output file:" << of_det_name << ", either the path of the output directory is wrong or you do not have the permissions to write to it" << endl;
exit(1);
}
// Populate relative and full path names in the meta file, unless it is a webcam
if(!params.isFromWebcam())
{
string input_filename_relative = in_filename;
string input_filename_full = in_filename;
if (!boost::filesystem::path(input_filename_full).is_absolute())
{
input_filename_full = boost::filesystem::canonical(input_filename_relative).string();
}
metadata_file << "Input:" << input_filename_relative << endl;
metadata_file << "Input full path:" << input_filename_full << endl;
}
else
{
// Populate the metadata file
metadata_file << "Input:webcam" << endl;
}
metadata_file << "Camera parameters:" << parameters.getFx() << "," << parameters.getFy() << "," << parameters.getCx() << "," << parameters.getCy() << endl;
// Create the required individual recorders, CSV, HOG, aligned, video
csv_filename = filename + ".csv";
// Construct the HOG recorder here
if(params.outputHOG())
{
// Output the data based on record_root, but do not include record_root in the meta file, as it is also in that directory
std::string hog_filename = filename + ".hog";
metadata_file << "Output HOG:" << hog_filename << endl;
hog_filename = (path(record_root) / hog_filename).string();
hog_recorder.Open(hog_filename);
}
// saving the videos
if (params.outputTracked())
{
if(parameters.isSequence())
{
// Output the data based on record_root, but do not include record_root in the meta file, as it is also in that directory
this->media_filename = filename + ".avi";
metadata_file << "Output video:" << this->media_filename << endl;
this->media_filename = (path(record_root) / this->media_filename).string();
}
else
{
this->media_filename = filename + ".jpg";
metadata_file << "Output image:" << this->media_filename << endl;
this->media_filename = (path(record_root) / this->media_filename).string();
}
}
// Prepare image recording
if (params.outputAlignedFaces())
{
aligned_output_directory = filename + "_aligned";
metadata_file << "Output aligned directory:" << this->aligned_output_directory << endl;
this->aligned_output_directory = (path(record_root) / this->aligned_output_directory).string();
CreateDirectory(aligned_output_directory);
}
observation_count = 0;
}
void RecorderOpenFace::SetObservationFaceAlign(const cv::Mat& aligned_face)
{
this->aligned_face = aligned_face;
}
void RecorderOpenFace::SetObservationVisualization(const cv::Mat &vis_track)
{
if (params.outputTracked())
{
// Initialize the video writer if it has not been opened yet
if(params.isSequence() && !video_writer.isOpened())
{
std::string output_codec = params.outputCodec();
try
{
video_writer.open(media_filename, CV_FOURCC(output_codec[0], output_codec[1], output_codec[2], output_codec[3]), params.outputFps(), vis_track.size(), true);
if (!video_writer.isOpened())
{
WARN_STREAM("Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN.");
}
}
catch (const cv::Exception& e)
{
WARN_STREAM("Could not open VideoWriter, OUTPUT FILE WILL NOT BE WRITTEN. Currently using codec " << output_codec << ", try using another one (-oc option)");
}
}
vis_to_out = vis_track;
}
}
void RecorderOpenFace::WriteObservation()
{
observation_count++;
// Write out the CSV file (it will always be there, even if nothing more than frame/face numbers is output)
if(observation_count == 1)
{
// As we are writing out the header, work out some things like number of landmarks, names of AUs etc.
int num_face_landmarks = landmarks_2D.rows / 2;
int num_eye_landmarks = (int)eye_landmarks2D.size();
int num_model_modes = pdm_params_local.rows;
std::vector<std::string> au_names_class;
for (auto au : au_occurences)
{
au_names_class.push_back(au.first);
}
std::sort(au_names_class.begin(), au_names_class.end());
std::vector<std::string> au_names_reg;
for (auto au : au_intensities)
{
au_names_reg.push_back(au.first);
}
std::sort(au_names_reg.begin(), au_names_reg.end());
metadata_file << "Output csv:" << csv_filename << endl;
metadata_file << "Gaze: " << params.outputGaze() << endl;
metadata_file << "AUs: " << params.outputAUs() << endl;
metadata_file << "Landmarks 2D: " << params.output2DLandmarks() << endl;
metadata_file << "Landmarks 3D: " << params.output3DLandmarks() << endl;
metadata_file << "Pose: " << params.outputPose() << endl;
metadata_file << "Shape parameters: " << params.outputPDMParams() << endl;
csv_filename = (path(record_root) / csv_filename).string();
csv_recorder.Open(csv_filename, params.isSequence(), params.output2DLandmarks(), params.output3DLandmarks(), params.outputPDMParams(), params.outputPose(),
params.outputAUs(), params.outputGaze(), num_face_landmarks, num_model_modes, num_eye_landmarks, au_names_class, au_names_reg);
}
this->csv_recorder.WriteLine(observation_count, timestamp, landmark_detection_success,
landmark_detection_confidence, landmarks_2D, landmarks_3D, pdm_params_local, pdm_params_global, head_pose,
gaze_direction0, gaze_direction1, gaze_angle, eye_landmarks2D, eye_landmarks3D, au_intensities, au_occurences);
if(params.outputHOG())
{
this->hog_recorder.Write();
}
// Write aligned faces
if (params.outputAlignedFaces())
{
char name[100];
// Filename is based on frame number
if(params.isSequence())
std::sprintf(name, "frame_det_%06d.bmp", observation_count);
else
std::sprintf(name, "face_det_%06d.bmp", observation_count);
// Construct the output filename
boost::filesystem::path slash("/");
std::string preferredSlash = slash.make_preferred().string();
string out_file = aligned_output_directory + preferredSlash + string(name);
bool write_success = cv::imwrite(out_file, aligned_face);
if (!write_success)
{
WARN_STREAM("Could not output similarity aligned image image");
}
}
if(params.outputTracked())
{
if (vis_to_out.empty())
{
WARN_STREAM("Output tracked video frame is not set");
}
if(params.isSequence() )
{
if(video_writer.isOpened())
{
video_writer.write(vis_to_out);
}
}
else
{
bool out_success = cv::imwrite(media_filename, vis_to_out);
if (!out_success)
{
WARN_STREAM("Could not output tracked image");
}
}
// Clear the output
vis_to_out = cv::Mat();
}
}
void RecorderOpenFace::SetObservationHOG(bool good_frame, const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows, int num_channels)
{
this->hog_recorder.SetObservationHOG(good_frame, hog_descriptor, num_cols, num_rows, num_channels);
}
void RecorderOpenFace::SetObservationTimestamp(double timestamp)
{
this->timestamp = timestamp;
}
void RecorderOpenFace::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, const cv::Mat_<double>& landmarks_3D,
const cv::Vec6d& pdm_params_global, const cv::Mat_<double>& pdm_params_local, double confidence, bool success)
{
this->landmarks_2D = landmarks_2D;
this->landmarks_3D = landmarks_3D;
this->pdm_params_global = pdm_params_global;
this->pdm_params_local = pdm_params_local;
this->landmark_detection_confidence = confidence;
this->landmark_detection_success = success;
}
void RecorderOpenFace::SetObservationPose(const cv::Vec6d& pose)
{
this->head_pose = pose;
}
void RecorderOpenFace::SetObservationActionUnits(const std::vector<std::pair<std::string, double> >& au_intensities,
const std::vector<std::pair<std::string, double> >& au_occurences)
{
this->au_intensities = au_intensities;
this->au_occurences = au_occurences;
}
void RecorderOpenFace::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1,
const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2D, const std::vector<cv::Point3d>& eye_landmarks3D)
{
this->gaze_direction0 = gaze_direction0;
this->gaze_direction1 = gaze_direction1;
this->gaze_angle = gaze_angle;
this->eye_landmarks2D = eye_landmarks2D;
this->eye_landmarks3D = eye_landmarks3D;
}
RecorderOpenFace::~RecorderOpenFace()
{
this->Close();
}
void RecorderOpenFace::Close()
{
hog_recorder.Close();
csv_recorder.Close();
video_writer.release();
metadata_file.close();
}
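Note: the usage implied by the implementation above is one WriteObservation() per processed frame, preceded by the relevant SetObservation*() calls; the first WriteObservation() also writes the CSV header (so the gaze, AU, and landmark setters must have run before it), and Close() then finalizes the HOG, CSV, video, and metadata outputs.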

View file

@ -0,0 +1,140 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "RecorderOpenFaceParameters.h"
using namespace std;
using namespace Utilities;
RecorderOpenFaceParameters::RecorderOpenFaceParameters(std::vector<std::string> &arguments, bool sequence, bool from_webcam, float fx, float fy, float cx, float cy, double fps_vid_out)
{
string separator = string(1, boost::filesystem::path::preferred_separator);
this->is_sequence = sequence;
this->is_from_webcam = from_webcam;
this->fx = fx;
this->fy = fy;
this->cx = cx;
this->cy = cy;
if(fps_vid_out > 0)
{
this->fps_vid_out = fps_vid_out;
}
else
{
this->fps_vid_out = 30; // If an illegal value for fps provided, default to 30
}
// Default output codec
this->output_codec = "DIVX";
bool output_set = false;
output_2D_landmarks = false;
output_3D_landmarks = false;
output_model_params = false;
output_pose = false;
output_AUs = false;
output_gaze = false;
output_hog = false;
output_tracked = false;
output_aligned_faces = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-simalign") == 0)
{
output_aligned_faces = true;
output_set = true;
}
else if (arguments[i].compare("-hogalign") == 0)
{
output_hog = true;
output_set = true;
}
else if (arguments[i].compare("-2Dfp") == 0)
{
output_2D_landmarks = true;
output_set = true;
}
else if (arguments[i].compare("-3Dfp") == 0)
{
output_3D_landmarks = true;
output_set = true;
}
else if (arguments[i].compare("-pdmparams") == 0)
{
output_model_params = true;
output_set = true;
}
else if (arguments[i].compare("-pose") == 0)
{
output_pose = true;
output_set = true;
}
else if (arguments[i].compare("-aus") == 0)
{
output_AUs = true;
output_set = true;
}
else if (arguments[i].compare("-gaze") == 0)
{
output_gaze = true;
output_set = true;
}
else if (arguments[i].compare("-tracked") == 0)
{
output_tracked = true;
output_set = true;
}
}
// Output everything if nothing has been set
if (!output_set)
{
output_2D_landmarks = true;
output_3D_landmarks = true;
output_model_params = true;
output_pose = true;
output_AUs = true;
output_gaze = true;
output_hog = true;
output_tracked = true;
output_aligned_faces = true;
}
}
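To illustrate the flag parsing above, a hypothetical caller could enable just two of the outputs as follows (a sketch; the function name and chosen argument values are placeholders):
#include "RecorderOpenFaceParameters.h"
#include <string>
#include <vector>
void Example()
{
	// Request only 2D landmarks and pose; intrinsics unknown (-1), 30 fps output
	std::vector<std::string> args = { "-2Dfp", "-pose" };
	Utilities::RecorderOpenFaceParameters params(args, true /*sequence*/, false /*webcam*/, -1, -1, -1, -1, 30);
	// output2DLandmarks() and outputPose() now return true and the other output
	// accessors false; with no output flags at all, everything is enabled
}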

View file

@ -0,0 +1,461 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "SequenceCapture.h"
#include "ImageManipulationHelpers.h"
#include <iostream>
// Boost includes
#include <filesystem.hpp>
#include <filesystem/fstream.hpp>
#include <boost/algorithm/string.hpp>
// OpenCV includes
#include <opencv2/imgproc.hpp>
// For timing
#include <chrono>
#include <ctime>
using namespace Utilities;
#define INFO_STREAM( stream ) \
std::cout << stream << std::endl
#define WARN_STREAM( stream ) \
std::cout << "Warning: " << stream << std::endl
#define ERROR_STREAM( stream ) \
std::cout << "Error: " << stream << std::endl
bool SequenceCapture::Open(std::vector<std::string>& arguments)
{
// Consuming the input arguments
// Track which arguments have been consumed (a std::vector avoids leaking a raw new[] array)
std::vector<bool> valid(arguments.size(), true);
// Some default values
std::string input_root = "";
fx = -1; fy = -1; cx = -1; cy = -1;
frame_num = 0;
time_stamp = 0;
std::string separator = std::string(1, boost::filesystem::path::preferred_separator);
// First check if there is a root argument (so that videos and input directories could be defined more easily)
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-root") == 0)
{
input_root = arguments[i + 1] + separator;
i++;
}
if (arguments[i].compare("-inroot") == 0)
{
input_root = arguments[i + 1] + separator;
i++;
}
}
std::string input_video_file;
std::string input_sequence_directory;
int device = -1;
bool file_found = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (!file_found && arguments[i].compare("-f") == 0)
{
input_video_file = (input_root + arguments[i + 1]);
valid[i] = false;
valid[i + 1] = false;
i++;
file_found = true;
}
else if (!file_found && arguments[i].compare("-fdir") == 0)
{
input_sequence_directory = (input_root + arguments[i + 1]);
valid[i] = false;
valid[i + 1] = false;
i++;
file_found = true;
}
else if (arguments[i].compare("-fx") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> fx;
i++;
}
else if (arguments[i].compare("-fy") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> fy;
i++;
}
else if (arguments[i].compare("-cx") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cx;
i++;
}
else if (arguments[i].compare("-cy") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cy;
i++;
}
else if (arguments[i].compare("-device") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> device;
valid[i] = false;
valid[i + 1] = false;
i++;
}
}
for (int i = (int)arguments.size() - 1; i >= 0; --i)
{
if (!valid[i])
{
arguments.erase(arguments.begin() + i);
}
}
no_input_specified = !file_found;
// Based on what was read in open the sequence
if (device != -1)
{
// TODO allow to specify webcam resolution
return OpenWebcam(device, 640, 480, fx, fy, cx, cy);
}
if (!input_video_file.empty())
{
return OpenVideoFile(input_video_file, fx, fy, cx, cy);
}
if (!input_sequence_directory.empty())
{
return OpenImageSequence(input_sequence_directory, fx, fy, cx, cy);
}
// If no input found return false and set a flag for it
no_input_specified = true;
return false;
}
// Get current date/time, format is YYYY-MM-DD-HH-MM, useful for saving data from webcam
const std::string currentDateTime()
{
time_t rawtime;
struct tm * timeinfo;
char buffer[80];
time(&rawtime);
timeinfo = localtime(&rawtime);
strftime(buffer, sizeof(buffer), "%Y-%m-%d-%H-%M", timeinfo);
return buffer;
}
bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height, float fx, float fy, float cx, float cy)
{
INFO_STREAM("Attempting to read from webcam: " << device);
no_input_specified = false;
if (device < 0)
{
std::cout << "Specify a valid device" << std::endl;
return false;
}
latest_frame = cv::Mat();
latest_gray_frame = cv::Mat();
capture.open(device);
capture.set(CV_CAP_PROP_FRAME_WIDTH, image_width);
capture.set(CV_CAP_PROP_FRAME_HEIGHT, image_height);
is_webcam = true;
is_image_seq = false;
vid_length = 0;
this->frame_width = (int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
if (!capture.isOpened())
{
std::cout << "Failed to open the webcam" << std::endl;
return false;
}
if (frame_width != image_width || frame_height != image_height)
{
std::cout << "Failed to open the webcam with desired resolution" << std::endl;
std::cout << "Defaulting to " << frame_width << "x" << frame_height << std::endl;
}
this->fps = capture.get(CV_CAP_PROP_FPS);
// Check if fps is nan or less than 0
if (fps != fps || fps <= 0)
{
INFO_STREAM("FPS of the webcam cannot be determined, assuming 30");
fps = 30;
}
SetCameraIntrinsics(fx, fy, cx, cy);
std::string time = currentDateTime();
this->name = "webcam_" + time;
start_time = cv::getTickCount();
return true;
}
// Destructor that releases the capture
SequenceCapture::~SequenceCapture()
{
if (capture.isOpened())
capture.release();
}
bool SequenceCapture::OpenVideoFile(std::string video_file, float fx, float fy, float cx, float cy)
{
INFO_STREAM("Attempting to read from file: " << video_file);
no_input_specified = false;
latest_frame = cv::Mat();
latest_gray_frame = cv::Mat();
capture.open(video_file);
if (!capture.isOpened())
{
std::cout << "Failed to open the video file at location: " << video_file << std::endl;
return false;
}
this->fps = capture.get(CV_CAP_PROP_FPS);
// Check if fps is nan or less than 0
if (fps != fps || fps <= 0)
{
WARN_STREAM("FPS of the video file cannot be determined, assuming 30");
fps = 30;
}
is_webcam = false;
is_image_seq = false;
this->frame_width = (int)capture.get(CV_CAP_PROP_FRAME_WIDTH);
this->frame_height = (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT);
vid_length = (int)capture.get(CV_CAP_PROP_FRAME_COUNT);
SetCameraIntrinsics(fx, fy, cx, cy);
this->name = video_file;
return true;
}
bool SequenceCapture::OpenImageSequence(std::string directory, float fx, float fy, float cx, float cy)
{
INFO_STREAM("Attempting to read from directory: " << directory);
no_input_specified = false;
image_files.clear();
boost::filesystem::path image_directory(directory);
std::vector<boost::filesystem::path> file_in_directory;
copy(boost::filesystem::directory_iterator(image_directory), boost::filesystem::directory_iterator(), back_inserter(file_in_directory));
// Sort the images in the directory first
sort(file_in_directory.begin(), file_in_directory.end());
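// Note: the sort is lexicographic, so frame numbers should be zero-padded to preserve temporal order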
std::vector<std::string> curr_dir_files;
for (std::vector<boost::filesystem::path>::const_iterator file_iterator(file_in_directory.begin()); file_iterator != file_in_directory.end(); ++file_iterator)
{
// Accepted image extensions: .jpg, .jpeg, .png, and .bmp
if (file_iterator->extension().string().compare(".jpg") == 0 || file_iterator->extension().string().compare(".jpeg") == 0 || file_iterator->extension().string().compare(".png") == 0 || file_iterator->extension().string().compare(".bmp") == 0)
{
curr_dir_files.push_back(file_iterator->string());
}
}
image_files = curr_dir_files;
if (image_files.empty())
{
std::cout << "No images found in the directory: " << directory << std::endl;
return false;
}
// Assume all images are same size in an image sequence
cv::Mat tmp = cv::imread(image_files[0], CV_LOAD_IMAGE_COLOR);
this->frame_height = tmp.size().height;
this->frame_width = tmp.size().width;
SetCameraIntrinsics(fx, fy, cx, cy);
// No fps as we have a sequence
this->fps = 0;
this->name = directory;
is_webcam = false;
is_image_seq = true;
vid_length = image_files.size();
return true;
}
void SequenceCapture::SetCameraIntrinsics(float fx, float fy, float cx, float cy)
{
// If optical centers are not defined just use center of image
if (cx == -1)
{
this->cx = this->frame_width / 2.0f;
this->cy = this->frame_height / 2.0f;
}
else
{
this->cx = cx;
this->cy = cy;
}
// Use a rough guesstimate of the focal length
if (fx == -1)
{
this->fx = 500.0f * (this->frame_width / 640.0f);
this->fy = 500.0f * (this->frame_height / 480.0f);
this->fx = (this->fx + this->fy) / 2.0f;
this->fy = this->fx;
}
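// e.g. for a 1280x960 input: fx = 500 * 2 = 1000 and fy = 500 * 2 = 1000, averaged to 1000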
else
{
this->fx = fx;
this->fy = fy;
}
}
cv::Mat SequenceCapture::GetNextFrame()
{
if (is_webcam || !is_image_seq)
{
bool success = capture.read(latest_frame);
if (!success)
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
// Recording the timestamp
if (!is_webcam)
{
time_stamp = frame_num * (1.0 / fps);
}
else
{
time_stamp = (cv::getTickCount() - start_time) / cv::getTickFrequency();
}
}
else if (is_image_seq)
{
if (image_files.empty() || frame_num >= image_files.size())
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
else
{
latest_frame = cv::imread(image_files[frame_num], CV_LOAD_IMAGE_COLOR);
}
time_stamp = 0;
}
// Set the grayscale frame
ConvertToGrayscale_8bit(latest_frame, latest_gray_frame);
frame_num++;
return latest_frame;
}
double SequenceCapture::GetProgress()
{
if (is_webcam)
{
return -1.0;
}
else
{
return (double)frame_num / (double)vid_length;
}
}
bool SequenceCapture::IsOpened()
{
if (is_webcam || !is_image_seq)
return capture.isOpened();
else
return (image_files.size() > 0 && frame_num < image_files.size());
}
cv::Mat_<uchar> SequenceCapture::GetGrayFrame()
{
return latest_gray_frame;
}
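A minimal consumption loop implied by the interface above (a sketch; the driver and the input filename are hypothetical):
#include "SequenceCapture.h"
#include <opencv2/core.hpp>
#include <string>
#include <vector>
int main()
{
	std::vector<std::string> args = { "-f", "video.avi" }; // hypothetical input
	Utilities::SequenceCapture sequence;
	if (!sequence.Open(args))
		return 1;
	while (sequence.IsOpened())
	{
		cv::Mat frame = sequence.GetNextFrame();
		if (frame.empty())
			break; // an empty image signals a read failure or end of input
		// ... process frame; GetGrayFrame() returns the 8-bit grayscale copy
	}
	return 0;
}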

View file

@ -0,0 +1,191 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "VisualizationUtils.h"
#include "RotationHelpers.h"
// For FHOG visualisation
#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>
// For drawing on images
#include <opencv2/imgproc.hpp>
namespace Utilities
{
FpsTracker::FpsTracker()
{
// Keep two seconds of history
history_length = 2;
}
void FpsTracker::AddFrame()
{
double current_time = cv::getTickCount() / cv::getTickFrequency();
frame_times.push(current_time);
DiscardOldFrames();
}
double FpsTracker::GetFPS()
{
DiscardOldFrames();
if (frame_times.size() == 0)
return 0;
double current_time = cv::getTickCount() / cv::getTickFrequency();
return ((double)frame_times.size()) / (current_time - frame_times.front());
}
void FpsTracker::DiscardOldFrames()
{
double current_time = cv::getTickCount() / cv::getTickFrequency();
// Remove old history
while (frame_times.size() > 0 && (current_time - frame_times.front()) > history_length)
frame_times.pop();
}
void DrawBox(cv::Mat image, cv::Vec6d pose, cv::Scalar color, int thickness, float fx, float fy, float cx, float cy)
{
auto edge_lines = CalculateBox(pose, fx, fy, cx, cy);
DrawBox(edge_lines, image, color, thickness);
}
std::vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy)
{
double boxVerts[] = { -1, 1, -1,
1, 1, -1,
1, 1, 1,
-1, 1, 1,
1, -1, 1,
1, -1, -1,
-1, -1, -1,
-1, -1, 1 };
std::vector<std::pair<int, int>> edges;
edges.push_back(std::pair<int, int>(0, 1));
edges.push_back(std::pair<int, int>(1, 2));
edges.push_back(std::pair<int, int>(2, 3));
edges.push_back(std::pair<int, int>(0, 3));
edges.push_back(std::pair<int, int>(2, 4));
edges.push_back(std::pair<int, int>(1, 5));
edges.push_back(std::pair<int, int>(0, 6));
edges.push_back(std::pair<int, int>(3, 7));
edges.push_back(std::pair<int, int>(6, 5));
edges.push_back(std::pair<int, int>(5, 4));
edges.push_back(std::pair<int, int>(4, 7));
edges.push_back(std::pair<int, int>(7, 6));
// The size of the head is roughly 200mm x 200mm x 200mm
cv::Mat_<double> box = cv::Mat(8, 3, CV_64F, boxVerts).clone() * 100;
cv::Matx33d rot = Euler2RotationMatrix(cv::Vec3d(pose[3], pose[4], pose[5]));
cv::Mat_<double> rotBox;
// Rotate the box
rotBox = cv::Mat(rot) * box.t();
rotBox = rotBox.t();
// Move the bounding box to head position
rotBox.col(0) = rotBox.col(0) + pose[0];
rotBox.col(1) = rotBox.col(1) + pose[1];
rotBox.col(2) = rotBox.col(2) + pose[2];
// Project the box edges into the image
cv::Mat_<double> rotBoxProj;
Project(rotBoxProj, rotBox, fx, fy, cx, cy);
std::vector<std::pair<cv::Point2d, cv::Point2d>> lines;
for (size_t i = 0; i < edges.size(); ++i)
{
cv::Mat_<double> begin;
cv::Mat_<double> end;
rotBoxProj.row(edges[i].first).copyTo(begin);
rotBoxProj.row(edges[i].second).copyTo(end);
cv::Point2d p1(begin.at<double>(0), begin.at<double>(1));
cv::Point2d p2(end.at<double>(0), end.at<double>(1));
lines.push_back(std::pair<cv::Point2d, cv::Point2d>(p1, p2));
}
return lines;
}
void DrawBox(const std::vector<std::pair<cv::Point2d, cv::Point2d>>& lines, cv::Mat image, cv::Scalar color, int thickness)
{
cv::Rect image_rect(0, 0, image.cols, image.rows);
for (size_t i = 0; i < lines.size(); ++i)
{
cv::Point2d p1 = lines.at(i).first;
cv::Point2d p2 = lines.at(i).second;
// Only draw the line if one of the points is inside the image
if (p1.inside(image_rect) || p2.inside(image_rect))
{
cv::line(image, p1, p2, color, thickness, CV_AA);
}
}
}
void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
{
// First convert to dlib format
dlib::array2d<dlib::matrix<float, 31, 1> > hog(num_rows, num_cols);
cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
for (int y = 0; y < num_cols; ++y)
{
for (int x = 0; x < num_rows; ++x)
{
for (unsigned int o = 0; o < 31; ++o)
{
hog[y][x](o) = *descriptor_it++;
}
}
}
// Draw the FHOG to OpenCV format
auto fhog_vis = dlib::draw_fhog(hog);
visualisation = dlib::toMat(fhog_vis).clone();
}
}
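A rough usage sketch for FpsTracker (hypothetical driver, not from the repository):
#include "VisualizationUtils.h"
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
int main()
{
	Utilities::FpsTracker tracker; // keeps a 2-second sliding window (see above)
	cv::VideoCapture cap(0);
	cv::Mat frame;
	while (cap.read(frame))
	{
		tracker.AddFrame();                    // record this frame's timestamp
		std::cout << tracker.GetFPS() << "\n"; // frames in window / elapsed span
		cv::imshow("frame", frame);
		if (cv::waitKey(1) >= 0) break;
	}
	return 0;
}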

View file

@ -0,0 +1,294 @@
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Tadas Baltrusaitis, all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
// not limited to academic journal and conference publications, technical
// reports and manuals, must cite at least one of the following works:
//
// OpenFace: an open source facial behavior analysis toolkit
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
// in IEEE Winter Conference on Applications of Computer Vision, 2016
//
// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
// in IEEE International Conference on Computer Vision (ICCV), 2015
//
// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
// in Facial Expression Recognition and Analysis Challenge,
// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
// Constrained Local Neural Fields for robust facial landmark detection in the wild.
// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////
#include "Visualizer.h"
#include "VisualizationUtils.h"
#include "RotationHelpers.h"
#include "ImageManipulationHelpers.h"
// For drawing on images
#include <opencv2/imgproc.hpp>
using namespace Utilities;
// For subpixel accuracy drawing
const int draw_shiftbits = 4;
const int draw_multiplier = 1 << 4;
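// Point coordinates are pre-scaled by draw_multiplier (16) and passed with shift = draw_shiftbits (4), giving the OpenCV draw calls 1/16-pixel accuracy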
Visualizer::Visualizer(std::vector<std::string> arguments)
{
// By default not visualizing anything
this->vis_track = false;
this->vis_hog = false;
this->vis_align = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-verbose") == 0)
{
vis_track = true;
vis_align = true;
vis_hog = true;
}
else if (arguments[i].compare("-vis-align") == 0)
{
vis_align = true;
}
else if (arguments[i].compare("-vis-hog") == 0)
{
vis_hog = true;
}
else if (arguments[i].compare("-vis-track") == 0)
{
vis_track = true;
}
}
}
Visualizer::Visualizer(bool vis_track, bool vis_hog, bool vis_align)
{
this->vis_track = vis_track;
this->vis_hog = vis_hog;
this->vis_align = vis_align;
}
// Setting the image on which to draw
void Visualizer::SetImage(const cv::Mat& canvas, float fx, float fy, float cx, float cy)
{
// Keep a local copy of the canvas to draw on
captured_image = canvas.clone();
this->fx = fx;
this->fy = fy;
this->cx = cx;
this->cy = cy;
// Clearing other images
hog_image = cv::Mat();
aligned_face_image = cv::Mat();
}
void Visualizer::SetObservationFaceAlign(const cv::Mat& aligned_face)
{
if(this->aligned_face_image.empty())
{
this->aligned_face_image = aligned_face;
}
else
{
cv::vconcat(this->aligned_face_image, aligned_face, this->aligned_face_image);
}
}
void Visualizer::SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int num_cols, int num_rows)
{
if(vis_hog)
{
if (this->hog_image.empty())
{
Visualise_FHOG(hog_descriptor, num_rows, num_cols, this->hog_image);
}
else
{
cv::Mat tmp_hog;
Visualise_FHOG(hog_descriptor, num_rows, num_cols, tmp_hog);
cv::vconcat(this->hog_image, tmp_hog, this->hog_image);
}
}
}
void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities)
{
if(confidence > visualisation_boundary)
{
// Draw 2D landmarks on the image
int n = landmarks_2D.rows / 2;
// Drawing feature points
for (int i = 0; i < n; ++i)
{
if (visibilities.empty() || visibilities.at<int>(i))
{
cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
// A rough heuristic for drawn point size
int thickness = (int)std::ceil(3.0* ((double)captured_image.cols) / 640.0);
int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);
cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
}
}
}
}
void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
{
// Only draw if the reliability is reasonable, the value is slightly ad-hoc
if (confidence > visualisation_boundary)
{
double vis_certainty = confidence;
if (vis_certainty > 1)
vis_certainty = 1;
// Scale from 0 to 1, so that the colour can indicate how confident we are in the tracking
vis_certainty = (vis_certainty - visualisation_boundary) / (1 - visualisation_boundary);
// A rough heuristic for box around the face width
int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
// Draw it in reddish if uncertain, blueish if certain
DrawBox(captured_image, pose, cv::Scalar(vis_certainty*255.0, 0, (1 - vis_certainty) * 255), thickness, fx, fy, cx, cy);
}
}
// Eye gaze information drawing: first the eye landmarks, then the gaze vectors
void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence)
{
if(confidence > visualisation_boundary)
{
if (eye_landmarks2d.size() > 0)
{
// First draw the eye region landmarks
for (size_t i = 0; i < eye_landmarks2d.size(); ++i)
{
cv::Point featurePoint(cvRound(eye_landmarks2d[i].x * (double)draw_multiplier), cvRound(eye_landmarks2d[i].y * (double)draw_multiplier));
// A rough heuristic for drawn point size
int thickness = 1;
int thickness_2 = 1;
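// The 28 landmarks of each eye form three closed contours (indices 0-7, 8-19, 20-27);
// the remapping below wraps next_point so that each contour closes on itself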
size_t next_point = i + 1;
if (i == 7)
next_point = 0;
if (i == 19)
next_point = 8;
if (i == 27)
next_point = 20;
if (i == 7 + 28)
next_point = 0 + 28;
if (i == 19 + 28)
next_point = 8 + 28;
if (i == 27 + 28)
next_point = 20 + 28;
cv::Point nextFeaturePoint(cvRound(eye_landmarks2d[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks2d[next_point].y * (double)draw_multiplier));
if ((i < 28 && (i < 8 || i > 19)) || (i >= 28 && (i < 8 + 28 || i > 19 + 28)))
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
else
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
}
// Now draw the gaze lines themselves
cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1); // the bottom-right entry of an intrinsic matrix is 1
// Grabbing the pupil location, to draw eye gaze need to know where the pupil is
cv::Point3d pupil_left(0, 0, 0);
cv::Point3d pupil_right(0, 0, 0);
for (size_t i = 0; i < 8; ++i)
{
pupil_left = pupil_left + eye_landmarks3d[i];
pupil_right = pupil_right + eye_landmarks3d[i + eye_landmarks3d.size()/2];
}
pupil_left = pupil_left / 8;
pupil_right = pupil_right / 8;
std::vector<cv::Point3d> points_left;
points_left.push_back(cv::Point3d(pupil_left));
points_left.push_back(cv::Point3d(pupil_left + cv::Point3d(gaze_direction0)*50.0));
std::vector<cv::Point3d> points_right;
points_right.push_back(cv::Point3d(pupil_right));
points_right.push_back(cv::Point3d(pupil_right + cv::Point3d(gaze_direction1)*50.0));
cv::Mat_<double> proj_points;
cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
Project(proj_points, mesh_0, fx, fy, cx, cy);
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
Project(proj_points, mesh_1, fx, fy, cx, cy);
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
}
}
}
void Visualizer::SetFps(double fps)
{
// Write out the framerate on the image before displaying it
char fpsC[255];
std::sprintf(fpsC, "%d", (int)fps);
std::string fpsSt("FPS:");
fpsSt += fpsC;
cv::putText(captured_image, fpsSt, cv::Point(10, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255, 0, 0), 1, CV_AA);
}
char Visualizer::ShowObservation()
{
if (vis_track)
{
cv::namedWindow("tracking_result", 1);
cv::imshow("tracking_result", captured_image);
}
if (vis_align)
{
cv::imshow("sim_warp", aligned_face_image);
}
if (vis_hog)
{
cv::imshow("hog", hog_image);
}
return cv::waitKey(1);
}
cv::Mat Visualizer::GetVisImage()
{
return captured_image;
}
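The call order implied by the methods above, sketched as a hypothetical helper:
#include "Visualizer.h"
#include <opencv2/core.hpp>
// Tracking-only display of one frame (ShowTracking is a hypothetical name)
void ShowTracking(const cv::Mat& frame, const cv::Mat_<double>& landmarks_2D,
	double confidence, float fx, float fy, float cx, float cy)
{
	Utilities::Visualizer vis(true /*track*/, false /*hog*/, false /*align*/);
	vis.SetImage(frame, fx, fy, cx, cy);
	// An empty visibility matrix means every landmark is drawn (see the check above)
	vis.SetObservationLandmarks(landmarks_2D, confidence, true, cv::Mat_<int>());
	vis.ShowObservation(); // imshow + waitKey(1)
}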

View file

@ -1,4 +1,4 @@
-function [ labels, valid_ids, vid_ids ] = extract_SEMAINE_labels( SEMAINE_dir, recs, aus )
+function [ labels, valid_ids, vid_ids, vid_names ] = extract_SEMAINE_labels( SEMAINE_dir, recs, aus )
 %EXTRACT_SEMAINE_LABELS Summary of this function goes here
 % Detailed explanation goes here
@ -16,6 +16,7 @@ function [ labels, valid_ids, vid_ids ] = extract_SEMAINE_labels( SEMAINE_dir,
 labels = cell(numel(recs), 1);
 valid_ids = cell(numel(recs), 1);
+vid_names = cell(numel(recs), 1);
 vid_ids = zeros(numel(recs), 2);
 for i=1:numel(recs)
@ -24,6 +25,9 @@ function [ labels, valid_ids, vid_ids ] = extract_SEMAINE_labels( SEMAINE_dir,
 vid_ids(i,:) = dlmread([SEMAINE_dir, '/', recs{i}, '.txt'], ' ');
+vid_names_c = dir([SEMAINE_dir, '/', recs{i}, '/*.avi']);
+[~, vid_names{i},~] = fileparts(vid_names_c.name);
 xml_file = [SEMAINE_dir, recs{i}, '\' file.name];
 [root_xml, name_xml, ~] = fileparts(xml_file);

View file

@ -16,6 +16,9 @@ elseif(exist('I:\datasets\FERA_2015\BP4D\AUCoding/', 'file'))
 elseif(exist('D:/fera_2015/bp4d/AUCoding/', 'file'))
 BP4D_dir = 'D:/fera_2015/bp4d/AUCoding/';
 BP4D_dir_int = 'D:/fera_2015/bp4d/AU Intensity Codes3.0/';
+elseif(exist('/media/tadas/2EBEA130BEA0F20F/datasets/FERA_2015/BP4D', 'file'))
+BP4D_dir = '/media/tadas/2EBEA130BEA0F20F/datasets/FERA_2015/BP4D/AUCoding/';
+BP4D_dir_int = '/media/tadas/2EBEA130BEA0F20F/datasets/FERA_2015/BP4D/AU Intensity Codes3.0/';
 else
 fprintf('BP4D location not found (or not defined)\n');
 end

View file

@ -2,6 +2,8 @@ if(exist('D:/Datasets/Bosphorus/', 'file'))
 Bosphorus_dir = 'D:\Datasets\Bosphorus/';
 elseif(exist('E:/Datasets/Bosphorus/', 'file'))
 Bosphorus_dir = 'E:\Datasets\Bosphorus/';
+elseif(exist('/media/tadas/2EBEA130BEA0F20F/datasets/Bosphorus/', 'file'))
+Bosphorus_dir = '/media/tadas/2EBEA130BEA0F20F/datasets/Bosphorus/';
 else
 fprintf('Bosphorus dataset location not found (or not defined)\n');
 end

View file

@ -4,6 +4,8 @@ elseif(exist('/multicomp/datasets/fera2011/', 'file'))
 FERA2011_dir = '/multicomp/datasets/fera2011/au_training/';
 elseif(exist('E:\Datasets\fera/au_training', 'file'))
 FERA2011_dir = 'E:\Datasets\fera/au_training/';
+elseif(exist('/media/tadas/2EBEA130BEA0F20F/datasets/fera/au_training/', 'file'))
+FERA2011_dir = '/media/tadas/2EBEA130BEA0F20F/datasets/fera/au_training/';
 else
 fprintf('FERA2011 location not found (or not defined)\n');
 end

View file

@ -12,6 +12,8 @@ elseif(exist('D:/fera_2015/semaine/SEMAINE-Sessions/', 'file'))
 SEMAINE_dir = 'D:/fera_2015/semaine/SEMAINE-Sessions/';
 elseif(exist('/multicomp/datasets/face_datasets/FERA_2015/Semaine/SEMAINE-Sessions/', 'file'))
 SEMAINE_dir = '/multicomp/datasets/face_datasets/FERA_2015/Semaine/SEMAINE-Sessions/';
+elseif(exist('/media/tadas/2EBEA130BEA0F20F/datasets/FERA_2015/semaine/SEMAINE-Sessions/', 'file'))
+SEMAINE_dir = '/media/tadas/2EBEA130BEA0F20F/datasets/FERA_2015/semaine/SEMAINE-Sessions/';
 else
 fprintf('SEMAINE location not found (or not defined)\n');
 end

View file

@ -1,5 +1,5 @@
 AU1 class, Precision - 0.555, Recall - 0.533, F1 - 0.544
-AU2 class, Precision - 0.403, Recall - 0.440, F1 - 0.420
+AU2 class, Precision - 0.403, Recall - 0.439, F1 - 0.420
 AU4 class, Precision - 0.491, Recall - 0.513, F1 - 0.502
 AU6 class, Precision - 0.741, Recall - 0.776, F1 - 0.758
 AU7 class, Precision - 0.764, Recall - 0.727, F1 - 0.745

View file

@ -1,17 +1,17 @@
-AU1 class, Precision - 0.393, Recall - 0.727, F1 - 0.510
+AU1 class, Precision - 0.434, Recall - 0.673, F1 - 0.528
-AU2 class, Precision - 0.266, Recall - 0.850, F1 - 0.405
+AU2 class, Precision - 0.298, Recall - 0.818, F1 - 0.437
-AU4 class, Precision - 0.511, Recall - 0.874, F1 - 0.645
+AU4 class, Precision - 0.564, Recall - 0.861, F1 - 0.681
-AU5 class, Precision - 0.294, Recall - 0.968, F1 - 0.451
+AU5 class, Precision - 0.387, Recall - 0.915, F1 - 0.544
-AU6 class, Precision - 0.346, Recall - 0.833, F1 - 0.489
+AU6 class, Precision - 0.355, Recall - 0.811, F1 - 0.494
-AU7 class, Precision - 0.793, Recall - 0.750, F1 - 0.771
+AU7 class, Precision - 0.778, Recall - 0.783, F1 - 0.780
-AU9 class, Precision - 0.316, Recall - 0.960, F1 - 0.475
+AU9 class, Precision - 0.370, Recall - 0.953, F1 - 0.533
-AU10 class, Precision - 0.349, Recall - 0.773, F1 - 0.481
+AU10 class, Precision - 0.340, Recall - 0.788, F1 - 0.475
-AU12 class, Precision - 0.674, Recall - 0.864, F1 - 0.757
+AU12 class, Precision - 0.690, Recall - 0.842, F1 - 0.758
-AU14 class, Precision - 0.183, Recall - 0.863, F1 - 0.302
+AU14 class, Precision - 0.185, Recall - 0.881, F1 - 0.305
-AU15 class, Precision - 0.183, Recall - 0.851, F1 - 0.302
+AU15 class, Precision - 0.171, Recall - 0.851, F1 - 0.285
-AU17 class, Precision - 0.293, Recall - 0.889, F1 - 0.441
+AU17 class, Precision - 0.309, Recall - 0.861, F1 - 0.455
-AU20 class, Precision - 0.114, Recall - 0.930, F1 - 0.203
+AU20 class, Precision - 0.130, Recall - 0.921, F1 - 0.228
-AU23 class, Precision - 0.107, Recall - 0.889, F1 - 0.191
+AU23 class, Precision - 0.104, Recall - 0.837, F1 - 0.186
-AU25 class, Precision - 0.860, Recall - 0.873, F1 - 0.866
+AU25 class, Precision - 0.869, Recall - 0.860, F1 - 0.865
-AU26 class, Precision - 0.359, Recall - 0.811, F1 - 0.498
+AU26 class, Precision - 0.368, Recall - 0.809, F1 - 0.506
-AU45 class, Precision - 0.318, Recall - 0.771, F1 - 0.450
+AU45 class, Precision - 0.367, Recall - 0.754, F1 - 0.494

View file

@ -1,17 +1,17 @@
-AU1 intensity, Corr - 0.717, RMS - 0.892, CCC - 0.668
+AU1 intensity, Corr - 0.712, RMS - 0.925, CCC - 0.652
-AU2 intensity, Corr - 0.696, RMS - 0.774, CCC - 0.625
+AU2 intensity, Corr - 0.696, RMS - 0.772, CCC - 0.626
-AU4 intensity, Corr - 0.802, RMS - 0.603, CCC - 0.776
+AU4 intensity, Corr - 0.797, RMS - 0.623, CCC - 0.764
-AU5 intensity, Corr - 0.747, RMS - 0.832, CCC - 0.640
+AU5 intensity, Corr - 0.767, RMS - 0.740, CCC - 0.694
-AU6 intensity, Corr - 0.556, RMS - 0.735, CCC - 0.533
+AU6 intensity, Corr - 0.541, RMS - 0.786, CCC - 0.506
-AU7 intensity, Corr - 0.831, RMS - 0.757, CCC - 0.804
+AU7 intensity, Corr - 0.830, RMS - 0.750, CCC - 0.811
-AU9 intensity, Corr - 0.779, RMS - 0.551, CCC - 0.738
+AU9 intensity, Corr - 0.763, RMS - 0.611, CCC - 0.701
-AU10 intensity, Corr - 0.495, RMS - 0.719, CCC - 0.475
+AU10 intensity, Corr - 0.491, RMS - 0.759, CCC - 0.461
-AU12 intensity, Corr - 0.810, RMS - 0.714, CCC - 0.753
+AU12 intensity, Corr - 0.804, RMS - 0.715, CCC - 0.766
-AU14 intensity, Corr - 0.348, RMS - 0.896, CCC - 0.280
+AU14 intensity, Corr - 0.357, RMS - 0.931, CCC - 0.277
-AU15 intensity, Corr - 0.527, RMS - 0.538, CCC - 0.448
+AU15 intensity, Corr - 0.516, RMS - 0.565, CCC - 0.431
-AU17 intensity, Corr - 0.561, RMS - 0.882, CCC - 0.484
+AU17 intensity, Corr - 0.554, RMS - 0.893, CCC - 0.477
-AU20 intensity, Corr - 0.413, RMS - 0.880, CCC - 0.285
+AU20 intensity, Corr - 0.411, RMS - 0.901, CCC - 0.277
-AU23 intensity, Corr - 0.354, RMS - 0.753, CCC - 0.268
+AU23 intensity, Corr - 0.351, RMS - 0.736, CCC - 0.274
-AU25 intensity, Corr - 0.847, RMS - 0.818, CCC - 0.811
+AU25 intensity, Corr - 0.846, RMS - 0.809, CCC - 0.822
-AU26 intensity, Corr - 0.514, RMS - 0.955, CCC - 0.465
+AU26 intensity, Corr - 0.516, RMS - 0.995, CCC - 0.453
-AU45 intensity, Corr - 0.868, RMS - 0.550, CCC - 0.848
+AU45 intensity, Corr - 0.840, RMS - 0.662, CCC - 0.791

View file

@ -9,4 +9,4 @@
 AU15 results - corr 0.745, rms 0.269, ccc - 0.712
 AU17 results - corr 0.642, rms 0.517, ccc - 0.574
 AU20 results - corr 0.619, rms 0.311, ccc - 0.581
 AU25 results - corr 0.926, rms 0.500, ccc - 0.920
-AU26 results - corr 0.803, rms 0.449, ccc - 0.762
+AU26 results - corr 0.802, rms 0.449, ccc - 0.762

View file

@ -4,12 +4,12 @@ find_BP4D;
BP4D_dir = [BP4D_dir, '../BP4D-training/']; BP4D_dir = [BP4D_dir, '../BP4D-training/'];
out_loc = './out_bp4d/'; out_loc = './out_bp4d/';
if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end
%% %%
executable = '"../../x64/Release/FeatureExtraction.exe"'; if(isunix)
executable = '"../../build/bin/FeatureExtraction"';
else
executable = '"../../x64/Release/FeatureExtraction.exe"';
end
bp4d_dirs = {'F002', 'F004', 'F006', 'F008', 'F010', 'F012', 'F014', 'F016', 'F018', 'F020', 'F022', 'M002', 'M004', 'M006', 'M008', 'M010', 'M012', 'M014', 'M016', 'M018'}; bp4d_dirs = {'F002', 'F004', 'F006', 'F008', 'F010', 'F012', 'F014', 'F016', 'F018', 'F020', 'F022', 'M002', 'M004', 'M006', 'M008', 'M010', 'M012', 'M014', 'M016', 'M018'};
@ -23,7 +23,7 @@ new_bp4d_dirs = {};
for i = 1:numel(bp4d_dirs) for i = 1:numel(bp4d_dirs)
dirs = dir([BP4D_dir, '/', bp4d_dirs{i}, '/T*']); dirs = dir([BP4D_dir, '/', bp4d_dirs{i}, '/T*']);
tmp_dir = [BP4D_dir, '/../tmp/', bp4d_dirs{i}, '/']; tmp_dir = [BP4D_dir, '/../tmp/', bp4d_dirs{i}];
new_bp4d_dirs = cat(1, new_bp4d_dirs, tmp_dir); new_bp4d_dirs = cat(1, new_bp4d_dirs, tmp_dir);
if(~exist(tmp_dir, 'file')) if(~exist(tmp_dir, 'file'))
@ -51,18 +51,15 @@ for i = 1:numel(bp4d_dirs)
end end
%% %%
parfor f1=1:numel(new_bp4d_dirs) for f1=1:numel(new_bp4d_dirs)
command = [executable ' -asvid -no2Dfp -no3Dfp -noMparams -noPose -noGaze ']; command = sprintf('%s -aus -fdir "%s" -out_dir "%s"', executable, new_bp4d_dirs{f1}, out_loc);
[f,~,~] = fileparts(new_bp4d_dirs{f1});
[~,f,~] = fileparts(f);
output_file = [out_loc f '.au.txt'];
command = cat(2, command, [' -fdir "' new_bp4d_dirs{f1} '" -of "' output_file '"']);
if(isunix)
unix(command, '-echo')
else
dos(command); dos(command);
end
end end
%% %%
@ -76,7 +73,7 @@ aus_BP4D = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23];
labels_gt = cat(1, labels_gt{:}); labels_gt = cat(1, labels_gt{:});
%% Identifying which column IDs correspond to which AU %% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, bp4d_dirs{1}, '.au.txt']); tab = readtable([out_loc, bp4d_dirs{1}, '.csv']);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
% As there are both classes and intensities list and evaluate both of them % As there are both classes and intensities list and evaluate both of them
@ -110,10 +107,9 @@ preds_all_class = [];
for i=1:numel(new_bp4d_dirs) for i=1:numel(new_bp4d_dirs)
[f,~,~] = fileparts(new_bp4d_dirs{i}); [~,f,~] = fileparts(new_bp4d_dirs{i});
[~,f,~] = fileparts(f);
fname = [out_loc, f, '.au.txt']; fname = [out_loc, f, '.csv'];
preds = dlmread(fname, ',', 1, 0); preds = dlmread(fname, ',', 1, 0);
@ -157,7 +153,7 @@ valid_ids = cat(1, valid_ids{:});
labels_gt = cat(1, labels_gt{:}); labels_gt = cat(1, labels_gt{:});
%% Identifying which column IDs correspond to which AU %% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, bp4d_dirs{1}, '.au.txt']); tab = readtable([out_loc, bp4d_dirs{1}, '.csv']);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
% As there are both classes and intensities list and evaluate both of them % As there are both classes and intensities list and evaluate both of them
@ -184,10 +180,9 @@ preds_all_int = [];
for i=1:numel(new_bp4d_dirs) for i=1:numel(new_bp4d_dirs)
[f,~,~] = fileparts(new_bp4d_dirs{i}); [~,f,~] = fileparts(new_bp4d_dirs{i});
[~,f,~] = fileparts(f);
fname = [out_loc, f, '.au.txt']; fname = [out_loc, f, '.csv'];
preds = dlmread(fname, ',', 1, 0); preds = dlmread(fname, ',', 1, 0);
% Read all of the intensity AUs % Read all of the intensity AUs
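With the new interface the output csv is named after the -fdir directory, which is what the readtable/dlmread calls above rely on. A minimal sketch of that round trip for a single subject (directory name illustrative; executable as set at the top of the script):

in_dir = [BP4D_dir, '/../tmp/F002'];   % illustrative subject directory
command = sprintf('%s -aus -fdir "%s" -out_dir "%s"', executable, in_dir, out_loc);
if(isunix)
    unix(command, '-echo');
else
    dos(command);
end
[~, name, ~] = fileparts(in_dir);
preds = dlmread([out_loc, name, '.csv'], ',', 1, 0);   % e.g. ./out_bp4d/F002.csv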

View file

@ -7,12 +7,12 @@ addpath('./helpers');
find_Bosphorus; find_Bosphorus;
out_loc = './out_bosph/'; out_loc = './out_bosph/';
if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end
%% %%
executable = '"../../x64/Release/FaceLandmarkImg.exe"'; if(isunix)
executable = '"../../build/bin/FaceLandmarkImg"';
else
executable = '"../../x64/Release/FaceLandmarkImg.exe"';
end
bosph_dirs = dir([Bosphorus_dir, '/BosphorusDB/BosphorusDB/bs*']); bosph_dirs = dir([Bosphorus_dir, '/BosphorusDB/BosphorusDB/bs*']);
@ -22,11 +22,14 @@ parfor f1=1:numel(bosph_dirs)
command = executable; command = executable;
input_dir = [Bosphorus_dir, '/BosphorusDB/BosphorusDB/', bosph_dirs(f1).name]; input_dir = [Bosphorus_dir, '/BosphorusDB/BosphorusDB/', bosph_dirs(f1).name];
command = cat(2, command, [' -fdir "' input_dir '" -ofdir "' out_loc '"']); command = cat(2, command, [' -fdir "' input_dir '" -out_dir "' out_loc '"']);
command = cat(2, command, ' -multi_view 1 -wild -q'); command = cat(2, command, ' -multi_view 1 -wild -aus ');
if(isunix)
unix(command, '-echo')
else
dos(command); dos(command);
end
end end
%% %%
@ -37,64 +40,32 @@ aus_Bosph = [1, 2, 4, 5, 6, 7, 9, 10, 12, 14, 15, 17, 20, 23, 25, 26, 45];
%% Read the predicted values %% Read the predicted values
% First read the first file to get the ids and line numbers % First read the first file to get the column ids
% au occurences tab = readtable([out_loc, filenames{1}, '.csv']);
fid = fopen([out_loc, filenames{1}, '_det_0.pts']); column_names = tab.Properties.VariableNames;
data = fgetl(fid); aus_det_id = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_c'));
aus_det_cell = column_names(aus_det_id);
ind = 0; aus_det = zeros(size(aus_det_cell));
beg_ind = -1; for i=1:numel(aus_det)
end_ind = -1; aus_det(i) = str2num(aus_det_cell{i}(3:4));
aus_det = [];
aus_det_id = [];
%%
while ischar(data)
if(~isempty(findstr(data, 'au occurences:')))
num_occurences = str2num(data(numel('au occurences:')+1:end));
% Skip ahead two lines
data = fgetl(fid);
data = fgetl(fid);
ind = ind + 2;
beg_ind = ind;
end
if(beg_ind ~= -1 && end_ind == -1)
if(~isempty(findstr(data, '}')))
end_ind = ind;
else
d = strsplit(data, ' ');
aus_det = cat(1, aus_det, str2num(d{1}(3:end)));
aus_det_id = cat(1, aus_det_id, ind - beg_ind + 1);
end
end
data = fgetl(fid);
ind = ind + 1;
end end
fclose(fid);
%% %%
labels_pred = zeros(size(labels_gt)); labels_pred = zeros(size(labels_gt));
for i=1:numel(filenames) for i=1:numel(filenames)
% Will need to read the relevant AUs only % Will need to read the relevant AUs only
if(exist([out_loc, filenames{i}, '_det_0.pts'], 'file')) all_params = dlmread([out_loc, filenames{i}, '.csv'], ',', 1, 0);
fid = fopen([out_loc, filenames{i}, '_det_0.pts']);
for k=1:beg_ind
data = fgetl(fid);
end
for k=1:num_occurences % if multiple faces detected just take the first row
data = fgetl(fid); aus_pred = all_params(1, aus_det_id);
for k=1:numel(aus_det)
if(sum(aus_Bosph == aus_det(k))>0) if(sum(aus_Bosph == aus_det(k))>0)
d = strsplit(data, ' '); labels_pred(i, aus_Bosph == aus_det(k)) = aus_pred(k);
labels_pred(i, aus_Bosph == aus_det(k)) = str2num(d{2});
end end
end end
fclose(fid);
end
end end
%% %%
@ -122,63 +93,32 @@ fclose(f);
%% Read the predicted values for intensities %% Read the predicted values for intensities
% First read the first file to get the ids and line numbers % First read the first file to get the column ids
% au occurences tab = readtable([out_loc, filenames{1}, '.csv']);
fid = fopen([out_loc, filenames{1}, '_det_0.pts']); column_names = tab.Properties.VariableNames;
data = fgetl(fid); aus_det_id = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_r'));
aus_det_cell = column_names(aus_det_id);
ind = 0; aus_det = zeros(size(aus_det_cell));
beg_ind = -1; for i=1:numel(aus_det)
end_ind = -1; aus_det(i) = str2num(aus_det_cell{i}(3:4));
aus_det = [];
aus_det_id = [];
while ischar(data)
if(~isempty(findstr(data, 'au intensities:')))
num_occurences = str2num(data(numel('au intensities:')+1:end));
% Skip ahead two lines
data = fgetl(fid);
data = fgetl(fid);
ind = ind + 2;
beg_ind = ind;
end
if(beg_ind ~= -1 && end_ind == -1)
if(~isempty(findstr(data, '}')))
end_ind = ind;
else
d = strsplit(data, ' ');
aus_det = cat(1, aus_det, str2num(d{1}(3:end)));
aus_det_id = cat(1, aus_det_id, ind - beg_ind + 1);
end
end
data = fgetl(fid);
ind = ind + 1;
end end
fclose(fid);
%% %%
labels_pred = zeros(size(labels_gt)); labels_pred = zeros(size(labels_gt));
for i=1:numel(filenames) for i=1:numel(filenames)
% Will need to read the relevant AUs only % Will need to read the relevant AUs only
if(exist([out_loc, filenames{i}, '_det_0.pts'], 'file')) all_params = dlmread([out_loc, filenames{i}, '.csv'], ',', 1, 0);
fid = fopen([out_loc, filenames{i}, '_det_0.pts']);
for k=1:beg_ind
data = fgetl(fid);
end
for k=1:num_occurences % if multiple faces detected just take the first row
data = fgetl(fid); aus_pred = all_params(1, aus_det_id);
for k=1:numel(aus_det)
if(sum(aus_Bosph == aus_det(k))>0) if(sum(aus_Bosph == aus_det(k))>0)
d = strsplit(data, ' '); labels_pred(i, aus_Bosph == aus_det(k)) = aus_pred(k);
labels_pred(i, aus_Bosph == aus_det(k)) = str2num(d{2});
end end
end end
fclose(fid);
end
end end
%% %%
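The cellfun test above keys on the suffix position: headers are assumed to be of the form AU01_c / AU01_r, so '_c' starting at character 5 marks an AU class column. A small sketch of the same parsing, keeping the csv column indices alongside the AU numbers:

suffix = '_c';   % use '_r' for the intensity columns
is_au = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, suffix));
au_cols = find(is_au);                                              % csv column indices
au_nums = cellfun(@(x) str2double(x(3:4)), column_names(au_cols));  % e.g. 'AU12_c' -> 12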

View file

@ -10,17 +10,18 @@ if(exist('D:/Datasets/DISFA/Videos_LeftCamera/', 'file'))
DISFA_dir = 'D:/Datasets/DISFA/Videos_LeftCamera/'; DISFA_dir = 'D:/Datasets/DISFA/Videos_LeftCamera/';
elseif(exist('E:/Datasets/DISFA/Videos_LeftCamera/', 'file')) elseif(exist('E:/Datasets/DISFA/Videos_LeftCamera/', 'file'))
DISFA_dir = 'E:/Datasets/DISFA/Videos_LeftCamera/'; DISFA_dir = 'E:/Datasets/DISFA/Videos_LeftCamera/';
else elseif(exist('/multicomp/datasets/face_datasets/DISFA/Videos_LeftCamera/', 'file'))
DISFA_dir = '/multicomp/datasets/face_datasets/DISFA/Videos_LeftCamera/'; DISFA_dir = '/multicomp/datasets/face_datasets/DISFA/Videos_LeftCamera/';
elseif(exist('/media/tadas/2EBEA130BEA0F20F/datasets/DISFA/', 'file'))
DISFA_dir = '/media/tadas/2EBEA130BEA0F20F/datasets/DISFA/Videos_LeftCamera/';
else
fprintf('Cannot find DISFA location\n');
end end
videos = dir([DISFA_dir, '*.avi']); videos = dir([DISFA_dir, '*.avi']);
output = 'out_DISFA/'; output = 'out_DISFA/';
if(~exist(output, 'file'))
mkdir(output);
end
%% %%
% Do it in parallel for speed (replace the parfor with for if no parallel % Do it in parallel for speed (replace the parfor with for if no parallel
@ -29,11 +30,7 @@ parfor v = 1:numel(videos)
vid_file = [DISFA_dir, videos(v).name]; vid_file = [DISFA_dir, videos(v).name];
[~, name, ~] = fileparts(vid_file); command = sprintf('%s -f "%s" -out_dir "%s" -aus ', executable, vid_file, output);
% where to output tracking results
output_file = [output name '_au.txt'];
command = [executable ' -f "' vid_file '" -of "' output_file '" -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze'];
if(isunix) if(isunix)
unix(command, '-echo'); unix(command, '-echo');
@ -70,30 +67,32 @@ for i=1:numel(label_folders)
label_ids = cat(1, label_ids, repmat(user_id, size(labels,1),1)); label_ids = cat(1, label_ids, repmat(user_id, size(labels,1),1));
end end
preds_files = dir([prediction_dir, '*SN*.txt']); preds_files = dir([prediction_dir, '*SN*.csv']);
tab = readtable([prediction_dir, preds_files(1).name]); tab = readtable([prediction_dir, preds_files(1).name]);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
aus_pred_int = []; aus_pred_int = [];
for c=3:numel(column_names) au_inds_in_file = [];
for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0) if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2)))); aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
au_inds_in_file = cat(1, au_inds_in_file, c);
end end
end end
inds_au = zeros(numel(AUs_disfa),1); inds_au = zeros(numel(AUs_disfa),1);
for ind=1:numel(AUs_disfa) for ind=1:numel(AUs_disfa)
inds_au(ind) = find(aus_pred_int==AUs_disfa(ind)); inds_au(ind) = au_inds_in_file(aus_pred_int==AUs_disfa(ind));
end end
preds_all = zeros(size(labels_all,1), numel(AUs_disfa)); preds_all = zeros(size(labels_all,1), numel(AUs_disfa));
for i=1:numel(preds_files) for i=1:numel(preds_files)
preds = dlmread([prediction_dir, preds_files(i).name], ',', 1, 0); preds = dlmread([prediction_dir, preds_files(i).name], ',', 1, 0);
preds = preds(:,5:5+numel(aus_pred_int)-1); %preds = preds(:,5:5+numel(aus_pred_int)-1);
user_id = str2num(preds_files(i).name(end - 14:end-12)); user_id = str2num(preds_files(i).name(end - 11:end-9));
rel_ids = label_ids == user_id; rel_ids = label_ids == user_id;
preds_all(rel_ids,:) = preds(:,inds_au); preds_all(rel_ids,:) = preds(:,inds_au);
end end
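The subject id above is sliced out of the file name by fixed offsets (end-11:end-9), which assumes names like LeftVideoSN001_comp.csv. A more defensive sketch for the same step inside the loop, pulling the id with a regexp instead:

% assumes the file name contains 'SN' followed by a three-digit subject id
tok = regexp(preds_files(i).name, 'SN(\d{3})', 'tokens', 'once');
user_id = str2double(tok{1});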

View file

@ -5,10 +5,6 @@ find_FERA2011;
out_loc = './out_fera/'; out_loc = './out_fera/';
if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end
%% %%
if(isunix) if(isunix)
executable = '"../../build/bin/FeatureExtraction"'; executable = '"../../build/bin/FeatureExtraction"';
@ -18,20 +14,17 @@ end
fera_dirs = dir([FERA2011_dir, 'train*']); fera_dirs = dir([FERA2011_dir, 'train*']);
parfor f1=1:numel(fera_dirs) for f1=1:numel(fera_dirs)
vid_files = dir([FERA2011_dir, fera_dirs(f1).name, '/*.avi']); vid_files = dir([FERA2011_dir, fera_dirs(f1).name, '/*.avi']);
for v=1:numel(vid_files) for v=1:numel(vid_files)
command = [executable ' -asvid -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze -au_static ']; command = [executable ' -aus -au_static '];
curr_vid = [FERA2011_dir, fera_dirs(f1).name, '/', vid_files(v).name]; curr_vid = [FERA2011_dir, fera_dirs(f1).name, '/', vid_files(v).name];
[~,name,~] = fileparts(curr_vid); command = cat(2, command, [' -f "' curr_vid '" -out_dir "' out_loc '"']);
output_file = [out_loc name '.au.txt'];
command = cat(2, command, [' -f "' curr_vid '" -of "' output_file '"']);
if(isunix) if(isunix)
unix(command, '-echo'); unix(command, '-echo');
@ -50,7 +43,7 @@ for i=1:numel(filenames)
end end
%% Identifying which column IDs correspond to which AU %% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, 'train_001.au.txt']); tab = readtable([out_loc, 'train_001.csv']);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
% As there are both classes and intensities list and evaluate both of them % As there are both classes and intensities list and evaluate both of them
@ -79,7 +72,7 @@ preds_all_class = [];
for i=1:numel(filenames) for i=1:numel(filenames)
fname = dir([out_loc, '/*', filenames{i}, '.au.txt']); fname = dir([out_loc, '/*', filenames{i}, '.csv']);
fname = fname(1).name; fname = fname(1).name;
preds = dlmread([out_loc '/' fname], ',', 1, 0); preds = dlmread([out_loc '/' fname], ',', 1, 0);

View file

@ -5,10 +5,6 @@ find_SEMAINE;
out_loc = './out_SEMAINE/'; out_loc = './out_SEMAINE/';
if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end
if(isunix) if(isunix)
executable = '"../../build/bin/FeatureExtraction"'; executable = '"../../build/bin/FeatureExtraction"';
else else
@ -24,14 +20,9 @@ parfor f1=1:numel(devel_recs)
f1_dir = devel_recs{f1}; f1_dir = devel_recs{f1};
command = [executable, ' -fx 800 -fy 800 -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze '];
curr_vid = [SEMAINE_dir, f1_dir, '/', vid_file.name]; curr_vid = [SEMAINE_dir, f1_dir, '/', vid_file.name];
name = f1_dir; command = sprintf('%s -aus -f "%s" -out_dir "%s" ', executable, curr_vid, out_loc);
output_aus = [out_loc name '.au.txt'];
command = cat(2, command, [' -f "' curr_vid '" -of "' output_aus, '"']);
if(isunix) if(isunix)
unix(command, '-echo'); unix(command, '-echo');
@ -43,26 +34,19 @@ parfor f1=1:numel(devel_recs)
end end
%% Actual model evaluation %% Actual model evaluation
[ labels, valid_ids, vid_ids ] = extract_SEMAINE_labels(SEMAINE_dir, devel_recs, aus_SEMAINE); [ labels, valid_ids, vid_ids, vid_names ] = extract_SEMAINE_labels(SEMAINE_dir, devel_recs, aus_SEMAINE);
labels_gt = cat(1, labels{:}); labels_gt = cat(1, labels{:});
%% Identifying which column IDs correspond to which AU %% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, devel_recs{1}, '.au.txt']); tab = readtable([out_loc, vid_names{1}, '.csv']);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
% As there are both classes and intensities list and evaluate both of them % As there are both classes and intensities list and evaluate both of them
aus_pred_int = [];
aus_pred_class = []; aus_pred_class = [];
inds_int_in_file = [];
inds_class_in_file = []; inds_class_in_file = [];
for c=1:numel(column_names) for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
inds_int_in_file = cat(1, inds_int_in_file, c);
end
if(strfind(column_names{c}, '_c') > 0) if(strfind(column_names{c}, '_c') > 0)
aus_pred_class = cat(1, aus_pred_class, int32(str2num(column_names{c}(3:end-2)))); aus_pred_class = cat(1, aus_pred_class, int32(str2num(column_names{c}(3:end-2))));
inds_class_in_file = cat(1, inds_class_in_file, c); inds_class_in_file = cat(1, inds_class_in_file, c);
@ -70,37 +54,20 @@ for c=1:numel(column_names)
end end
%% %%
inds_au_int = zeros(size(aus_SEMAINE));
inds_au_class = zeros(size(aus_SEMAINE)); inds_au_class = zeros(size(aus_SEMAINE));
for ind=1:numel(aus_SEMAINE)
if(~isempty(find(aus_pred_int==aus_SEMAINE(ind), 1)))
inds_au_int(ind) = find(aus_pred_int==aus_SEMAINE(ind));
end
end
for ind=1:numel(aus_SEMAINE) for ind=1:numel(aus_SEMAINE)
if(~isempty(find(aus_pred_class==aus_SEMAINE(ind), 1))) if(~isempty(find(aus_pred_class==aus_SEMAINE(ind), 1)))
inds_au_class(ind) = find(aus_pred_class==aus_SEMAINE(ind)); inds_au_class(ind) = inds_class_in_file(aus_pred_class==aus_SEMAINE(ind));
end end
end end
preds_all_class = []; preds_all = [];
preds_all_int = []; for i=1:numel(vid_names)
for i=1:numel(devel_recs) fname = [out_loc, vid_names{i}, '.csv'];
fname = [out_loc, devel_recs{i}, '.au.txt'];
preds = dlmread(fname, ',', 1, 0); preds = dlmread(fname, ',', 1, 0);
preds_all = cat(1, preds_all, preds(vid_ids(i,1):vid_ids(i,2) - 1, :));
% Read all of the intensity AUs
preds_int = preds(vid_ids(i,1):vid_ids(i,2) - 1, inds_int_in_file);
% Read all of the classification AUs
preds_class = preds(vid_ids(i,1):vid_ids(i,2) - 1, inds_class_in_file);
preds_all_class = cat(1, preds_all_class, preds_class);
preds_all_int = cat(1, preds_all_int, preds_int);
end end
%% %%
@ -109,10 +76,10 @@ f1s = zeros(1, numel(aus_SEMAINE));
for au = 1:numel(aus_SEMAINE) for au = 1:numel(aus_SEMAINE)
if(inds_au_class(au) ~= 0) if(inds_au_class(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 1); tp = sum(labels_gt(:,au) == 1 & preds_all(:, inds_au_class(au)) == 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 1); fp = sum(labels_gt(:,au) == 0 & preds_all(:, inds_au_class(au)) == 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 0); fn = sum(labels_gt(:,au) == 1 & preds_all(:, inds_au_class(au)) == 0);
tn = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 0); tn = sum(labels_gt(:,au) == 0 & preds_all(:, inds_au_class(au)) == 0);
precision = tp./(tp+fp); precision = tp./(tp+fp);
recall = tp./(tp+fn); recall = tp./(tp+fn);
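The hunk stops after precision and recall; the f1s used by the asserts in the test runner presumably follow from the standard formula, with a guard for AUs that never fire (sketch):

f1 = 2 * precision .* recall ./ (precision + recall);
if(isnan(f1))
    f1 = 0;   % no positive predictions and no positive labels
end
f1s(au) = f1;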

View file

@ -1,43 +1,24 @@
% A demo script that demonstrates how to process an image sequence using
% OpenFace and extract and visualize all of the features
clear clear
% The location executable will depend on the OS
if(isunix) if(isunix)
executable = '"../../build/bin/FeatureExtraction"'; executable = '"../../build/bin/FeatureExtraction"';
else else
executable = '"../../x64/Release/FeatureExtraction.exe"'; executable = '"../../x64/Release/FeatureExtraction.exe"';
end end
output = './output_features_seq/'; % Input file
in_dir = '../../samples/image_sequence';
if(~exist(output, 'file')) % Where to store the output
mkdir(output) output_dir = './processed_features/';
end
in_dirs = {'../../image_sequence'}; % This will take directory after -fdir and output all the features to directory
% some parameters % after -out_dir
verbose = true; command = sprintf('%s -fdir "%s" -out_dir "%s" -verbose', executable, in_dir, output_dir);
command = executable;
% Remove for a speedup
command = cat(2, command, ' -verbose ');
% add all videos to single argument list (so as not to load the model anew
% for every video)
for i=1:numel(in_dirs)
[~, name, ~] = fileparts(in_dirs{i});
% where to output tracking results
outputFile = [output name '.txt'];
outputDir_aligned = [output name];
outputHOG_aligned = [output name '.hog'];
command = cat(2, command, ['-asvid -fdir "' in_dirs{i} '" -of "' outputFile '" ']);
command = cat(2, command, [' -simalign "' outputDir_aligned '" -simsize 200 -hogalign "' outputHOG_aligned '"']);
end
if(isunix) if(isunix)
unix(command); unix(command);
@ -47,15 +28,30 @@ end
%% Demonstrating reading the output files %% Demonstrating reading the output files
% First read in the column names % Most of the features will be in the csv file in the output directory with
tab = readtable(outputFile); % the same name as the input file
[~,name,~] = fileparts(in_dir);
output_csv = sprintf('%s/%s.csv', output_dir, name);
% First read in the column names, to know which columns to read for
% particular features
tab = readtable(output_csv);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
all_params = dlmread(outputFile, ',', 1, 0); % Read all of the data
all_params = dlmread(output_csv, ',', 1, 0);
% This indicates which frames were successfully tracked % This indicates which frames were successfully tracked
valid_frames = logical(all_params(:,4));
time = all_params(valid_frames, 2); % Find which columns contain tracking success and frame number data
valid_ind = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'success'));
frame_ind = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'frame'));
% Extract tracking success data and only read those frames
valid_frames = logical(all_params(:,valid_ind));
% Get the frame numbers
frame_nums = all_params(valid_frames, frame_ind);
%% Finding which header line starts with p_ (basically model params) %% Finding which header line starts with p_ (basically model params)
shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_')); shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_'));
@ -64,9 +60,9 @@ shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_'));
shape_params = all_params(valid_frames, shape_inds); shape_params = all_params(valid_frames, shape_inds);
figure figure
plot(time, shape_params); plot(frame_nums, shape_params);
title('Shape parameters'); title('Shape parameters');
xlabel('Time (s)'); xlabel('Frame');
%% Demonstrate 2D landmarks %% Demonstrate 2D landmarks
landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_')); landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_'));
@ -75,10 +71,20 @@ landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'y_'))
xs = all_params(valid_frames, landmark_inds_x); xs = all_params(valid_frames, landmark_inds_x);
ys = all_params(valid_frames, landmark_inds_y); ys = all_params(valid_frames, landmark_inds_y);
eye_landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_x_'));
eye_landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_y_'));
eye_xs = all_params(valid_frames, eye_landmark_inds_x);
eye_ys = all_params(valid_frames, eye_landmark_inds_y);
figure figure
for j = 1:size(xs,1) for j = 1:size(xs,1)
plot(xs(j,:), -ys(j,:), '.'); plot(xs(j,:), -ys(j,:), '.');
hold on;
plot(eye_xs(j,:), -eye_ys(j,:), '.r');
hold off;
xlim([min(xs(1,:)) * 0.5, max(xs(2,:))*1.4]); xlim([min(xs(1,:)) * 0.5, max(xs(2,:))*1.4]);
ylim([min(-ys(1,:)) * 1.4, max(-ys(2,:))*0.5]); ylim([min(-ys(1,:)) * 1.4, max(-ys(2,:))*0.5]);
xlabel('x (px)'); xlabel('x (px)');
@ -96,9 +102,20 @@ xs = all_params(valid_frames, landmark_inds_x);
ys = all_params(valid_frames, landmark_inds_y); ys = all_params(valid_frames, landmark_inds_y);
zs = all_params(valid_frames, landmark_inds_z); zs = all_params(valid_frames, landmark_inds_z);
eye_landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_X_'));
eye_landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_Y_'));
eye_landmark_inds_z = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_Z_'));
eye_xs = all_params(valid_frames, eye_landmark_inds_x);
eye_ys = all_params(valid_frames, eye_landmark_inds_y);
eye_zs = all_params(valid_frames, eye_landmark_inds_z);
figure figure
for j = 1:size(xs,1) for j = 1:size(xs,1)
plot3(xs(j,:), ys(j,:), zs(j,:), '.');axis equal; plot3(xs(j,:), ys(j,:), zs(j,:), '.');axis equal;
hold on;
plot3(eye_xs(j,:), eye_ys(j,:), eye_zs(j,:), '.r');
hold off;
xlabel('X (mm)'); xlabel('X (mm)');
ylabel('Y (mm)'); ylabel('Y (mm)');
zlabel('Z (mm)'); zlabel('Z (mm)');
@ -110,7 +127,7 @@ au_reg_inds = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_r'));
aus = all_params(valid_frames, au_reg_inds); aus = all_params(valid_frames, au_reg_inds);
figure figure
plot(aus); plot(frame_nums, aus);
title('Facial Action Units (intensity)'); title('Facial Action Units (intensity)');
xlabel('Time (s)'); xlabel('Frame number');
ylabel('Intensity'); ylabel('Intensity');
@ -120,7 +137,7 @@ au_class_inds = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_c'));
aus = all_params(valid_frames, au_class_inds); aus = all_params(valid_frames, au_class_inds);
figure figure
plot(aus); plot(frame_nums, aus);
title('Facial Action Units (presence)'); title('Facial Action Units (presence)');
xlabel('Time (s)'); xlabel('Frame number');
ylim([0,2]); ylim([0,2]);
@ -129,18 +146,34 @@ pose_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'pose_'));
pose = all_params(valid_frames, pose_inds); pose = all_params(valid_frames, pose_inds);
figure figure
plot(pose); plot(frame_nums, pose);
title('Pose (rotation and translation)'); title('Pose (rotation and translation)');
xlabel('Time (s)'); xlabel('Frame number');
%% Demo gaze
gaze_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'gaze_angle'));
% Read gaze (x,y,z) for one eye and (x,y,z) for another
gaze = all_params(valid_frames, gaze_inds);
plot(frame_nums, gaze(:,1), 'DisplayName', 'Left - right');
hold on;
plot(frame_nums, gaze(:,2), 'DisplayName', 'Up - down');
xlabel('Frame number') % x-axis label
ylabel('Angle radians') % y-axis label
legend('show');
hold off;
%% Output HOG files %% Output HOG files
[hog_data, valid_inds] = Read_HOG_file(outputHOG_aligned); output_hog_file = sprintf('%s/%s.hog', output_dir, name);
[hog_data, valid_inds] = Read_HOG_file(output_hog_file);
%% Output aligned images %% Output aligned images
img_files = dir([outputDir_aligned, '/*.png']); output_aligned_dir = sprintf('%s/%s_aligned/', output_dir, name);
img_files = dir([output_aligned_dir, '/*.bmp']);
imgs = cell(numel(img_files), 1); imgs = cell(numel(img_files), 1);
for i=1:numel(img_files) for i=1:numel(img_files)
imgs{i} = imread([ outputDir_aligned, '/', img_files(i).name]); imgs{i} = imread([ output_aligned_dir, '/', img_files(i).name]);
imshow(imgs{i}) imshow(imgs{i})
drawnow drawnow
end end
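The same cellfun/strfind idiom selects every feature family in this demo; a one-line helper (sketch) keeps the prefix matching in one place:

cols_with_prefix = @(names, prefix) cellfun(@(x) ~isempty(x) && x==1, strfind(names, prefix));
% e.g. the pose and 2D landmark masks used above:
pose_inds = cols_with_prefix(column_names, 'pose_');
landmark_inds_x = cols_with_prefix(column_names, 'x_');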

View file

@ -1,48 +1,24 @@
% A demo script that demonstrates how to process a single video file using
% OpenFace and extract and visualize all of the features
clear clear
% The location executable will depend on the OS
if(isunix) if(isunix)
executable = '"../../build/bin/FeatureExtraction"'; executable = '"../../build/bin/FeatureExtraction"';
else else
executable = '"../../x64/Release/FeatureExtraction.exe"'; executable = '"../../x64/Release/FeatureExtraction.exe"';
end end
output = './output_features_vid/'; % Input file
in_file = '../../samples/default.wmv';
if(~exist(output, 'file')) % Where to store the output
mkdir(output) output_dir = './processed_features/';
end
in_files = dir('../../samples/default.wmv'); % This will take file after -f and output all the features to directory
% some parameters % after -out_dir
verbose = true; command = sprintf('%s -f "%s" -out_dir "%s" -verbose', executable, in_file, output_dir);
command = executable;
% Remove for a speedup
command = cat(2, command, ' -verbose ');
% add all videos to single argument list (so as not to load the model anew
% for every video)
for i=1:numel(in_files)
inputFile = ['../../samples/', in_files(i).name];
[~, name, ~] = fileparts(inputFile);
% where to output tracking results
outputFile = [output name '.csv'];
if(~exist([output name], 'file'))
mkdir([output name]);
end
outputDir_aligned = [output name];
outputHOG_aligned = [output name '.hog'];
command = cat(2, command, [' -f "' inputFile '" -of "' outputFile '"']);
command = cat(2, command, [' -au_static -simsize 224 -simalign "' outputDir_aligned '" -hogalign "' outputHOG_aligned '"' ]);
end
if(isunix) if(isunix)
unix(command); unix(command);
@ -52,15 +28,30 @@ end
%% Demonstrating reading the output files %% Demonstrating reading the output files
% First read in the column names % Most of the features will be in the csv file in the output directory with
tab = readtable(outputFile); % the same name as the input file
[~,name,~] = fileparts(in_file);
output_csv = sprintf('%s/%s.csv', output_dir, name);
% First read in the column names, to know which columns to read for
% particular features
tab = readtable(output_csv);
column_names = tab.Properties.VariableNames; column_names = tab.Properties.VariableNames;
all_params = dlmread(outputFile, ',', 1, 0); % Read all of the data
all_params = dlmread(output_csv, ',', 1, 0);
% This indicates which frames were successfully tracked % This indicates which frames were successfully tracked
valid_frames = logical(all_params(:,4));
time = all_params(valid_frames, 2); % Find which columns contain tracking success and timestamp data
valid_ind = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'success'));
time_stamp_ind = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'timestamp'));
% Extract tracking success data and only read those frames
valid_frames = logical(all_params(:,valid_ind));
% Get the timestamp data
time_stamps = all_params(valid_frames, time_stamp_ind);
%% Finding which header line starts with p_ (basically model params) %% Finding which header line starts with p_ (basically model params)
shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_')); shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_'));
@ -69,7 +60,7 @@ shape_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'p_'));
shape_params = all_params(valid_frames, shape_inds); shape_params = all_params(valid_frames, shape_inds);
figure figure
plot(time, shape_params); plot(time_stamps, shape_params);
title('Shape parameters'); title('Shape parameters');
xlabel('Time (s)'); xlabel('Time (s)');
@ -77,21 +68,21 @@ xlabel('Time (s)');
landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_')); landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_'));
landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'y_')); landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'y_'));
landmark_inds_x_eye = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_x_'));
landmark_inds_y_eye = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_y_'));
xs = all_params(valid_frames, landmark_inds_x); xs = all_params(valid_frames, landmark_inds_x);
ys = all_params(valid_frames, landmark_inds_y); ys = all_params(valid_frames, landmark_inds_y);
xs_eye = all_params(valid_frames, landmark_inds_x_eye); eye_landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_x_'));
ys_eye = all_params(valid_frames, landmark_inds_y_eye); eye_landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_y_'));
eye_xs = all_params(valid_frames, eye_landmark_inds_x);
eye_ys = all_params(valid_frames, eye_landmark_inds_y);
figure figure
for j = 1:size(xs,1) for j = 1:size(xs,1)
plot(xs(j,:), -ys(j,:), '.'); plot(xs(j,:), -ys(j,:), '.');
hold on; hold on;
plot(xs_eye(j,:), -ys_eye(j,:), '.'); plot(eye_xs(j,:), -eye_ys(j,:), '.r');
hold off; hold off;
xlim([min(xs(1,:)) * 0.5, max(xs(2,:))*1.4]); xlim([min(xs(1,:)) * 0.5, max(xs(2,:))*1.4]);
@ -111,9 +102,20 @@ xs = all_params(valid_frames, landmark_inds_x);
ys = all_params(valid_frames, landmark_inds_y); ys = all_params(valid_frames, landmark_inds_y);
zs = all_params(valid_frames, landmark_inds_z); zs = all_params(valid_frames, landmark_inds_z);
eye_landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_X_'));
eye_landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_Y_'));
eye_landmark_inds_z = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_Z_'));
eye_xs = all_params(valid_frames, eye_landmark_inds_x);
eye_ys = all_params(valid_frames, eye_landmark_inds_y);
eye_zs = all_params(valid_frames, eye_landmark_inds_z);
figure figure
for j = 1:size(xs,1) for j = 1:size(xs,1)
plot3(xs(j,:), ys(j,:), zs(j,:), '.');axis equal; plot3(xs(j,:), ys(j,:), zs(j,:), '.');axis equal;
hold on;
plot3(eye_xs(j,:), eye_ys(j,:), eye_zs(j,:), '.r');
hold off;
xlabel('X (mm)'); xlabel('X (mm)');
ylabel('Y (mm)'); ylabel('Y (mm)');
zlabel('Z (mm)'); zlabel('Z (mm)');
@ -125,7 +127,7 @@ au_reg_inds = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_r'));
aus = all_params(valid_frames, au_reg_inds); aus = all_params(valid_frames, au_reg_inds);
figure figure
plot(time, aus); plot(time_stamps, aus);
title('Facial Action Units (intensity)'); title('Facial Action Units (intensity)');
xlabel('Time (s)'); xlabel('Time (s)');
ylabel('Intensity'); ylabel('Intensity');
@ -135,7 +137,7 @@ au_class_inds = cellfun(@(x) ~isempty(x) && x==5, strfind(column_names, '_c'));
aus = all_params(valid_frames, au_class_inds); aus = all_params(valid_frames, au_class_inds);
figure figure
plot(time, aus); plot(time_stamps, aus);
title('Facial Action Units (presence)'); title('Facial Action Units (presence)');
xlabel('Time (s)'); xlabel('Time (s)');
ylim([0,2]); ylim([0,2]);
@ -144,7 +146,7 @@ pose_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'pose_'));
pose = all_params(valid_frames, pose_inds); pose = all_params(valid_frames, pose_inds);
figure figure
plot(pose); plot(time_stamps, pose);
title('Pose (rotation and translation)'); title('Pose (rotation and translation)');
xlabel('Time (s)'); xlabel('Time (s)');
@ -154,22 +156,24 @@ gaze_inds = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'gaze_angle'
% Read gaze (x,y,z) for one eye and (x,y,z) for another % Read gaze (x,y,z) for one eye and (x,y,z) for another
gaze = all_params(valid_frames, gaze_inds); gaze = all_params(valid_frames, gaze_inds);
plot(time, gaze(:,1), 'DisplayName', 'Left - right'); plot(time_stamps, gaze(:,1), 'DisplayName', 'Left - right');
hold on; hold on;
plot(time, gaze(:,2), 'DisplayName', 'Up - down'); plot(time_stamps, gaze(:,2), 'DisplayName', 'Up - down');
xlabel('Time (s)') % x-axis label xlabel('Time (s)') % x-axis label
ylabel('Angle radians') % y-axis label ylabel('Angle radians') % y-axis label
legend('show'); legend('show');
hold off; hold off;
%% Output HOG files %% Output HOG files
[hog_data, valid_inds] = Read_HOG_file(outputHOG_aligned); output_hog_file = sprintf('%s/%s.hog', output_dir, name);
[hog_data, valid_inds] = Read_HOG_file(output_hog_file);
%% Output aligned images %% Output aligned images
img_files = dir([outputDir_aligned, '/*.bmp']); output_aligned_dir = sprintf('%s/%s_aligned/', output_dir, name);
img_files = dir([output_aligned_dir, '/*.bmp']);
imgs = cell(numel(img_files), 1); imgs = cell(numel(img_files), 1);
for i=1:numel(img_files) for i=1:numel(img_files)
imgs{i} = imread([ outputDir_aligned, '/', img_files(i).name]); imgs{i} = imread([ output_aligned_dir, '/', img_files(i).name]);
imshow(imgs{i}) imshow(imgs{i})
drawnow drawnow
end end
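Once the _r mask is built, per-AU summaries are one-liners; a sketch printing the mean intensity of each AU over the tracked frames:

au_names = column_names(au_reg_inds);
au_means = mean(all_params(valid_frames, au_reg_inds), 1);
for k = 1:numel(au_names)
    fprintf('%s: mean intensity %.2f\n', au_names{k}, au_means(k));
end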

View file

@ -6,36 +6,11 @@ else
executable = '"../../x64/Release/FeatureExtraction.exe"'; executable = '"../../x64/Release/FeatureExtraction.exe"';
end end
output = './output_features_vid/'; output = './processed_features/';
if(~exist(output, 'file')) in_file = '../../samples/2015-10-15-15-14.avi';
mkdir(output)
end
in_files = dir('../../samples/2015-10-15-15-14.avi'); command = sprintf('%s -f "%s" -out_dir "%s" -gaze -verbose', executable, in_file, output);
% some parameters
verbose = true;
command = executable;
command = cat(2, command, ' -verbose -no2Dfp -no3Dfp -noMparams -noPose -noAUs ');
% add all videos to single argument list (so as not to load the model anew
% for every video)
for i=1:numel(in_files)
inputFile = ['../../samples/', in_files(i).name];
[~, name, ~] = fileparts(inputFile);
% where to output tracking results
outputFile_gaze = [output name '_gaze.txt'];
if(~exist([output name], 'file'))
mkdir([output name]);
end
command = cat(2, command, ['-fx 700 -fy 700 -f "' inputFile '" -of "' outputFile_gaze '"']);
end
if(isunix) if(isunix)
unix(command); unix(command);
@ -44,41 +19,23 @@ else
end end
%% Demonstrating reading the output files %% Demonstrating reading the output files
filename = [output name]; [~, out_filename,~] = fileparts(in_file);
out_filename = sprintf('%s/%s.csv', output, out_filename);
% Read gaze (x,y,z) for one eye and (x,y,z) for another % First read in the column names
gaze = dlmread([filename, '_gaze.txt'], ',', 1, 0); tab = readtable(out_filename);
column_names = tab.Properties.VariableNames;
% This indicates which frames were succesfully tracked all_params = dlmread(out_filename, ',', 1, 0);
valid_frames = gaze(:,4);
% only picking left, right and up down views for visualisation gaze_angle_ids = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'gaze_angle_'));
% These are gaze angles with respect to head pose gaze = all_params(:,gaze_angle_ids);
gaze_angle = gaze(:,[11,12]);
figure;
plot(smooth(gaze_angle(:,1)), 'DisplayName', 'Left - right');
hold on;
plot(smooth(gaze_angle(:,2)), 'DisplayName', 'Up - down');
xlabel('Frame') % x-axis label
ylabel('Radians') % y-axis label
legend('show');
hold off;
% These are gaze direction vectors
gaze = gaze(:,[5,6,7,8,9,10]);
gaze = (gaze(:,[1,2,3]) + gaze(:,[4,5,6]))/2;
gaze(:,1) = smooth(gaze(:,1));
gaze(:,2) = smooth(gaze(:,2));
gaze(:,3) = smooth(gaze(:,3));
figure;
plot(gaze(:,1), 'DisplayName', 'Left - right'); plot(gaze(:,1), 'DisplayName', 'Left - right');
hold on; hold on;
plot(gaze(:,2), 'DisplayName', 'Up - down'); plot(gaze(:,2), 'DisplayName', 'Up - down');
xlabel('Frame') % x-axis label xlabel('Frame') % x-axis label
ylabel('Gaze vector size') % y-axis label ylabel('Angle in radians') % y-axis label
legend('show'); legend('show');
hold off; hold off;
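The earlier version of this demo smoothed the gaze signal with smooth(), which needs the Curve Fitting Toolbox; a base-MATLAB moving average gives a similar plot (sketch, window length illustrative):

win = 5;                                         % frames
gaze_smooth = conv2(gaze, ones(win,1)/win, 'same');
figure;
plot(gaze_smooth(:,1), 'DisplayName', 'Left - right (smoothed)');
hold on;
plot(gaze_smooth(:,2), 'DisplayName', 'Up - down (smoothed)');
legend('show');
hold off;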

View file

@ -0,0 +1,47 @@
% A demo script that demonstrates how to extract aligned faces from a sequence,
% also shows how to extract different sized aligned faces
clear
% The location executable will depend on the OS
if(isunix)
executable = '"../../build/bin/FeatureExtraction"';
else
executable = '"../../x64/Release/FeatureExtraction.exe"';
end
% Input file
in_file = '../../samples/default.wmv';
% Where to store the output
output_dir = './processed_features/';
img_sizes = [64, 112, 224];
for s=1:numel(img_sizes)
% This will take file after -f and output all the features to directory
% after -out_dir, with name after -of
command = sprintf('%s -f "%s" -out_dir "%s" -simalign -simsize %d -of sample_%d', executable, in_file, output_dir, img_sizes(s), img_sizes(s) );
if(isunix)
unix(command);
else
dos(command);
end
%% Output aligned images
output_aligned_dir = sprintf('%s/sample_%d_aligned/', output_dir, img_sizes(s));
img_files = dir([output_aligned_dir, '/*.bmp']);
imgs = cell(numel(img_files), 1);
assert(numel(imgs) > 0);
for i=1:numel(img_files)
imgs{i} = imread([ output_aligned_dir, '/', img_files(i).name]);
assert(size(imgs{i},1) == img_sizes(s) && size(imgs{i},2) == img_sizes(s));
imshow(imgs{i})
drawnow
end
end

View file

@ -8,38 +8,22 @@ end
in_dir = '../../samples/'; in_dir = '../../samples/';
out_dir = './demo_img/'; out_dir = './demo_img/';
if(~exist(out_dir, 'file')) model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie data (a CLNF model)
mkdir(out_dir);
end
% some parameters % Uncomment the below models if you want to try them
verbose = true; %model = 'model/main_clnf_wild.txt'; % Trained on in-the-wild data only
% Trained on in the wild and multi-pie data (less accurate CLM model) %model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
% model = 'model/main_clm_general.txt'; %model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
% Trained on in-the-wild
%model = 'model/main_clm_wild.txt';
% Trained on in the wild and multi-pie data (more accurate CLNF model) % Load images (-fdir), output images and all the features (-out_dir), use a
model = 'model/main_clnf_general.txt'; % user specified model (-mloc), and visualize everything (-verbose)
% Trained on in-the-wild command = sprintf('%s -fdir "%s" -out_dir "%s" -verbose -mloc "%s" ', executable, in_dir, out_dir, model);
%model = 'model/main_clnf_wild.txt';
command = executable;
command = cat(2, command, [' -fdir "' in_dir '"']);
if(verbose)
command = cat(2, command, [' -ofdir "' out_dir '"']);
command = cat(2, command, [' -oidir "' out_dir '"']);
end
command = cat(2, command, [' -mloc "', model, '"']);
% Demonstrates the multi-hypothesis slow landmark detection (more accurate % Demonstrates the multi-hypothesis slow landmark detection (more accurate
% when dealing with non-frontal faces and less accurate face detections) % when dealing with non-frontal faces and less accurate face detections)
% Comment to skip this functionality % Comment to skip this functionality
command = cat(2, command, ' -wild '); command = cat(2, command, ' -wild -multi_view 1');
if(isunix) if(isunix)
unix(command); unix(command);

View file

@ -1,3 +1,5 @@
% A demo how to run a multi-face face tracker
clear; clear;
if(isunix) if(isunix)
@ -6,44 +8,27 @@ else
executable = '"../../x64/Release/FaceLandmarkVidMulti.exe"'; executable = '"../../x64/Release/FaceLandmarkVidMulti.exe"';
end end
output = './demo_vid/';
if(~exist(output, 'file'))
mkdir(output)
end
in_files = dir('../../samples/multi_face.avi'); in_files = dir('../../samples/multi_face.avi');
% some parameters
verbose = true;
% Trained on in the wild and multi-pie data (less accurate CLM model) model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie data (a CLNF model)
%model = 'model/main_clm_general.txt';
% Trained on in-the-wild
%model = 'model/main_clm_wild.txt';
% Trained on in the wild and multi-pie data (more accurate CLNF model) % Uncomment the below models if you want to try them
model = 'model/main_clnf_general.txt'; %model = 'model/main_clnf_wild.txt'; % Trained on in-the-wild data only
% Trained on in-the-wild
%model = 'model/main_clnf_wild.txt'; %model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
%model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
% Create a command that will run the tracker on a set of videos and display the output
command = sprintf('%s -mloc "%s" ', executable, model);
command = executable;
command = cat(2, command, [' -mloc "', model, '"']);
% add all videos to single argument list (so as not to load the model anew % add all videos to single argument list (so as not to load the model anew
% for every video) % for every video)
for i=1:numel(in_files) for i=1:numel(in_files)
inputFile = ['../../samples/', in_files(i).name]; inputFile = ['../../samples/', in_files(i).name];
[~, name, ~] = fileparts(inputFile);
command = cat(2, command, [' -f "' inputFile '" ']); command = cat(2, command, [' -f "' inputFile '" ']);
if(verbose)
outputVideo = ['"' output name '.avi' '"'];
command = cat(2, command, [' -ov ' outputVideo]);
end
end end
% Call the executable
if(isunix) if(isunix)
unix(command); unix(command);
else else

View file

@ -1,3 +1,5 @@
% A demo how to run a single-face face tracker
clear clear
if(isunix) if(isunix)
@ -6,45 +8,28 @@ else
executable = '"../../x64/Release/FaceLandmarkVid.exe"'; executable = '"../../x64/Release/FaceLandmarkVid.exe"';
end end
output = './demo_vid/';
if(~exist(output, 'file'))
mkdir(output)
end
in_files = dir('../../samples/*.wmv'); in_files = dir('../../samples/*.wmv');
in_files = cat(1, in_files, dir('../../samples/*.avi')); in_files = cat(1, in_files, dir('../../samples/*.avi'));
% some parameters
verbose = true;
% Trained on in the wild and multi-pie data (less accurate SVR/CLM model) model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie data (a CLNF model)
%model = 'model/main_clm_general.txt';
% Trained on in-the-wild
%model = 'model/main_clm_wild.txt';
% Trained on in the wild and multi-pie data (more accurate CLNF model) % Uncomment the below models if you want to try them
model = 'model/main_clnf_general.txt'; %model = 'model/main_clnf_wild.txt'; % Trained on in-the-wild data only
% Trained on in-the-wild
%model = 'model/main_clnf_wild.txt';
command = executable; %model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
command = cat(2, command, [' -mloc "', model, '"']); %model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
% add all videos to single argument list (so as not to load the model anew
% for every video) % Create a command that will run the tracker on a set of videos and display the output
command = sprintf('%s -mloc "%s" ', executable, model);
% add all videos to single argument list by appending -f arguments
% (so as not to load the model anew for every video)
for i=1:numel(in_files) for i=1:numel(in_files)
inputFile = ['../../samples/', in_files(i).name]; inputFile = ['../../samples/', in_files(i).name];
[~, name, ~] = fileparts(inputFile);
command = cat(2, command, [' -f "' inputFile '" ']); command = cat(2, command, [' -f "' inputFile '" ']);
if(verbose)
outputVideo = ['"' output name '.avi' '"'];
command = cat(2, command, [' -ov ' outputVideo]);
end
end end
% Call the executable
if(isunix) if(isunix)
unix(command); unix(command);
else else

View file

@ -0,0 +1,29 @@
% A test script on image sequences, making sure grayscale and 16 bit
% sequences work
clear
% The location executable will depend on the OS
if(isunix)
executable = '"../../build/bin/FeatureExtraction"';
else
executable = '"../../x64/Release/FeatureExtraction.exe"';
end
% Input file
in_dirs = {'../../samples/image_sequence',...
'../../samples/image_sequence_gray', ...
'../../samples/image_sequence_16bit'};
% Where to store the output
output_dir = './processed_features/';
for i=1:numel(in_dirs)
command = sprintf('%s -fdir "%s" -out_dir "%s" -verbose', executable, in_dirs{i}, output_dir);
if(isunix)
unix(command);
else
dos(command);
end
end
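If the grayscale and 16-bit sample sequences are missing, a synthetic one can be generated to exercise the image-loading path (no face will be detected in noise; directory name illustrative):

test_dir = './synthetic_sequence_16bit/';
if(~exist(test_dir, 'dir'))
    mkdir(test_dir);
end
for f = 1:10
    frame = uint16(rand(480, 640) * 65535);   % 16-bit grayscale noise
    imwrite(frame, sprintf('%s/frame_%03d.png', test_dir, f));
end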

View file

@ -26,9 +26,9 @@ else
end end
if(isunix) if(isunix)
command = '"../../build/bin/FaceLandmarkImg"'; executable = '"../../build/bin/FaceLandmarkImg"';
else else
command = '"../../x64/Release/FaceLandmarkImg.exe"'; executable = '"../../x64/Release/FaceLandmarkImg.exe"';
end end
if(any(strcmp(varargin, 'model'))) if(any(strcmp(varargin, 'model')))
@ -44,24 +44,13 @@ else
multi_view = 0; multi_view = 0;
end end
command = cat(2, command, [' -mloc ' model ' ']); command = sprintf('%s -mloc %s -multi_view %s -2Dfp ', executable, model, num2str(multi_view));
command = cat(2, command, [' -multi_view ' num2str(multi_view) ' ']);
tic tic
parfor i=1:numel(dataset_dirs) for i=1:numel(dataset_dirs)
input_loc = ['-fdir "', dataset_dirs{i}, '" ']; command_c = sprintf('%s -fdir "%s" -bboxdir "%s" -out_dir "%s" -wild ',...
command_c = cat(2, command, input_loc); command, dataset_dirs{i}, dataset_dirs{i}, output_loc);
out_loc = ['-ofdir "', output_loc, '" '];
command_c = cat(2, command_c, out_loc);
if(verbose)
out_im_loc = ['-oidir "', output_loc, '" '];
command_c = cat(2, command_c, out_im_loc);
end
command_c = cat(2, command_c, ' -wild ');
if(isunix) if(isunix)
unix(command_c, '-echo'); unix(command_c, '-echo');
@ -80,7 +69,7 @@ dirs = {[database_root '/AFW/'];
[database_root '/helen/testset/']; [database_root '/helen/testset/'];
[database_root 'lfpw/testset/'];}; [database_root 'lfpw/testset/'];};
landmark_dets = dir([output_loc '/*.pts']); landmark_dets = dir([output_loc '/*.csv']);
landmark_det_dir = [output_loc '/']; landmark_det_dir = [output_loc '/'];
@ -92,6 +81,12 @@ shapes = zeros(68,2,num_imgs);
curr = 0; curr = 0;
% work out which columns in the csv file are relevant
tab = readtable([landmark_det_dir, landmark_dets(1).name]);
column_names = tab.Properties.VariableNames;
landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_'));
landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'y_'));
for i=1:numel(dirs) for i=1:numel(dirs)
@ -101,9 +96,11 @@ for i=1:numel(dirs)
curr = curr+1; curr = curr+1;
gt_landmarks = dlmread([dirs{i}, gt_labels(g).name], ' ', 'A4..B71'); gt_landmarks = dlmread([dirs{i}, gt_labels(g).name], ' ', 'A4..B71');
[~, name, ~] = fileparts(gt_labels(g).name);
% find the corresponding detection % find the corresponding detection
landmark_det = dlmread([landmark_det_dir, gt_labels(g).name], ' ', 'A4..B71'); all_params = dlmread([landmark_det_dir, name, '.csv'], ',', 1, 0);
landmark_det = [all_params(landmark_inds_x); all_params(landmark_inds_y)]';
labels(:,:,curr) = gt_landmarks; labels(:,:,curr) = gt_landmarks;
@ -126,8 +123,8 @@ if(size(shapes,2) == 66 && size(labels,2) == 68)
shapes = shapes(inds_66,:,:); shapes = shapes(inds_66,:,:);
end end
% Center the pixel % Center the pixel, and convert to OCV format
labels = labels - 0.5; labels = labels - 1.5;
err_outline = compute_error(labels, shapes); err_outline = compute_error(labels, shapes);
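The -0.5 to -1.5 change above presumably folds two conventions into one shift: the csv landmarks are 0-based, pixel-centre (OpenCV-style) coordinates, while the .pts ground truth is 1-based, on top of the half-pixel centring the old code already applied:

% ocv_coord = matlab_coord - 1 - 0.5 = matlab_coord - 1.5
labels_ocv = labels_matlab - 1.5;   % labels_matlab: hypothetical 1-based landmark array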

View file

@ -1,9 +1,9 @@
Type, mean, median Type, mean, median
err clnf: 0.054348, 0.040034 err clnf: 0.054473, 0.040001
err clnf wild: 0.053107, 0.038525 err clnf wild: 0.053173, 0.038511
err svr: 0.070552, 0.050640 err svr: 0.070548, 0.050640
err svr wild: 0.067452, 0.048706 err svr wild: 0.067535, 0.048691
err clnf no out: 0.043081, 0.029782 err clnf no out: 0.043240, 0.030040
err clnf wild no out: 0.041386, 0.027463 err clnf wild no out: 0.041460, 0.027516
err svr no out: 0.058766, 0.038836 err svr no out: 0.058769, 0.038858
err svr wild no out: 0.054020, 0.036252 err svr wild no out: 0.054168, 0.036253

View file

@ -9,40 +9,26 @@ elseif(exist('D:/Dropbox/Dropbox/AAM/test data/', 'file'))
database_root = 'D:/Dropbox/Dropbox/AAM/test data/'; database_root = 'D:/Dropbox/Dropbox/AAM/test data/';
elseif(exist('F:/Dropbox/AAM/test data/', 'file')) elseif(exist('F:/Dropbox/AAM/test data/', 'file'))
database_root = 'F:/Dropbox/AAM/test data/'; database_root = 'F:/Dropbox/AAM/test data/';
else elseif(exist('/multicomp/datasets/300-W/', 'file'))
database_root = '/multicomp/datasets/300-W/'; database_root = '/multicomp/datasets/300-W/';
elseif(exist('/media/tadas/5E08AE0D08ADE3ED/Dropbox/AAM/test data/', 'file'))
database_root = '/media/tadas/5E08AE0D08ADE3ED/Dropbox/AAM/test data/';
end end
%% Run using CLNF in the wild model %% Run using CLNF in the wild model
out_clnf = [curr_dir '/out_wild_clnf_wild/']; out_clnf = [curr_dir '/out_wild_clnf_wild/'];
if(~exist(out_clnf, 'file'))
mkdir(out_clnf);
end
[err_clnf_wild, err_no_out_clnf_wild] = Run_OF_on_images(out_clnf, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clnf_wild.txt', 'multi_view', 1); [err_clnf_wild, err_no_out_clnf_wild] = Run_OF_on_images(out_clnf, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clnf_wild.txt', 'multi_view', 1);
%% Run using SVR model %% Run using SVR model
out_svr = [curr_dir '/out_wild_svr_wild/']; out_svr = [curr_dir '/out_wild_svr_wild/'];
if(~exist(out_svr, 'file'))
mkdir(out_svr);
end
[err_svr_wild, err_no_out_svr_wild] = Run_OF_on_images(out_svr, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clm_wild.txt', 'multi_view', 1); [err_svr_wild, err_no_out_svr_wild] = Run_OF_on_images(out_svr, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clm_wild.txt', 'multi_view', 1);
%% Run using general CLNF model %% Run using general CLNF model
out_clnf = [curr_dir '/out_wild_clnf/']; out_clnf = [curr_dir '/out_wild_clnf/'];
if(~exist(out_clnf, 'file'))
mkdir(out_clnf);
end
[err_clnf, err_no_out_clnf] = Run_OF_on_images(out_clnf, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clnf_general.txt', 'multi_view', 1); [err_clnf, err_no_out_clnf] = Run_OF_on_images(out_clnf, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clnf_general.txt', 'multi_view', 1);
%% Run using SVR model %% Run using SVR model
out_svr = [curr_dir '/out_wild_svr/']; out_svr = [curr_dir '/out_wild_svr/'];
if(~exist(out_svr, 'file'))
mkdir(out_svr);
end
[err_svr, err_no_out_svr] = Run_OF_on_images(out_svr, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clm_general.txt', 'multi_view', 1); [err_svr, err_no_out_svr] = Run_OF_on_images(out_svr, database_root, 'use_afw', 'use_lfpw', 'use_ibug', 'use_helen', 'verbose', 'model', 'model/main_clm_general.txt', 'multi_view', 1);
%% %%
@ -93,7 +79,7 @@ load('landmark_det_baselines/zhu_wild.mat');
load('out_wild_clnf_wild/res.mat'); load('out_wild_clnf_wild/res.mat');
for i=1:size(labels,3) for i=1:size(labels,3)
diffs = squeeze(sum(sum(bsxfun(@plus, labels_all(18:end,:,:)-0.5, - labels([18:60,62:64,66:end],:,i)),1),2)); diffs = squeeze(sum(sum(bsxfun(@plus, labels_all(18:end,:,:)-1.5, - labels([18:60,62:64,66:end],:,i)),1),2));
inds_in_cpp = cat(1, inds_in_cpp, find(diffs == 0)); inds_in_cpp = cat(1, inds_in_cpp, find(diffs == 0));
end end

View file

@ -18,6 +18,8 @@ elseif(exist('D:/Dropbox/Dropbox/AAM/test data/', 'file'))
database_root = 'D:/Dropbox/Dropbox/AAM/test data/'; database_root = 'D:/Dropbox/Dropbox/AAM/test data/';
elseif(exist('F:/Dropbox/AAM/test data/', 'file')) elseif(exist('F:/Dropbox/AAM/test data/', 'file'))
database_root = 'F:/Dropbox/AAM/test data/'; database_root = 'F:/Dropbox/AAM/test data/';
elseif(exist('/media/tadas/5E08AE0D08ADE3ED/Dropbox/AAM/test data/', 'file'))
database_root = '/media/tadas/5E08AE0D08ADE3ED/Dropbox/AAM/test data/';
else else
database_root = '/multicomp/datasets/'; database_root = '/multicomp/datasets/';
end end
@ -26,19 +28,14 @@ database_root = [database_root, '/ytceleb/'];
in_vids = dir([database_root '/*.avi']); in_vids = dir([database_root '/*.avi']);
command = executable; command = sprintf('%s -2Dfp -out_dir "%s" ', executable, output);
command = cat(2, command, ' -no3Dfp -noMparams -noPose -noGaze -noAUs ');
% add all videos to single argument list (so as not to load the model anew % add all videos to single argument list (so as not to load the model anew
% for every video) % for every video)
for i=1:numel(in_vids) for i=1:numel(in_vids)
[~, name, ~] = fileparts(in_vids(i).name);
% where to output tracking results
outputFile_fp = [output name '_fp.txt'];
in_file_name = [database_root, '/', in_vids(i).name]; in_file_name = [database_root, '/', in_vids(i).name];
command = cat(2, command, [' -f "' in_file_name '" -of "' outputFile_fp '"']); command = cat(2, command, [' -f "' in_file_name '" ']);
end end
if(isunix) if(isunix)
@ -54,21 +51,13 @@ if(~exist(output, 'file'))
mkdir(output) mkdir(output)
end end
command = executable; command = sprintf('%s -2Dfp -out_dir "%s" -mloc model/main_clm_general.txt ', executable, output);
command = cat(2, command, ' -mloc model/main_clm_general.txt ');
command = cat(2, command, ' -no3Dfp -noMparams -noPose -noGaze -noAUs ');
% add all videos to single argument list (so as not to load the model anew % add all videos to single argument list (so as not to load the model anew
% for every video) % for every video)
for i=1:numel(in_vids) for i=1:numel(in_vids)
[~, name, ~] = fileparts(in_vids(i).name);
% where to output tracking results
outputFile_fp = [output name '_fp.txt'];
in_file_name = [database_root, '/', in_vids(i).name]; in_file_name = [database_root, '/', in_vids(i).name];
command = cat(2, command, [' -f "' in_file_name '"']);
command = cat(2, command, [' -f "' in_file_name '" -of "' outputFile_fp '"']);
end end
if(isunix) if(isunix)
@ -80,7 +69,7 @@ end
d_loc = 'yt_features/'; d_loc = 'yt_features/';
d_loc_clm = 'yt_features_clm/'; d_loc_clm = 'yt_features_clm/';
files_yt = dir([d_loc, '/*.txt']); files_yt = dir([d_loc, '/*.csv']);
preds_all = []; preds_all = [];
preds_all_clm = []; preds_all_clm = [];
gts_all = []; gts_all = [];
@ -104,7 +93,7 @@ for i = 1:numel(files_yt)
pred_landmarks_clm(:,1,:) = xs'; pred_landmarks_clm(:,1,:) = xs';
pred_landmarks_clm(:,2,:) = ys'; pred_landmarks_clm(:,2,:) = ys';
load([database_root, name(1:end-3), '.mat']); load([database_root, name, '.mat']);
preds_all = cat(3, preds_all, pred_landmarks); preds_all = cat(3, preds_all, pred_landmarks);
preds_all_clm = cat(3, preds_all_clm, pred_landmarks_clm); preds_all_clm = cat(3, preds_all_clm, pred_landmarks_clm);
gts_all = cat(3, gts_all, labels); gts_all = cat(3, gts_all, labels);

View file

@ -23,7 +23,7 @@ cd('../');
cd('Action Unit Experiments'); cd('Action Unit Experiments');
run_AU_prediction_Bosphorus run_AU_prediction_Bosphorus
assert(mean(cccs_reg) > 0.56); assert(mean(cccs_reg) > 0.56);
assert(mean(f1s_class) > 0.46); assert(mean(f1s_class) > 0.49);
run_AU_prediction_BP4D run_AU_prediction_BP4D
assert(mean(ints_cccs) > 0.6); assert(mean(ints_cccs) > 0.6);
@ -48,10 +48,14 @@ assert(median_error < 9.0)
cd('../'); cd('../');
%% Demos %% Demos
clear;
close all;
cd('Demos'); cd('Demos');
run_demo_images; run_demo_images;
run_demo_videos; run_demo_videos;
run_demo_video_multi; run_demo_video_multi;
run_demo_align_size;
run_test_img_seq;
feature_extraction_demo_vid; feature_extraction_demo_vid;
feature_extraction_demo_img_seq; feature_extraction_demo_img_seq;
gaze_extraction_demo_vid; gaze_extraction_demo_vid;
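The AU intensity asserts above depend on cccs_reg / ints_cccs computed inside the experiment scripts; for reference, a sketch of Lin's concordance correlation coefficient, the metric those names presumably denote (the implementation is not shown in this diff):

function c = ccc(x, y)
    mx = mean(x); my = mean(y);
    c = 2 * mean((x - mx) .* (y - my)) / ...
        (var(x, 1) + var(y, 1) + (mx - my)^2);
end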

View file

@ -29,22 +29,21 @@ else
executable = '"../../x64/Release/FaceLandmarkImg.exe"'; executable = '"../../x64/Release/FaceLandmarkImg.exe"';
end end
command = sprintf('%s -fx 1028 -fy 1028 -gaze ', executable); command = sprintf('%s -fx 1028 -fy 1028 ', executable);
p_dirs = dir([database_root, 'p*']); p_dirs = dir([database_root, 'p*']);
parfor p=1:numel(p_dirs) parfor p=1:numel(p_dirs)
tic tic
input_loc = ['-fdir "', [database_root, p_dirs(p).name], '" ']; input_loc = ['-gaze -fdir "', [database_root, p_dirs(p).name], '" '];
out_img_loc = ['-oidir "', [output, p_dirs(p).name], '" ']; out_img_loc = ['-out_dir "', [output, p_dirs(p).name], '" '];
out_p_loc = ['-opdir "', [output, p_dirs(p).name], '" ']; command_c = cat(2, command, input_loc, out_img_loc);
command_c = cat(2, command, input_loc, out_img_loc, out_p_loc);
if(isunix) if(isunix)
unix(command_c, '-echo'); unix(command_c, '-echo');
else else
dos(command_c); dos(command_c);
end; end
end end
%% %%
@ -65,29 +64,36 @@ for p=1:numel(p_dirs)
for i=1:size(filenames, 1) for i=1:size(filenames, 1)
fname = sprintf('%s/%s/%d_%d_%d_%d_%d_%d_%d_det_0.pose', output, p_dirs(p).name,... fname = sprintf('%s/%s/%d_%d_%d_%d_%d_%d_%d.csv', output, p_dirs(p).name,...
filenames(i,1), filenames(i,2), filenames(i,3), filenames(i,4),... filenames(i,1), filenames(i,2), filenames(i,3), filenames(i,4),...
filenames(i,5), filenames(i,6), filenames(i,7)); filenames(i,5), filenames(i,6), filenames(i,7));
try
A = dlmread(fname, ' ', 'A79..F79'); if(p==1 && i==1)
valid = true; % First read in the column names
catch tab = readtable(fname);
A = zeros(1,6); column_names = tab.Properties.VariableNames;
A(1,3) = -1;
A(1,6) = -1; gaze_0_ids = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'gaze_0_'));
valid = false; gaze_1_ids = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'gaze_1_'));
end end
head_rot = headpose(i,1:3); if(exist(fname, 'file'))
all_params = dlmread(fname, ',', 1, 0);
else
all_params = [];
end
predictions_r(curr,:) = A(1:3); % If there was a face detected
predictions_l(curr,:) = A(4:6); if(size(all_params,1)>0)
predictions_r(curr,:) = all_params(1,gaze_0_ids);
if(~valid) predictions_l(curr,:) = all_params(1,gaze_1_ids);
else
predictions_r(curr,:) = [0,0,-1]; predictions_r(curr,:) = [0,0,-1];
predictions_l(curr,:) = [0,0,-1]; predictions_l(curr,:) = [0,0,-1];
end end
head_rot = headpose(i,1:3);
gt_r(curr,:) = data.right.gaze(i,:)'; gt_r(curr,:) = data.right.gaze(i,:)';
gt_r(curr,:) = gt_r(curr,:) / norm(gt_r(curr,:)); gt_r(curr,:) = gt_r(curr,:) / norm(gt_r(curr,:));
gt_l(curr,:) = data.left.gaze(i,:)'; gt_l(curr,:) = data.left.gaze(i,:)';

Some files were not shown because too many files have changed in this diff