diff --git a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
index 6d1ec45..f317ca9 100644
--- a/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
+++ b/exe/FaceLandmarkVid/FaceLandmarkVid.cpp
@@ -118,8 +118,8 @@ int main (int argc, char **argv)
if (sequence_reader.no_input_specified && sequence_number == 0)
{
// If that fails, revert to webcam
- INFO_STREAM("No input specified, attempting to open a webcam 0");
- if (!sequence_reader.OpenWebcam(0))
+ INFO_STREAM("No input specified, attempting to open a webcam 0 at 640 x 480px");
+ if (!sequence_reader.OpenWebcam(0, 640, 480))
{
ERROR_STREAM("Failed to open the webcam");
break;
diff --git a/gui/OpenFaceDemo/MainWindow.xaml.cs b/gui/OpenFaceDemo/MainWindow.xaml.cs
index aeeb6fd..052c725 100644
--- a/gui/OpenFaceDemo/MainWindow.xaml.cs
+++ b/gui/OpenFaceDemo/MainWindow.xaml.cs
@@ -117,7 +117,7 @@ namespace OpenFaceDemo
// TODO, create a demo version of parameters
face_model_params = new FaceModelParameters(root, false);
landmark_detector = new CLNF(face_model_params);
- face_analyser = new FaceAnalyserManaged(root, true, 112);
+ face_analyser = new FaceAnalyserManaged(root, true, 112, true);
gaze_analyser = new GazeAnalyserManaged();
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
diff --git a/gui/OpenFaceOffline/MainWindow.xaml b/gui/OpenFaceOffline/MainWindow.xaml
index c768161..25cb1c2 100644
--- a/gui/OpenFaceOffline/MainWindow.xaml
+++ b/gui/OpenFaceOffline/MainWindow.xaml
@@ -48,6 +48,7 @@
diff --git a/gui/OpenFaceOffline/MainWindow.xaml.cs b/gui/OpenFaceOffline/MainWindow.xaml.cs
index 7b3f87d..6900fa1 100644
--- a/gui/OpenFaceOffline/MainWindow.xaml.cs
+++ b/gui/OpenFaceOffline/MainWindow.xaml.cs
@@ -124,7 +124,8 @@ namespace OpenFaceOffline
public bool ShowAUs { get; set; } = true; // Showing Facial Action Units
int image_output_size = 112;
-
+ public bool MaskAligned { get; set; } = true; // Should the aligned images be masked
+
// Where the recording is done (by default in a record directory, from where the application executed)
String record_root = "./processed";
@@ -187,7 +188,7 @@ namespace OpenFaceOffline
Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance);
// Initialize the face analyser
- face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size);
+ face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size, MaskAligned);
// Reset the tracker
landmark_detector.Reset();
@@ -281,7 +282,7 @@ namespace OpenFaceOffline
}
// Initialize the face analyser
- face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size);
+ face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size, MaskAligned);
// Loading an image file
var frame = new RawImage(reader.GetNextImage());
diff --git a/lib/local/CppInerop/FaceAnalyserInterop.h b/lib/local/CppInerop/FaceAnalyserInterop.h
index a3a9026..2836654 100644
--- a/lib/local/CppInerop/FaceAnalyserInterop.h
+++ b/lib/local/CppInerop/FaceAnalyserInterop.h
@@ -92,7 +92,7 @@ private:
public:
- FaceAnalyserManaged(System::String^ root, bool dynamic, int output_width)
+ FaceAnalyserManaged(System::String^ root, bool dynamic, int output_width, bool mask_aligned)
{
string root_std = msclr::interop::marshal_as<std::string>(root);
FaceAnalysis::FaceAnalyserParameters params(root_std);
@@ -102,7 +102,7 @@ public:
params.OptimizeForImages();
}
- params.setAlignedOutput(output_width);
+ params.setAlignedOutput(output_width, -1.0, mask_aligned);
face_analyser = new FaceAnalysis::FaceAnalyser(params);
hog_features = new cv::Mat_<double>();
diff --git a/lib/local/Utilities/src/SequenceCapture.cpp b/lib/local/Utilities/src/SequenceCapture.cpp
index ab27230..a0a7b1e 100644
--- a/lib/local/Utilities/src/SequenceCapture.cpp
+++ b/lib/local/Utilities/src/SequenceCapture.cpp
@@ -94,6 +94,8 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
std::string input_video_file;
std::string input_sequence_directory;
int device = -1;
+ int cam_width = 640;
+ int cam_height = 480;
bool file_found = false;
@@ -147,6 +149,22 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
valid[i + 1] = false;
i++;
}
+ else if (arguments[i].compare("-cam_width") == 0)
+ {
+ std::stringstream data(arguments[i + 1]);
+ data >> cam_width;
+ valid[i] = false;
+ valid[i + 1] = false;
+ i++;
+ }
+ else if (arguments[i].compare("-cam_height") == 0)
+ {
+ std::stringstream data(arguments[i + 1]);
+ data >> cam_height;
+ valid[i] = false;
+ valid[i + 1] = false;
+ i++;
+ }
}
for (int i = (int)arguments.size() - 1; i >= 0; --i)
@@ -162,8 +180,7 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
// Based on what was read in open the sequence
if (device != -1)
{
- // TODO allow to specify webcam resolution
- return OpenWebcam(device, 640, 480, fx, fy, cx, cy);
+ return OpenWebcam(device, cam_width, cam_height, fx, fy, cx, cy);
}
if (!input_video_file.empty())
{