- no-mask option now works in the GUI

- Allowing to set camera resolution in CLI.
This commit is contained in:
Tadas Baltrusaitis 2018-02-08 16:28:00 +00:00
parent c7a2771a94
commit 8692b5f9a8
6 changed files with 29 additions and 10 deletions

View file

@ -118,8 +118,8 @@ int main (int argc, char **argv)
if (sequence_reader.no_input_specified && sequence_number == 0) if (sequence_reader.no_input_specified && sequence_number == 0)
{ {
// If that fails, revert to webcam // If that fails, revert to webcam
INFO_STREAM("No input specified, attempting to open a webcam 0"); INFO_STREAM("No input specified, attempting to open a webcam 0 at 640 x 480px");
if (!sequence_reader.OpenWebcam(0)) if (!sequence_reader.OpenWebcam(0, 640, 480))
{ {
ERROR_STREAM("Failed to open the webcam"); ERROR_STREAM("Failed to open the webcam");
break; break;

View file

@ -117,7 +117,7 @@ namespace OpenFaceDemo
// TODO, create a demo version of parameters // TODO, create a demo version of parameters
face_model_params = new FaceModelParameters(root, false); face_model_params = new FaceModelParameters(root, false);
landmark_detector = new CLNF(face_model_params); landmark_detector = new CLNF(face_model_params);
face_analyser = new FaceAnalyserManaged(root, true, 112); face_analyser = new FaceAnalyserManaged(root, true, 112, true);
gaze_analyser = new GazeAnalyserManaged(); gaze_analyser = new GazeAnalyserManaged();
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() => Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>

View file

@ -48,6 +48,7 @@
<MenuItem Name="SettingsMenu" Header="Recording settings"> <MenuItem Name="SettingsMenu" Header="Recording settings">
<MenuItem Name="OutputLocationItem" Header="Set output location..." Click="OutputLocationItem_Click" ></MenuItem> <MenuItem Name="OutputLocationItem" Header="Set output location..." Click="OutputLocationItem_Click" ></MenuItem>
<MenuItem Header="Set output image size..." Click="setOutputImageSize_Click"></MenuItem> <MenuItem Header="Set output image size..." Click="setOutputImageSize_Click"></MenuItem>
<MenuItem IsCheckable="True" Header="Mask aligned image" IsChecked="{Binding MaskAligned}"/>
</MenuItem> </MenuItem>
<MenuItem Name="AUSetting" Header="OpenFace settings" > <MenuItem Name="AUSetting" Header="OpenFace settings" >
<MenuItem IsCheckable="True" Header="Use dynamic AU models" IsChecked="{Binding DynamicAUModels}"/> <MenuItem IsCheckable="True" Header="Use dynamic AU models" IsChecked="{Binding DynamicAUModels}"/>

View file

@ -124,7 +124,8 @@ namespace OpenFaceOffline
public bool ShowAUs { get; set; } = true; // Showing Facial Action Units public bool ShowAUs { get; set; } = true; // Showing Facial Action Units
int image_output_size = 112; int image_output_size = 112;
public bool MaskAligned { get; set; } = true; // Should the aligned images be masked
// Where the recording is done (by default in a record directory, from where the application executed) // Where the recording is done (by default in a record directory, from where the application executed)
String record_root = "./processed"; String record_root = "./processed";
@ -187,7 +188,7 @@ namespace OpenFaceOffline
Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance); Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance);
// Initialize the face analyser // Initialize the face analyser
face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size); face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size, MaskAligned);
// Reset the tracker // Reset the tracker
landmark_detector.Reset(); landmark_detector.Reset();
@ -281,7 +282,7 @@ namespace OpenFaceOffline
} }
// Initialize the face analyser // Initialize the face analyser
face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size); face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size, MaskAligned);
// Loading an image file // Loading an image file
var frame = new RawImage(reader.GetNextImage()); var frame = new RawImage(reader.GetNextImage());

View file

@ -92,7 +92,7 @@ private:
public: public:
FaceAnalyserManaged(System::String^ root, bool dynamic, int output_width) FaceAnalyserManaged(System::String^ root, bool dynamic, int output_width, bool mask_aligned)
{ {
string root_std = msclr::interop::marshal_as<std::string>(root); string root_std = msclr::interop::marshal_as<std::string>(root);
FaceAnalysis::FaceAnalyserParameters params(root_std); FaceAnalysis::FaceAnalyserParameters params(root_std);
@ -102,7 +102,7 @@ public:
params.OptimizeForImages(); params.OptimizeForImages();
} }
params.setAlignedOutput(output_width); params.setAlignedOutput(output_width, -1.0, mask_aligned);
face_analyser = new FaceAnalysis::FaceAnalyser(params); face_analyser = new FaceAnalysis::FaceAnalyser(params);
hog_features = new cv::Mat_<float>(); hog_features = new cv::Mat_<float>();

View file

@ -94,6 +94,8 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
std::string input_video_file; std::string input_video_file;
std::string input_sequence_directory; std::string input_sequence_directory;
int device = -1; int device = -1;
int cam_width = 640;
int cam_height = 480;
bool file_found = false; bool file_found = false;
@ -147,6 +149,22 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
valid[i + 1] = false; valid[i + 1] = false;
i++; i++;
} }
else if (arguments[i].compare("-cam_width") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cam_width;
valid[i] = false;
valid[i + 1] = false;
i++;
}
else if (arguments[i].compare("-cam_height") == 0)
{
std::stringstream data(arguments[i + 1]);
data >> cam_height;
valid[i] = false;
valid[i + 1] = false;
i++;
}
} }
for (int i = (int)arguments.size() - 1; i >= 0; --i) for (int i = (int)arguments.size() - 1; i >= 0; --i)
@ -162,8 +180,7 @@ bool SequenceCapture::Open(std::vector<std::string>& arguments)
// Based on what was read in open the sequence // Based on what was read in open the sequence
if (device != -1) if (device != -1)
{ {
// TODO allow to specify webcam resolution return OpenWebcam(device, cam_width, cam_height, fx, fy, cx, cy);
return OpenWebcam(device, 640, 480, fx, fy, cx, cy);
} }
if (!input_video_file.empty()) if (!input_video_file.empty())
{ {