Continuing work on the recorder interop for recording in C#

Tadas Baltrusaitis 2018-01-18 08:08:29 +00:00
parent deec0528cb
commit d5f63dbbe3
10 changed files with 195 additions and 60 deletions

======================== changed file ========================

@@ -41,6 +41,7 @@
     <MenuItem IsCheckable="True" Header="Record HOG" IsChecked="{Binding RecordHOG}"/>
     <MenuItem IsCheckable="True" Header="Record model parameters" IsChecked="{Binding RecordModelParameters}" />
     <MenuItem IsCheckable="True" Header="Record aligned faces" IsChecked="{Binding RecordAligned}"/>
+    <MenuItem IsCheckable="True" Header="Record tracked images/videos" IsChecked="{Binding RecordTracked}"/>
 </MenuItem>
 <MenuItem Name="SettingsMenu" Header="Recording settings">
     <MenuItem Name="OutputLocationItem" Header="Set output location..." Click="OutputLocationItem_Click" ></MenuItem>

======================== changed file ========================

@@ -49,7 +49,7 @@ using CameraInterop;
 using FaceAnalyser_Interop;
 using GazeAnalyser_Interop;
 using FaceDetectorInterop;
-using MediaReader;
+using UtilitiesOF;
 using Microsoft.WindowsAPICodePack.Dialogs;
 using System.Windows.Forms;
@@ -121,6 +121,7 @@ namespace OpenFaceOffline
         public bool RecordPose { get; set; } = true; // Head pose (position and orientation)
         public bool RecordAUs { get; set; } = true; // Facial action units
         public bool RecordGaze { get; set; } = true; // Eye gaze
+        public bool RecordTracked { get; set; } = false; // Recording tracked videos or images
         // Visualisation options
         public bool ShowTrackedVideo { get; set; } = true; // Showing the actual tracking
@@ -131,7 +132,7 @@ namespace OpenFaceOffline
         int image_output_size = 112;
         // Where the recording is done (by default in a record directory, from where the application executed)
-        String record_root = "./record";
+        String record_root = "./processed";
         // For AU prediction, if videos are long dynamic models should be used
         public bool DynamicAUModels { get; set; } = true;
@@ -241,8 +242,7 @@ namespace OpenFaceOffline
         }
-        // TODO here
-        private void ProcessIndividualImages(ImageReader reader)
+        private void ProcessIndividualImages(ImageReader reader) // TODO need interface for recording settings
         {
             // Make sure the GUI is setup appropriately
             SetupFeatureExtractionMode();
@@ -264,7 +264,7 @@ namespace OpenFaceOffline
             // Loading an image file
             var frame = new RawImage(reader.GetNextImage());
-            var grayFrame = new RawImage(reader.GetCurrentFrameGray());
+            var gray_frame = new RawImage(reader.GetCurrentFrameGray());
             // This will be false when the image is not available
             while (reader.isOpened())
@@ -274,18 +274,25 @@ namespace OpenFaceOffline
                     continue;
                 }
-                clnf_model.Reset();
+                // Setup recording
+                RecorderOpenFaceParameters rec_params = new RecorderOpenFaceParameters(false, false,
+                    Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose, RecordAUs,
+                    RecordGaze, RecordHOG, RecordTracked, RecordAligned,
+                    reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0);
-                double progress = reader.GetProgress();
+                RecorderOpenFace recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);
                 // Detect faces here and return bounding boxes
                 List<Rect> face_detections = new List<Rect>();
                 List<double> confidences = new List<double>();
-                face_detector.DetectFacesHOG(face_detections, grayFrame, confidences);
+                face_detector.DetectFacesHOG(face_detections, gray_frame, confidences);
+                // For visualization
+                double progress = reader.GetProgress();
                 for (int i = 0; i < face_detections.Count; ++i)
                 {
-                    detectionSucceeding = clnf_model.DetectFaceLandmarksInImage(grayFrame, face_detections[i], face_model_params);
+                    detectionSucceeding = clnf_model.DetectFaceLandmarksInImage(gray_frame, face_detections[i], face_model_params);
                     var landmarks = clnf_model.CalculateAllLandmarks();
@@ -293,7 +300,10 @@ namespace OpenFaceOffline
                     var au_preds = face_analyser.PredictStaticAUsAndComputeFeatures(frame, landmarks, ShowAppearance);
                     // Predic eye gaze
                     gaze_analyser.AddNextFrame(clnf_model, detectionSucceeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
+                    // Record an observation
+                    RecordObservation(recorder, detectionSucceeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
                     // Only the final face will contain the details
                     VisualizeFeatures(frame, landmarks, i==0, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
@@ -303,7 +313,12 @@ namespace OpenFaceOffline
                 latest_img = null;
                 frame = new RawImage(reader.GetNextImage());
-                grayFrame = new RawImage(reader.GetCurrentFrameGray());
+                gray_frame = new RawImage(reader.GetCurrentFrameGray());
+                // Do not cary state accross images
+                clnf_model.Reset();
+                face_analyser.Reset();
                 // TODO how to report errors from the reader here? exceptions? logging? Problem for future versions?
             }
@@ -413,6 +428,40 @@ namespace OpenFaceOffline
         }
+        private void RecordObservation(RecorderOpenFace recorder, bool success, float fx, float fy, float cx, float cy)
+        {
+            double confidence = clnf_model.GetConfidence();
+
+            List<double> pose = new List<double>();
+            clnf_model.GetPose(pose, fx, fy, cx, cy);
+            recorder.SetObservationPose(pose);
+
+            List<Tuple<double, double>> landmarks_2D = clnf_model.CalculateAllLandmarks();
+            List<Tuple<double, double, double>> landmarks_3D = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
+            List<double> global_params = clnf_model.GetRigidParams();
+            List<double> local_params = clnf_model.GetParams();
+
+            recorder.SetObservationLandmarks(landmarks_2D, landmarks_3D, global_params, local_params, confidence, success);
+
+            var gaze = gaze_analyser.GetGazeCamera();
+            var gaze_angle = gaze_analyser.GetGazeAngle();
+            var landmarks_2d_eyes = clnf_model.CalculateAllEyeLandmarks();
+            var landmarks_3d_eyes = clnf_model.CalculateAllEyeLandmarks3D(fx, fy, cx, cy);
+            recorder.SetObservationGaze(gaze.Item1, gaze.Item2, gaze_angle, landmarks_2d_eyes, landmarks_3d_eyes);
+
+            //open_face_rec.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass());
+            //open_face_rec.SetObservationFaceAlign(sim_warped_img);
+            //open_face_rec.WriteObservation();
+
+            // TODO
+            //open_face_rec.SetObservationHOG(face_model.detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
+            //open_face_rec.SetObservationVisualization(visualizer.GetVisImage());
+        }
         private void VisualizeFeatures(RawImage frame, List<Tuple<double, double>> landmarks, bool new_image, double fx, double fy, double cx, double cy, double progress)
         {
             List<Tuple<Point, Point>> lines = null;
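Taken together, the changes above route each processed image through the new managed recorder. A minimal sketch of one iteration of that flow, using the names from the diff (error handling and the rest of the GUI plumbing omitted; record_root is assumed to point at a writable directory):

    // Sketch only: the per-image recording flow introduced in this file.
    var rec_params = new RecorderOpenFaceParameters(
        false, false,                                   // not a sequence, not from a webcam
        Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose, RecordAUs,
        RecordGaze, RecordHOG, RecordTracked, RecordAligned,
        reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0);

    var recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);

    // ... face detection and landmark fitting for each detected face, then:
    RecordObservation(recorder, detectionSucceeding, reader.GetFx(), reader.GetFy(),
                      reader.GetCx(), reader.GetCy());

    // Images are independent, so tracker and AU state are reset before the next one
    clnf_model.Reset();
    face_analyser.Reset();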

======================== changed file ========================

@@ -237,16 +237,16 @@ namespace OpenFaceOffline
             if (output_3D_landmarks)
             {
-                List<System.Windows.Media.Media3D.Point3D> landmarks_3d = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
+                List<Tuple<double, double, double>> landmarks_3d = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
                 for (int i = 0; i < landmarks_3d.Count; ++i)
-                    output_features_file.Write(", {0:F3}", landmarks_3d[i].X);
+                    output_features_file.Write(", {0:F3}", landmarks_3d[i].Item1);
                 for (int i = 0; i < landmarks_3d.Count; ++i)
-                    output_features_file.Write(", {0:F3}", landmarks_3d[i].Y);
+                    output_features_file.Write(", {0:F3}", landmarks_3d[i].Item2);
                 for (int i = 0; i < landmarks_3d.Count; ++i)
-                    output_features_file.Write(", {0:F3}", landmarks_3d[i].Z);
+                    output_features_file.Write(", {0:F3}", landmarks_3d[i].Item3);
             }
             if (output_model_params)

======================== changed file ========================

@@ -40,3 +40,4 @@
 #include "CameraInterop.h"
 #include "ImageReader.h"
 #include "FaceDetectorInterop.h"
+#include "RecorderInterop.h"

======================== changed file ========================

@@ -58,7 +58,7 @@
 #include <msclr\marshal.h>
 #include <msclr\marshal_cppstd.h>
-namespace MediaReader {
+namespace UtilitiesOF {
     public ref class ReadingFailedException : System::Exception
     {
@@ -139,27 +139,33 @@ namespace MediaReader {
            return m_rgb_frame;
        }
+       System::String^ GetName()
+       {
+           std::string filename = m_image_capture->name;
+           return gcnew System::String(filename.c_str());
+       }
        double GetProgress()
        {
            return m_image_capture->GetProgress();
        }
-       double GetFx()
+       float GetFx()
        {
            return m_image_capture->fx;
        }
-       double GetFy()
+       float GetFy()
        {
            return m_image_capture->fy;
        }
-       double GetCx()
+       float GetCx()
        {
            return m_image_capture->cx;
        }
-       double GetCy()
+       float GetCy()
        {
            return m_image_capture->cy;
        }
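With the reader moved into the UtilitiesOF namespace, C# callers swap the using directive and can feed the (now float) intrinsics straight into the recorder parameters. A rough sketch, assuming reader is an ImageReader the GUI has already opened:

    using UtilitiesOF;                               // previously: using MediaReader;

    string input_name = reader.GetName();            // new accessor, used to name the recording
    float fx = reader.GetFx(), fy = reader.GetFy();  // intrinsics are now float...
    float cx = reader.GetCx(), cy = reader.GetCy();  // ...matching RecorderOpenFaceParameters
    double progress = reader.GetProgress();          // unchanged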

======================== changed file ========================

@@ -307,6 +307,17 @@ namespace CppInterop {
            return landmarks;
        }
+       List<System::Tuple<double, double, double>^>^ CalculateAllEyeLandmarks3D(double fx, double fy, double cx, double cy) {
+           vector<cv::Point3d> vecLandmarks = ::LandmarkDetector::Calculate3DEyeLandmarks(*clnf, fx, fy, cx, cy);
+
+           auto landmarks = gcnew System::Collections::Generic::List<System::Tuple<double, double, double>^>();
+           for (cv::Point3d p : vecLandmarks) {
+               landmarks->Add(gcnew System::Tuple<double, double, double>(p.x, p.y, p.z));
+           }
+
+           return landmarks;
+       }
        List<System::Tuple<double, double>^>^ CalculateVisibleEyeLandmarks() {
            vector<cv::Point2d> vecLandmarks = ::LandmarkDetector::CalculateVisibleEyeLandmarks(*clnf);
@@ -318,15 +329,15 @@ namespace CppInterop {
            return landmarks;
        }
-       List<System::Windows::Media::Media3D::Point3D>^ Calculate3DLandmarks(double fx, double fy, double cx, double cy) {
+       List<System::Tuple<double, double, double>^>^ Calculate3DLandmarks(double fx, double fy, double cx, double cy) {
            cv::Mat_<double> shape3D = clnf->GetShape(fx, fy, cx, cy);
-           auto landmarks_3D = gcnew List<System::Windows::Media::Media3D::Point3D>();
+           auto landmarks_3D = gcnew List<System::Tuple<double, double, double>^>();
            for(int i = 0; i < shape3D.cols; ++i)
            {
-               landmarks_3D->Add(System::Windows::Media::Media3D::Point3D(shape3D.at<double>(0, i), shape3D.at<double>(1, i), shape3D.at<double>(2, i)));
+               landmarks_3D->Add(gcnew System::Tuple<double, double, double>(shape3D.at<double>(0, i), shape3D.at<double>(1, i), shape3D.at<double>(2, i)));
            }
            return landmarks_3D;
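Dropping the WPF Point3D type means consumers index tuple items instead of X/Y/Z; a short sketch of the calling side (mirroring the Recorder.cs change earlier in this commit):

    // Sketch: consuming the retyped 3D landmark API from C#.
    List<Tuple<double, double, double>> landmarks_3d = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
    foreach (var p in landmarks_3d)
    {
        double x = p.Item1, y = p.Item2, z = p.Item3;   // Item1/2/3 replace Point3D.X/Y/Z
    }

    // New in this commit: 3D eye landmarks, consumed when recording gaze observations
    var eye_landmarks_3d = clnf_model.CalculateAllEyeLandmarks3D(fx, fy, cx, cy);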

======================== changed file ========================

@@ -43,6 +43,9 @@
 #pragma managed
+#include <msclr\marshal.h>
+#include <msclr\marshal_cppstd.h>
 namespace UtilitiesOF {
     public ref class RecorderOpenFaceParameters
@@ -97,11 +100,77 @@ namespace UtilitiesOF {
    public:
        // Can provide a directory, or a list of files
-       RecorderOpenFace(const std::string in_filename, UtilitiesOF::RecorderOpenFaceParameters^ parameters, std::string output_directory, std::string output_name)
+       RecorderOpenFace(System::String^ in_filename, UtilitiesOF::RecorderOpenFaceParameters^ parameters, System::String^ output_directory)
        {
-           m_recorder = new Utilities::RecorderOpenFace(in_filename, parameters->GetParams(), output_directory, output_name);
+           std::string in_filename_std = msclr::interop::marshal_as<std::string>(in_filename);
+           std::string output_directory_std = msclr::interop::marshal_as<std::string>(output_directory);
+           m_recorder = new Utilities::RecorderOpenFace(in_filename_std, *parameters->GetParams(), output_directory_std);
        }
+       void SetObservationGaze(System::Tuple<double, double, double>^ gaze_direction0, System::Tuple<double, double, double>^ gaze_direction1, System::Tuple<double, double>^ gaze_angle,
+           List<System::Tuple<double, double>^>^ landmarks_2D, List<System::Tuple<double,double,double>^>^ landmarks_3D)
+       {
+           cv::Point3f gaze_direction0_cv(gaze_direction0->Item1, gaze_direction0->Item2, gaze_direction0->Item3);
+           cv::Point3f gaze_direction1_cv(gaze_direction1->Item1, gaze_direction1->Item2, gaze_direction1->Item3);
+           cv::Vec2d gaze_angle_cv(gaze_angle->Item1, gaze_angle->Item2);
+
+           // Construct an OpenCV matrix from the landmarks
+           cv::Mat_<double> landmarks_2D_mat(landmarks_2D->Count * 2, 1, 0.0);
+           for (int i = 0; i < landmarks_2D->Count; ++i)
+           {
+               landmarks_2D_mat.at<double>(i, 0) = landmarks_2D[i]->Item1;
+               landmarks_2D_mat.at<double>(i + landmarks_2D->Count, 0) = landmarks_2D[i]->Item2;
+           }
+
+           // Construct an OpenCV matrix from the landmarks
+           cv::Mat_<double> landmarks_3D_mat(landmarks_3D->Count * 3, 1, 0.0);
+           for (int i = 0; i < landmarks_3D->Count; ++i)
+           {
+               landmarks_3D_mat.at<double>(i, 0) = landmarks_3D[i]->Item1;
+               landmarks_3D_mat.at<double>(i + landmarks_3D->Count, 0) = landmarks_3D[i]->Item2;
+               landmarks_3D_mat.at<double>(i + 2 * landmarks_3D->Count, 0) = landmarks_3D[i]->Item3;
+           }
+
+           m_recorder->SetObservationGaze(gaze_direction0_cv, gaze_direction1_cv, gaze_angle_cv, landmarks_2D_mat, landmarks_3D_mat);
+       }
+
+       // Setting the observations
+       void SetObservationPose(List<double>^ pose)
+       {
+           cv::Vec6d pose_vec(pose[0], pose[1], pose[2], pose[3], pose[4], pose[5]);
+           m_recorder->SetObservationPose(pose_vec);
+       }
+
+       void SetObservationLandmarks(List<System::Tuple<double, double>^>^ landmarks_2D, List<System::Tuple<double, double, double>^>^ landmarks_3D, List<double>^ params_global, List<double>^ params_local, double confidence, bool success)
+       {
+           // Construct an OpenCV matrix from the landmarks
+           cv::Mat_<double> landmarks_2D_mat(landmarks_2D->Count * 2, 1, 0.0);
+           for (int i = 0; i < landmarks_2D->Count; ++i)
+           {
+               landmarks_2D_mat.at<double>(i, 0) = landmarks_2D[i]->Item1;
+               landmarks_2D_mat.at<double>(i + landmarks_2D->Count, 0) = landmarks_2D[i]->Item2;
+           }
+
+           // Construct an OpenCV matrix from the landmarks
+           cv::Mat_<double> landmarks_3D_mat(landmarks_3D->Count * 3, 1, 0.0);
+           for (int i = 0; i < landmarks_3D->Count; ++i)
+           {
+               landmarks_3D_mat.at<double>(i, 0) = landmarks_3D[i]->Item1;
+               landmarks_3D_mat.at<double>(i + landmarks_3D->Count, 0) = landmarks_3D[i]->Item2;
+               landmarks_3D_mat.at<double>(i + 2 * landmarks_3D->Count, 0) = landmarks_3D[i]->Item3;
+           }
+
+           cv::Vec6d params_global_vec(params_global[0], params_global[1], params_global[2], params_global[3], params_global[4], params_global[5]);
+           cv::Mat_<double> params_local_vec(params_local->Count, 1, 0.0);
+           for (int i = 0; i < params_local->Count; ++i)
+           {
+               params_local_vec.at<double>(i, 0) = params_local[i];
+           }
+
+           m_recorder->SetObservationLandmarks(landmarks_2D_mat, landmarks_3D_mat, params_global_vec, params_local_vec, confidence, success);
+       }
+
        // Finalizer. Definitely called before Garbage Collection,
        // but not automatically called on explicit Dispose().

======================== changed file ========================

@@ -57,8 +57,8 @@ namespace Utilities
    public:
        // The constructor for the recorder, need to specify if we are recording a sequence or not, in_filename should be just the name and not contain extensions
-       RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::vector<std::string>& arguments);
+       RecorderOpenFace(const std::string in_filename, const RecorderOpenFaceParameters& parameters, std::vector<std::string>& arguments);
-       RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::string output_directory, std::string output_name);
+       RecorderOpenFace(const std::string in_filename, const RecorderOpenFaceParameters& parameters, std::string output_directory);
        ~RecorderOpenFace();
@@ -105,7 +105,7 @@ namespace Utilities
        RecorderOpenFace(const RecorderOpenFace&& other);
        RecorderOpenFace(const RecorderOpenFace& other);
-       void PrepareRecording(std::string in_filename);
+       void PrepareRecording(const std::string& in_filename);
        // Keeping track of what to output and how to output it
        const RecorderOpenFaceParameters params;
@@ -113,8 +113,7 @@ namespace Utilities
        // Keep track of the file and output root location
        std::string record_root;
        std::string default_record_directory = "processed"; // By default we are writing in the processed directory in the working directory, if no output parameters provided
-       std::string of_filename;
-       std::string filename;
+       std::string out_name; // Short name, based on which other names are constructed
        std::string csv_filename;
        std::string aligned_output_directory;
        std::ofstream metadata_file;

======================== changed file ========================

@@ -71,14 +71,14 @@ void CreateDirectory(std::string output_path)
    }
}
-void RecorderOpenFace::PrepareRecording(std::string in_filename)
+void RecorderOpenFace::PrepareRecording(const std::string& in_filename)
{
    // Construct the directories required for the output
    CreateDirectory(record_root);
    // Create the filename for the general output file that contains all of the meta information about the recording
-   path of_det_name(filename);
-   of_det_name = path(record_root) / path(filename + "_of_details.txt");
+   path of_det_name(out_name);
+   of_det_name = path(record_root) / path(out_name + "_of_details.txt");
    // Write in the of file what we are outputing what is the input etc.
    metadata_file.open(of_det_name.string(), std::ios_base::out);
@@ -106,16 +106,16 @@ void RecorderOpenFace::PrepareRecording(std::string in_filename)
        metadata_file << "Input:webcam" << endl;
    }
-   metadata_file << "Camera parameters:" << parameters.getFx() << "," << parameters.getFy() << "," << parameters.getCx() << "," << parameters.getCy() << endl;
+   metadata_file << "Camera parameters:" << params.getFx() << "," << params.getFy() << "," << params.getCx() << "," << params.getCy() << endl;
    // Create the required individual recorders, CSV, HOG, aligned, video
-   csv_filename = filename + ".csv";
+   csv_filename = out_name + ".csv";
    // Consruct HOG recorder here
    if (params.outputHOG())
    {
        // Output the data based on record_root, but do not include record_root in the meta file, as it is also in that directory
-       std::string hog_filename = filename + ".hog";
+       std::string hog_filename = out_name + ".hog";
        metadata_file << "Output HOG:" << hog_filename << endl;
        hog_filename = (path(record_root) / hog_filename).string();
        hog_recorder.Open(hog_filename);
@@ -124,16 +124,16 @@ void RecorderOpenFace::PrepareRecording(std::string in_filename)
    // saving the videos
    if (params.outputTracked())
    {
-       if (parameters.isSequence())
+       if (params.isSequence())
        {
            // Output the data based on record_root, but do not include record_root in the meta file, as it is also in that directory
-           this->media_filename = filename + ".avi";
+           this->media_filename = out_name + ".avi";
            metadata_file << "Output video:" << this->media_filename << endl;
            this->media_filename = (path(record_root) / this->media_filename).string();
        }
        else
        {
-           this->media_filename = filename + ".jpg";
+           this->media_filename = out_name + ".jpg";
            metadata_file << "Output image:" << this->media_filename << endl;
            this->media_filename = (path(record_root) / this->media_filename).string();
        }
@@ -142,7 +142,7 @@ void RecorderOpenFace::PrepareRecording(std::string in_filename)
    // Prepare image recording
    if (params.outputAlignedFaces())
    {
-       aligned_output_directory = filename + "_aligned";
+       aligned_output_directory = out_name + "_aligned";
        metadata_file << "Output aligned directory:" << this->aligned_output_directory << endl;
        this->aligned_output_directory = (path(record_root) / this->aligned_output_directory).string();
        CreateDirectory(aligned_output_directory);
@@ -152,17 +152,17 @@ void RecorderOpenFace::PrepareRecording(std::string in_filename)
}
-RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::vector<std::string>& arguments):video_writer(), params(parameters)
+RecorderOpenFace::RecorderOpenFace(const std::string in_filename, const RecorderOpenFaceParameters& parameters, std::vector<std::string>& arguments):video_writer(), params(parameters)
{
    // From the filename, strip out the name without directory and extension
    if (boost::filesystem::is_directory(in_filename))
    {
-       filename = boost::filesystem::canonical(boost::filesystem::path(in_filename)).filename().string();
+       out_name = boost::filesystem::canonical(boost::filesystem::path(in_filename)).filename().string();
    }
    else
    {
-       filename = boost::filesystem::path(in_filename).filename().replace_extension("").string();
+       out_name = boost::filesystem::path(in_filename).filename().replace_extension("").string();
    }
    // Consuming the input arguments
@@ -188,7 +188,7 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
        if (!output_found && arguments[i].compare("-of") == 0)
        {
            record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).remove_filename().string();
-           filename = path(boost::filesystem::path(arguments[i + 1])).replace_extension("").filename().string();
+           out_name = path(boost::filesystem::path(arguments[i + 1])).replace_extension("").filename().string();
            valid[i] = false;
            valid[i + 1] = false;
            i++;
@@ -208,29 +208,28 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
        }
    }
-   PrepareRecording();
+   PrepareRecording(in_filename);
}
-RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFaceParameters parameters, std::string output_directory, std::string output_name)
+RecorderOpenFace::RecorderOpenFace(const std::string in_filename, const RecorderOpenFaceParameters& parameters, std::string output_directory):video_writer(), params(parameters)
{
    // From the filename, strip out the name without directory and extension
    if (boost::filesystem::is_directory(in_filename))
    {
-       filename = boost::filesystem::canonical(boost::filesystem::path(in_filename)).filename().string();
+       out_name = boost::filesystem::canonical(boost::filesystem::path(in_filename)).filename().string();
    }
    else
    {
-       filename = boost::filesystem::path(in_filename).filename().replace_extension("").string();
+       out_name = boost::filesystem::path(in_filename).filename().replace_extension("").string();
    }
    record_root = output_directory;
-   filename = output_name;
    // If recording directory not set, record to default location
    if (record_root.empty())
        record_root = default_record_directory;
-   PrepareRecording();
+   PrepareRecording(in_filename);
}

======================== changed file ========================

@@ -138,7 +138,7 @@ RecorderOpenFaceParameters::RecorderOpenFaceParameters(std::vector<std::string>
RecorderOpenFaceParameters::RecorderOpenFaceParameters(bool sequence, bool is_from_webcam, bool output_2D_landmarks, bool output_3D_landmarks,
    bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze, bool output_hog, bool output_tracked,
-   bool output_aligned_faces, float fx = -1, float fy = -1, float cx = -1, float cy = -1, double fps_vid_out = 30)
+   bool output_aligned_faces, float fx, float fy, float cx, float cy, double fps_vid_out)
{
    this->is_sequence = sequence;
    this->is_from_webcam = is_from_webcam;
@@ -158,13 +158,13 @@ RecorderOpenFaceParameters::RecorderOpenFaceParameters(bool sequence, bool is_fr
    // Default output code
    this->output_codec = "DIVX";
-   this->output2DLandmarks = output_2D_landmarks;
-   this->output3DLandmarks = output_3D_landmarks;
-   this->outputPDMParams = output_model_params;
-   this->outputPose = output_pose;
-   this->outputAUs = output_AUs;
-   this->outputGaze = output_gaze;
-   this->outputHOG = output_hog;
-   this->outputTracked = output_tracked;
-   this->outputAlignedFaces = output_aligned_faces;
+   this->output_2D_landmarks = output_2D_landmarks;
+   this->output_3D_landmarks = output_3D_landmarks;
+   this->output_model_params = output_model_params;
+   this->output_pose = output_pose;
+   this->output_AUs = output_AUs;
+   this->output_gaze = output_gaze;
+   this->output_hog = output_hog;
+   this->output_tracked = output_tracked;
+   this->output_aligned_faces = output_aligned_faces;
}
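With the default arguments removed from this out-of-line definition (defaults are normally given once, in the header declaration), every caller supplies the full argument list explicitly; from the managed side that looks roughly like this (values illustrative, parameter order as in the wrapper above):

    // Sketch: explicit intrinsics and output frame rate are always passed through.
    var rec_params = new RecorderOpenFaceParameters(
        true, false,                   // a video sequence, not from a webcam
        true, true, true, true,        // 2D landmarks, 3D landmarks, model params, pose
        true, true, true, true, true,  // AUs, gaze, HOG, tracked video, aligned faces
        fx, fy, cx, cy, 30.0);         // camera intrinsics and fps of the output video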