Simplification of Recording in WPF through a separate file and data binding (WIP).
parent ee48946818
commit 99191fd3c9
10 changed files with 384 additions and 417 deletions
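In short: the per-option check-box click handlers and the StreamWriter plumbing in MainWindow give way to public boolean properties that the XAML menu items bind to directly, while the CSV writing moves into the new Recorder class added below. A minimal sketch of the binding pattern, assuming the names used in this diff (illustrative only, not the full MainWindow):

public partial class MainWindow : System.Windows.Window
{
    // Bound from XAML, e.g. <MenuItem IsCheckable="True" Header="Record AUs" IsChecked="{Binding RecordAUs}"/>
    public bool RecordAUs { get; set; }
    public bool RecordGaze { get; set; }

    Recorder recorder; // the new class that owns the output file (see Recorder.cs below)

    public MainWindow()
    {
        InitializeComponent();
        DataContext = this; // lets the {Binding ...} menu items resolve against this window

        // Default recording options
        RecordAUs = true;
        RecordGaze = true;
    }
}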
|
@ -225,7 +225,7 @@ void visualise_tracking(cv::Mat& captured_image, const LandmarkDetector::CLNF& f
|
|||
|
||||
void prepareOutputFile(std::ofstream* output_file, bool output_2D_landmarks, bool output_3D_landmarks,
|
||||
bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze,
|
||||
int num_landmarks, int num_model_modes, vector<string> au_names_class, vector<string> au_names_reg);
|
||||
int num_landmarks, int num_eye_lmks, int num_model_modes, vector<string> au_names_class, vector<string> au_names_reg);
|
||||
|
||||
// Output all of the information into one file in one go (quite a few parameters, but simplifies the flow)
|
||||
void outputAllFeatures(std::ofstream* output_file, bool output_2D_landmarks, bool output_3D_landmarks,
|
||||
|
@ -476,7 +476,8 @@ int main (int argc, char **argv)
|
|||
if (!output_files.empty())
|
||||
{
|
||||
output_file.open(output_files[f_n], ios_base::out);
|
||||
prepareOutputFile(&output_file, output_2D_landmarks, output_3D_landmarks, output_model_params, output_pose, output_AUs, output_gaze, face_model.pdm.NumberOfPoints(), face_model.pdm.NumberOfModes(), face_analyser.GetAUClassNames(), face_analyser.GetAURegNames());
|
||||
prepareOutputFile(&output_file, output_2D_landmarks, output_3D_landmarks, output_model_params, output_pose, output_AUs, output_gaze, face_model.pdm.NumberOfPoints(),
|
||||
LandmarkDetector::CalculateEyeLandmarks(face_model).size(), face_model.pdm.NumberOfModes(), face_analyser.GetAUClassNames(), face_analyser.GetAURegNames());
|
||||
}
|
||||
|
||||
// Saving the HOG features
|
||||
|
@ -557,6 +558,7 @@ int main (int argc, char **argv)
|
|||
detection_success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_model, det_parameters);
|
||||
}
|
||||
|
||||
|
||||
// Work out the pose of the head from the tracked model
|
||||
cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, fx, fy, cx, cy);
|
||||
|
||||
|
@ -842,7 +844,7 @@ void post_process_output_file(FaceAnalysis::FaceAnalyser& face_analyser, string
|
|||
|
||||
void prepareOutputFile(std::ofstream* output_file, bool output_2D_landmarks, bool output_3D_landmarks,
|
||||
bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze,
|
||||
int num_landmarks, int num_model_modes, vector<string> au_names_class, vector<string> au_names_reg)
|
||||
int num_landmarks, int num_eye_lmks, int num_model_modes, vector<string> au_names_class, vector<string> au_names_reg)
|
||||
{
|
||||
|
||||
*output_file << "frame, timestamp, confidence, success";
|
||||
|
@ -850,6 +852,16 @@ void prepareOutputFile(std::ofstream* output_file, bool output_2D_landmarks, boo
|
|||
if (output_gaze)
|
||||
{
|
||||
*output_file << ", gaze_0_x, gaze_0_y, gaze_0_z, gaze_1_x, gaze_1_y, gaze_1_z, gaze_angle_x, gaze_angle_y";
|
||||
|
||||
// Also output eye-landmarks as they are needed for gaze visualization etc.
|
||||
for (int i = 0; i < num_eye_lmks; ++i)
|
||||
{
|
||||
*output_file << ", eye_lmk_x_" << i;
|
||||
}
|
||||
for (int i = 0; i < num_eye_lmks; ++i)
|
||||
{
|
||||
*output_file << ", eye_lmk_y_" << i;
|
||||
}
|
||||
}
|
||||
|
||||
if (output_pose)
|
||||
|
@ -938,6 +950,31 @@ void outputAllFeatures(std::ofstream* output_file, bool output_2D_landmarks, boo
|
|||
*output_file << ", " << gazeDirection0.x << ", " << gazeDirection0.y << ", " << gazeDirection0.z
|
||||
<< ", " << gazeDirection1.x << ", " << gazeDirection1.y << ", " << gazeDirection1.z
|
||||
<< ", " << gaze_angle[0] << ", " << gaze_angle[1];
|
||||
|
||||
// Output gaze landmarks
|
||||
vector<cv::Point2d> eye_lmks = LandmarkDetector::CalculateEyeLandmarks(face_model);
|
||||
for (size_t i = 0; i < eye_lmks.size(); ++i)
|
||||
{
|
||||
if (face_model.tracking_initialised)
|
||||
{
|
||||
*output_file << ", " << eye_lmks[i].x;
|
||||
}
|
||||
else
|
||||
{
|
||||
*output_file << ", 0";
|
||||
}
|
||||
}
|
||||
for (size_t i = 0; i < eye_lmks.size(); ++i)
|
||||
{
|
||||
if (face_model.tracking_initialised)
|
||||
{
|
||||
*output_file << ", " << eye_lmks[i].y;
|
||||
}
|
||||
else
|
||||
{
|
||||
*output_file << ", 0";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*output_file << std::setprecision(4);
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
<local:AxesTimeSeriesPlot NumVertGrid="5" x:Name="headPosePlot" ShowLegend="True" MinVal="-1" MaxVal="1" MinHeight="180" Grid.Row="4" Grid.Column="0" Padding="60 20 30 40" RangeLabel="Head pose" Orientation="Horizontal">
|
||||
</local:AxesTimeSeriesPlot>
|
||||
|
||||
<local:AxesTimeSeriesPlot MinHeight="180" Grid.Row="4" Grid.Column="1" Padding="60 20 30 40" x:Name="gazePlot" ShowLegend="True" RangeLabel="Eye gaze" Orientation="Horizontal" MinVal="-1" MaxVal="1" NumVertGrid="5">
|
||||
<local:AxesTimeSeriesPlot MinHeight="180" Grid.Row="4" Grid.Column="1" Padding="60 20 30 40" x:Name="gazePlot" ShowLegend="True" RangeLabel="Eye gaze" Orientation="Horizontal" MinVal="-20" MaxVal="20" NumVertGrid="5">
|
||||
</local:AxesTimeSeriesPlot>
|
||||
|
||||
<local:AxesTimeSeriesPlot Grid.Column="2" Grid.Row="1" MinHeight="130" ShowXLabel="False" Padding="60 20 30 10" x:Name="smilePlot" ShowLegend="True" XTicks="False" RangeLabel="Lips" Orientation="Horizontal" MinVal="0" MaxVal="1" NumVertGrid="5">
|
||||
|
|
|
@ -273,15 +273,7 @@ namespace OpenFaceDemo
|
|||
List<Tuple<double, double>> landmarks = null;
|
||||
List<Tuple<double, double>> eye_landmarks = null;
|
||||
List<Tuple<Point, Point>> gaze_lines = null;
|
||||
var gaze = face_analyser.GetGazeCamera();
|
||||
|
||||
// Get the rough gaze angle
|
||||
double x_gaze = (Math.Atan2(gaze.Item1.Item1, -gaze.Item1.Item3) + Math.Atan2(gaze.Item2.Item1, -gaze.Item2.Item3))/2.0;
|
||||
double y_gaze = (Math.Atan2(gaze.Item1.Item2, -gaze.Item1.Item3) + Math.Atan2(gaze.Item2.Item2, -gaze.Item2.Item3)) / 2.0;
|
||||
|
||||
// Scaling for clearer vis.
|
||||
x_gaze *= 2.5;
|
||||
y_gaze *= 2.5;
|
||||
Tuple<double, double> gaze_angle = face_analyser.GetGazeAngle();
|
||||
|
||||
if (detectionSucceeding)
|
||||
{
|
||||
|
@ -335,9 +327,9 @@ namespace OpenFaceDemo
|
|||
headPosePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = poseDict, Confidence = confidence });
|
||||
|
||||
Dictionary<int, double> gazeDict = new Dictionary<int, double>();
|
||||
gazeDict[0] = x_gaze;
|
||||
gazeDict[0] = gaze_angle.Item1 * (180.0 / Math.PI);
|
||||
gazeDict[0] = 0.5 * old_gaze_x + 0.5 * gazeDict[0];
|
||||
gazeDict[1] = -y_gaze;
|
||||
gazeDict[1] = -gaze_angle.Item2 * (180.0 / Math.PI);
|
||||
gazeDict[1] = 0.5 * old_gaze_y + 0.5 * gazeDict[1];
|
||||
gazePlot.AddDataPoint(new DataPointGraph() { Time = CurrentTime, values = gazeDict, Confidence = confidence });
|
||||
|
||||
|
|
|
@ -30,30 +30,27 @@
|
|||
</MenuItem>
|
||||
</MenuItem>
|
||||
<MenuItem Name="RecordingMenu" Header="Record">
|
||||
<MenuItem Name="RecordAUCheckBox" IsCheckable="True" Header="Record AUs" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordPoseCheckBox" IsCheckable="True" Header="Record pose" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordLandmarks2DCheckBox" IsCheckable="True" Header="Record 2D landmarks" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordGazeCheckBox" IsCheckable="True" Header="Record gaze" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordLandmarks3DCheckBox" IsCheckable="True" Header="Record 3D landmarks" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordHOGCheckBox" IsCheckable="True" Header="Record HOG" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordParamsCheckBox" IsCheckable="True" Header="Record model parameters" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordAlignedCheckBox" IsCheckable="True" Header="Record aligned faces" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem Name="RecordTrackedVidCheckBox" IsCheckable="True" Header="Record tracked video" Click="recordCheckBox_click"></MenuItem>
|
||||
<MenuItem IsCheckable="True" Header="Record AUs" IsChecked="{Binding RecordAUs}"/>
|
||||
<MenuItem IsCheckable="True" Header="Record pose" IsChecked="{Binding RecordPose}"/>
|
||||
<MenuItem IsCheckable="True" Header="Record 2D landmarks" IsChecked="{Binding Record2DLandmarks}" />
|
||||
<MenuItem IsCheckable="True" Header="Record gaze" IsChecked="{Binding RecordGaze}"/>
|
||||
<MenuItem IsCheckable="True" Header="Record 3D landmarks" IsChecked="{Binding Record3DLandmarks}"/>
|
||||
<MenuItem IsCheckable="True" Header="Record HOG" IsChecked="{Binding RecordHOG}"/>
|
||||
<MenuItem IsCheckable="True" Header="Record model parameters" IsChecked="{Binding RecordModelParameters}" />
|
||||
<MenuItem IsCheckable="True" Header="Record aligned faces" IsChecked="{Binding RecordAligned}"/>
|
||||
</MenuItem>
|
||||
<MenuItem Name="SettingsMenu" Header="Recording settings">
|
||||
<MenuItem Name="OutputLocationItem" Header="Set output location..." Click="OutputLocationItem_Click" ></MenuItem>
|
||||
<MenuItem Header="Set output image size..." Click="setOutputImageSize_Click"></MenuItem>
|
||||
</MenuItem>
|
||||
<MenuItem Header="AU settings">
|
||||
<MenuItem Name="UseDynamicModelsCheckBox" IsChecked="True" IsCheckable="True" Header="Use dynamic models" Click="UseDynamicModelsCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="UseDynamicShiftingCheckBox" IsCheckable="True" Header="Use dynamic shifting" Click="UseDynamicModelsCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="UseDynamicScalingCheckBox" IsCheckable="True" Header="Use dynamic scaling" Click="UseDynamicModelsCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="AUSetting" Header="AU settings">
|
||||
<MenuItem IsCheckable="True" Header="Use dynamic models" Click="UseDynamicModelsCheckBox_Click" IsChecked="{Binding DynamicAUModels}"/>
|
||||
</MenuItem>
|
||||
<MenuItem Header="View">
|
||||
<MenuItem Name="ShowVideoCheckBox" IsChecked="True" IsCheckable="True" Header="Show Video" Click="VisualisationCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="ShowAppearanceFeaturesCheckBox" IsChecked="True" IsCheckable="True" Header="Show Appearance" Click="VisualisationCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="ShowGeometryFeaturesCheckBox" IsChecked="True" IsCheckable="True" Header="Show Geometry" Click="VisualisationCheckBox_Click"></MenuItem>
|
||||
<MenuItem Name="ShowAUsCheckBox" IsChecked="True" IsCheckable="True" Header="Show AUs" Click="VisualisationCheckBox_Click"></MenuItem>
|
||||
<MenuItem IsCheckable="True" Header="Show Video" Click="VisualisationChange" IsChecked="{Binding ShowVideo}"/>
|
||||
<MenuItem IsCheckable="True" Header="Show Appearance" Click="VisualisationChange" IsChecked="{Binding ShowAppearance}"/>
|
||||
<MenuItem IsCheckable="True" Header="Show Geometry" Click="VisualisationChange" IsChecked="{Binding ShowGeometry}"/>
|
||||
<MenuItem IsCheckable="True" Header="Show AUs" Click="VisualisationChange" IsChecked="{Binding ShowAUs}"/>
|
||||
</MenuItem>
|
||||
</Menu>
|
||||
|
||||
|
|
|
@ -131,23 +131,22 @@ namespace OpenFaceOffline
|
|||
FaceAnalyserManaged face_analyser;
|
||||
|
||||
// Recording parameters (default values)
|
||||
bool record_HOG = false; // HOG features extracted from face images
|
||||
bool record_aligned = false; // aligned face images
|
||||
bool record_tracked_vid = false;
|
||||
Recorder recorder;
|
||||
|
||||
// Check which things need to be recorded
|
||||
bool record_2D_landmarks = true;
|
||||
bool record_3D_landmarks = false;
|
||||
bool record_model_params = true;
|
||||
bool record_pose = true;
|
||||
bool record_AUs = true;
|
||||
bool record_gaze = true;
|
||||
public bool RecordAligned { get; set; } // Aligned face images
|
||||
public bool RecordHOG { get; set; } // HOG features extracted from face images
|
||||
public bool Record2DLandmarks { get; set; } // 2D locations of facial landmarks (in pixels)
|
||||
public bool Record3DLandmarks { get; set; } // 3D locations of facial landmarks (in millimetres)
|
||||
public bool RecordModelParameters { get; set; } // Facial shape parameters (rigid and non-rigid geometry)
|
||||
public bool RecordPose { get; set; } // Head pose (position and orientation)
|
||||
public bool RecordAUs { get; set; } // Facial action units
|
||||
public bool RecordGaze { get; set; } // Eye gaze
|
||||
|
||||
// Visualisation options
|
||||
bool show_tracked_video = true;
|
||||
bool show_appearance = true;
|
||||
bool show_geometry = true;
|
||||
bool show_aus = true;
|
||||
public bool ShowTrackedVideo { get; set; } // Tracked video with overlaid landmarks
|
||||
public bool ShowAppearance { get; set; } // Aligned face and HOG visualisation
|
||||
public bool ShowGeometry { get; set; } // Head pose and gaze angle read-outs
|
||||
public bool ShowAUs { get; set; } // Action unit graphs
|
||||
|
||||
int image_output_size = 112;
|
||||
|
||||
|
@ -155,51 +154,38 @@ namespace OpenFaceOffline
|
|||
|
||||
// TODO indication that track is done
|
||||
|
||||
// The recording managers, TODO they should be all one
|
||||
StreamWriter output_features_file;
|
||||
|
||||
// Where the recording is done (by default in a record directory, from where the application was executed), TODO maybe the same folder as input?
|
||||
String record_root = "./record";
|
||||
|
||||
// For AU visualisation and output
|
||||
List<String> au_class_names;
|
||||
List<String> au_reg_names;
|
||||
|
||||
// For AU prediction
|
||||
bool dynamic_AU_shift = true;
|
||||
bool dynamic_AU_scale = false;
|
||||
bool use_dynamic_models = true;
|
||||
public bool DynamicAUModels { get; set; }
|
||||
|
||||
public MainWindow()
|
||||
{
|
||||
InitializeComponent();
|
||||
this.DataContext = this; // For WPF data binding
|
||||
|
||||
// Set the icon
|
||||
Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);
|
||||
this.Icon = BitmapFrame.Create(iconUri);
|
||||
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 2000), (Action)(() =>
|
||||
{
|
||||
RecordAUCheckBox.IsChecked = record_AUs;
|
||||
RecordAlignedCheckBox.IsChecked = record_aligned;
|
||||
RecordTrackedVidCheckBox.IsChecked = record_tracked_vid;
|
||||
RecordHOGCheckBox.IsChecked = record_HOG;
|
||||
RecordGazeCheckBox.IsChecked = record_gaze;
|
||||
RecordLandmarks2DCheckBox.IsChecked = record_2D_landmarks;
|
||||
RecordLandmarks3DCheckBox.IsChecked = record_3D_landmarks;
|
||||
RecordParamsCheckBox.IsChecked = record_model_params;
|
||||
RecordPoseCheckBox.IsChecked = record_pose;
|
||||
// Setup the default features that will be recorded
|
||||
Record2DLandmarks = true; Record3DLandmarks = true; RecordModelParameters = true;
|
||||
RecordGaze = true; RecordAUs = true; RecordPose = true;
|
||||
RecordAligned = false; RecordHOG = false;
|
||||
|
||||
UseDynamicModelsCheckBox.IsChecked = use_dynamic_models;
|
||||
UseDynamicScalingCheckBox.IsChecked = dynamic_AU_scale;
|
||||
UseDynamicShiftingCheckBox.IsChecked = dynamic_AU_shift;
|
||||
}));
|
||||
ShowTrackedVideo = true;
|
||||
ShowAppearance = true;
|
||||
ShowGeometry = true;
|
||||
ShowAUs = true;
|
||||
|
||||
DynamicAUModels = true;
|
||||
|
||||
String root = AppDomain.CurrentDomain.BaseDirectory;
|
||||
|
||||
clnf_params = new FaceModelParameters(root, false);
|
||||
clnf_model = new CLNF(clnf_params);
|
||||
face_analyser = new FaceAnalyserManaged(root, use_dynamic_models, image_output_size);
|
||||
face_analyser = new FaceAnalyserManaged(root, DynamicAUModels, image_output_size);
|
||||
|
||||
}
|
||||
|
||||
|
@ -212,11 +198,13 @@ namespace OpenFaceOffline
|
|||
|
||||
thread_running = true;
|
||||
|
||||
// Grab the boolean values of the check-boxes
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
{
|
||||
ResetButton.IsEnabled = true;
|
||||
PauseButton.IsEnabled = true;
|
||||
StopButton.IsEnabled = true;
|
||||
|
||||
}));
|
||||
|
||||
// Create the video capture and call the VideoLoop
|
||||
|
@ -234,18 +222,13 @@ namespace OpenFaceOffline
|
|||
|
||||
if (capture.isOpened())
|
||||
{
|
||||
// Prepare recording if any based on the directory
|
||||
// Prepare recording if any based on the directory, TODO move this
|
||||
String file_no_ext = System.IO.Path.GetDirectoryName(filenames[0]);
|
||||
file_no_ext = System.IO.Path.GetFileName(file_no_ext);
|
||||
|
||||
SetupRecording(record_root, file_no_ext, capture.width, capture.height, record_2D_landmarks, record_2D_landmarks, record_model_params, record_pose, record_AUs, record_gaze);
|
||||
|
||||
// Start the actual processing
|
||||
VideoLoop();
|
||||
|
||||
// Clear up the recording
|
||||
StopRecording();
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -305,16 +288,12 @@ namespace OpenFaceOffline
|
|||
|
||||
if (capture.isOpened())
|
||||
{
|
||||
// Prepare recording if any
|
||||
// Prepare recording if any TODO move this
|
||||
String file_no_ext = System.IO.Path.GetFileNameWithoutExtension(filename);
|
||||
|
||||
SetupRecording(record_root, file_no_ext, capture.width, capture.height, record_2D_landmarks, record_3D_landmarks, record_model_params, record_pose, record_AUs, record_gaze);
|
||||
|
||||
// Start the actual processing
|
||||
VideoLoop();
|
||||
|
||||
// Clear up the recording
|
||||
StopRecording();
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -391,9 +370,9 @@ namespace OpenFaceOffline
|
|||
}
|
||||
|
||||
// Visualisation
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
if (ShowTrackedVideo)
|
||||
{
|
||||
if (show_tracked_video)
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
{
|
||||
if (latest_img == null)
|
||||
{
|
||||
|
@ -410,10 +389,9 @@ namespace OpenFaceOffline
|
|||
video.OverlayLines = new List<Tuple<Point, Point>>();
|
||||
|
||||
video.OverlayPoints = landmark_points;
|
||||
}
|
||||
|
||||
}));
|
||||
|
||||
}
|
||||
latest_img = null;
|
||||
}
|
||||
|
||||
|
@ -421,6 +399,7 @@ namespace OpenFaceOffline
|
|||
// Capturing and processing the video frame by frame
|
||||
private void VideoLoop()
|
||||
{
|
||||
|
||||
Thread.CurrentThread.IsBackground = true;
|
||||
|
||||
DateTime? startTime = CurrentTime;
|
||||
|
@ -431,10 +410,18 @@ namespace OpenFaceOffline
|
|||
face_analyser.Reset();
|
||||
|
||||
// TODO add an ability to change these through a calibration procedure or setting menu
|
||||
double fx, fy, cx, cy;
|
||||
fx = 500.0;
|
||||
fy = 500.0;
|
||||
cx = cy = -1;
|
||||
double fx = 500.0 * (capture.width / 640.0);
|
||||
double fy = 500.0 * (capture.height / 480.0);
|
||||
|
||||
fx = (fx + fy) / 2.0;
|
||||
fy = fx;
|
||||
|
||||
double cx = capture.width / 2f;
|
||||
double cy = capture.height / 2f;
|
||||
|
||||
// Setup the recorder first, TODO change
|
||||
recorder = new Recorder(record_root, "test.txt", capture.width, capture.height, Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose,
|
||||
RecordAUs, RecordGaze, RecordAligned, RecordHOG, clnf_model, face_analyser, fx, fy, cx, cy);
|
||||
|
||||
int frame_id = 0;
|
||||
|
||||
|
@ -470,19 +457,6 @@ namespace OpenFaceOffline
|
|||
continue;
|
||||
}
|
||||
|
||||
// This is more or less guesswork, but seems to work well enough
|
||||
if (cx == -1)
|
||||
{
|
||||
fx = fx * (grayFrame.Width / 640.0);
|
||||
fy = fy * (grayFrame.Height / 480.0);
|
||||
|
||||
fx = (fx + fy) / 2.0;
|
||||
fy = fx;
|
||||
|
||||
cx = grayFrame.Width / 2f;
|
||||
cy = grayFrame.Height / 2f;
|
||||
}
|
||||
|
||||
bool detectionSucceeding = ProcessFrame(clnf_model, clnf_params, frame, grayFrame, fx, fy, cx, cy);
|
||||
|
||||
double scale = clnf_model.GetRigidParams()[0];
|
||||
|
@ -498,11 +472,8 @@ namespace OpenFaceOffline
|
|||
clnf_model.GetPose(pose, fx, fy, cx, cy);
|
||||
List<double> non_rigid_params = clnf_model.GetNonRigidParams();
|
||||
|
||||
// The face analysis step (only done if recording AUs, HOGs or video)
|
||||
if (record_AUs || record_HOG || record_aligned || show_aus || show_appearance || record_tracked_vid || record_gaze)
|
||||
{
|
||||
face_analyser.AddNextFrame(frame, clnf_model, fx, fy, cx, cy, false, show_appearance, record_tracked_vid);
|
||||
}
|
||||
// The face analysis step (for AUs and eye gaze)
|
||||
face_analyser.AddNextFrame(frame, clnf_model, fx, fy, cx, cy, false, ShowAppearance, false); // TODO change
|
||||
|
||||
List<Tuple<Point, Point>> lines = null;
|
||||
List<Tuple<double, double>> landmarks = null;
|
||||
|
@ -522,7 +493,7 @@ namespace OpenFaceOffline
|
|||
// Visualisation
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
{
|
||||
if (show_aus)
|
||||
if (ShowAUs)
|
||||
{
|
||||
var au_classes = face_analyser.GetCurrentAUsClass();
|
||||
var au_regs = face_analyser.GetCurrentAUsReg();
|
||||
|
@ -542,7 +513,7 @@ namespace OpenFaceOffline
|
|||
auRegGraph.Update(au_regs_scaled);
|
||||
}
|
||||
|
||||
if (show_geometry)
|
||||
if (ShowGeometry)
|
||||
{
|
||||
int yaw = (int)(pose[4] * 180 / Math.PI + 0.5);
|
||||
int roll = (int)(pose[5] * 180 / Math.PI + 0.5);
|
||||
|
@ -565,7 +536,7 @@ namespace OpenFaceOffline
|
|||
GazeYLabel.Content = y_angle;
|
||||
}
|
||||
|
||||
if (show_tracked_video)
|
||||
if (ShowTrackedVideo)
|
||||
{
|
||||
if (latest_img == null)
|
||||
{
|
||||
|
@ -609,7 +580,7 @@ namespace OpenFaceOffline
|
|||
}
|
||||
}
|
||||
|
||||
if (show_appearance)
|
||||
if (ShowAppearance)
|
||||
{
|
||||
RawImage aligned_face = face_analyser.GetLatestAlignedFace();
|
||||
RawImage hog_face = face_analyser.GetLatestHOGDescriptorVisualisation();
|
||||
|
@ -628,9 +599,7 @@ namespace OpenFaceOffline
|
|||
}
|
||||
}));
|
||||
|
||||
// Recording the tracked model
|
||||
RecordFrame(clnf_model, detectionSucceeding, frame_id + 1, frame, grayFrame, ((double)frame_id) / fps,
|
||||
record_2D_landmarks, record_2D_landmarks, record_model_params, record_pose, record_AUs, record_gaze, fx, fy, cx, cy);
|
||||
recorder.RecordFrame(clnf_model, face_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);
|
||||
|
||||
if (reset)
|
||||
{
|
||||
|
@ -662,6 +631,8 @@ namespace OpenFaceOffline
|
|||
PauseButton_Click(null, null);
|
||||
}));
|
||||
}
|
||||
|
||||
recorder.FinishRecording(clnf_model, face_analyser);
|
||||
}
|
||||
|
||||
private void StopTracking()
|
||||
|
@ -695,265 +666,25 @@ namespace OpenFaceOffline
|
|||
}
|
||||
|
||||
|
||||
// ----------------------------------------------------------
|
||||
// Recording helpers
|
||||
|
||||
private void SetupRecording(String root, String filename, int width, int height, bool output_2D_landmarks, bool output_3D_landmarks,
|
||||
bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze)
|
||||
{
|
||||
// Disallow changing recording settings when the recording starts, TODO move this up a bit
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
{
|
||||
RecordingMenu.IsEnabled = false;
|
||||
UseDynamicModelsCheckBox.IsEnabled = false;
|
||||
}));
|
||||
|
||||
if (!System.IO.Directory.Exists(root))
|
||||
{
|
||||
System.IO.Directory.CreateDirectory(root);
|
||||
}
|
||||
|
||||
output_features_file = new StreamWriter(root + "/" + filename + ".txt");
|
||||
output_features_file.Write("frame, timestamp, confidence, success");
|
||||
|
||||
if (output_gaze)
|
||||
output_features_file.Write(", gaze_0_x, gaze_0_y, gaze_0_z, gaze_1_x, gaze_1_y, gaze_1_z");
|
||||
|
||||
if (output_pose)
|
||||
output_features_file.Write(", pose_Tx, pose_Ty, pose_Tz, pose_Rx, pose_Ry, pose_Rz");
|
||||
|
||||
if (output_2D_landmarks)
|
||||
{
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", x_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", y_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_3D_landmarks)
|
||||
{
|
||||
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", X_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", Y_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", Z_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_model_params)
|
||||
{
|
||||
output_features_file.Write(", p_scale, p_rx, p_ry, p_rz, p_tx, p_ty");
|
||||
for (int i = 0; i < clnf_model.GetNumModes(); ++i)
|
||||
{
|
||||
output_features_file.Write(", p_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_AUs)
|
||||
{
|
||||
|
||||
au_reg_names = face_analyser.GetRegActionUnitsNames();
|
||||
au_reg_names.Sort();
|
||||
foreach (var name in au_reg_names)
|
||||
{
|
||||
output_features_file.Write(", " + name + "_r");
|
||||
}
|
||||
|
||||
au_class_names = face_analyser.GetClassActionUnitsNames();
|
||||
au_class_names.Sort();
|
||||
foreach (var name in au_class_names)
|
||||
{
|
||||
output_features_file.Write(", " + name + "_c");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
output_features_file.WriteLine();
|
||||
|
||||
|
||||
if (record_aligned)
|
||||
{
|
||||
String aligned_root = root + "/" + filename + "_aligned/";
|
||||
System.IO.Directory.CreateDirectory(aligned_root);
|
||||
face_analyser.SetupAlignedImageRecording(aligned_root);
|
||||
}
|
||||
|
||||
if (record_tracked_vid)
|
||||
{
|
||||
String vid_loc = root + "/" + filename + ".avi";
|
||||
System.IO.Directory.CreateDirectory(root);
|
||||
face_analyser.SetupTrackingRecording(vid_loc, width, height, 30);
|
||||
}
|
||||
|
||||
if (record_HOG)
|
||||
{
|
||||
String filename_HOG = root + "/" + filename + ".hog";
|
||||
face_analyser.SetupHOGRecording(filename_HOG);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void StopRecording()
|
||||
{
|
||||
if (output_features_file != null)
|
||||
output_features_file.Close();
|
||||
|
||||
if (record_HOG)
|
||||
face_analyser.StopHOGRecording();
|
||||
|
||||
if (record_tracked_vid)
|
||||
face_analyser.StopTrackingRecording();
|
||||
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
|
||||
{
|
||||
RecordingMenu.IsEnabled = true;
|
||||
UseDynamicModelsCheckBox.IsEnabled = true;
|
||||
|
||||
}));
|
||||
|
||||
}
|
||||
|
||||
// Recording the relevant objects
|
||||
private void RecordFrame(CLNF clnf_model, bool success, int frame_ind, RawImage frame, RawImage grayscale_frame, double time_stamp, bool output_2D_landmarks, bool output_3D_landmarks,
|
||||
bool output_model_params, bool output_pose, bool output_AUs, bool output_gaze, double fx, double fy, double cx, double cy)
|
||||
{
|
||||
// Making sure that full stop is used instead of a comma for data recording
|
||||
System.Globalization.CultureInfo customCulture = (System.Globalization.CultureInfo)System.Threading.Thread.CurrentThread.CurrentCulture.Clone();
|
||||
customCulture.NumberFormat.NumberDecimalSeparator = ".";
|
||||
|
||||
System.Threading.Thread.CurrentThread.CurrentCulture = customCulture;
|
||||
|
||||
double confidence = (-clnf_model.GetConfidence()) / 2.0 + 0.5;
|
||||
|
||||
List<double> pose = new List<double>();
|
||||
clnf_model.GetPose(pose, fx, fy, cx, cy);
|
||||
|
||||
output_features_file.Write(String.Format("{0}, {1}, {2:F3}, {3}", frame_ind, time_stamp, confidence, success ? 1 : 0));
|
||||
|
||||
if (output_gaze)
|
||||
{
|
||||
var gaze = face_analyser.GetGazeCamera();
|
||||
var gaze_angle = face_analyser.GetGazeAngle();
|
||||
|
||||
output_features_file.Write(String.Format(", {0:F5}, {1:F5}, {2:F5}, {3:F5}, {4:F5}, {5:F5}, {6:F5}, {7:F5}", gaze.Item1.Item1, gaze.Item1.Item2, gaze.Item1.Item3,
|
||||
gaze.Item2.Item1, gaze.Item2.Item2, gaze.Item2.Item3, gaze_angle.Item1, gaze_angle.Item2));
|
||||
}
|
||||
|
||||
if (output_pose)
|
||||
output_features_file.Write(String.Format(", {0:F3}, {1:F3}, {2:F3}, {3:F3}, {4:F3}, {5:F3}", pose[0], pose[1], pose[2], pose[3], pose[4], pose[5]));
|
||||
|
||||
if (output_2D_landmarks)
|
||||
{
|
||||
List<Tuple<double, double>> landmarks_2d = clnf_model.CalculateLandmarks();
|
||||
|
||||
for (int i = 0; i < landmarks_2d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_2d[i].Item1);
|
||||
|
||||
for (int i = 0; i < landmarks_2d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_2d[i].Item2);
|
||||
}
|
||||
|
||||
if (output_3D_landmarks)
|
||||
{
|
||||
List<System.Windows.Media.Media3D.Point3D> landmarks_3d = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].X);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].Y);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].Z);
|
||||
}
|
||||
|
||||
if (output_model_params)
|
||||
{
|
||||
List<double> all_params = clnf_model.GetParams();
|
||||
|
||||
for (int i = 0; i < all_params.Count; ++i)
|
||||
output_features_file.Write(String.Format(", {0,0:F5}", all_params[i]));
|
||||
}
|
||||
|
||||
if (output_AUs)
|
||||
{
|
||||
var au_regs = face_analyser.GetCurrentAUsReg();
|
||||
|
||||
foreach (var name_reg in au_reg_names)
|
||||
output_features_file.Write(", {0:F2}", au_regs[name_reg]);
|
||||
|
||||
var au_classes = face_analyser.GetCurrentAUsClass();
|
||||
|
||||
foreach (var name_class in au_class_names)
|
||||
output_features_file.Write(", {0:F0}", au_classes[name_class]);
|
||||
|
||||
}
|
||||
|
||||
output_features_file.WriteLine();
|
||||
|
||||
if (record_aligned)
|
||||
{
|
||||
face_analyser.RecordAlignedFrame(frame_ind);
|
||||
}
|
||||
|
||||
if (record_HOG)
|
||||
{
|
||||
face_analyser.RecordHOGFrame();
|
||||
}
|
||||
|
||||
if (record_tracked_vid)
|
||||
{
|
||||
face_analyser.RecordTrackedFace();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// ----------------------------------------------------------
|
||||
// Mode handling (image, video)
|
||||
// ----------------------------------------------------------
|
||||
private void SetupImageMode()
|
||||
{
|
||||
// Turn off recording
|
||||
record_aligned = false;
|
||||
record_HOG = false;
|
||||
record_tracked_vid = false;
|
||||
|
||||
// Turn off unneeded visualisations
|
||||
show_tracked_video = true;
|
||||
show_appearance = false;
|
||||
show_geometry = false;
|
||||
show_aus = false;
|
||||
// Turn off unneeded visualisations, TODO remove dispatch
|
||||
ShowTrackedVideo = true;
|
||||
ShowAppearance = false;
|
||||
ShowGeometry = false;
|
||||
ShowAUs = false;
|
||||
|
||||
RecordAligned = false;
|
||||
RecordHOG = false;
|
||||
|
||||
// Actually update the GUI accordingly
|
||||
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 2000), (Action)(() =>
|
||||
{
|
||||
RecordAUCheckBox.IsChecked = record_AUs;
|
||||
RecordAlignedCheckBox.IsChecked = record_aligned;
|
||||
RecordTrackedVidCheckBox.IsChecked = record_tracked_vid;
|
||||
RecordHOGCheckBox.IsChecked = record_HOG;
|
||||
RecordGazeCheckBox.IsChecked = record_gaze;
|
||||
RecordLandmarks2DCheckBox.IsChecked = record_2D_landmarks;
|
||||
RecordLandmarks3DCheckBox.IsChecked = record_3D_landmarks;
|
||||
RecordParamsCheckBox.IsChecked = record_model_params;
|
||||
RecordPoseCheckBox.IsChecked = record_pose;
|
||||
|
||||
ShowVideoCheckBox.IsChecked = true;
|
||||
ShowAppearanceFeaturesCheckBox.IsChecked = false;
|
||||
ShowGeometryFeaturesCheckBox.IsChecked = false;
|
||||
ShowAUsCheckBox.IsChecked = false;
|
||||
|
||||
VisualisationCheckBox_Click(null, null);
|
||||
VisualisationChange(null, null);
|
||||
}));
|
||||
|
||||
// TODO change what next and back buttons do?
|
||||
|
@ -1081,7 +812,7 @@ namespace OpenFaceOffline
|
|||
ResetButton.IsEnabled = false;
|
||||
RecordingMenu.IsEnabled = true;
|
||||
|
||||
UseDynamicModelsCheckBox.IsEnabled = true;
|
||||
AUSetting.IsEnabled = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1131,15 +862,10 @@ namespace OpenFaceOffline
|
|||
}
|
||||
|
||||
|
||||
private void VisualisationCheckBox_Click(object sender, RoutedEventArgs e)
|
||||
private void VisualisationChange(object sender, RoutedEventArgs e)
|
||||
{
|
||||
show_tracked_video = ShowVideoCheckBox.IsChecked;
|
||||
show_appearance = ShowAppearanceFeaturesCheckBox.IsChecked;
|
||||
show_geometry = ShowGeometryFeaturesCheckBox.IsChecked;
|
||||
show_aus = ShowAUsCheckBox.IsChecked;
|
||||
|
||||
// Collapsing or restoring the windows here
|
||||
if (!show_tracked_video)
|
||||
if (!ShowTrackedVideo)
|
||||
{
|
||||
VideoBorder.Visibility = System.Windows.Visibility.Collapsed;
|
||||
MainGrid.ColumnDefinitions[0].Width = new GridLength(0, GridUnitType.Star);
|
||||
|
@ -1150,7 +876,7 @@ namespace OpenFaceOffline
|
|||
MainGrid.ColumnDefinitions[0].Width = new GridLength(2.1, GridUnitType.Star);
|
||||
}
|
||||
|
||||
if (!show_appearance)
|
||||
if (!ShowAppearance)
|
||||
{
|
||||
AppearanceBorder.Visibility = System.Windows.Visibility.Collapsed;
|
||||
MainGrid.ColumnDefinitions[1].Width = new GridLength(0, GridUnitType.Star);
|
||||
|
@ -1162,7 +888,7 @@ namespace OpenFaceOffline
|
|||
}
|
||||
|
||||
// Collapsing or restoring the windows here
|
||||
if (!show_geometry)
|
||||
if (!ShowGeometry)
|
||||
{
|
||||
GeometryBorder.Visibility = System.Windows.Visibility.Collapsed;
|
||||
MainGrid.ColumnDefinitions[2].Width = new GridLength(0, GridUnitType.Star);
|
||||
|
@ -1174,7 +900,7 @@ namespace OpenFaceOffline
|
|||
}
|
||||
|
||||
// Collapsing or restoring the windows here
|
||||
if (!show_aus)
|
||||
if (!ShowAUs)
|
||||
{
|
||||
ActionUnitBorder.Visibility = System.Windows.Visibility.Collapsed;
|
||||
MainGrid.ColumnDefinitions[3].Width = new GridLength(0, GridUnitType.Star);
|
||||
|
@ -1187,32 +913,11 @@ namespace OpenFaceOffline
|
|||
|
||||
}
|
||||
|
||||
|
||||
private void recordCheckBox_click(object sender, RoutedEventArgs e)
|
||||
{
|
||||
record_AUs = RecordAUCheckBox.IsChecked;
|
||||
record_aligned = RecordAlignedCheckBox.IsChecked;
|
||||
record_HOG = RecordHOGCheckBox.IsChecked;
|
||||
record_gaze = RecordGazeCheckBox.IsChecked;
|
||||
record_tracked_vid = RecordTrackedVidCheckBox.IsChecked;
|
||||
record_2D_landmarks = RecordLandmarks2DCheckBox.IsChecked;
|
||||
record_3D_landmarks = RecordLandmarks3DCheckBox.IsChecked;
|
||||
record_model_params = RecordParamsCheckBox.IsChecked;
|
||||
record_pose = RecordPoseCheckBox.IsChecked;
|
||||
}
|
||||
|
||||
private void UseDynamicModelsCheckBox_Click(object sender, RoutedEventArgs e)
|
||||
{
|
||||
dynamic_AU_shift = UseDynamicShiftingCheckBox.IsChecked;
|
||||
dynamic_AU_scale = UseDynamicScalingCheckBox.IsChecked;
|
||||
|
||||
if (use_dynamic_models != UseDynamicModelsCheckBox.IsChecked)
|
||||
{
|
||||
// Change the face analyser, this should be safe as the model is only allowed to change when not running
|
||||
String root = AppDomain.CurrentDomain.BaseDirectory;
|
||||
face_analyser = new FaceAnalyserManaged(root, UseDynamicModelsCheckBox.IsChecked, image_output_size);
|
||||
}
|
||||
use_dynamic_models = UseDynamicModelsCheckBox.IsChecked;
|
||||
face_analyser = new FaceAnalyserManaged(root, DynamicAUModels, image_output_size);
|
||||
}
|
||||
|
||||
private void setOutputImageSize_Click(object sender, RoutedEventArgs e)
|
||||
|
@ -1227,7 +932,7 @@ namespace OpenFaceOffline
|
|||
{
|
||||
image_output_size = number_entry_window.OutputInt;
|
||||
String root = AppDomain.CurrentDomain.BaseDirectory;
|
||||
face_analyser = new FaceAnalyserManaged(root, use_dynamic_models, image_output_size);
|
||||
face_analyser = new FaceAnalyserManaged(root, DynamicAUModels, image_output_size);
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -87,6 +87,7 @@
|
|||
<SubType>Designer</SubType>
|
||||
</ApplicationDefinition>
|
||||
<Compile Include="FpsTracker.cs" />
|
||||
<Compile Include="Recorder.cs" />
|
||||
<Compile Include="UI_items\BarGraph.xaml.cs">
|
||||
<DependentUpon>BarGraph.xaml</DependentUpon>
|
||||
</Compile>
|
||||
|
|
223
gui/OpenFaceOffline/Recorder.cs
Normal file
|
@ -0,0 +1,223 @@
|
|||
using CppInterop.LandmarkDetector;
|
||||
using FaceAnalyser_Interop;
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System.Text;
|
||||
using System.Threading.Tasks;
|
||||
|
||||
namespace OpenFaceOffline
|
||||
{
|
||||
class Recorder
|
||||
{
|
||||
StreamWriter output_features_file;
|
||||
|
||||
bool output_2D_landmarks, output_3D_landmarks, output_model_params, output_pose, output_AUs, output_gaze, record_aligned, record_HOG;
|
||||
|
||||
double fx, fy, cx, cy;
|
||||
|
||||
List<string> au_reg_names;
|
||||
List<string> au_class_names;
|
||||
|
||||
public Recorder(string root, string filename, int width, int height, bool output_2D_landmarks, bool output_3D_landmarks, bool output_model_params,
|
||||
bool output_pose, bool output_AUs, bool output_gaze, bool record_aligned, bool record_HOG,
|
||||
CLNF clnf_model, FaceAnalyserManaged face_analyser, double fx, double fy, double cx, double cy)
|
||||
{
|
||||
|
||||
this.output_2D_landmarks = output_2D_landmarks; this.output_3D_landmarks = output_3D_landmarks;
|
||||
this.output_model_params = output_model_params; this.output_pose = output_pose;
|
||||
this.output_AUs = output_AUs; this.output_gaze = output_gaze;
|
||||
this.record_aligned = record_aligned; this.record_HOG = record_HOG;
|
||||
|
||||
this.fx = fx; this.fy = fy; this.cx = cx; this.cy = cy;
|
||||
|
||||
if (!System.IO.Directory.Exists(root))
|
||||
{
|
||||
System.IO.Directory.CreateDirectory(root);
|
||||
}
|
||||
|
||||
output_features_file = new StreamWriter(root + "/" + filename + ".txt");
|
||||
output_features_file.Write("frame, timestamp, confidence, success");
|
||||
|
||||
if (output_gaze)
|
||||
{
|
||||
output_features_file.Write(", gaze_0_x, gaze_0_y, gaze_0_z, gaze_1_x, gaze_1_y, gaze_1_z");
|
||||
}
|
||||
|
||||
if (output_pose)
|
||||
output_features_file.Write(", pose_Tx, pose_Ty, pose_Tz, pose_Rx, pose_Ry, pose_Rz");
|
||||
|
||||
if (output_2D_landmarks)
|
||||
{
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", x_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", y_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_3D_landmarks)
|
||||
{
|
||||
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", X_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", Y_" + i);
|
||||
}
|
||||
for (int i = 0; i < clnf_model.GetNumPoints(); ++i)
|
||||
{
|
||||
output_features_file.Write(", Z_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_model_params)
|
||||
{
|
||||
output_features_file.Write(", p_scale, p_rx, p_ry, p_rz, p_tx, p_ty");
|
||||
for (int i = 0; i < clnf_model.GetNumModes(); ++i)
|
||||
{
|
||||
output_features_file.Write(", p_" + i);
|
||||
}
|
||||
}
|
||||
|
||||
if (output_AUs)
|
||||
{
|
||||
|
||||
au_reg_names = face_analyser.GetRegActionUnitsNames();
|
||||
au_reg_names.Sort();
|
||||
foreach (var name in au_reg_names)
|
||||
{
|
||||
output_features_file.Write(", " + name + "_r");
|
||||
}
|
||||
|
||||
au_class_names = face_analyser.GetClassActionUnitsNames();
|
||||
au_class_names.Sort();
|
||||
foreach (var name in au_class_names)
|
||||
{
|
||||
output_features_file.Write(", " + name + "_c");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
output_features_file.WriteLine();
|
||||
|
||||
if (record_aligned)
|
||||
{
|
||||
String aligned_root = root + "/" + filename + "_aligned/";
|
||||
System.IO.Directory.CreateDirectory(aligned_root);
|
||||
face_analyser.SetupAlignedImageRecording(aligned_root);
|
||||
}
|
||||
|
||||
if (record_HOG)
|
||||
{
|
||||
String filename_HOG = root + "/" + filename + ".hog";
|
||||
face_analyser.SetupHOGRecording(filename_HOG);
|
||||
}
|
||||
}
|
||||
|
||||
public void RecordFrame(CLNF clnf_model, FaceAnalyserManaged face_analyser, bool success, int frame_ind, double time_stamp)
|
||||
{
|
||||
// Making sure that a full stop is used instead of a comma when writing numbers
|
||||
System.Globalization.CultureInfo customCulture = (System.Globalization.CultureInfo)System.Threading.Thread.CurrentThread.CurrentCulture.Clone();
|
||||
customCulture.NumberFormat.NumberDecimalSeparator = ".";
|
||||
|
||||
System.Threading.Thread.CurrentThread.CurrentCulture = customCulture;
|
||||
|
||||
double confidence = (-clnf_model.GetConfidence()) / 2.0 + 0.5;
|
||||
|
||||
List<double> pose = new List<double>();
|
||||
clnf_model.GetPose(pose, fx, fy, cx, cy);
|
||||
|
||||
output_features_file.Write(String.Format("{0}, {1}, {2:F3}, {3}", frame_ind, time_stamp, confidence, success ? 1 : 0));
|
||||
|
||||
if (output_gaze)
|
||||
{
|
||||
var gaze = face_analyser.GetGazeCamera();
|
||||
var gaze_angle = face_analyser.GetGazeAngle();
|
||||
|
||||
output_features_file.Write(String.Format(", {0:F5}, {1:F5}, {2:F5}, {3:F5}, {4:F5}, {5:F5}, {6:F5}, {7:F5}", gaze.Item1.Item1, gaze.Item1.Item2, gaze.Item1.Item3,
|
||||
gaze.Item2.Item1, gaze.Item2.Item2, gaze.Item2.Item3, gaze_angle.Item1, gaze_angle.Item2));
|
||||
}
|
||||
|
||||
if (output_pose)
|
||||
output_features_file.Write(String.Format(", {0:F3}, {1:F3}, {2:F3}, {3:F3}, {4:F3}, {5:F3}", pose[0], pose[1], pose[2], pose[3], pose[4], pose[5]));
|
||||
|
||||
if (output_2D_landmarks)
|
||||
{
|
||||
List<Tuple<double, double>> landmarks_2d = clnf_model.CalculateLandmarks();
|
||||
|
||||
for (int i = 0; i < landmarks_2d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_2d[i].Item1);
|
||||
|
||||
for (int i = 0; i < landmarks_2d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_2d[i].Item2);
|
||||
}
|
||||
|
||||
if (output_3D_landmarks)
|
||||
{
|
||||
List<System.Windows.Media.Media3D.Point3D> landmarks_3d = clnf_model.Calculate3DLandmarks(fx, fy, cx, cy);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].X);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].Y);
|
||||
|
||||
for (int i = 0; i < landmarks_3d.Count; ++i)
|
||||
output_features_file.Write(", {0:F2}", landmarks_3d[i].Z);
|
||||
}
|
||||
|
||||
if (output_model_params)
|
||||
{
|
||||
List<double> all_params = clnf_model.GetParams();
|
||||
|
||||
for (int i = 0; i < all_params.Count; ++i)
|
||||
output_features_file.Write(String.Format(", {0,0:F5}", all_params[i]));
|
||||
}
|
||||
|
||||
if (output_AUs)
|
||||
{
|
||||
var au_regs = face_analyser.GetCurrentAUsReg();
|
||||
|
||||
foreach (var name_reg in au_reg_names)
|
||||
output_features_file.Write(", {0:F2}", au_regs[name_reg]);
|
||||
|
||||
var au_classes = face_analyser.GetCurrentAUsClass();
|
||||
|
||||
foreach (var name_class in au_class_names)
|
||||
output_features_file.Write(", {0:F0}", au_classes[name_class]);
|
||||
|
||||
}
|
||||
|
||||
output_features_file.WriteLine();
|
||||
|
||||
if (record_aligned)
|
||||
{
|
||||
face_analyser.RecordAlignedFrame(frame_ind);
|
||||
}
|
||||
|
||||
if (record_HOG)
|
||||
{
|
||||
face_analyser.RecordHOGFrame();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void FinishRecording(CLNF clnf_model, FaceAnalyserManaged face_analyser)
|
||||
{
|
||||
if (output_features_file != null)
|
||||
output_features_file.Close();
|
||||
|
||||
if (record_HOG)
|
||||
face_analyser.StopHOGRecording();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
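For orientation, the MainWindow changes earlier in this diff drive the new class roughly as follows (condensed; fx, fy, cx, cy come from the capture-size heuristic set up before the frame loop):

// Once per video, before the frame loop
recorder = new Recorder(record_root, "test.txt", capture.width, capture.height,
                        Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose,
                        RecordAUs, RecordGaze, RecordAligned, RecordHOG,
                        clnf_model, face_analyser, fx, fy, cx, cy);

// Once per processed frame
recorder.RecordFrame(clnf_model, face_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);

// After the loop finishes
recorder.FinishRecording(clnf_model, face_analyser);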
|
|
@ -113,9 +113,9 @@ namespace LandmarkDetector
|
|||
vector<std::pair<cv::Point2d, cv::Point2d>> CalculateBox(cv::Vec6d pose, float fx, float fy, float cx, float cy);
|
||||
void DrawBox(vector<pair<cv::Point, cv::Point>> lines, cv::Mat image, cv::Scalar color, int thickness);
|
||||
|
||||
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, cv::Mat_<int>& visibilities);
|
||||
vector<cv::Point2d> CalculateLandmarks(CLNF& clnf_model);
|
||||
vector<cv::Point2d> CalculateEyeLandmarks(CLNF& clnf_model);
|
||||
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
|
||||
vector<cv::Point2d> CalculateLandmarks(const CLNF& clnf_model);
|
||||
vector<cv::Point2d> CalculateEyeLandmarks(const CLNF& clnf_model);
|
||||
void DrawLandmarks(cv::Mat img, vector<cv::Point> landmarks);
|
||||
|
||||
void Draw(cv::Mat img, const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities);
|
||||
|
|
|
@ -1001,7 +1001,7 @@ void DrawBox(vector<pair<cv::Point, cv::Point>> lines, cv::Mat image, cv::Scalar
|
|||
}
|
||||
|
||||
// Computing landmarks (to be drawn later possibly)
|
||||
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, cv::Mat_<int>& visibilities)
|
||||
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, const cv::Mat_<int>& visibilities)
|
||||
{
|
||||
int n = shape2D.rows/2;
|
||||
vector<cv::Point2d> landmarks;
|
||||
|
@ -1020,7 +1020,7 @@ vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D, cv::Mat_
|
|||
}
|
||||
|
||||
// Computing landmarks (to be drawn later possibly)
|
||||
vector<cv::Point2d> CalculateLandmarks(cv::Mat img, const cv::Mat_<double>& shape2D)
|
||||
vector<cv::Point2d> CalculateLandmarks(const cv::Mat_<double>& shape2D)
|
||||
{
|
||||
|
||||
int n;
|
||||
|
@ -1054,7 +1054,7 @@ vector<cv::Point2d> CalculateLandmarks(cv::Mat img, const cv::Mat_<double>& shap
|
|||
}
|
||||
|
||||
// Computing landmarks (to be drawn later possibly)
|
||||
vector<cv::Point2d> CalculateLandmarks(CLNF& clnf_model)
|
||||
vector<cv::Point2d> CalculateLandmarks(const CLNF& clnf_model)
|
||||
{
|
||||
|
||||
int idx = clnf_model.patch_experts.GetViewIdx(clnf_model.params_global, 0);
|
||||
|
@ -1065,7 +1065,7 @@ vector<cv::Point2d> CalculateLandmarks(CLNF& clnf_model)
|
|||
}
|
||||
|
||||
// Computing eye landmarks (to be drawn later or in different interfaces)
|
||||
vector<cv::Point2d> CalculateEyeLandmarks(CLNF& clnf_model)
|
||||
vector<cv::Point2d> CalculateEyeLandmarks(const CLNF& clnf_model)
|
||||
{
|
||||
|
||||
vector<cv::Point2d> to_return;
|
||||
|
@ -1076,7 +1076,8 @@ vector<cv::Point2d> CalculateEyeLandmarks(CLNF& clnf_model)
|
|||
if (clnf_model.hierarchical_model_names[i].compare("left_eye_28") == 0 ||
|
||||
clnf_model.hierarchical_model_names[i].compare("right_eye_28") == 0)
|
||||
{
|
||||
auto lmks = CalculateLandmarks(clnf_model.hierarchical_models[i].detected_landmarks, clnf_model.hierarchical_models[i].patch_experts.visibilities[0][0]);
|
||||
|
||||
auto lmks = CalculateLandmarks(clnf_model.hierarchical_models[i]);
|
||||
for (auto lmk : lmks)
|
||||
{
|
||||
to_return.push_back(lmk);
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
clear
|
||||
executable = '"../../x64/Release/FeatureExtraction.exe"';
|
||||
|
||||
output = './output_features_vid/';
|
||||
|
@ -6,7 +7,8 @@ if(~exist(output, 'file'))
|
|||
mkdir(output)
|
||||
end
|
||||
|
||||
in_files = dir('../../videos/1815_01_008_tony_blair.avi');
|
||||
root_dir = '../../videos/';
|
||||
in_files = dir([root_dir, 'default.wmv']);
|
||||
% some parameters
|
||||
verbose = true;
|
||||
|
||||
|
@ -19,7 +21,7 @@ command = cat(2, command, ' -verbose ');
|
|||
% for every video)
|
||||
for i=1:numel(in_files)
|
||||
|
||||
inputFile = ['../../videos/', in_files(i).name];
|
||||
inputFile = [root_dir, in_files(i).name];
|
||||
[~, name, ~] = fileparts(inputFile);
|
||||
|
||||
% where to output tracking results
|
||||
|
@ -69,13 +71,22 @@ xlabel('Time (s)');
|
|||
landmark_inds_x = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'x_'));
|
||||
landmark_inds_y = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'y_'));
|
||||
|
||||
landmark_inds_x_eye = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_x_'));
|
||||
landmark_inds_y_eye = cellfun(@(x) ~isempty(x) && x==1, strfind(column_names, 'eye_lmk_y_'));
|
||||
|
||||
xs = all_params(valid_frames, landmark_inds_x);
|
||||
ys = all_params(valid_frames, landmark_inds_y);
|
||||
|
||||
xs_eye = all_params(valid_frames, landmark_inds_x_eye);
|
||||
ys_eye = all_params(valid_frames, landmark_inds_y_eye);
|
||||
|
||||
figure
|
||||
|
||||
for j = 1:size(xs,1)
|
||||
plot(xs(j,:), -ys(j,:), '.');
|
||||
hold on;
|
||||
plot(xs_eye(j,:), -ys_eye(j,:), '.');
|
||||
hold off;
|
||||
xlim([min(xs(1,:)) * 0.5, max(xs(2,:))*1.4]);
|
||||
ylim([min(-ys(1,:)) * 1.4, max(-ys(2,:))*0.5]);
|
||||
xlabel('x (px)');
|
||||
|
|