Allow controlling the size of the aligned output image
parent 984cfb58e7
commit 0befd5f756
15 changed files with 276 additions and 108 deletions
@@ -300,7 +300,7 @@ int main (int argc, char **argv)
 vector<string> output_similarity_align;
 vector<string> output_hog_align_files;
 
-double sim_scale = 0.7;
+double sim_scale = -1;
 int sim_size = 112;
 bool grayscale = false;
 bool video_output = false;
@@ -320,6 +320,7 @@ int main (int argc, char **argv)
 get_output_feature_params(output_similarity_align, output_hog_align_files, sim_scale, sim_size, grayscale, verbose, dynamic,
 output_2D_landmarks, output_3D_landmarks, output_model_params, output_pose, output_AUs, output_gaze, arguments);
 
+
 // Used for image masking
 
 string tri_loc;
@@ -339,11 +340,6 @@ int main (int argc, char **argv)
 }
 }
 
-// Will warp to scaled mean shape
-cv::Mat_<double> similarity_normalised_shape = face_model.pdm.mean_shape * sim_scale;
-// Discard the z component
-similarity_normalised_shape = similarity_normalised_shape(cv::Rect(0, 0, 1, 2*similarity_normalised_shape.rows/3)).clone();
-
 // If multiple video files are tracked, use this to indicate if we are done
 bool done = false;
 int f_n = -1;
@@ -381,7 +377,11 @@ int main (int argc, char **argv)
 }
 
 // Creating a face analyser that will be used for AU extraction
-FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), 0.7, 112, 112, au_loc, tri_loc);
+// Make sure sim_scale is proportional to sim_size if not set
+if (sim_scale == -1) sim_scale = sim_size * (0.7 / 112.0);
+
+FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), sim_scale, sim_size, sim_size, au_loc, tri_loc);
 
 while(!done) // this is not a for loop as we might also be reading from a webcam
 {
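Note: a minimal stand-alone C++ sketch (not part of the commit) of the proportional default introduced above; it assumes sim_size comes from the -simsize command-line option and simply reproduces the arithmetic, so 112 px keeps the 0.7 scale and 224 px gives 1.4.

// Sketch only: the proportional-default rule for the similarity-warp scale.
#include <cstdio>

int main()
{
    int sim_size = 224;      // requested aligned-output size in pixels (e.g. -simsize 224)
    double sim_scale = -1;   // -1 means the user did not set a scale explicitly

    if (sim_scale == -1)
        sim_scale = sim_size * (0.7 / 112.0);  // 0.7 at 112 px, 1.4 at 224 px

    std::printf("sim_size = %d -> sim_scale = %.2f\n", sim_size, sim_scale);
    return 0;
}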
@@ -588,7 +588,7 @@ int main (int argc, char **argv)
 }
 if(hog_output_file.is_open())
 {
-FaceAnalysis::Extract_FHOG_descriptor(hog_descriptor, sim_warped_img, num_hog_rows, num_hog_cols);
+face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols);
 
 if(visualise_hog && !det_parameters.quiet_mode)
 {
@@ -88,7 +88,7 @@ namespace OpenFaceDemo
 
 clnf_params = new FaceModelParameters(root, true);
 clnf_model = new CLNF(clnf_params);
-face_analyser = new FaceAnalyserManaged(root, true);
+face_analyser = new FaceAnalyserManaged(root, true, 112);
 
 Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
 {
@@ -29,7 +29,7 @@
 <MenuItem Header="Open image sequence" Click="imageSequenceFileOpenClick">
 </MenuItem>
 </MenuItem>
-<MenuItem Name="RecordingMenu" Header="Recording settings">
+<MenuItem Name="RecordingMenu" Header="Record">
 <MenuItem Header="Set Location"></MenuItem>
 <MenuItem Name="RecordAUCheckBox" IsCheckable="True" Header="Record AUs" Click="recordCheckBox_click"></MenuItem>
 <MenuItem Name="RecordPoseCheckBox" IsCheckable="True" Header="Record pose" Click="recordCheckBox_click"></MenuItem>
@@ -41,6 +41,10 @@
 <MenuItem Name="RecordAlignedCheckBox" IsCheckable="True" Header="Record aligned faces" Click="recordCheckBox_click"></MenuItem>
 <MenuItem Name="RecordTrackedVidCheckBox" IsCheckable="True" Header="Record tracked video" Click="recordCheckBox_click"></MenuItem>
 </MenuItem>
+<MenuItem Name="SettingsMenu" Header="Recording settings">
+<MenuItem Header="Set output location..."></MenuItem>
+<MenuItem Header="Set output image size..." Click="setOutputImageSize_Click"></MenuItem>
+</MenuItem>
 <MenuItem Header="AU settings">
 <MenuItem Name="UseDynamicModelsCheckBox" IsChecked="True" IsCheckable="True" Header="Use dynamic models" Click="UseDynamicModelsCheckBox_Click"></MenuItem>
 <MenuItem Name="UseDynamicShiftingCheckBox" IsCheckable="True" Header="Use dynamic shifting" Click="UseDynamicModelsCheckBox_Click"></MenuItem>
@@ -148,6 +148,8 @@ namespace OpenFaceOffline
 bool show_geometry = true;
 bool show_aus = true;
 
+int image_output_size = 112;
+
 // TODO classifiers converted to regressors
 
 // TODO indication that track is done
@@ -196,7 +198,7 @@ namespace OpenFaceOffline
 
 clnf_params = new FaceModelParameters(root, false);
 clnf_model = new CLNF(clnf_params);
-face_analyser = new FaceAnalyserManaged(root, use_dynamic_models);
+face_analyser = new FaceAnalyserManaged(root, use_dynamic_models, image_output_size);
 
 }
 
@@ -505,7 +507,7 @@ namespace OpenFaceOffline
 List<Tuple<double, double>> landmarks = null;
 List<Tuple<double, double>> eye_landmarks = null;
 List<Tuple<Point, Point>> gaze_lines = null;
-Tuple<double, double> gaze_angle = new Tuple<double, double>(0,0);
+Tuple<double, double> gaze_angle = new Tuple<double, double>(0, 0);
 
 if (detectionSucceeding)
 {
@@ -556,7 +558,7 @@ namespace OpenFaceOffline
 nonRigidGraph.Update(non_rigid_params);
 
 // Update eye gaze
-GazeXLabel.Content = gaze_angle.Item1 * (180.0/ Math.PI);
+GazeXLabel.Content = gaze_angle.Item1 * (180.0 / Math.PI);
 GazeYLabel.Content = gaze_angle.Item2 * (180.0 / Math.PI);
 
 }
@@ -625,7 +627,7 @@ namespace OpenFaceOffline
 }));
 
 // Recording the tracked model
-RecordFrame(clnf_model, detectionSucceeding, frame_id + 1, frame, grayFrame, ((double)frame_id)/fps,
+RecordFrame(clnf_model, detectionSucceeding, frame_id + 1, frame, grayFrame, ((double)frame_id) / fps,
 record_2D_landmarks, record_2D_landmarks, record_model_params, record_pose, record_AUs, record_gaze, fx, fy, cx, cy);
 
 if (reset)
@@ -1206,10 +1208,26 @@ namespace OpenFaceOffline
 {
 // Change the face analyser, this should be safe as the model is only allowed to change when not running
 String root = AppDomain.CurrentDomain.BaseDirectory;
-face_analyser = new FaceAnalyserManaged(root, UseDynamicModelsCheckBox.IsChecked);
+face_analyser = new FaceAnalyserManaged(root, UseDynamicModelsCheckBox.IsChecked, image_output_size);
 }
 use_dynamic_models = UseDynamicModelsCheckBox.IsChecked;
 }
 
+private void setOutputImageSize_Click(object sender, RoutedEventArgs e)
+{
+
+NumberEntryWindow number_entry_window = new NumberEntryWindow();
+number_entry_window.Icon = this.Icon;
+
+number_entry_window.WindowStartupLocation = WindowStartupLocation.CenterScreen;
+
+if (number_entry_window.ShowDialog() == true)
+{
+image_output_size = number_entry_window.OutputInt;
+String root = AppDomain.CurrentDomain.BaseDirectory;
+face_analyser = new FaceAnalyserManaged(root, use_dynamic_models, image_output_size);
+
+}
+}
 }
 }
@@ -91,6 +91,9 @@
 <Compile Include="UI_items\MultiBarGraphHorz.xaml.cs">
 <DependentUpon>MultiBarGraphHorz.xaml</DependentUpon>
 </Compile>
+<Compile Include="UI_items\NumberEntryWindow.xaml.cs">
+<DependentUpon>NumberEntryWindow.xaml</DependentUpon>
+</Compile>
 <Compile Include="UI_items\OverlayImage.xaml.cs">
 <DependentUpon>OverlayImage.xaml</DependentUpon>
 </Compile>
@@ -125,6 +128,10 @@
 <SubType>Designer</SubType>
 <Generator>MSBuild:Compile</Generator>
 </Page>
+<Page Include="UI_items\NumberEntryWindow.xaml">
+<SubType>Designer</SubType>
+<Generator>MSBuild:Compile</Generator>
+</Page>
 <Page Include="UI_items\OverlayImage.xaml">
 <SubType>Designer</SubType>
 <Generator>MSBuild:Compile</Generator>
gui/OpenFaceOffline/UI_items/NumberEntryWindow.xaml (new file, 17 lines)
@@ -0,0 +1,17 @@
+<Window x:Class="OpenFaceOffline.NumberEntryWindow"
+        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
+        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
+        xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
+        xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
+        mc:Ignorable="d"
+        Title="NumberEntryWindow" Height="160" Width="300">
+    <Grid>
+        <StackPanel FocusManager.FocusedElement="{Binding ElementName=ResponseTextBox}">
+            <TextBlock HorizontalAlignment="Center" Text="Enter new output image size" FontSize="20"/>
+            <TextBox Margin="0,4,0,0" x:Name="ResponseTextBox" FontSize="20" Width="120" TextChanged="ResponseTextBox_TextChanged" />
+            <Label Name="warningLabel" Visibility="Collapsed" FontStyle="Italic" Foreground="Red" HorizontalAlignment="Center">Has to be a non negative integer</Label>
+
+            <Button Margin="0,8,0,0" Content="OK" Click="OKButton_Click" Width="100" VerticalAlignment="Bottom"/>
+        </StackPanel>
+    </Grid>
+</Window>
gui/OpenFaceOffline/UI_items/NumberEntryWindow.xaml.cs (new file, 142 lines)
@@ -0,0 +1,142 @@
+///////////////////////////////////////////////////////////////////////////////
+// Copyright (C) 2016, Carnegie Mellon University and University of Cambridge,
+// all rights reserved.
+//
+// THIS SOFTWARE IS PROVIDED “AS IS” FOR ACADEMIC USE ONLY AND ANY EXPRESS
+// OR IMPLIED WARRANTIES WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY.
+// OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Notwithstanding the license granted herein, Licensee acknowledges that certain components
+// of the Software may be covered by so-called “open source” software licenses (“Open Source
+// Components”), which means any software licenses approved as open source licenses by the
+// Open Source Initiative or any substantially similar licenses, including without limitation any
+// license that, as a condition of distribution of the software licensed under such license,
+// requires that the distributor make the software available in source code format. Licensor shall
+// provide a list of Open Source Components for a particular version of the Software upon
+// Licensee’s request. Licensee will comply with the applicable terms of such licenses and to
+// the extent required by the licenses covering Open Source Components, the terms of such
+// licenses will apply in lieu of the terms of this Agreement. To the extent the terms of the
+// licenses applicable to Open Source Components prohibit any of the restrictions in this
+// License Agreement with respect to such Open Source Component, such restrictions will not
+// apply to such Open Source Component. To the extent the terms of the licenses applicable to
+// Open Source Components require Licensor to make an offer to provide source code or
+// related information in connection with the Software, such offer is hereby made. Any request
+// for source code or related information should be directed to cl-face-tracker-distribution@lists.cam.ac.uk
+// Licensee acknowledges receipt of notices for the Open Source Components for the initial
+// delivery of the Software.
+
+// * Any publications arising from the use of this software, including but
+// not limited to academic journal and conference publications, technical
+// reports and manuals, must cite at least one of the following works:
+//
+// OpenFace: an open source facial behavior analysis toolkit
+// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
+// in IEEE Winter Conference on Applications of Computer Vision, 2016
+//
+// Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
+// Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
+// in IEEE International. Conference on Computer Vision (ICCV), 2015
+//
+// Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
+// Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
+// in Facial Expression Recognition and Analysis Challenge,
+// IEEE International Conference on Automatic Face and Gesture Recognition, 2015
+//
+// Constrained Local Neural Fields for robust facial landmark detection in the wild.
+// Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
+// in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Threading.Tasks;
+using System.Windows;
+using System.Windows.Controls;
+using System.Windows.Data;
+using System.Windows.Documents;
+using System.Windows.Input;
+using System.Windows.Media;
+using System.Windows.Media.Imaging;
+using System.Windows.Shapes;
+
+namespace OpenFaceOffline
+{
+    /// <summary>
+    /// Interaction logic for TextEntryWindow.xaml
+    /// </summary>
+    public partial class NumberEntryWindow : Window
+    {
+        public NumberEntryWindow()
+        {
+            InitializeComponent();
+            ResponseTextBox.Text = "112";
+            OutputInt = 112;
+            this.KeyDown += new KeyEventHandler(TextEntry_KeyDown);
+
+        }
+
+        private string ResponseText
+        {
+            get { return ResponseTextBox.Text; }
+            set { ResponseTextBox.Text = value; }
+        }
+
+        public int OutputInt;
+
+        private void OKButton_Click(object sender, System.Windows.RoutedEventArgs e)
+        {
+            DialogResult = true;
+        }
+
+        private void TextEntry_KeyDown(object sender, KeyEventArgs e)
+        {
+            if (e.Key == Key.Enter)
+            {
+                DialogResult = true;
+            }
+        }
+
+        // Do not allow illegal characters like
+        private void ResponseTextBox_TextChanged(object sender, TextChangedEventArgs e)
+        {
+
+            try
+            {
+                OutputInt = Int32.Parse(ResponseTextBox.Text);
+                if(OutputInt > 0)
+                {
+                    warningLabel.Visibility = System.Windows.Visibility.Collapsed;
+                }
+                else
+                {
+                    warningLabel.Visibility = System.Windows.Visibility.Visible;
+                    OutputInt = 112;
+                    ResponseTextBox.Text = "112";
+                    ResponseTextBox.SelectionStart = ResponseTextBox.Text.Length;
+
+                }
+            }
+            catch (FormatException except)
+            {
+                OutputInt = 112;
+                ResponseTextBox.Text = "112";
+                ResponseTextBox.SelectionStart = ResponseTextBox.Text.Length;
+                warningLabel.Visibility = System.Windows.Visibility.Visible;
+            }
+
+        }
+
+    }
+}
@@ -133,14 +133,16 @@ private:
 
 public:
 
-FaceAnalyserManaged(System::String^ root, bool dynamic)
+FaceAnalyserManaged(System::String^ root, bool dynamic, int output_width)
 {
 
 vector<cv::Vec3d> orientation_bins;
 orientation_bins.push_back(cv::Vec3d(0,0,0));
-double scale = 0.7;
-int width = 112;
-int height = 112;
+int width = output_width;
+int height = output_width;
 
+double scale = width * (0.7 / 112.0);
+
 string root_std = msclr::interop::marshal_as<std::string>(root);
 
@@ -112,12 +112,8 @@ public:
 
 cv::Mat_<int> GetTriangulation();
 
-cv::Mat_<uchar> GetLatestAlignedFaceGrayscale();
-
 void GetGeomDescriptor(cv::Mat_<double>& geom_desc);
 
-void ExtractCurrentMedians(vector<cv::Mat>& hog_medians, vector<cv::Mat>& face_image_medians, vector<cv::Vec3d>& orientations);
-
 // Grab the names of AUs being predicted
 std::vector<std::string> GetAUClassNames() const; // Presence
 std::vector<std::string> GetAURegNames() const; // Intensity
@@ -148,8 +144,8 @@ private:
 int frames_tracking;
 
 // Cache of intermediate images
-cv::Mat_<uchar> aligned_face_grayscale;
-cv::Mat aligned_face;
+cv::Mat aligned_face_for_au;
+cv::Mat aligned_face_for_output;
 cv::Mat hog_descriptor_visualisation;
 
 // Private members to be used for predictions
@@ -226,7 +226,7 @@ void FaceAnalyser::GetLatestHOG(cv::Mat_<double>& hog_descriptor, int& num_rows,
 
 void FaceAnalyser::GetLatestAlignedFace(cv::Mat& image)
 {
-image = this->aligned_face.clone();
+image = this->aligned_face_for_output.clone();
 }
 
 void FaceAnalyser::GetLatestNeutralHOG(cv::Mat_<double>& hog_descriptor, int& num_rows, int& num_cols)
@@ -267,50 +267,15 @@ int GetViewId(const vector<cv::Vec3d> orientations_all, const cv::Vec3d& orientation
 
 }
 
-void FaceAnalyser::ExtractCurrentMedians(vector<cv::Mat>& hog_medians, vector<cv::Mat>& face_image_medians, vector<cv::Vec3d>& orientations)
-{
-
-orientations = this->head_orientations;
-
-for(size_t i = 0; i < orientations.size(); ++i)
-{
-cv::Mat_<double> median_face(this->face_image_median.rows, this->face_image_median.cols, 0.0);
-cv::Mat_<double> median_hog(this->hog_desc_median.rows, this->hog_desc_median.cols, 0.0);
-
-ExtractMedian(this->face_image_hist[i], this->face_image_hist_sum[i], median_face, 256, 0, 255);
-ExtractMedian(this->hog_desc_hist[i], this->hog_hist_sum[i], median_hog, this->num_bins_hog, 0, 1);
-
-// Add the HOG sample
-hog_medians.push_back(median_hog.clone());
-
-// For the face image need to convert it to suitable format
-cv::Mat_<uchar> aligned_face_cols_uchar;
-median_face.convertTo(aligned_face_cols_uchar, CV_8U);
-
-cv::Mat aligned_face_uchar;
-if(aligned_face.channels() == 1)
-{
-aligned_face_uchar = cv::Mat(aligned_face.rows, aligned_face.cols, CV_8U, aligned_face_cols_uchar.data);
-}
-else
-{
-aligned_face_uchar = cv::Mat(aligned_face.rows, aligned_face.cols, CV_8UC3, aligned_face_cols_uchar.data);
-}
-
-face_image_medians.push_back(aligned_face_uchar.clone());
-
-}
-}
-
 std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string, double>>> FaceAnalyser::PredictStaticAUs(const cv::Mat& frame, const LandmarkDetector::CLNF& clnf, bool visualise)
 {
 
 // First align the face
-AlignFaceMask(aligned_face, frame, clnf, triangulation, true, align_scale, align_width, align_height);
+AlignFaceMask(aligned_face_for_au, frame, clnf, triangulation, true, 0.7, 112, 112);
 
 // Extract HOG descriptor from the frame and convert it to a useable format
 cv::Mat_<double> hog_descriptor;
-Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);
+Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);
 
 // Store the descriptor
 hog_desc_frame = hog_descriptor;
@@ -326,10 +291,10 @@ std::pair<std::vector<std::pair<string, double>>, std::vector<std::pair<string,
 
 cv::hconcat(locs.t(), geom_descriptor_frame.clone(), geom_descriptor_frame);
 
-// First convert the face image to double representation as a row vector
-cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
-cv::Mat_<double> aligned_face_cols_double;
-aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
+// First convert the face image to double representation as a row vector, TODO rem
+//cv::Mat_<uchar> aligned_face_cols(1, aligned_face_for_au.cols * aligned_face_for_au.rows * aligned_face_for_au.channels(), aligned_face_for_au.data, 1);
+//cv::Mat_<double> aligned_face_cols_double;
+//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
 
 // Visualising the median HOG
 if (visualise)
@@ -363,26 +328,31 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 // First align the face if tracking was successfull
 if(clnf_model.detection_success)
 {
-AlignFaceMask(aligned_face, frame, clnf_model, triangulation, true, align_scale, align_width, align_height);
-}
-else
-{
-aligned_face = cv::Mat(align_height, align_width, CV_8UC3);
-aligned_face.setTo(0);
-}
-
-if(aligned_face.channels() == 3)
-{
-cv::cvtColor(aligned_face, aligned_face_grayscale, CV_BGR2GRAY);
+// The aligned face requirement for AUs
+AlignFaceMask(aligned_face_for_au, frame, clnf_model, triangulation, true, 0.7, 112, 112);
+// If the output requirement matches use the already computed one, else compute it again
+if(align_scale == 0.7 && align_width == 112 && align_height == 112)
+{
+aligned_face_for_output = aligned_face_for_au.clone();
+}
+else
+{
+AlignFaceMask(aligned_face_for_output, frame, clnf_model, triangulation, true, align_scale, align_width, align_height);
+}
 }
 else
 {
-aligned_face_grayscale = aligned_face.clone();
+aligned_face_for_output = cv::Mat(align_height, align_width, CV_8UC3);
+aligned_face_for_au = cv::Mat(112, 112, CV_8UC3);
+aligned_face_for_output.setTo(0);
+aligned_face_for_au.setTo(0);
 }
 
 // Extract HOG descriptor from the frame and convert it to a useable format
 cv::Mat_<double> hog_descriptor;
-Extract_FHOG_descriptor(hog_descriptor, aligned_face, this->num_hog_rows, this->num_hog_cols);
+Extract_FHOG_descriptor(hog_descriptor, aligned_face_for_au, this->num_hog_rows, this->num_hog_cols);
 
 // Store the descriptor
 hog_desc_frame = hog_descriptor;
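Note: a minimal stand-alone C++ sketch (hypothetical names, not the FaceAnalyser API) of the reuse decision above: the 0.7 / 112x112 warp is always produced for the AU features, and a second warp is only computed when the requested output format differs from it.

#include <cstdio>

// Hypothetical stand-in for an aligned-face buffer.
struct Face { int w, h; double scale; };

// Warp stub; in the real code this role is played by AlignFaceMask.
static Face warp(double scale, int w, int h) { return Face{w, h, scale}; }

int main()
{
    // Requested output format (e.g. derived from -simsize 224).
    double align_scale = 1.4;
    int align_width = 224, align_height = 224;

    // Always compute the face used for the AU features.
    Face face_for_au = warp(0.7, 112, 112);

    // Reuse it for output only when the requested format matches exactly.
    Face face_for_output =
        (align_scale == 0.7 && align_width == 112 && align_height == 112)
            ? face_for_au
            : warp(align_scale, align_width, align_height);

    std::printf("output face: %dx%d at scale %.2f\n",
                face_for_output.w, face_for_output.h, face_for_output.scale);
    return 0;
}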
@@ -450,13 +420,10 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
 }
 
-// First convert the face image to double representation as a row vector
-cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
-cv::Mat_<double> aligned_face_cols_double;
-aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
+// First convert the face image to double representation as a row vector, TODO rem?
+//cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
+//cv::Mat_<double> aligned_face_cols_double;
+//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);
 
-// TODO get rid of this completely as it takes too long?
-//UpdateRunningMedian(this->face_image_hist[orientation_to_use], this->face_image_hist_sum[orientation_to_use], this->face_image_median, aligned_face_cols_double, update_median, 256, 0, 255);
-
 // Visualising the median HOG
 if(visualise)
@@ -1097,12 +1064,6 @@ vector<pair<string, double>> FaceAnalyser::PredictCurrentAUsClass(int view)
 return predictions;
 }
 
-
-cv::Mat_<uchar> FaceAnalyser::GetLatestAlignedFaceGrayscale()
-{
-return aligned_face_grayscale.clone();
-}
-
 cv::Mat FaceAnalyser::GetLatestHOGDescriptorVisualisation()
 {
 return hog_descriptor_visualisation;
@@ -125,6 +125,8 @@ end
 
 %%
 f = fopen('results/BP4D_valid_res_class.txt', 'w');
+f1s_class = zeros(1, numel(aus_BP4D));
+
 for au = 1:numel(aus_BP4D)
 
 if(inds_au_class(au) ~= 0)
@@ -137,7 +139,7 @@ for au = 1:numel(aus_BP4D)
 recall = tp./(tp+fn);
 
 f1 = 2 * precision .* recall ./ (precision + recall);
+f1s_class(au) = f1;
 fprintf(f, 'AU%d class, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_BP4D(au), precision, recall, f1);
 end
 
@@ -195,8 +197,10 @@ end
 
 %%
 f = fopen('results/BP4D_valid_res_int.txt', 'w');
+ints_cccs = zeros(1, numel(aus_BP4D));
 for au = 1:numel(aus_BP4D)
 [ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_au_prediction_results( preds_all_int(valid_ids, inds_au_int(au)), labels_gt(valid_ids,au));
+ints_cccs(au) = ccc;
 fprintf(f, 'AU%d results - rms %.3f, corr %.3f, ccc - %.3f\n', aus_BP4D(au), rms, corrs, ccc);
 end
 fclose(f);
@@ -100,6 +100,7 @@ end
 f = fopen('results/Bosphorus_res_class.txt', 'w');
 labels_gt_bin = labels_gt;
 labels_gt_bin(labels_gt_bin > 1) = 1;
+f1s_class = zeros(1, numel(aus_Bosph));
 for au = 1:numel(aus_Bosph)
 
 tp = sum(labels_gt_bin(:,au) == 1 & labels_pred(:, au) == 1);
@@ -111,6 +112,7 @@ for au = 1:numel(aus_Bosph)
 recall = tp./(tp+fn);
 
 f1 = 2 * precision .* recall ./ (precision + recall);
+f1s_class(au) = f1;
 
 fprintf(f, 'AU%d class, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_Bosph(au), precision, recall, f1);
 
@@ -180,10 +182,13 @@ end
 
 %%
 f = fopen('results/Bosphorus_res_int.txt', 'w');
+cccs_reg = zeros(1, numel(aus_Bosph));
 for au = 1:numel(aus_Bosph)
 
 [ ~, ~, corrs, ccc, rms, ~ ] = evaluate_regression_results( labels_pred(:, au), labels_gt(:, au));
 
+cccs_reg(au) = ccc;
+
 fprintf(f, 'AU%d intensity, Corr - %.3f, RMS - %.3f, CCC - %.3f\n', aus_Bosph(au), corrs, rms, ccc);
 
 end
 
@@ -98,10 +98,11 @@ end
 
 %%
 f = fopen('results/UNBC_valid_res_int.txt', 'w');
+ints_cccs = zeros(1, numel(aus_UNBC));
 for au = 1:numel(aus_UNBC)
 
 [ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_au_prediction_results( preds_all_int(:, inds_au_int(au)), labels_gt(:,au));
 fprintf(f, 'AU%d results - rms %.3f, corr %.3f, ccc - %.3f\n', aus_UNBC(au), rms, corrs, ccc);
+ints_cccs(au) = ccc;
 end
 fclose(f);
@@ -36,7 +36,7 @@ for i=1:numel(in_files)
 output_shape_params = [output name '.params.txt'];
 
 command = cat(2, command, [' -f "' inputFile '" -of "' outputFile '"']);
-command = cat(2, command, [' -simalign "' outputDir_aligned '" -hogalign "' outputHOG_aligned '"' ]);
+command = cat(2, command, [' -simsize 224 -simalign "' outputDir_aligned '" -hogalign "' outputHOG_aligned '"' ]);
 
 end
 
@@ -153,7 +153,7 @@ hold off;
 [hog_data, valid_inds, vid_id] = Read_HOG_files({name}, output);
 
 %% Output aligned images
-img_files = dir([outputDir_aligned, '/*.png']);
+img_files = dir([outputDir_aligned, '/*.bmp']);
 imgs = cell(numel(img_files, 1));
 for i=1:numel(img_files)
 imgs{i} = imread([ outputDir_aligned, '/', img_files(i).name]);
@@ -21,6 +21,17 @@ cd('../');
 
 %% AUs
 cd('Action Unit Experiments');
+run_AU_prediction_Bosphorus
+assert(mean(cccs_reg) > 0.56);
+assert(mean(f1s_class) > 0.46);
+
+run_AU_prediction_BP4D
+assert(mean(ints_cccs) > 0.6);
+assert(mean(f1s_class) > 0.6);
+
+run_AU_prediction_UNBC
+assert(mean(ints_cccs) > 0.38);
+
 run_AU_prediction_DISFA
 assert(mean(au_res) > 0.7);
 