Merge branch 'feature/new_validator' into develop
commit cdd258330f
155 changed files with 1864 additions and 15837 deletions

.gitignore (vendored), 2 changes
@@ -49,3 +49,5 @@ matlab_runners/Head Pose Experiments/experiments/bu_out/
 matlab_runners/Head Pose Experiments/experiments/ict_out/
 OpenFace\.VC\.db
+matlab_version/face_validation/vlfeat-0.9.20/
+matlab_version/face_validation/trained/intermediate/

@@ -302,12 +302,12 @@ int main (int argc, char **argv)
 boost::filesystem::path parent_path = boost::filesystem::path(arguments[0]).parent_path();

 // Some initial parameters that can be overriden from command line
-vector<string> files, depth_files, output_images, output_landmark_locations, output_pose_locations;
+vector<string> files, output_images, output_landmark_locations, output_pose_locations;

 // Bounding boxes for a face in each image (optional)
 vector<cv::Rect_<double> > bounding_boxes;

-LandmarkDetector::get_image_input_output_params(files, depth_files, output_landmark_locations, output_pose_locations, output_images, bounding_boxes, arguments);
+LandmarkDetector::get_image_input_output_params(files, output_landmark_locations, output_pose_locations, output_images, bounding_boxes, arguments);
 LandmarkDetector::FaceModelParameters det_parameters(arguments);
 // No need to validate detections, as we're not doing tracking
 det_parameters.validate_detections = false;
@@ -397,17 +397,7 @@ int main (int argc, char **argv)
 cout << "Could not read the input image" << endl;
 return 1;
 }

-// Loading depth file if exists (optional)
-cv::Mat_<float> depth_image;
-
-if(depth_files.size() > 0)
-{
-string dFile = depth_files.at(i);
-cv::Mat dTemp = cv::imread(dFile, -1);
-dTemp.convertTo(depth_image, CV_32F);
-}

 // Making sure the image is in uchar grayscale
 cv::Mat_<uchar> grayscale_image;
 convert_to_grayscale(read_image, grayscale_image);
@@ -453,7 +443,7 @@ int main (int argc, char **argv)
 for(size_t face=0; face < face_detections.size(); ++face)
 {
 // if there are multiple detections go through them
-bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, depth_image, face_detections[face], clnf_model, det_parameters);
+bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], clnf_model, det_parameters);

 // Estimate head pose and eye gaze
 cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

@@ -87,7 +87,7 @@ double fps_tracker = -1.0;
 int64 t0 = 0;

 // Visualising the results
-void visualise_tracking(cv::Mat& captured_image, cv::Mat_<float>& depth_image, const LandmarkDetector::CLNF& face_model, const LandmarkDetector::FaceModelParameters& det_parameters, cv::Point3f gazeDirection0, cv::Point3f gazeDirection1, int frame_count, double fx, double fy, double cx, double cy)
+void visualise_tracking(cv::Mat& captured_image, const LandmarkDetector::CLNF& face_model, const LandmarkDetector::FaceModelParameters& det_parameters, cv::Point3f gazeDirection0, cv::Point3f gazeDirection1, int frame_count, double fx, double fy, double cx, double cy)
 {

 // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
@@ -142,13 +142,6 @@ void visualise_tracking(cv::Mat& captured_image, cv::Mat_<float>& depth_image, c
 {
 cv::namedWindow("tracking_result", 1);
 cv::imshow("tracking_result", captured_image);
-
-if (!depth_image.empty())
-{
-// Division needed for visualisation purposes
-imshow("depth", depth_image / 2000.0);
-}
-
 }
 }
@@ -158,7 +151,7 @@ int main (int argc, char **argv)
 vector<string> arguments = get_arguments(argc, argv);

 // Some initial parameters that can be overriden from command line
-vector<string> files, depth_directories, output_video_files, out_dummy;
+vector<string> files, output_video_files, out_dummy;

 // By default try webcam 0
 int device = 0;
@@ -170,7 +163,7 @@ int main (int argc, char **argv)
 // Indicates that rotation should be with respect to world or camera coordinates
 bool u;
 string output_codec;
-LandmarkDetector::get_video_input_output_params(files, depth_directories, out_dummy, output_video_files, u, output_codec, arguments);
+LandmarkDetector::get_video_input_output_params(files, out_dummy, output_video_files, u, output_codec, arguments);

 // The modules that are being used for tracking
 LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
@@ -215,8 +208,6 @@ int main (int argc, char **argv)
 f_n = 0;
 }

-bool use_depth = !depth_directories.empty();
-
 // Do some grabbing
 cv::VideoCapture video_capture;
 if( current_file.size() > 0 )
@@ -292,7 +283,6 @@ int main (int argc, char **argv)
 {

 // Reading the images
-cv::Mat_<float> depth_image;
 cv::Mat_<uchar> grayscale_image;

 if(captured_image.channels() == 3)
@@ -303,31 +293,9 @@ int main (int argc, char **argv)
 {
 grayscale_image = captured_image.clone();
 }

-// Get depth image
-if(use_depth)
-{
-char* dst = new char[100];
-std::stringstream sstream;
-
-sstream << depth_directories[f_n] << "\\depth%05d.png";
-sprintf(dst, sstream.str().c_str(), frame_count + 1);
-// Reading in 16-bit png image representing depth
-cv::Mat_<short> depth_image_16_bit = cv::imread(string(dst), -1);
-
-// Convert to a floating point depth image
-if(!depth_image_16_bit.empty())
-{
-depth_image_16_bit.convertTo(depth_image, CV_32F);
-}
-else
-{
-WARN_STREAM( "Can't find depth image" );
-}
-}

 // The actual facial landmark detection / tracking
-bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, depth_image, clnf_model, det_parameters);
+bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_model, det_parameters);

 // Visualising the results
 // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
@@ -343,7 +311,7 @@ int main (int argc, char **argv)
 FaceAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
 }

-visualise_tracking(captured_image, depth_image, clnf_model, det_parameters, gazeDirection0, gazeDirection1, frame_count, fx, fy, cx, cy);
+visualise_tracking(captured_image, clnf_model, det_parameters, gazeDirection0, gazeDirection1, frame_count, fx, fy, cx, cy);

 // output the tracked video
 if (!output_video_files.empty())

@@ -107,7 +107,7 @@ int main (int argc, char **argv)
 vector<string> arguments = get_arguments(argc, argv);

 // Some initial parameters that can be overriden from command line
-vector<string> files, depth_directories, tracked_videos_output, dummy_out;
+vector<string> files, tracked_videos_output, dummy_out;

 // By default try webcam 0
 int device = 0;
@@ -128,7 +128,7 @@ int main (int argc, char **argv)
 // Get the input output file parameters
 bool u;
 string output_codec;
-LandmarkDetector::get_video_input_output_params(files, depth_directories, dummy_out, tracked_videos_output, u, output_codec, arguments);
+LandmarkDetector::get_video_input_output_params(files, dummy_out, tracked_videos_output, u, output_codec, arguments);
 // Get camera parameters
 LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);

@@ -177,8 +177,6 @@ int main (int argc, char **argv)
 current_file = files[f_n];
 }

-bool use_depth = !depth_directories.empty();
-
 // Do some grabbing
 cv::VideoCapture video_capture;
 if( current_file.size() > 0 )
@@ -254,28 +252,6 @@ int main (int argc, char **argv)
 grayscale_image = captured_image.clone();
 }

-// Get depth image
-if(use_depth)
-{
-char* dst = new char[100];
-std::stringstream sstream;
-
-sstream << depth_directories[f_n] << "\\depth%05d.png";
-sprintf(dst, sstream.str().c_str(), frame_count + 1);
-// Reading in 16-bit png image representing depth
-cv::Mat_<short> depth_image_16_bit = cv::imread(string(dst), -1);
-
-// Convert to a floating point depth image
-if(!depth_image_16_bit.empty())
-{
-depth_image_16_bit.convertTo(depth_image, CV_32F);
-}
-else
-{
-WARN_STREAM( "Can't find depth image" );
-}
-}

 vector<cv::Rect_<double> > face_detections;

 bool all_models_active = true;
@@ -337,7 +313,7 @@ int main (int argc, char **argv)

 // This ensures that a wider window is used for the initial landmark localisation
 clnf_models[model].detection_success = false;
-detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, depth_image, face_detections[detection_ind], clnf_models[model], det_parameters[model]);
+detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, face_detections[detection_ind], clnf_models[model], det_parameters[model]);

 // This activates the model
 active_models[model] = true;
@@ -351,7 +327,7 @@ int main (int argc, char **argv)
 else
 {
 // The actual facial landmark detection / tracking
-detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, depth_image, clnf_models[model], det_parameters[model]);
+detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_models[model], det_parameters[model]);
 }
 });

@@ -226,7 +226,7 @@ int main (int argc, char **argv)
 boost::filesystem::path parent_path = boost::filesystem::path(arguments[0]).parent_path();

 // Some initial parameters that can be overriden from command line
-vector<string> input_files, depth_directories, output_files, tracked_videos_output;
+vector<string> input_files, output_files, tracked_videos_output;

 LandmarkDetector::FaceModelParameters det_parameters(arguments);
 // Always track gaze in feature extraction
@@ -237,7 +237,7 @@ int main (int argc, char **argv)
 // Indicates that rotation should be with respect to camera or world coordinates
 bool use_world_coordinates;
 string output_codec; //not used but should
-LandmarkDetector::get_video_input_output_params(input_files, depth_directories, output_files, tracked_videos_output, use_world_coordinates, output_codec, arguments);
+LandmarkDetector::get_video_input_output_params(input_files, output_files, tracked_videos_output, use_world_coordinates, output_codec, arguments);

 bool video_input = true;
 bool verbose = true;

@@ -396,11 +396,6 @@ void FaceAnalyser::AddNextFrame(const cv::Mat& frame, const LandmarkDetector::CL
 UpdateRunningMedian(this->geom_desc_hist, this->geom_hist_sum, this->geom_descriptor_median, geom_descriptor_frame, update_median, this->num_bins_geom, this->min_val_geom, this->max_val_geom);
 }

-// First convert the face image to double representation as a row vector, TODO rem?
-//cv::Mat_<uchar> aligned_face_cols(1, aligned_face.cols * aligned_face.rows * aligned_face.channels(), aligned_face.data, 1);
-//cv::Mat_<double> aligned_face_cols_double;
-//aligned_face_cols.convertTo(aligned_face_cols_double, CV_64F);

 // Visualising the median HOG
 if (visualise)
 {

@@ -1,14 +1,38 @@
 ///////////////////////////////////////////////////////////////////////////////
-// Copyright (C) 2017, Carnegie Mellon University and University of Cambridge,
+// Copyright (C) 2016, Carnegie Mellon University and University of Cambridge,
 // all rights reserved.
 //
-// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
-//
-// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
-// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
-//
-// License can be found in OpenFace-license.txt
+// THIS SOFTWARE IS PROVIDED “AS IS” FOR ACADEMIC USE ONLY AND ANY EXPRESS
+// OR IMPLIED WARRANTIES WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY.
+// OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Notwithstanding the license granted herein, Licensee acknowledges that certain components
+// of the Software may be covered by so-called “open source” software licenses (“Open Source
+// Components”), which means any software licenses approved as open source licenses by the
+// Open Source Initiative or any substantially similar licenses, including without limitation any
+// license that, as a condition of distribution of the software licensed under such license,
+// requires that the distributor make the software available in source code format. Licensor shall
+// provide a list of Open Source Components for a particular version of the Software upon
+// Licensee’s request. Licensee will comply with the applicable terms of such licenses and to
+// the extent required by the licenses covering Open Source Components, the terms of such
+// licenses will apply in lieu of the terms of this Agreement. To the extent the terms of the
+// licenses applicable to Open Source Components prohibit any of the restrictions in this
+// License Agreement with respect to such Open Source Component, such restrictions will not
+// apply to such Open Source Component. To the extent the terms of the licenses applicable to
+// Open Source Components require Licensor to make an offer to provide source code or
+// related information in connection with the Software, such offer is hereby made. Any request
+// for source code or related information should be directed to cl-face-tracker-distribution@lists.cam.ac.uk
+// Licensee acknowledges receipt of notices for the Open Source Components for the initial
+// delivery of the Software.

 // * Any publications arising from the use of this software, including but
 // not limited to academic journal and conference publications, technical
 // reports and manuals, must cite at least one of the following works:
@@ -59,7 +83,7 @@ class DetectionValidator

 public:

-// What type of validator we're using - 0 - linear svr, 1 - feed forward neural net, 2 - convolutional neural net
+// What type of validator we're using - 0 - linear svr, 1 - feed forward neural net, 2 - convolutional neural net, 3 - new version of convolutional neural net
 int validator_type;

 // The orientations of each of the landmark detection validator
@@ -98,11 +122,15 @@ public:
 vector<vector<vector<vector<pair<int, cv::Mat_<double> > > > > > cnn_convolutional_layers_dft;
 vector<vector<vector<float > > > cnn_convolutional_layers_bias;
 vector< vector<int> > cnn_subsampling_layers;
-vector< vector<cv::Mat_<float> > > cnn_fully_connected_layers;
+vector< vector<cv::Mat_<float> > > cnn_fully_connected_layers_weights;
 vector< vector<float > > cnn_fully_connected_layers_bias;
-// 0 - convolutional, 1 - subsampling, 2 - fully connected
+// OLD CNN: 0 - convolutional, 1 - subsampling, 2 - fully connected
+// NEW CNN: 0 - convolutional, 1 - max pooling (2x2 stride 2), 2 - fully connected, 3 - relu, 4 - sigmoid
 vector<vector<int> > cnn_layer_types;

+// Extra params for the new CNN
+vector< vector<cv::Mat_<float> > > cnn_fully_connected_layers_biases;
+
 //==========================================

 // Normalisation for face validation
@@ -134,9 +162,15 @@ private:
 // Feed-forward Neural Network
 double CheckNN(const cv::Mat_<double>& warped_img, int view_id);

+// Convolutional Neural Network
+double CheckCNN_tbb(const cv::Mat_<double>& warped_img, int view_id);
+
 // Convolutional Neural Network
 double CheckCNN(const cv::Mat_<double>& warped_img, int view_id);

+// Convolutional Neural Network
+double CheckCNN_old(const cv::Mat_<double>& warped_img, int view_id);
+
 // A normalisation helper
 void NormaliseWarpedToVector(const cv::Mat_<double>& warped_img, cv::Mat_<double>& feature_vec, int view_id);

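The declarations above document the new validator path: validator_type 3 selects the new convolutional network, and cnn_layer_types now carries the NEW CNN codes (0 = convolutional, 1 = max pooling with a 2x2 window and stride 2, 2 = fully connected, 3 = relu, 4 = sigmoid). As a rough illustration of what those codes imply, and explicitly not the repository's CheckCNN/CheckCNN_tbb implementation, a single-map forward pass keyed off the layer codes could look like the sketch below; the helper name, the one-kernel-per-convolution simplification, and the single bias per fully connected layer are assumptions made here for clarity.

// Illustrative sketch only (not repository code): forward pass driven by the
// NEW CNN layer codes. Assumes a single response map, one kernel per
// convolutional layer, one weight matrix and one bias per fully connected layer.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Hypothetical helper; assumes even spatial dimensions before each pooling step.
static float run_new_cnn_sketch(const cv::Mat_<float>& input,
	const std::vector<int>& layer_types,               // codes as documented in cnn_layer_types
	const std::vector<cv::Mat_<float> >& conv_kernels, // one kernel per convolutional layer
	const std::vector<cv::Mat_<float> >& fc_weights,   // one (out x in) weight matrix per FC layer
	const std::vector<float>& fc_biases)               // one bias per FC layer
{
	cv::Mat_<float> x = input.clone();
	size_t conv_i = 0, fc_i = 0;

	for (size_t l = 0; l < layer_types.size(); ++l)
	{
		if (layer_types[l] == 0)        // convolution (filter2D correlates, which is fine for a sketch)
		{
			cv::Mat_<float> out;
			cv::filter2D(x, out, CV_32F, conv_kernels[conv_i++]);
			x = out;
		}
		else if (layer_types[l] == 1)   // max pooling, 2x2 window, stride 2
		{
			cv::Mat_<float> pooled(x.rows / 2, x.cols / 2);
			for (int r = 0; r < pooled.rows; ++r)
				for (int c = 0; c < pooled.cols; ++c)
				{
					double max_val;
					cv::minMaxLoc(x(cv::Rect(c * 2, r * 2, 2, 2)), 0, &max_val);
					pooled(r, c) = (float)max_val;
				}
			x = pooled;
		}
		else if (layer_types[l] == 2)   // fully connected: flatten, then W * x + b
		{
			cv::Mat_<float> flat = x.reshape(1, (int)x.total());
			x = fc_weights[fc_i] * flat + fc_biases[fc_i];
			++fc_i;
		}
		else if (layer_types[l] == 3)   // ReLU
		{
			cv::max(x, 0.0f, x);
		}
		else if (layer_types[l] == 4)   // sigmoid
		{
			cv::Mat_<float> e;
			cv::exp(-x, e);
			cv::Mat_<float> denom = 1.0f + e;
			x = 1.0f / denom;
		}
	}
	return x(0, 0);                     // final scalar used as the validation score
}

The actual class keeps per-view vectors of kernels, weights and biases (cnn_convolutional_layers_bias, cnn_fully_connected_layers_weights, cnn_fully_connected_layers_biases above), so this sketch collapses those dimensions into single containers.
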
@@ -55,10 +55,7 @@ namespace LandmarkDetector
 // Optionally can provide a bounding box from which to start tracking
 //================================================================================================================
 bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params);
-bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> &depth_image, CLNF& clnf_model, FaceModelParameters& params);

 bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);
-bool DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> &depth_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);

 //================================================================================================================
 // Landmark detection in image, need to provide an image and optionally CLNF model together with parameters (default values work well)
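Taken together with the executable changes earlier in this diff, the retained video overload is now called with just the grayscale image, the CLNF model and the parameters. The following is a minimal, hedged sketch of such a loop rather than repository code; the header names and the default-constructed FaceModelParameters pointing at the packaged model are assumptions.

// Sketch of calling the depth-free video API (assumed setup, placeholder paths).
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "LandmarkDetectorModel.h"      // assumed header names
#include "LandmarkDetectorFunc.h"
#include "LandmarkDetectorParameters.h"

int main()
{
	LandmarkDetector::FaceModelParameters det_parameters;             // default parameters (assumed)
	LandmarkDetector::CLNF clnf_model(det_parameters.model_location); // main landmark model

	cv::VideoCapture video_capture(0); // webcam 0, as in the demo executables
	cv::Mat captured_image;

	while (video_capture.read(captured_image) && !captured_image.empty())
	{
		// The detector expects a single-channel uchar image
		cv::Mat_<uchar> grayscale_image;
		if (captured_image.channels() == 3)
			cv::cvtColor(captured_image, grayscale_image, cv::COLOR_BGR2GRAY);
		else
			grayscale_image = captured_image.clone();

		// Depth is no longer passed: just image, model and parameters
		bool detection_success = LandmarkDetector::DetectLandmarksInVideo(grayscale_image, clnf_model, det_parameters);

		if (detection_success)
		{
			// e.g. draw the landmarks or estimate the head pose here
		}
	}
	return 0;
}
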
@@ -68,11 +65,6 @@ namespace LandmarkDetector
 // Providing a bounding box
 bool DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);

-//================================================
-// CLM-Z versions
-bool DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> depth_image, CLNF& clnf_model, FaceModelParameters& params);
-bool DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> depth_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params);

 //================================================================
 // Helper function for getting head pose from CLNF parameters

@@ -153,7 +153,7 @@ public:
 CLNF & operator= (const CLNF&& other);

 // Does the actual work - landmark detection
-bool DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &depth, FaceModelParameters& params);
+bool DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& params);

 // Gets the shape of the current detected landmarks in camera space (given camera calibration)
 // Can only be called after a call to DetectLandmarksInVideo or DetectLandmarksInImage
@@ -180,7 +180,7 @@ private:
 map<int, cv::Mat_<float> > kde_resp_precalc;

 // The model fitting: patch response computation and optimisation steps
-bool Fit(const cv::Mat_<uchar>& intensity_image, const cv::Mat_<float>& depth_image, const std::vector<int>& window_sizes, const FaceModelParameters& parameters);
+bool Fit(const cv::Mat_<uchar>& intensity_image, const std::vector<int>& window_sizes, const FaceModelParameters& parameters);

 // Mean shift computation that uses precalculated kernel density estimators (the one actually used)
 void NonVectorisedMeanShift_precalc_kde(cv::Mat_<float>& out_mean_shifts, const vector<cv::Mat_<float> >& patch_expert_responses, const cv::Mat_<float> &dxs, const cv::Mat_<float> &dys, int resp_size, float a, int scale, int view_id, map<int, cv::Mat_<float> >& mean_shifts);
@@ -189,9 +189,6 @@ private:
 double NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, const vector<cv::Mat_<float> >& patch_expert_responses, const cv::Vec6d& initial_global, const cv::Mat_<double>& initial_local,
 const cv::Mat_<double>& base_shape, const cv::Matx22d& sim_img_to_ref, const cv::Matx22f& sim_ref_to_img, int resp_size, int view_idx, bool rigid, int scale, cv::Mat_<double>& landmark_lhoods, const FaceModelParameters& parameters);

-// Removing background image from the depth
-bool RemoveBackground(cv::Mat_<float>& out_depth_image, const cv::Mat_<float>& depth_image);

 // Generating the weight matrix for the Weighted least squares
 void GetWeightMatrix(cv::Mat_<float>& WeightMatrix, int scale, int view_id, const FaceModelParameters& parameters);

@@ -52,12 +52,12 @@ namespace LandmarkDetector
 //=============================================================================================
 // Helper functions for parsing the inputs
 //=============================================================================================
-void get_video_input_output_params(vector<string> &input_video_file, vector<string> &depth_dir, vector<string> &output_files,
+void get_video_input_output_params(vector<string> &input_video_file, vector<string> &output_files,
 vector<string> &output_video_files, bool& world_coordinates_pose, string &output_codec, vector<string> &arguments);

 void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy, vector<string> &arguments);

-void get_image_input_output_params(vector<string> &input_image_files, vector<string> &input_depth_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
+void get_image_input_output_params(vector<string> &input_image_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
 vector<cv::Rect_<double>> &input_bounding_boxes, vector<string> &arguments);

 //===========================================================================

@@ -57,9 +57,6 @@ public:
 // The collection of SVR patch experts (for intensity/grayscale images), the experts are laid out scale->view->landmark
 vector<vector<vector<Multi_SVR_patch_expert> > > svr_expert_intensity;

-// The collection of SVR patch experts (for depth/range images), the experts are laid out scale->view->landmark
-vector<vector<vector<Multi_SVR_patch_expert> > > svr_expert_depth;

 // The collection of LNF (CCNF) patch experts (for intensity images), the experts are laid out scale->view->landmark
 vector<vector<vector<CCNF_patch_expert> > > ccnf_expert_intensity;

@@ -81,11 +78,11 @@ public:
 // A copy constructor
 Patch_experts(const Patch_experts& other);

-// Returns the patch expert responses given a grayscale and an optional depth image.
+// Returns the patch expert responses given a grayscale image.
 // Additionally returns the transform from the image coordinates to the response coordinates (and vice versa).
 // The computation also requires the current landmark locations to compute response around, the PDM corresponding to the desired model, and the parameters describing its instance
 // Also need to provide the size of the area of interest and the desired scale of analysis
-void Response(vector<cv::Mat_<float> >& patch_expert_responses, cv::Matx22f& sim_ref_to_img, cv::Matx22d& sim_img_to_ref, const cv::Mat_<uchar>& grayscale_image, const cv::Mat_<float>& depth_image,
+void Response(vector<cv::Mat_<float> >& patch_expert_responses, cv::Matx22f& sim_ref_to_img, cv::Matx22d& sim_img_to_ref, const cv::Mat_<uchar>& grayscale_image,
 const PDM& pdm, const cv::Vec6d& params_global, const cv::Mat_<double>& params_local, int window_size, int scale);

 // Getting the best view associated with the current orientation
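For orientation only, a call site that previously passed a depth map to Response would now simply drop that argument. The sketch below is not taken from the repository; the clnf_model variable and its pdm, params_global, params_local and patch_experts members, as well as the window_size and scale values, are assumptions standing in for whatever state the caller already has.

// Hypothetical call to the depth-free Response (surrounding variables assumed).
vector<cv::Mat_<float> > patch_expert_responses;
cv::Matx22f sim_ref_to_img;
cv::Matx22d sim_img_to_ref;

int window_size = 11; // example area-of-interest size, not a repository default
int scale = 0;        // coarsest scale

clnf_model.patch_experts.Response(patch_expert_responses, sim_ref_to_img, sim_img_to_ref,
	grayscale_image, // the depth image argument is simply gone
	clnf_model.pdm, clnf_model.params_global, clnf_model.params_local,
	window_size, scale);
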
@@ -95,7 +92,7 @@ public:
 inline int nViews(size_t scale = 0) const { return (int)centers[scale].size(); };

 // Reading in all of the patch experts
-void Read(vector<string> intensity_svr_expert_locations, vector<string> depth_svr_expert_locations, vector<string> intensity_ccnf_expert_locations);
+void Read(vector<string> intensity_svr_expert_locations, vector<string> intensity_ccnf_expert_locations);

@@ -1,8 +0,0 @@
-PDM pdms/Multi-PIE_aligned_PDM_66.txt
-Triangulations tris_66.txt
-PatchesIntensity patch_experts/intensity_patches_0.25.txt
-PatchesIntensity patch_experts/intensity_patches_0.35.txt
-PatchesIntensity patch_experts/intensity_patches_0.5.txt
-PatchesDepth patch_experts/depth_patches_0.25.txt
-PatchesDepth patch_experts/depth_patches_0.35.txt
-PatchesDepth patch_experts/depth_patches_0.5.txt

Binary file not shown.
Binary file not shown.

@@ -1,3 +0,0 @@
-LandmarkDetector clm-z.txt
-FaceDetConversion haarAlign.txt
-DetectionValidator detection_validation/validator_general_66.txt

@@ -1,3 +1,3 @@
 LandmarkDetector clm_general.txt
 FaceDetConversion haarAlign.txt
-DetectionValidator detection_validation/validator_general_68.txt
+DetectionValidator detection_validation/validator_cnn.txt

@@ -1,3 +1,3 @@
 LandmarkDetector clm_wild.txt
 FaceDetConversion haarAlign.txt
-DetectionValidator detection_validation/validator_general_68.txt
+DetectionValidator detection_validation/validator_cnn.txt

@@ -3,4 +3,4 @@ LandmarkDetector_part model_inner/main_clnf_inner.txt inner 17 0 18 1 19 2 20 3
 LandmarkDetector_part model_eye/main_clnf_synth_left.txt left_eye_28 36 8 37 10 38 12 39 14 40 16 41 18
 LandmarkDetector_part model_eye/main_clnf_synth_right.txt right_eye_28 42 8 43 10 44 12 45 14 46 16 47 18
 FaceDetConversion haarAlign.txt
-DetectionValidator detection_validation/validator_general_68.txt
+DetectionValidator detection_validation/validator_cnn.txt

@@ -2,4 +2,4 @@ LandmarkDetector clnf_wild.txt
 LandmarkDetector_part model_eye/main_clnf_synth_left.txt left_eye_28 36 8 37 10 38 12 39 14 40 16 41 18
 LandmarkDetector_part model_eye/main_clnf_synth_right.txt right_eye_28 42 8 43 10 44 12 45 14 46 16 47 18
 FaceDetConversion haarAlign.txt
-DetectionValidator detection_validation/validator_general_68.txt
+DetectionValidator detection_validation/validator_cnn.txt

6 file diffs suppressed because they are too large.

@@ -1,409 +0,0 @@
-# The mean values of the components (in mm)
-198
-1
-6
-[198 mean values of the deleted point distribution model omitted]
-# The principal components (eigenvectors) of identity or combined identity and expression model
-198
-24
-6
-[198 x 24 eigenvector matrix of the deleted point distribution model omitted]
|
||||
-0.126672 0.095819 -0.025587 -0.020411 0.069696 -0.040492 0.113408 0.031462 0.034512 0.033831 0.003643 0.030528 -0.016233 0.011701 -0.096173 -0.076485 -0.029675 -0.146836 0.048508 -0.081728 0.061181 0.021189 -0.014489 -0.041443
|
||||
-0.250166 0.063268 0.083495 -0.054436 0.057312 -0.020981 0.104096 0.069033 0.091832 0.000402 0.000251 0.071967 0.008207 0.011854 -0.127690 -0.088722 -0.024205 0.035454 -0.007640 -0.213727 -0.029662 -0.007746 -0.007841 -0.085785
|
||||
-0.296019 0.054209 0.115310 -0.058454 0.064304 0.001047 0.095382 0.064413 0.123368 -0.010259 0.003756 0.077430 0.018650 -0.004453 -0.087455 -0.086370 -0.006771 0.088859 -0.019705 -0.235003 -0.076648 -0.015417 -0.003684 -0.080678
|
||||
-0.250897 0.062205 0.083305 -0.052857 0.058055 0.020288 0.102304 0.069589 0.091555 0.001374 0.003939 0.071836 0.009119 -0.017922 -0.127756 -0.091187 0.008043 0.034653 -0.008149 -0.191606 -0.101632 -0.002118 0.002213 -0.089753
|
||||
-0.127456 0.094161 -0.026167 -0.018422 0.070970 0.038881 0.109672 0.033189 0.034640 0.035192 0.000349 0.030582 -0.015196 -0.018359 -0.096808 -0.080649 0.018355 -0.148838 0.047059 -0.033800 -0.095213 0.028324 -0.005043 -0.049366
|
||||
0.104320 0.170913 -0.080998 -0.021930 -0.032618 0.025131 0.010807 0.067423 -0.036997 0.049471 0.000150 -0.100902 -0.019091 -0.013763 -0.036516 -0.100919 0.007221 -0.037267 0.000956 -0.160019 -0.086714 0.062270 -0.011209 -0.137345
|
||||
0.143379 0.165763 -0.060587 -0.016963 -0.064358 0.001114 -0.036866 0.078487 -0.030741 0.055788 0.002031 -0.111664 -0.000937 0.002077 0.032607 -0.119558 -0.009742 -0.005622 0.036313 -0.263615 -0.087856 0.081412 -0.018026 -0.161589
|
||||
0.105016 0.172060 -0.080874 -0.023381 -0.033528 -0.024000 0.012838 0.066580 -0.036656 0.048317 0.005201 -0.100551 -0.019986 0.012979 -0.036877 -0.099109 -0.022128 -0.035904 0.001548 -0.181239 -0.020639 0.057382 -0.019235 -0.134122
|
||||
-0.205688 0.049919 0.069362 -0.072378 0.066333 -0.021241 0.053205 0.075885 0.026935 -0.034539 0.005560 -0.038648 -0.097809 0.004207 0.045232 0.071028 -0.010315 0.132832 -0.022870 0.175336 0.099108 -0.028911 0.005928 0.109927
|
||||
-0.284218 0.019124 0.108561 -0.081450 0.085386 -0.000675 0.040237 0.073037 0.081899 -0.060470 -0.000721 -0.041618 -0.087622 -0.002093 0.131858 0.123116 0.007698 0.217754 -0.028622 0.209344 0.066249 -0.010693 -0.000377 0.151209
|
||||
-0.206346 0.048788 0.069346 -0.071231 0.066851 0.018814 0.051430 0.076716 0.026560 -0.033380 -0.000869 -0.038955 -0.097134 -0.010612 0.045537 0.068786 0.024196 0.131653 -0.023820 0.199350 0.022820 -0.026651 0.003549 0.105548
|
||||
0.027947 -0.014519 0.027372 0.027090 -0.014515 -0.118449 0.014779 -0.000186 -0.033562 0.024932 -0.042037 -0.006816 -0.010401 -0.004006 -0.030938 -0.006596 -0.003303 0.025464 0.010620 0.011829 -0.009179 -0.028271 -0.011793 -0.040691
|
||||
0.025583 -0.022643 0.008028 0.037660 -0.003753 -0.102792 0.026260 -0.003599 -0.046445 0.055353 -0.023960 0.025986 -0.025782 -0.031085 0.011987 -0.036466 -0.000804 -0.031755 -0.013448 -0.019920 0.026063 -0.033933 -0.018291 -0.017150
|
||||
0.030716 -0.019142 0.015104 0.048347 0.008061 -0.057675 0.034305 0.001908 -0.032288 0.037098 -0.025976 0.008895 -0.034955 -0.015408 -0.033946 -0.012214 0.004252 -0.031221 0.011547 -0.052185 0.013885 0.023495 -0.041625 -0.018600
|
||||
0.017217 -0.005005 -0.000825 0.038309 -0.033691 -0.049929 0.035044 0.006099 -0.026296 -0.013340 -0.050572 0.018098 0.017580 0.008349 -0.037259 -0.027207 0.030530 0.047757 -0.042520 -0.027716 0.002119 0.011201 -0.013641 -0.023775
|
||||
-0.002977 -0.004519 -0.005401 0.007102 -0.020136 0.003735 0.010466 0.031329 -0.051627 0.040202 -0.038370 -0.038383 0.005010 0.005392 -0.023086 0.029715 0.008523 0.055107 -0.010657 0.004865 -0.007244 0.004499 -0.049346 0.002795
|
||||
-0.010096 0.001042 0.000059 0.011824 -0.026881 0.025272 0.041191 0.020275 -0.012375 0.000779 -0.033145 0.002458 -0.007586 0.007785 -0.013394 0.017417 0.014159 -0.005881 -0.014046 -0.035161 0.017520 -0.016349 -0.017341 -0.009992
|
||||
0.019776 -0.000284 -0.012557 0.024116 -0.012968 0.027447 0.037641 -0.003555 0.001294 -0.019848 -0.025716 -0.009227 0.017166 0.003187 -0.003003 -0.013572 0.026600 -0.056937 -0.027703 -0.029589 0.036149 -0.017405 -0.005140 -0.014149
|
||||
0.003043 0.002636 -0.007479 0.021282 0.022862 0.026962 0.012532 0.017070 -0.011934 -0.027974 -0.031975 -0.014367 0.009348 0.001082 -0.006111 0.023087 0.042828 -0.015792 -0.000859 -0.010996 0.010873 0.003257 0.004459 0.021874
|
||||
0.003741 0.016913 -0.001204 -0.017634 -0.016073 0.036588 0.022771 0.035887 0.015065 -0.020207 -0.010793 -0.000589 0.017515 -0.028759 -0.005286 0.026388 0.045892 -0.023168 0.012083 0.037597 0.001342 0.008441 -0.027373 0.056358
|
||||
0.006235 0.002336 -0.014149 0.012474 0.020734 -0.047581 0.012805 0.019052 -0.009281 -0.032040 0.036141 -0.011251 0.009517 0.003028 -0.009339 0.025537 -0.049242 -0.015047 0.003361 0.000855 -0.014520 -0.007131 0.002813 0.033132
|
||||
0.021965 -0.001009 -0.015741 0.019932 -0.015721 -0.035531 0.037303 -0.002551 0.003316 -0.020857 0.030647 -0.007770 0.014825 -0.001334 -0.003715 -0.011282 -0.035968 -0.059442 -0.024581 0.000602 -0.041615 -0.016669 0.018792 -0.007243
|
||||
-0.008996 -0.000549 -0.001255 0.012659 -0.029948 -0.024644 0.041247 0.020575 -0.011513 0.000978 0.039860 0.002944 -0.010132 -0.008154 -0.011641 0.019826 -0.019156 -0.010840 -0.011057 -0.014449 -0.029046 -0.005862 0.026793 -0.003840
|
||||
-0.003031 -0.007050 -0.005025 0.013159 -0.022487 -0.002090 0.009577 0.031278 -0.051872 0.041429 0.045054 -0.039255 0.000680 -0.006840 -0.019272 0.033989 -0.010335 0.047850 -0.007969 0.005201 0.010707 0.029877 0.041337 0.009335
|
||||
0.015909 -0.008608 0.000093 0.050988 -0.032880 0.056455 0.031226 0.007102 -0.028123 -0.011172 0.054224 0.016291 0.012312 -0.014037 -0.034003 -0.018029 -0.036754 0.038949 -0.041639 -0.016244 -0.014604 0.017632 -0.000615 -0.021925
|
||||
0.028911 -0.020564 0.020663 0.057528 0.009832 0.063863 0.029412 0.000800 -0.032018 0.041995 0.029433 0.006736 -0.041794 0.006742 -0.027211 -0.007554 -0.006106 -0.039369 0.009745 -0.029838 -0.038610 0.047533 0.015143 -0.020532
|
||||
0.023922 -0.023664 0.013385 0.048180 0.001429 0.109536 0.018845 -0.002766 -0.046065 0.060109 0.023434 0.024933 -0.030917 0.021761 0.016546 -0.033368 -0.000156 -0.037766 -0.016409 0.003256 -0.032063 -0.016957 0.022986 -0.020118
|
||||
0.025479 -0.016511 0.031827 0.041393 -0.008403 0.126612 0.007288 0.001233 -0.035336 0.029687 0.040922 -0.007776 -0.013868 -0.005459 -0.027831 -0.001951 0.005347 0.020879 0.007693 0.007357 0.014333 -0.017694 0.012221 -0.043917
|
||||
0.003189 0.026538 0.038597 0.018669 0.012396 -0.003064 -0.024561 -0.002817 0.002622 -0.000148 0.017020 0.022609 -0.004868 0.005963 0.023348 -0.028816 -0.032968 0.005698 -0.055577 0.020448 0.022584 0.024768 0.047859 0.001740
|
||||
0.000463 0.019463 0.024278 -0.029989 0.006319 -0.005217 -0.013751 0.006275 0.010675 0.049628 0.022928 0.001551 0.009671 -0.024186 0.018628 0.002273 -0.059675 0.026343 -0.063193 0.033560 0.003140 0.020410 0.001326 0.016946
|
||||
-0.000029 0.020658 0.017072 -0.027930 0.007062 -0.017970 -0.006258 -0.005185 0.004557 0.025434 0.022254 0.009542 0.011170 -0.004279 0.003404 0.005904 -0.053521 0.031271 -0.024813 0.038249 0.003914 0.022292 -0.001279 0.009964
|
||||
0.003294 0.025279 0.021921 -0.021950 0.016281 -0.028948 -0.013909 -0.011395 -0.011052 -0.005952 0.021898 0.008144 0.008008 0.011346 -0.002342 0.008934 -0.040320 0.014749 0.007476 0.035145 0.004734 0.013044 0.014277 0.001855
|
||||
0.008232 0.024965 0.029824 -0.004269 0.019641 -0.043536 -0.030921 -0.012568 -0.008932 -0.041767 0.012154 0.000783 -0.004622 0.026748 0.019253 0.007897 -0.032740 -0.002789 0.020610 0.019074 0.013518 -0.004310 0.037242 -0.007354
|
||||
0.010354 0.027336 0.023251 -0.015652 0.025251 0.036485 -0.033761 -0.007033 -0.005841 -0.047403 -0.018907 0.006933 0.007124 -0.023824 0.008963 -0.000927 0.036586 0.012466 0.022380 0.013472 -0.003293 -0.035777 -0.027321 -0.012812
|
||||
0.004813 0.027050 0.017023 -0.030956 0.020204 0.026792 -0.015645 -0.007011 -0.008335 -0.010363 -0.029818 0.012299 0.017923 -0.008348 -0.009660 -0.000164 0.045111 0.026769 0.008711 0.022239 0.011627 -0.005512 -0.015953 -0.003064
|
||||
0.000766 0.021777 0.013939 -0.033866 0.009609 0.020486 -0.007124 -0.001779 0.006741 0.022061 -0.029365 0.012405 0.019471 0.006576 -0.002112 -0.003785 0.059780 0.041340 -0.023649 0.024858 0.014139 0.012995 -0.007512 0.005546
|
||||
0.000800 0.019861 0.022529 -0.033132 0.007255 0.008814 -0.013329 0.008770 0.012220 0.047301 -0.028907 0.002944 0.016370 0.025151 0.014628 -0.007662 0.068549 0.034586 -0.062338 0.021464 0.011355 0.013293 -0.010281 0.012728
|
||||
0.003226 0.025832 0.036768 0.019095 0.012590 0.005204 -0.023585 -0.000822 0.003035 -0.000961 -0.021105 0.022764 0.001173 -0.008868 0.020613 -0.035042 0.039000 0.010165 -0.056131 0.022830 -0.009516 -0.002921 -0.057879 -0.004983
|
||||
0.001974 0.012342 0.008902 -0.001035 -0.014319 -0.014808 -0.005193 -0.002275 0.011929 -0.018206 0.019006 0.009941 0.038655 0.005308 0.018479 0.030950 -0.036894 -0.017445 0.032165 -0.091752 0.005902 -0.021467 0.001966 -0.009808
|
||||
-0.004216 0.004994 -0.016522 -0.029024 0.004502 0.007034 0.013316 0.000795 -0.012080 0.017514 0.036897 -0.007448 0.020571 -0.003682 0.025414 0.004514 -0.022096 -0.025326 0.029382 -0.047762 -0.017414 0.005327 -0.013496 0.012521
|
||||
-0.018832 -0.002126 -0.034571 -0.026174 0.029176 0.018516 0.009517 0.001888 -0.008390 0.017435 0.027928 -0.015628 0.017215 -0.007801 0.016744 -0.024043 -0.014056 0.003402 0.019569 -0.013736 -0.015438 0.013625 -0.001066 0.006400
|
||||
-0.028577 -0.003106 -0.035186 0.006865 0.039718 0.022501 -0.012257 -0.000103 0.031018 -0.027697 -0.003165 -0.006225 0.003329 0.006415 -0.021346 -0.045658 -0.008373 0.037353 0.024522 0.008467 0.007100 0.009292 0.037864 -0.028296
|
||||
-0.010439 -0.016610 -0.032891 0.026755 0.038727 0.019621 -0.001639 -0.035212 0.021531 0.043250 -0.011573 -0.047443 -0.090454 0.017415 0.029955 -0.029753 -0.016593 0.003223 0.000312 -0.050431 -0.067987 0.007906 -0.010813 0.001715
|
||||
-0.018644 -0.010357 -0.028056 -0.000468 0.032730 0.008727 -0.000374 -0.021592 0.004388 0.003520 0.005170 -0.016747 -0.034045 0.003118 0.010412 -0.016663 0.001397 -0.000975 0.017040 -0.015979 -0.012284 -0.003719 0.001706 -0.008266
|
||||
-0.024274 -0.003386 -0.017513 -0.036282 0.010578 0.005796 0.000745 -0.000204 -0.000937 -0.029211 0.016619 0.019301 0.020013 -0.001295 -0.025894 0.021730 0.004179 0.018245 0.011996 0.012299 0.036561 -0.004257 0.003014 0.001946
|
||||
-0.014891 -0.008411 -0.036029 -0.015021 0.034034 -0.016902 -0.000349 -0.017533 0.008122 -0.003290 -0.006353 -0.011299 -0.025911 0.001618 0.002149 -0.022488 -0.009485 0.009421 0.019984 -0.027222 0.008181 -0.016199 0.011061 -0.004347
|
||||
-0.007543 -0.015474 -0.040189 0.015384 0.038625 -0.027574 -0.000491 -0.031990 0.023963 0.036850 0.011293 -0.042285 -0.082698 -0.016002 0.021122 -0.035817 0.009754 0.014281 0.004730 -0.087198 0.033671 -0.000797 0.015535 0.007350
|
||||
0.007930 0.023774 0.036743 -0.014038 -0.036477 -0.006774 0.011374 0.022541 -0.022535 0.042305 0.026335 0.036750 0.021916 0.007611 -0.005653 -0.015182 -0.047638 -0.014341 0.054505 -0.043196 0.023290 -0.056673 -0.015723 -0.007742
|
||||
-0.004915 0.015933 0.017345 -0.015680 -0.017967 -0.004915 0.011614 0.016946 0.029827 0.028125 0.011868 -0.004711 -0.005276 -0.001960 -0.009475 0.009741 -0.046956 0.011883 0.007010 -0.018339 -0.002865 -0.004312 -0.013932 -0.006942
|
||||
-0.003521 0.004654 -0.015590 -0.013889 0.004395 -0.008732 0.007700 -0.004791 0.067569 -0.003581 0.005624 -0.039265 -0.012761 -0.005771 -0.015968 0.006255 -0.033275 -0.009690 -0.041270 0.014617 -0.023760 0.048481 -0.000051 -0.022552
|
||||
0.000458 0.005431 -0.018104 -0.000980 0.011696 -0.042760 -0.002120 -0.006394 0.021871 -0.044384 0.017191 0.023310 -0.058988 -0.023639 0.049743 -0.022378 0.011228 -0.017895 -0.021546 0.054782 -0.005029 0.079345 0.046779 0.008618
|
||||
-0.000473 -0.003483 -0.000317 -0.000032 -0.013100 -0.027729 -0.010188 -0.006816 0.024254 -0.025317 0.025138 -0.003575 -0.027818 -0.016550 0.033888 -0.020679 -0.003022 -0.026972 0.004090 0.018494 0.016316 0.039096 0.026870 -0.021700
|
||||
-0.000384 0.006795 0.002821 0.001952 -0.016537 -0.017312 -0.006126 -0.003212 0.008095 0.008878 0.020727 0.007447 -0.003471 -0.004029 0.015335 -0.019076 -0.026372 -0.019477 0.020929 -0.007807 0.020166 -0.006328 0.008143 -0.024274
|
||||
0.002071 0.006466 -0.023492 -0.007974 0.015683 0.036241 -0.004189 -0.003038 0.024520 -0.048551 -0.020624 0.025659 -0.052563 0.020100 0.044440 -0.026317 -0.003525 -0.010101 -0.021391 0.035466 0.034123 0.038210 -0.080648 0.006556
|
||||
-0.002402 0.005398 -0.019679 -0.020452 0.005835 0.006932 0.007837 -0.002300 0.069086 -0.006995 -0.007547 -0.036643 -0.005613 0.005258 -0.021338 -0.000437 0.040201 0.000064 -0.039798 -0.010442 0.026457 0.034652 -0.023664 -0.024801
|
||||
-0.003825 0.016801 0.014153 -0.022104 -0.016761 0.006166 0.011717 0.019730 0.031672 0.024962 -0.015533 -0.002099 0.002298 0.002520 -0.014551 0.001733 0.051460 0.020880 0.007994 -0.024340 -0.011603 -0.003149 0.015724 -0.010346
|
||||
0.009293 0.025153 0.034610 -0.021073 -0.035140 0.010154 0.010695 0.025885 -0.019886 0.039855 -0.033031 0.039020 0.029686 -0.005553 -0.011205 -0.023293 0.045845 -0.005817 0.054993 -0.027600 -0.047966 -0.047026 0.044749 -0.012053
|
||||
0.001196 0.008104 -0.000550 -0.005319 -0.014490 0.017382 -0.007183 0.000135 0.010803 0.005711 -0.024559 0.010185 0.003984 0.004249 0.009649 -0.025892 0.026929 -0.011175 0.020838 -0.000927 -0.021532 -0.016130 -0.001564 -0.027490
|
||||
0.001089 -0.002355 -0.004514 -0.007150 -0.010291 0.025128 -0.011639 -0.003498 0.027026 -0.029088 -0.027714 -0.001506 -0.021034 0.014769 0.028630 -0.025597 0.007130 -0.019412 0.003749 0.018448 -0.003519 0.014143 -0.041945 -0.024701
|
||||
-0.014752 -0.007321 0.013877 -0.002884 -0.024108 0.002303 -0.026512 0.020183 -0.015461 -0.030184 -0.006267 0.026068 0.044433 -0.016670 0.030474 0.011557 0.027340 0.038203 -0.005839 0.006177 0.027136 0.000316 0.019255 0.000552
|
||||
-0.016868 -0.029752 0.004473 -0.020764 -0.021435 0.027753 -0.000612 0.014380 -0.000613 0.002858 0.009706 -0.012946 0.027082 -0.001179 0.028245 0.039466 0.007036 0.048356 -0.016487 -0.020655 0.013899 0.035330 -0.037198 0.035797
|
||||
-0.020584 -0.025552 0.009208 -0.010018 -0.001434 0.030711 0.008708 -0.025079 0.018193 -0.004982 0.019040 -0.024397 0.011371 0.005362 0.014803 0.024863 0.004075 -0.004452 0.006803 -0.034515 0.013896 0.032023 -0.042288 0.024877
|
||||
-0.007172 -0.030730 -0.013156 0.002041 0.013713 0.018878 -0.002702 -0.036444 -0.004836 -0.014642 0.027953 -0.026214 0.006306 -0.001831 0.021305 -0.037591 0.021327 -0.044823 0.050411 -0.036042 0.008177 0.014946 -0.015597 -0.013038
|
||||
-0.017181 -0.023576 0.003032 -0.025203 -0.001919 -0.036232 0.009083 -0.022286 0.022601 -0.010671 -0.019965 -0.020816 0.016805 0.003686 0.009380 0.019998 -0.010796 0.002809 0.008988 -0.025250 -0.024145 0.039778 0.034154 0.028887
|
||||
-0.014072 -0.028410 -0.000822 -0.033387 -0.022436 -0.033373 -0.000370 0.016901 0.003275 -0.002184 -0.011401 -0.010105 0.031211 0.010900 0.023483 0.035466 -0.010793 0.053986 -0.014690 -0.011441 -0.019274 0.041437 0.027803 0.041183
|
||||
-0.011745 -0.006901 0.008132 -0.012160 -0.024078 -0.016750 -0.027255 0.023374 -0.012432 -0.034736 0.001738 0.028410 0.047103 0.022609 0.024876 0.009700 -0.029335 0.042447 -0.004444 0.021981 -0.024490 -0.016464 -0.009335 0.005723
|
||||
0.013230 -0.012106 -0.008624 0.006624 -0.005346 -0.037879 -0.023831 0.001700 -0.000246 -0.040315 0.016171 -0.000787 0.040941 -0.027783 -0.034923 0.000944 -0.023770 -0.015075 0.022171 0.070943 0.004816 -0.018620 0.009349 0.002391
|
||||
0.017298 0.003296 0.001998 -0.019318 -0.015374 -0.040235 -0.032471 -0.025113 0.013205 -0.017560 -0.001988 -0.032933 0.018524 0.000793 -0.025342 0.018806 -0.022045 -0.076625 0.010375 0.067569 0.001278 -0.039650 0.037479 0.024286
|
||||
0.026498 0.011392 -0.000362 -0.042663 -0.029473 0.013919 -0.020835 -0.008795 -0.003774 0.015161 0.010246 -0.040784 0.002108 -0.020452 -0.003005 0.013382 0.020127 -0.090119 -0.018840 0.070445 0.011233 -0.062439 -0.022872 0.049291
|
||||
0.012895 0.001538 0.008666 -0.004337 -0.015103 0.016876 -0.032324 -0.028284 0.009022 -0.012605 0.000615 -0.037415 0.014420 0.004265 -0.019532 0.022244 0.021863 -0.083473 0.007511 0.058851 0.031450 -0.040010 -0.012168 0.018051
|
||||
0.009878 -0.013189 -0.002893 0.018337 -0.004429 0.018560 -0.023726 -0.000676 -0.003245 -0.036619 -0.018168 -0.004958 0.037474 0.033212 -0.029730 0.002752 0.018367 -0.021047 0.019956 0.061906 0.033994 -0.009713 0.002178 -0.002604
|
||||
-0.036455 -0.002450 -0.017922 -0.006097 0.022282 0.049421 -0.011847 -0.011101 -0.004510 -0.004388 0.004868 0.031117 0.011638 -0.001303 -0.008047 0.039302 0.009212 0.001789 0.021092 -0.019512 -0.016745 0.011997 0.019345 0.006406
|
||||
0.000423 -0.007319 -0.057292 0.016176 0.031459 0.007973 -0.009437 -0.016403 0.000322 -0.021840 -0.027543 0.040725 0.012995 0.020647 -0.022551 -0.047601 0.009484 -0.033190 0.025752 -0.014163 -0.038431 -0.002155 0.032958 -0.015735
|
||||
-0.032717 -0.001468 -0.026128 -0.020284 0.020055 -0.061717 -0.009130 -0.007757 -0.001532 -0.011309 -0.008803 0.034833 0.018287 0.008545 -0.015543 0.035259 -0.013646 0.010114 0.025166 -0.029561 0.005239 -0.010502 -0.011794 0.015129
|
||||
-0.015427 -0.008969 -0.007325 0.001209 0.003884 -0.065420 -0.014325 0.005239 0.003813 -0.021528 0.005891 0.012697 -0.008585 -0.000428 -0.006703 0.035593 -0.008463 0.035073 0.011345 0.016756 -0.004326 -0.043418 0.005114 0.014734
|
||||
-0.016076 0.002727 -0.004804 0.030704 0.014818 0.010395 -0.028232 0.002524 0.026332 0.003282 -0.053346 0.020559 -0.065119 0.005758 -0.005391 0.059666 -0.008954 0.058529 0.015960 0.017843 -0.028979 -0.059673 -0.002070 0.043925
|
||||
-0.018698 -0.009367 0.000474 0.013333 0.006238 0.041309 -0.016795 0.002286 0.001409 -0.014775 -0.007769 0.009326 -0.013965 0.003250 0.000451 0.038211 0.013024 0.027985 0.007332 0.016022 0.004588 -0.027147 0.015319 0.006226
|
||||
# The variances of the components (eigenvalues) of identity or combined identity and expression model
|
||||
1
|
||||
24
|
||||
6
|
||||
634.723159 232.843080 101.461353 70.892811 62.146634 61.253370 53.263370 41.886102 37.065997 30.948553 27.957356 24.039353 20.687504 19.610950 14.875372 14.281810 13.358140 11.883143 10.946700 9.934766 9.739733 7.997896 7.885247 6.790098
|
|
@ -1,518 +0,0 @@
|
|||
# Number of triangulations
|
||||
7
|
||||
# Triangulation 1
|
||||
# triangulation
|
||||
91
|
||||
3
|
||||
4
|
||||
23 20 21
|
||||
23 21 22
|
||||
36 0 1
|
||||
15 16 45
|
||||
17 0 36
|
||||
16 26 45
|
||||
18 17 37
|
||||
26 25 44
|
||||
37 17 36
|
||||
45 26 44
|
||||
19 18 38
|
||||
25 24 43
|
||||
38 18 37
|
||||
44 25 43
|
||||
20 19 38
|
||||
24 23 43
|
||||
21 20 39
|
||||
23 22 42
|
||||
39 20 38
|
||||
43 23 42
|
||||
22 21 27
|
||||
27 21 39
|
||||
22 27 42
|
||||
27 28 42
|
||||
28 27 39
|
||||
42 28 47
|
||||
28 39 40
|
||||
36 1 41
|
||||
46 15 45
|
||||
41 1 2
|
||||
14 15 46
|
||||
29 28 40
|
||||
28 29 47
|
||||
41 2 40
|
||||
47 14 46
|
||||
2 29 40
|
||||
29 14 47
|
||||
29 2 3
|
||||
13 14 29
|
||||
30 29 31
|
||||
35 29 30
|
||||
29 3 31
|
||||
35 13 29
|
||||
33 30 32
|
||||
34 30 33
|
||||
32 30 31
|
||||
35 30 34
|
||||
31 3 4
|
||||
12 13 35
|
||||
4 5 48
|
||||
11 12 54
|
||||
5 6 48
|
||||
10 11 54
|
||||
48 6 59
|
||||
55 10 54
|
||||
6 7 59
|
||||
9 10 55
|
||||
59 7 58
|
||||
56 9 55
|
||||
58 8 57
|
||||
57 8 56
|
||||
7 8 58
|
||||
8 9 56
|
||||
31 4 48
|
||||
54 12 35
|
||||
49 31 48
|
||||
54 35 53
|
||||
50 31 49
|
||||
53 35 52
|
||||
32 31 50
|
||||
35 34 52
|
||||
33 32 50
|
||||
34 33 52
|
||||
51 33 50
|
||||
52 33 51
|
||||
49 48 60
|
||||
50 49 60
|
||||
61 50 60
|
||||
51 50 61
|
||||
52 51 61
|
||||
61 62 52
|
||||
53 52 62
|
||||
54 53 62
|
||||
55 54 63
|
||||
56 55 63
|
||||
64 56 63
|
||||
57 56 64
|
||||
64 65 57
|
||||
58 57 65
|
||||
59 58 65
|
||||
65 48 59
|
||||
# Triangulation 2
|
||||
# triangulation
|
||||
90
|
||||
3
|
||||
4
|
||||
23 20 21
|
||||
23 21 22
|
||||
36 0 1
|
||||
15 16 45
|
||||
17 0 36
|
||||
16 26 45
|
||||
18 17 37
|
||||
26 25 44
|
||||
37 17 36
|
||||
45 26 44
|
||||
19 18 38
|
||||
25 24 43
|
||||
38 18 37
|
||||
44 25 43
|
||||
20 19 38
|
||||
24 23 43
|
||||
21 20 39
|
||||
23 22 42
|
||||
39 20 38
|
||||
43 23 42
|
||||
22 21 27
|
||||
27 21 39
|
||||
22 27 42
|
||||
27 28 42
|
||||
28 27 39
|
||||
42 28 47
|
||||
28 39 40
|
||||
36 1 41
|
||||
46 15 45
|
||||
41 1 2
|
||||
14 15 46
|
||||
29 28 40
|
||||
28 29 47
|
||||
41 2 40
|
||||
47 14 46
|
||||
2 29 40
|
||||
29 14 47
|
||||
29 2 3
|
||||
13 14 29
|
||||
30 29 31
|
||||
29 3 31
|
||||
35 13 29
|
||||
33 30 32
|
||||
34 30 33
|
||||
32 30 31
|
||||
35 30 34
|
||||
31 3 4
|
||||
12 13 35
|
||||
4 5 48
|
||||
11 12 54
|
||||
5 6 48
|
||||
10 11 54
|
||||
48 6 59
|
||||
55 10 54
|
||||
6 7 59
|
||||
9 10 55
|
||||
59 7 58
|
||||
56 9 55
|
||||
58 8 57
|
||||
57 8 56
|
||||
7 8 58
|
||||
8 9 56
|
||||
31 4 48
|
||||
54 12 35
|
||||
49 31 48
|
||||
54 35 53
|
||||
50 31 49
|
||||
53 35 52
|
||||
32 31 50
|
||||
35 34 52
|
||||
33 32 50
|
||||
34 33 52
|
||||
51 33 50
|
||||
52 33 51
|
||||
49 48 60
|
||||
50 49 60
|
||||
61 50 60
|
||||
51 50 61
|
||||
52 51 61
|
||||
61 62 52
|
||||
53 52 62
|
||||
54 53 62
|
||||
55 54 63
|
||||
56 55 63
|
||||
64 56 63
|
||||
57 56 64
|
||||
64 65 57
|
||||
58 57 65
|
||||
59 58 65
|
||||
65 48 59
|
||||
# Triangulation 3
|
||||
# triangulation
|
||||
77
|
||||
3
|
||||
4
|
||||
23 20 21
|
||||
23 21 22
|
||||
36 0 1
|
||||
17 0 36
|
||||
18 17 37
|
||||
26 25 44
|
||||
37 17 36
|
||||
45 26 44
|
||||
19 18 38
|
||||
25 24 43
|
||||
38 18 37
|
||||
44 25 43
|
||||
20 19 38
|
||||
24 23 43
|
||||
21 20 39
|
||||
23 22 42
|
||||
39 20 38
|
||||
43 23 42
|
||||
22 21 27
|
||||
27 21 39
|
||||
22 27 42
|
||||
27 28 42
|
||||
28 27 39
|
||||
42 28 47
|
||||
28 39 40
|
||||
36 1 41
|
||||
46 15 45
|
||||
41 1 2
|
||||
29 28 40
|
||||
28 29 47
|
||||
41 2 40
|
||||
47 14 46
|
||||
2 29 40
|
||||
29 2 3
|
||||
30 29 31
|
||||
29 3 31
|
||||
33 30 32
|
||||
34 30 33
|
||||
32 30 31
|
||||
35 30 34
|
||||
31 3 4
|
||||
4 5 48
|
||||
5 6 48
|
||||
48 6 59
|
||||
6 7 59
|
||||
59 7 58
|
||||
56 9 55
|
||||
58 8 57
|
||||
57 8 56
|
||||
7 8 58
|
||||
8 9 56
|
||||
31 4 48
|
||||
49 31 48
|
||||
50 31 49
|
||||
53 35 52
|
||||
32 31 50
|
||||
35 34 52
|
||||
33 32 50
|
||||
34 33 52
|
||||
51 33 50
|
||||
52 33 51
|
||||
49 48 60
|
||||
50 49 60
|
||||
61 50 60
|
||||
51 50 61
|
||||
52 51 61
|
||||
61 62 52
|
||||
53 52 62
|
||||
54 53 62
|
||||
55 54 63
|
||||
56 55 63
|
||||
64 56 63
|
||||
57 56 64
|
||||
64 65 57
|
||||
58 57 65
|
||||
59 58 65
|
||||
65 48 59
|
||||
# Triangulation 4
|
||||
# triangulation
|
||||
28
|
||||
3
|
||||
4
|
||||
36 0 1
|
||||
17 0 36
|
||||
18 17 37
|
||||
37 17 36
|
||||
19 18 38
|
||||
38 18 37
|
||||
20 19 38
|
||||
36 1 41
|
||||
41 1 2
|
||||
29 28 40
|
||||
41 2 40
|
||||
2 29 40
|
||||
29 2 3
|
||||
30 29 31
|
||||
29 3 31
|
||||
31 3 4
|
||||
4 5 48
|
||||
5 6 48
|
||||
48 6 59
|
||||
6 7 59
|
||||
59 7 58
|
||||
58 8 57
|
||||
7 8 58
|
||||
31 4 48
|
||||
49 31 48
|
||||
50 31 49
|
||||
51 33 50
|
||||
51 50 61
|
||||
# Triangulation 5
|
||||
# triangulation
|
||||
90
|
||||
3
|
||||
4
|
||||
23 20 21
|
||||
23 21 22
|
||||
36 0 1
|
||||
15 16 45
|
||||
17 0 36
|
||||
16 26 45
|
||||
18 17 37
|
||||
26 25 44
|
||||
37 17 36
|
||||
45 26 44
|
||||
19 18 38
|
||||
25 24 43
|
||||
38 18 37
|
||||
44 25 43
|
||||
20 19 38
|
||||
24 23 43
|
||||
21 20 39
|
||||
23 22 42
|
||||
39 20 38
|
||||
43 23 42
|
||||
22 21 27
|
||||
27 21 39
|
||||
22 27 42
|
||||
27 28 42
|
||||
28 27 39
|
||||
42 28 47
|
||||
28 39 40
|
||||
36 1 41
|
||||
46 15 45
|
||||
41 1 2
|
||||
14 15 46
|
||||
29 28 40
|
||||
28 29 47
|
||||
41 2 40
|
||||
47 14 46
|
||||
2 29 40
|
||||
29 14 47
|
||||
29 2 3
|
||||
13 14 29
|
||||
35 29 30
|
||||
29 3 31
|
||||
35 13 29
|
||||
33 30 32
|
||||
34 30 33
|
||||
32 30 31
|
||||
35 30 34
|
||||
31 3 4
|
||||
12 13 35
|
||||
4 5 48
|
||||
11 12 54
|
||||
5 6 48
|
||||
10 11 54
|
||||
48 6 59
|
||||
55 10 54
|
||||
6 7 59
|
||||
9 10 55
|
||||
59 7 58
|
||||
56 9 55
|
||||
58 8 57
|
||||
57 8 56
|
||||
7 8 58
|
||||
8 9 56
|
||||
31 4 48
|
||||
54 12 35
|
||||
49 31 48
|
||||
54 35 53
|
||||
50 31 49
|
||||
53 35 52
|
||||
32 31 50
|
||||
35 34 52
|
||||
33 32 50
|
||||
34 33 52
|
||||
51 33 50
|
||||
52 33 51
|
||||
49 48 60
|
||||
50 49 60
|
||||
61 50 60
|
||||
51 50 61
|
||||
52 51 61
|
||||
61 62 52
|
||||
53 52 62
|
||||
54 53 62
|
||||
55 54 63
|
||||
56 55 63
|
||||
64 56 63
|
||||
57 56 64
|
||||
64 65 57
|
||||
58 57 65
|
||||
59 58 65
|
||||
65 48 59
|
||||
# Triangulation 6
|
||||
# triangulation
|
||||
77
|
||||
3
|
||||
4
|
||||
23 20 21
|
||||
23 21 22
|
||||
15 16 45
|
||||
16 26 45
|
||||
18 17 37
|
||||
26 25 44
|
||||
37 17 36
|
||||
45 26 44
|
||||
19 18 38
|
||||
25 24 43
|
||||
38 18 37
|
||||
44 25 43
|
||||
20 19 38
|
||||
24 23 43
|
||||
21 20 39
|
||||
23 22 42
|
||||
39 20 38
|
||||
43 23 42
|
||||
22 21 27
|
||||
27 21 39
|
||||
22 27 42
|
||||
27 28 42
|
||||
28 27 39
|
||||
42 28 47
|
||||
28 39 40
|
||||
36 1 41
|
||||
46 15 45
|
||||
14 15 46
|
||||
29 28 40
|
||||
28 29 47
|
||||
41 2 40
|
||||
47 14 46
|
||||
29 14 47
|
||||
13 14 29
|
||||
35 29 30
|
||||
35 13 29
|
||||
33 30 32
|
||||
34 30 33
|
||||
32 30 31
|
||||
35 30 34
|
||||
12 13 35
|
||||
11 12 54
|
||||
10 11 54
|
||||
55 10 54
|
||||
9 10 55
|
||||
59 7 58
|
||||
56 9 55
|
||||
58 8 57
|
||||
57 8 56
|
||||
7 8 58
|
||||
8 9 56
|
||||
54 12 35
|
||||
54 35 53
|
||||
50 31 49
|
||||
53 35 52
|
||||
32 31 50
|
||||
35 34 52
|
||||
33 32 50
|
||||
34 33 52
|
||||
51 33 50
|
||||
52 33 51
|
||||
49 48 60
|
||||
50 49 60
|
||||
61 50 60
|
||||
51 50 61
|
||||
52 51 61
|
||||
61 62 52
|
||||
53 52 62
|
||||
54 53 62
|
||||
55 54 63
|
||||
56 55 63
|
||||
64 56 63
|
||||
57 56 64
|
||||
64 65 57
|
||||
58 57 65
|
||||
59 58 65
|
||||
65 48 59
|
||||
# Triangulation 7
|
||||
# triangulation
|
||||
28
|
||||
3
|
||||
4
|
||||
15 16 45
|
||||
16 26 45
|
||||
26 25 44
|
||||
45 26 44
|
||||
25 24 43
|
||||
44 25 43
|
||||
24 23 43
|
||||
46 15 45
|
||||
14 15 46
|
||||
28 29 47
|
||||
47 14 46
|
||||
29 14 47
|
||||
13 14 29
|
||||
35 29 30
|
||||
35 13 29
|
||||
12 13 35
|
||||
11 12 54
|
||||
10 11 54
|
||||
55 10 54
|
||||
9 10 55
|
||||
56 9 55
|
||||
57 8 56
|
||||
8 9 56
|
||||
54 12 35
|
||||
54 35 53
|
||||
53 35 52
|
||||
52 33 51
|
||||
52 51 61
|
|
@ -1,14 +1,38 @@
|
|||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Copyright (C) 2017, Carnegie Mellon University and University of Cambridge,
|
||||
// Copyright (C) 2016, Carnegie Mellon University and University of Cambridge,
|
||||
// all rights reserved.
|
||||
//
|
||||
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
|
||||
//
|
||||
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
|
||||
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
|
||||
//
|
||||
// License can be found in OpenFace-license.txt
|
||||
// THIS SOFTWARE IS PROVIDED “AS IS” FOR ACADEMIC USE ONLY AND ANY EXPRESS
|
||||
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
|
||||
// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
||||
// OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||||
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Notwithstanding the license granted herein, Licensee acknowledges that certain components
|
||||
// of the Software may be covered by so-called “open source” software licenses (“Open Source
|
||||
// Components”), which means any software licenses approved as open source licenses by the
|
||||
// Open Source Initiative or any substantially similar licenses, including without limitation any
|
||||
// license that, as a condition of distribution of the software licensed under such license,
|
||||
// requires that the distributor make the software available in source code format. Licensor shall
|
||||
// provide a list of Open Source Components for a particular version of the Software upon
|
||||
// Licensee’s request. Licensee will comply with the applicable terms of such licenses and to
|
||||
// the extent required by the licenses covering Open Source Components, the terms of such
|
||||
// licenses will apply in lieu of the terms of this Agreement. To the extent the terms of the
|
||||
// licenses applicable to Open Source Components prohibit any of the restrictions in this
|
||||
// License Agreement with respect to such Open Source Component, such restrictions will not
|
||||
// apply to such Open Source Component. To the extent the terms of the licenses applicable to
|
||||
// Open Source Components require Licensor to make an offer to provide source code or
|
||||
// related information in connection with the Software, such offer is hereby made. Any request
|
||||
// for source code or related information should be directed to cl-face-tracker-distribution@lists.cam.ac.uk
|
||||
// Licensee acknowledges receipt of notices for the Open Source Components for the initial
|
||||
// delivery of the Software.
|
||||
|
||||
// * Any publications arising from the use of this software, including but
|
||||
// not limited to academic journal and conference publications, technical
|
||||
// reports and manuals, must cite at least one of the following works:
|
||||
|
@ -40,6 +64,9 @@
|
|||
#include <opencv2/core/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
|
||||
// TBB includes
|
||||
#include <tbb/tbb.h>
|
||||
|
||||
// System includes
|
||||
#include <fstream>
|
||||
|
||||
|
@ -108,15 +135,27 @@ cnn_convolutional_layers_bias(other.cnn_convolutional_layers_bias), cnn_convolut
|
|||
}
|
||||
}
|
||||
|
||||
this->cnn_fully_connected_layers.resize(other.cnn_fully_connected_layers.size());
|
||||
for (size_t v = 0; v < other.cnn_fully_connected_layers.size(); ++v)
|
||||
this->cnn_fully_connected_layers_weights.resize(other.cnn_fully_connected_layers_weights.size());
|
||||
for (size_t v = 0; v < other.cnn_fully_connected_layers_weights.size(); ++v)
|
||||
{
|
||||
this->cnn_fully_connected_layers[v].resize(other.cnn_fully_connected_layers[v].size());
|
||||
this->cnn_fully_connected_layers_weights[v].resize(other.cnn_fully_connected_layers_weights[v].size());
|
||||
|
||||
for (size_t l = 0; l < other.cnn_fully_connected_layers[v].size(); ++l)
|
||||
for (size_t l = 0; l < other.cnn_fully_connected_layers_weights[v].size(); ++l)
|
||||
{
|
||||
// Make sure the matrix is copied.
|
||||
this->cnn_fully_connected_layers[v][l] = other.cnn_fully_connected_layers[v][l].clone();
|
||||
this->cnn_fully_connected_layers_weights[v][l] = other.cnn_fully_connected_layers_weights[v][l].clone();
|
||||
}
|
||||
}
|
||||
|
||||
this->cnn_fully_connected_layers_biases.resize(other.cnn_fully_connected_layers_biases.size());
|
||||
for (size_t v = 0; v < other.cnn_fully_connected_layers_biases.size(); ++v)
|
||||
{
|
||||
this->cnn_fully_connected_layers_biases[v].resize(other.cnn_fully_connected_layers_biases[v].size());
|
||||
|
||||
for (size_t l = 0; l < other.cnn_fully_connected_layers_biases[v].size(); ++l)
|
||||
{
|
||||
// Make sure the matrix is copied.
|
||||
this->cnn_fully_connected_layers_biases[v][l] = other.cnn_fully_connected_layers_biases[v][l].clone();
|
||||
}
|
||||
}
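The clone() calls above matter because cv::Mat storage is reference counted: a plain assignment would leave the copied validator sharing its weight and bias matrices with the source object. A minimal illustrative sketch (the helper name is hypothetical, not part of this commit):

#include <opencv2/core/core.hpp>
#include <cassert>

void clone_vs_assign_demo()
{
	cv::Mat_<float> a(2, 2, 0.0f);
	cv::Mat_<float> shallow = a;       // shares the underlying buffer with a
	cv::Mat_<float> deep = a.clone();  // owns an independent copy of the data
	a(0, 0) = 1.0f;
	assert(shallow(0, 0) == 1.0f);     // aliased: the change is visible here
	assert(deep(0, 0) == 0.0f);        // the deep copy is unaffected
}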
|
||||
|
||||
|
@ -188,11 +227,20 @@ void DetectionValidator::Read(string location)
|
|||
cnn_convolutional_layers.resize(n);
|
||||
cnn_convolutional_layers_dft.resize(n);
|
||||
cnn_subsampling_layers.resize(n);
|
||||
cnn_fully_connected_layers.resize(n);
|
||||
cnn_fully_connected_layers_weights.resize(n);
|
||||
cnn_layer_types.resize(n);
|
||||
cnn_fully_connected_layers_bias.resize(n);
|
||||
cnn_convolutional_layers_bias.resize(n);
|
||||
}
|
||||
else if (validator_type == 3)
|
||||
{
|
||||
cnn_convolutional_layers.resize(n);
|
||||
cnn_convolutional_layers_dft.resize(n);
|
||||
cnn_fully_connected_layers_weights.resize(n);
|
||||
cnn_layer_types.resize(n);
|
||||
cnn_fully_connected_layers_biases.resize(n);
|
||||
cnn_convolutional_layers_bias.resize(n);
|
||||
}
|
||||
|
||||
// Initialise the normalisation terms
|
||||
mean_images.resize(n);
|
||||
|
@ -318,11 +366,82 @@ void DetectionValidator::Read(string location)
|
|||
// Fully connected layer
|
||||
cv::Mat_<float> weights;
|
||||
ReadMatBin(detection_validator_stream, weights);
|
||||
cnn_fully_connected_layers[i].push_back(weights);
|
||||
cnn_fully_connected_layers_weights[i].push_back(weights);
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (validator_type == 3)
|
||||
{
|
||||
int network_depth;
|
||||
detection_validator_stream.read((char*)&network_depth, 4);
|
||||
|
||||
cnn_layer_types[i].resize(network_depth);
|
||||
|
||||
for (int layer = 0; layer < network_depth; ++layer)
|
||||
{
|
||||
|
||||
int layer_type;
|
||||
detection_validator_stream.read((char*)&layer_type, 4);
|
||||
cnn_layer_types[i][layer] = layer_type;
|
||||
|
||||
// convolutional
|
||||
if (layer_type == 0)
|
||||
{
|
||||
|
||||
// Read the number of input maps
|
||||
int num_in_maps;
|
||||
detection_validator_stream.read((char*)&num_in_maps, 4);
|
||||
|
||||
// Read the number of kernels for each input map
|
||||
int num_kernels;
|
||||
detection_validator_stream.read((char*)&num_kernels, 4);
|
||||
|
||||
vector<vector<cv::Mat_<float> > > kernels;
|
||||
vector<vector<pair<int, cv::Mat_<double> > > > kernel_dfts;
|
||||
|
||||
kernels.resize(num_in_maps);
|
||||
kernel_dfts.resize(num_in_maps);
|
||||
|
||||
vector<float> biases;
|
||||
for (int k = 0; k < num_kernels; ++k)
|
||||
{
|
||||
float bias;
|
||||
detection_validator_stream.read((char*)&bias, 4);
|
||||
biases.push_back(bias);
|
||||
}
|
||||
|
||||
cnn_convolutional_layers_bias[i].push_back(biases);
|
||||
|
||||
// For every input map
|
||||
for (int in = 0; in < num_in_maps; ++in)
|
||||
{
|
||||
kernels[in].resize(num_kernels);
|
||||
kernel_dfts[in].resize(num_kernels);
|
||||
|
||||
// For every kernel on that input map
|
||||
for (int k = 0; k < num_kernels; ++k)
|
||||
{
|
||||
ReadMatBin(detection_validator_stream, kernels[in][k]);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
cnn_convolutional_layers[i].push_back(kernels);
|
||||
cnn_convolutional_layers_dft[i].push_back(kernel_dfts);
|
||||
}
|
||||
else if (layer_type == 2)
|
||||
{
|
||||
cv::Mat_<float> biases;
|
||||
ReadMatBin(detection_validator_stream, biases);
|
||||
cnn_fully_connected_layers_biases[i].push_back(biases);
|
||||
|
||||
// Fully connected layer
|
||||
cv::Mat_<float> weights;
|
||||
ReadMatBin(detection_validator_stream, weights);
|
||||
cnn_fully_connected_layers_weights[i].push_back(weights);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Read in the piece-wise affine warps
|
||||
paws[i].Read(detection_validator_stream);
|
||||
}
|
||||
|
@ -361,7 +480,13 @@ double DetectionValidator::Check(const cv::Vec3d& orientation, const cv::Mat_<uc
|
|||
}
|
||||
else if(validator_type == 2)
|
||||
{
|
||||
dec = CheckCNN(warped, id);
|
||||
dec = CheckCNN_old(warped, id);
|
||||
}
|
||||
else if (validator_type == 3)
|
||||
{
|
||||
// On some machines the non-TBB version may be faster
|
||||
//dec = CheckCNN(warped, id);
|
||||
dec = CheckCNN_tbb(warped, id);
|
||||
}
|
||||
return dec;
|
||||
}
|
||||
|
@ -433,7 +558,7 @@ double DetectionValidator::CheckSVR(const cv::Mat_<double>& warped_img, int view
|
|||
}
|
||||
|
||||
// Convolutional Neural Network
|
||||
double DetectionValidator::CheckCNN(const cv::Mat_<double>& warped_img, int view_id)
|
||||
double DetectionValidator::CheckCNN_old(const cv::Mat_<double>& warped_img, int view_id)
|
||||
{
|
||||
|
||||
cv::Mat_<double> feature_vec;
|
||||
|
@ -599,7 +724,7 @@ double DetectionValidator::CheckCNN(const cv::Mat_<double>& warped_img, int view
|
|||
cv::hconcat(input_concat, add, input_concat);
|
||||
}
|
||||
|
||||
input_concat = input_concat * cnn_fully_connected_layers[view_id][fully_connected_layer].t();
|
||||
input_concat = input_concat * cnn_fully_connected_layers_weights[view_id][fully_connected_layer].t();
|
||||
|
||||
cv::exp(-input_concat - cnn_fully_connected_layers_bias[view_id][fully_connected_layer], input_concat);
|
||||
input_concat = 1.0 /(1.0 + input_concat);
|
||||
|
@ -609,6 +734,55 @@ double DetectionValidator::CheckCNN(const cv::Mat_<double>& warped_img, int view
|
|||
|
||||
fully_connected_layer++;
|
||||
}
|
||||
// Max pooling layer
|
||||
if (layer_type == 3)
|
||||
{
|
||||
|
||||
vector<cv::Mat_<float>> outputs_sub;
|
||||
|
||||
// Iterate over pool height and width; the stride is 2x2 and no padding is used
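// Example: with a 2x2 pool and stride 2, a 4x4 input map shrinks to 2x2 and
// each output cell holds the maximum of the corresponding 2x2 input window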
|
||||
int stride_x = 2;
|
||||
int stride_y = 2;
|
||||
|
||||
int pool_x = 2;
|
||||
int pool_y = 2;
|
||||
|
||||
for (size_t in = 0; in < input_maps.size(); ++in)
|
||||
{
|
||||
int out_x = input_maps[in].cols / stride_x;
|
||||
int out_y = input_maps[in].rows / stride_y;
|
||||
|
||||
cv::Mat_<float> sub_out(out_y, out_x, 0.0);
|
||||
cv::Mat_<float> in_map = input_maps[in];
|
||||
|
||||
for (int x = 0; x < input_maps[in].cols; x+= stride_x)
|
||||
{
|
||||
for (int y = 0; y < input_maps[in].rows; y+= stride_y)
|
||||
{
|
||||
float curr_max = -FLT_MAX;
|
||||
for (int x_in = x; x_in < x+pool_x; ++x_in)
|
||||
{
|
||||
for (int y_in = y; y_in < y + pool_y; ++y_in)
|
||||
{
|
||||
float curr_val = in_map.at<float>(y_in, x_in);
|
||||
if (curr_val > curr_max)
|
||||
{
|
||||
curr_max = curr_val;
|
||||
}
|
||||
}
|
||||
}
|
||||
int x_in_out = x / stride_x;
|
||||
int y_in_out = y / stride_y;
|
||||
sub_out.at<float>(y_in_out, x_in_out) = curr_max;
|
||||
}
|
||||
}
|
||||
|
||||
outputs_sub.push_back(sub_out);
|
||||
}
|
||||
outputs = outputs_sub;
|
||||
subsample_layer++;
|
||||
}
|
||||
|
||||
// Set the outputs of this layer to inputs of the next
|
||||
input_maps = outputs;
|
||||
|
||||
|
@ -620,6 +794,471 @@ double DetectionValidator::CheckCNN(const cv::Mat_<double>& warped_img, int view
|
|||
return dec;
|
||||
}
|
||||
|
||||
// Convolutional Neural Network
|
||||
double DetectionValidator::CheckCNN_tbb(const cv::Mat_<double>& warped_img, int view_id)
|
||||
{
|
||||
|
||||
cv::Mat_<double> feature_vec;
|
||||
NormaliseWarpedToVector(warped_img, feature_vec, view_id);
|
||||
|
||||
// Create a normalised image from the crop vector
|
||||
cv::Mat_<float> img(warped_img.size(), 0.0);
|
||||
img = img.t();
|
||||
|
||||
cv::Mat mask = paws[view_id].pixel_mask.t();
|
||||
cv::MatIterator_<uchar> mask_it = mask.begin<uchar>();
|
||||
|
||||
cv::MatIterator_<double> feature_it = feature_vec.begin();
|
||||
cv::MatIterator_<float> img_it = img.begin();
|
||||
|
||||
int wInt = img.cols;
|
||||
int hInt = img.rows;
|
||||
|
||||
for (int i = 0; i < wInt; ++i)
|
||||
{
|
||||
for (int j = 0; j < hInt; ++j, ++mask_it, ++img_it)
|
||||
{
|
||||
// if it is within the mask
|
||||
if (*mask_it)
|
||||
{
|
||||
// assign the feature to the image if it is within the mask
|
||||
*img_it = (float)*feature_it++;
|
||||
}
|
||||
}
|
||||
}
|
||||
img = img.t();
|
||||
|
||||
int cnn_layer = 0;
|
||||
int fully_connected_layer = 0;
|
||||
|
||||
vector<cv::Mat_<float> > input_maps;
|
||||
input_maps.push_back(img);
|
||||
|
||||
vector<cv::Mat_<float> > outputs;
|
||||
|
||||
for (size_t layer = 0; layer < cnn_layer_types[view_id].size(); ++layer)
|
||||
{
|
||||
// Determine layer type
|
||||
int layer_type = cnn_layer_types[view_id][layer];
|
||||
|
||||
// Convolutional layer
|
||||
if (layer_type == 0)
|
||||
{
|
||||
outputs.clear();
|
||||
// Pre-allocate the output feature maps
|
||||
outputs.resize(cnn_convolutional_layers[view_id][cnn_layer][0].size());
|
||||
for (size_t in = 0; in < input_maps.size(); ++in)
|
||||
{
|
||||
cv::Mat_<float> input_image = input_maps[in];
|
||||
|
||||
// Placeholders for precomputed data used to speed up the correlation (convolution)
|
||||
cv::Mat_<double> input_image_dft;
|
||||
cv::Mat integral_image;
|
||||
cv::Mat integral_image_sq;
|
||||
|
||||
// To adapt for TBB, perform the first convolution in a non-TBB way so that the DFT and integral images are computed
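// The per-kernel DFTs are cached in cnn_convolutional_layers_dft the first time
// this runs (the cache entry is empty until then) and reused on later frames,
// so the kernel transforms are only paid for once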
|
||||
cv::Mat_<float> kernel = cnn_convolutional_layers[view_id][cnn_layer][in][0];
|
||||
|
||||
// The convolution (with precomputation)
|
||||
cv::Mat_<float> output;
|
||||
if (cnn_convolutional_layers_dft[view_id][cnn_layer][in][0].second.empty()) // This will only be needed during the first pass
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][0].first = precomputed_dft.begin()->first;
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][0].second = precomputed_dft.begin()->second;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
precomputed_dft[cnn_convolutional_layers_dft[view_id][cnn_layer][in][0].first] = cnn_convolutional_layers_dft[view_id][cnn_layer][in][0].second;
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
}
|
||||
|
||||
// Combining the maps
|
||||
if (in == 0)
|
||||
{
|
||||
outputs[0] = output;
|
||||
}
|
||||
else
|
||||
{
|
||||
outputs[0] = outputs[0] + output;
|
||||
}
|
||||
|
||||
|
||||
// TBB pass for the remaining kernels; empirically this helps for layers with many kernels
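// tbb::parallel_for(first, last, body) runs body(k) for every k in [first, last)
// on TBB's worker threads; the iterations here are independent because each k
// only writes outputs[k] and its own entry of the per-kernel DFT cache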
|
||||
tbb::parallel_for(1, (int)cnn_convolutional_layers[view_id][cnn_layer][in].size(), [&](int k) {
|
||||
{
|
||||
cv::Mat_<float> kernel = cnn_convolutional_layers[view_id][cnn_layer][in][k];
|
||||
|
||||
// The convolution (with precomputation)
|
||||
cv::Mat_<float> output;
|
||||
if (cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second.empty()) // This will only be needed during the first pass
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first = precomputed_dft.begin()->first;
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second = precomputed_dft.begin()->second;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
precomputed_dft[cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first] = cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second;
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
}
|
||||
|
||||
// Combining the maps
|
||||
if (in == 0)
|
||||
{
|
||||
outputs[k] = output;
|
||||
}
|
||||
else
|
||||
{
|
||||
outputs[k] = outputs[k] + output;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
for (size_t k = 0; k < cnn_convolutional_layers[view_id][cnn_layer][0].size(); ++k)
|
||||
{
|
||||
outputs[k] = outputs[k] + cnn_convolutional_layers_bias[view_id][cnn_layer][k];
|
||||
}
|
||||
cnn_layer++;
|
||||
}
|
||||
if (layer_type == 1)
|
||||
{
|
||||
vector<cv::Mat_<float>> outputs_sub;
|
||||
|
||||
// Iterate over pool height and width; the stride is 2x2 and no padding is used
|
||||
int stride_x = 2;
|
||||
int stride_y = 2;
|
||||
|
||||
int pool_x = 2;
|
||||
int pool_y = 2;
|
||||
|
||||
for (size_t in = 0; in < input_maps.size(); ++in)
|
||||
{
|
||||
int out_x = input_maps[in].cols / stride_x;
|
||||
int out_y = input_maps[in].rows / stride_y;
|
||||
|
||||
cv::Mat_<float> sub_out(out_y, out_x, 0.0);
|
||||
cv::Mat_<float> in_map = input_maps[in];
|
||||
|
||||
for (int x = 0; x < input_maps[in].cols; x += stride_x)
|
||||
{
|
||||
for (int y = 0; y < input_maps[in].rows; y += stride_y)
|
||||
{
|
||||
float curr_max = -FLT_MAX;
|
||||
for (int x_in = x; x_in < x + pool_x; ++x_in)
|
||||
{
|
||||
for (int y_in = y; y_in < y + pool_y; ++y_in)
|
||||
{
|
||||
float curr_val = in_map.at<float>(y_in, x_in);
|
||||
if (curr_val > curr_max)
|
||||
{
|
||||
curr_max = curr_val;
|
||||
}
|
||||
}
|
||||
}
|
||||
int x_in_out = x / stride_x;
|
||||
int y_in_out = y / stride_y;
|
||||
sub_out.at<float>(y_in_out, x_in_out) = curr_max;
|
||||
}
|
||||
}
|
||||
|
||||
outputs_sub.push_back(sub_out);
|
||||
|
||||
}
|
||||
outputs = outputs_sub;
|
||||
}
|
||||
if (layer_type == 2)
|
||||
{
|
||||
// Concatenate all the maps
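// Each map is transposed and reshaped into a single row, and the rows are
// concatenated into one 1 x (total number of map elements) feature vector
// before the weight multiplication and bias addition below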
|
||||
cv::Mat_<float> input_concat = input_maps[0].t();
|
||||
input_concat = input_concat.reshape(0, 1);
|
||||
|
||||
for (size_t in = 1; in < input_maps.size(); ++in)
|
||||
{
|
||||
cv::Mat_<float> add = input_maps[in].t();
|
||||
add = add.reshape(0, 1);
|
||||
cv::hconcat(input_concat, add, input_concat);
|
||||
}
|
||||
|
||||
input_concat = input_concat * cnn_fully_connected_layers_weights[view_id][fully_connected_layer];
|
||||
input_concat = input_concat + cnn_fully_connected_layers_biases[view_id][fully_connected_layer].t();
|
||||
|
||||
outputs.clear();
|
||||
outputs.push_back(input_concat);
|
||||
|
||||
fully_connected_layer++;
|
||||
}
|
||||
if (layer_type == 3) // ReLU
|
||||
{
|
||||
outputs.clear();
|
||||
for (size_t k = 0; k < input_maps.size(); ++k)
|
||||
{
|
||||
// Apply the ReLU
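// cv::THRESH_TOZERO with a threshold of 0 keeps values above 0 and zeroes the
// rest, i.e. out = max(in, 0)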
|
||||
cv::threshold(input_maps[k], input_maps[k], 0, 0, cv::THRESH_TOZERO);
|
||||
outputs.push_back(input_maps[k]);
|
||||
|
||||
}
|
||||
}
|
||||
if (layer_type == 4)
|
||||
{
|
||||
outputs.clear();
|
||||
for (size_t k = 0; k < input_maps.size(); ++k)
|
||||
{
|
||||
// Apply the sigmoid
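// i.e. the logistic function 1 / (1 + exp(-x)), applied element-wise to each map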
|
||||
cv::exp(-input_maps[k], input_maps[k]);
|
||||
input_maps[k] = 1.0 / (1.0 + input_maps[k]);
|
||||
|
||||
outputs.push_back(input_maps[k]);
|
||||
|
||||
}
|
||||
}
|
||||
// Set the outputs of this layer to inputs of the next
|
||||
input_maps = outputs;
|
||||
|
||||
}
|
||||
|
||||
// Convert the class label to a continuous value
|
||||
double max_val = 0;
|
||||
cv::Point max_loc;
|
||||
cv::minMaxLoc(outputs[0].t(), 0, &max_val, 0, &max_loc);
|
||||
int max_idx = max_loc.y;
|
||||
double max = 1;
|
||||
double min = -1;
|
||||
double bins = (double)outputs[0].cols;
|
||||
// Unquantizing the softmax output into a continuous value
|
||||
double step_size = (max - min) / bins; // This should be saved somewhere
|
||||
double unquantized = min + step_size / 2.0 + max_idx * step_size;
|
||||
|
||||
return unquantized;
|
||||
}
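The de-quantisation above takes the index of the winning softmax bin and maps it back to the centre of that bin in the [min, max] range. A small sketch with illustrative numbers (the real bin count is the width of the final layer output, not a fixed constant):

double unquantize_example(int max_idx)
{
	const double min_val = -1.0, max_val = 1.0, bins = 10.0; // illustrative values only
	double step_size = (max_val - min_val) / bins;           // 0.2 here
	return min_val + step_size / 2.0 + max_idx * step_size;  // centre of bin max_idx
}
// unquantize_example(0) == -0.9, unquantize_example(7) == 0.5, unquantize_example(9) == 0.9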
|
||||
|
||||
// Convolutional Neural Network
|
||||
double DetectionValidator::CheckCNN(const cv::Mat_<double>& warped_img, int view_id)
|
||||
{
|
||||
|
||||
cv::Mat_<double> feature_vec;
|
||||
NormaliseWarpedToVector(warped_img, feature_vec, view_id);
|
||||
|
||||
// Create a normalised image from the crop vector
|
||||
cv::Mat_<float> img(warped_img.size(), 0.0);
|
||||
img = img.t();
|
||||
|
||||
cv::Mat mask = paws[view_id].pixel_mask.t();
|
||||
cv::MatIterator_<uchar> mask_it = mask.begin<uchar>();
|
||||
|
||||
cv::MatIterator_<double> feature_it = feature_vec.begin();
|
||||
cv::MatIterator_<float> img_it = img.begin();
|
||||
|
||||
int wInt = img.cols;
|
||||
int hInt = img.rows;
|
||||
|
||||
for (int i = 0; i < wInt; ++i)
|
||||
{
|
||||
for (int j = 0; j < hInt; ++j, ++mask_it, ++img_it)
|
||||
{
|
||||
// if it is within the mask
|
||||
if (*mask_it)
|
||||
{
|
||||
// assign the feature to the image if it is within the mask
|
||||
*img_it = (float)*feature_it++;
|
||||
}
|
||||
}
|
||||
}
|
||||
img = img.t();
|
||||
|
||||
int cnn_layer = 0;
|
||||
int fully_connected_layer = 0;
|
||||
|
||||
vector<cv::Mat_<float> > input_maps;
|
||||
input_maps.push_back(img);
|
||||
|
||||
vector<cv::Mat_<float> > outputs;
|
||||
|
||||
for (size_t layer = 0; layer < cnn_layer_types[view_id].size(); ++layer)
|
||||
{
|
||||
// Determine layer type
|
||||
int layer_type = cnn_layer_types[view_id][layer];
|
||||
|
||||
// Convolutional layer
|
||||
if (layer_type == 0)
|
||||
{
|
||||
outputs.clear();
|
||||
for (size_t in = 0; in < input_maps.size(); ++in)
|
||||
{
|
||||
cv::Mat_<float> input_image = input_maps[in];
|
||||
|
||||
// Placeholders for precomputed data used to speed up the correlation (convolution)
|
||||
cv::Mat_<double> input_image_dft;
|
||||
cv::Mat integral_image;
|
||||
cv::Mat integral_image_sq;
|
||||
|
||||
// TODO can TBB-ify this
|
||||
for (size_t k = 0; k < cnn_convolutional_layers[view_id][cnn_layer][in].size(); ++k)
|
||||
{
|
||||
cv::Mat_<float> kernel = cnn_convolutional_layers[view_id][cnn_layer][in][k];
|
||||
|
||||
// The convolution (with precomputation)
|
||||
cv::Mat_<float> output;
|
||||
if (cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second.empty())
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first = precomputed_dft.begin()->first;
|
||||
cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second = precomputed_dft.begin()->second;
|
||||
}
|
||||
else
|
||||
{
|
||||
std::map<int, cv::Mat_<double> > precomputed_dft;
|
||||
precomputed_dft[cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].first] = cnn_convolutional_layers_dft[view_id][cnn_layer][in][k].second;
|
||||
LandmarkDetector::matchTemplate_m(input_image, input_image_dft, integral_image, integral_image_sq, kernel, precomputed_dft, output, CV_TM_CCORR);
|
||||
}
|
||||
|
||||
// Combining the maps
|
||||
if (in == 0)
|
||||
{
|
||||
outputs.push_back(output);
|
||||
}
|
||||
else
|
||||
{
|
||||
outputs[k] = outputs[k] + output;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for (size_t k = 0; k < cnn_convolutional_layers[view_id][cnn_layer][0].size(); ++k)
|
||||
{
|
||||
outputs[k] = outputs[k] + cnn_convolutional_layers_bias[view_id][cnn_layer][k];
|
||||
}
|
||||
cnn_layer++;
|
||||
}
|
||||
if (layer_type == 1)
|
||||
{
|
||||
vector<cv::Mat_<float>> outputs_sub;
|
||||
|
||||
// Iterate over pool height and width; the stride is 2x2 and no padding is used
|
||||
int stride_x = 2;
|
||||
int stride_y = 2;
|
||||
|
||||
int pool_x = 2;
|
||||
int pool_y = 2;
|
||||
|
||||
for (size_t in = 0; in < input_maps.size(); ++in)
|
||||
{
|
||||
int out_x = input_maps[in].cols / stride_x;
|
||||
int out_y = input_maps[in].rows / stride_y;
|
||||
|
||||
cv::Mat_<float> sub_out(out_y, out_x, 0.0);
|
||||
cv::Mat_<float> in_map = input_maps[in];
|
||||
|
||||
for (int x = 0; x < input_maps[in].cols; x += stride_x)
|
||||
{
|
||||
for (int y = 0; y < input_maps[in].rows; y += stride_y)
|
||||
{
|
||||
float curr_max = -FLT_MAX;
|
||||
for (int x_in = x; x_in < x + pool_x; ++x_in)
|
||||
{
|
||||
for (int y_in = y; y_in < y + pool_y; ++y_in)
|
||||
{
|
||||
float curr_val = in_map.at<float>(y_in, x_in);
|
||||
if (curr_val > curr_max)
|
||||
{
|
||||
curr_max = curr_val;
|
||||
}
|
||||
}
|
||||
}
|
||||
int x_in_out = x / stride_x;
|
||||
int y_in_out = y / stride_y;
|
||||
sub_out.at<float>(y_in_out, x_in_out) = curr_max;
|
||||
}
|
||||
}
|
||||
|
||||
outputs_sub.push_back(sub_out);
|
||||
|
||||
}
|
||||
outputs = outputs_sub;
|
||||
}
|
||||
if (layer_type == 2)
|
||||
{
|
||||
// Concatenate all the maps
|
||||
cv::Mat_<float> input_concat = input_maps[0].t();
|
||||
input_concat = input_concat.reshape(0, 1);
|
||||
|
||||
for (size_t in = 1; in < input_maps.size(); ++in)
|
||||
{
|
||||
cv::Mat_<float> add = input_maps[in].t();
|
||||
add = add.reshape(0, 1);
|
||||
cv::hconcat(input_concat, add, input_concat);
|
||||
}
|
||||
|
||||
input_concat = input_concat * cnn_fully_connected_layers_weights[view_id][fully_connected_layer];
|
||||
input_concat = input_concat + cnn_fully_connected_layers_biases[view_id][fully_connected_layer].t();
|
||||
|
||||
outputs.clear();
|
||||
outputs.push_back(input_concat);
|
||||
|
||||
fully_connected_layer++;
|
||||
}
|
||||
if (layer_type == 3) // ReLU
|
||||
{
|
||||
outputs.clear();
|
||||
for (size_t k = 0; k < input_maps.size(); ++k)
|
||||
{
|
||||
// Apply the ReLU
|
||||
cv::threshold(input_maps[k], input_maps[k], 0, 0, cv::THRESH_TOZERO);
|
||||
outputs.push_back(input_maps[k]);
|
||||
|
||||
}
|
||||
}
|
||||
if (layer_type == 4)
|
||||
{
|
||||
outputs.clear();
|
||||
for (size_t k = 0; k < input_maps.size(); ++k)
|
||||
{
|
||||
// Apply the sigmoid
|
||||
cv::exp(-input_maps[k], input_maps[k]);
|
||||
input_maps[k] = 1.0 / (1.0 + input_maps[k]);
|
||||
|
||||
outputs.push_back(input_maps[k]);
|
||||
|
||||
}
|
||||
}
|
||||
// Set the outputs of this layer to inputs of the next
|
||||
input_maps = outputs;
|
||||
|
||||
}
|
||||
|
||||
// First turn to the 0-3 range
|
||||
double max_val = 0;
|
||||
cv::Point max_loc;
|
||||
cv::minMaxLoc(outputs[0].t(), 0, &max_val, 0, &max_loc);
|
||||
int max_idx = max_loc.y;
|
||||
double max = 3;
|
||||
double min = 0;
|
||||
double bins = (double)outputs[0].cols;
|
||||
// Unquantizing the softmax layer to continuous value
|
||||
double step_size = (max - min) / bins; // This should be saved somewhere
|
||||
double unquantized = min + step_size / 2.0 + max_idx * step_size;
|
||||
|
||||
// Turn it to -1, 1 range
|
||||
double dec = (unquantized - 1.5) / 1.5;
|
||||
|
||||
return dec;
|
||||
}
|
||||
|
||||
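The last few lines convert the winning softmax bin back into a continuous score: the bin index is mapped to the centre of its interval in the quantised 0-3 fit-error range (the same range used by the validator's training data later in this diff, where a completely wrong fit is assigned an error of 3), and the result is then rescaled to [-1, 1]. A small standalone illustration, with a hypothetical bin count of 30 (the real count is simply the width of the network's output layer):

#include <cstdio>

double unquantize_to_decision(int max_idx, int bins)
{
	const double min_val = 0.0, max_val = 3.0;  // quantised fit-error range used above
	double step_size = (max_val - min_val) / bins;  // width of one softmax bin
	double unquantized = min_val + step_size / 2.0 + max_idx * step_size;  // centre of the winning bin
	return (unquantized - 1.5) / 1.5;  // rescale 0..3 to -1..1
}

int main()
{
	// With 30 bins the step is 0.1, so bin 3 has centre 0.35 and maps to about -0.767.
	std::printf("%f\n", unquantize_to_decision(3, 30));
	return 0;
}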
void DetectionValidator::NormaliseWarpedToVector(const cv::Mat_<double>& warped_img, cv::Mat_<double>& feature_vec, int view_id)
{
cv::Mat_<double> warped_t = warped_img.t();
@ -259,7 +259,7 @@ void CorrectGlobalParametersVideo(const cv::Mat_<uchar> &grayscale_image, CLNF&
}

bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> &depth_image, CLNF& clnf_model, FaceModelParameters& params)
bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
{
// First need to decide if the landmarks should be "detected" or "tracked"
// Detected means running face detection and a larger search area, tracked means initialising from previous step
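The two comments above describe the per-frame choice between re-detecting the face and continuing to track it. Purely as an illustration of such a switch (the predicate below is an assumption, not the function's actual logic; only the two model fields it reads appear elsewhere in this diff):

// Hypothetical helper, not OpenFace code: re-detect when tracking has not started
// or has failed too many times in a row; otherwise keep tracking from the last fit.
static bool should_redetect(bool tracking_initialised, int failures_in_a_row, int max_failures)
{
	return !tracking_initialised || failures_in_a_row >= max_failures;
}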
@ -288,7 +288,8 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
CorrectGlobalParametersVideo(grayscale_image, clnf_model, params);
}

bool track_success = clnf_model.DetectLandmarks(grayscale_image, depth_image, params);
bool track_success = clnf_model.DetectLandmarks(grayscale_image, params);

if(!track_success)
{
// Make a record that tracking failed
@ -357,7 +358,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
params.window_sizes_current = params.window_sizes_init;

// Do the actual landmark detection (and keep it only if successful)
bool landmark_detection_success = clnf_model.DetectLandmarks(grayscale_image, depth_image, params);
bool landmark_detection_success = clnf_model.DetectLandmarks(grayscale_image, params);

// If landmark reinitialisation is unsuccessful, continue from previous estimates
// If it is the initial detection, however, do not care whether it was successful, as the validator might be wrong, so continue tracking
@ -377,7 +378,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
}
else
{
clnf_model.failures_in_a_row = -1;
clnf_model.failures_in_a_row = -1;
UpdateTemplate(grayscale_image, clnf_model);
return true;
}
@ -400,7 +401,7 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i

}

bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> &depth_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
{
if(bounding_box.width > 0)
{
@ -412,27 +413,17 @@ bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_i
clnf_model.tracking_initialised = true;
}

return DetectLandmarksInVideo(grayscale_image, depth_image, clnf_model, params);
return DetectLandmarksInVideo(grayscale_image, clnf_model, params);

}

bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
{
return DetectLandmarksInVideo(grayscale_image, cv::Mat_<float>(), clnf_model, params);
}

bool LandmarkDetector::DetectLandmarksInVideo(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
{
return DetectLandmarksInVideo(grayscale_image, cv::Mat_<float>(), bounding_box, clnf_model, params);
}

//================================================================================================================
// Landmark detection in image, need to provide an image and optionally CLNF model together with parameters (default values work well)
// Optionally can provide a bounding box in which detection is performed (this is useful if multiple faces are to be detected in images)
//================================================================================================================

// This is the one where the actual work gets done, other DetectLandmarksInImage calls lead to this one
bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> depth_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
{

// Can have multiple hypotheses
@ -485,7 +476,7 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
// calculate the local and global parameters from the generated 2D shape (mapping from the 2D to 3D because camera params are unknown)
clnf_model.pdm.CalcParams(clnf_model.params_global, bounding_box, clnf_model.params_local, rotation_hypotheses[hypothesis]);

bool success = clnf_model.DetectLandmarks(grayscale_image, depth_image, params);
bool success = clnf_model.DetectLandmarks(grayscale_image, params);

if(hypothesis == 0 || best_likelihood < clnf_model.model_likelihood)
{
@ -530,7 +521,7 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
return best_success;
}

bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Mat_<float> depth_image, CLNF& clnf_model, FaceModelParameters& params)
bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
{

cv::Rect_<double> bounding_box;
@ -559,18 +550,6 @@ bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_i
}
else
{
return DetectLandmarksInImage(grayscale_image, depth_image, bounding_box, clnf_model, params);
return DetectLandmarksInImage(grayscale_image, bounding_box, clnf_model, params);
}
}

// Versions not using depth images
bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, const cv::Rect_<double> bounding_box, CLNF& clnf_model, FaceModelParameters& params)
{
return DetectLandmarksInImage(grayscale_image, cv::Mat_<float>(), bounding_box, clnf_model, params);
}

bool LandmarkDetector::DetectLandmarksInImage(const cv::Mat_<uchar> &grayscale_image, CLNF& clnf_model, FaceModelParameters& params)
{
return DetectLandmarksInImage(grayscale_image, cv::Mat_<float>(), clnf_model, params);
}
@ -76,7 +76,7 @@ CLNF::CLNF(const CLNF& other): pdm(other.pdm), params_local(other.params_local.c
this->detection_certainty = other.detection_certainty;
this->model_likelihood = other.model_likelihood;
this->failures_in_a_row = other.failures_in_a_row;

// Load the CascadeClassifier (as it does not have a proper copy constructor)
if(!face_detector_location.empty())
{
@ -243,7 +243,6 @@ void CLNF::Read_CLNF(string clnf_location)
string line;

vector<string> intensity_expert_locations;
vector<string> depth_expert_locations;
vector<string> ccnf_expert_locations;

// The other module locations should be defined as relative paths from the main model
@ -308,10 +307,6 @@ void CLNF::Read_CLNF(string clnf_location)
{
intensity_expert_locations.push_back(location);
}
else if(module.compare("PatchesDepth") == 0)
{
depth_expert_locations.push_back(location);
}
else if(module.compare("PatchesCCNF") == 0)
{
ccnf_expert_locations.push_back(location);
@ -319,7 +314,7 @@ void CLNF::Read_CLNF(string clnf_location)
}

// Initialise the patch experts
patch_experts.Read(intensity_expert_locations, depth_expert_locations, ccnf_expert_locations);
patch_experts.Read(intensity_expert_locations, ccnf_expert_locations);

// Read in a face detector
face_detector_HOG = dlib::get_frontal_face_detector();
@ -564,11 +559,11 @@ void CLNF::Reset(double x, double y)
}

// The main internal landmark detection call (should not be used externally?)
bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &depth, FaceModelParameters& params)
bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, FaceModelParameters& params)
{

// Fits from the current estimate of local and global parameters in the model
bool fit_success = Fit(image, depth, params.window_sizes_current, params);
bool fit_success = Fit(image, params.window_sizes_current, params);

// Store the landmarks converged on in detected_landmarks
pdm.CalcShape2D(detected_landmarks, params_local, params_global);
@ -610,7 +605,7 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &
this->hierarchical_params[part_model].window_sizes_current = this->hierarchical_params[part_model].window_sizes_init;

// Do the actual landmark detection
hierarchical_models[part_model].DetectLandmarks(image, depth, hierarchical_params[part_model]);
hierarchical_models[part_model].DetectLandmarks(image, hierarchical_params[part_model]);

}
else
@ -675,7 +670,7 @@ bool CLNF::DetectLandmarks(const cv::Mat_<uchar> &image, const cv::Mat_<float> &
}

//=============================================================================
bool CLNF::Fit(const cv::Mat_<uchar>& im, const cv::Mat_<float>& depthImg, const std::vector<int>& window_sizes, const FaceModelParameters& parameters)
bool CLNF::Fit(const cv::Mat_<uchar>& im, const std::vector<int>& window_sizes, const FaceModelParameters& parameters)
{
// Making sure it is a single channel image
assert(im.channels() == 1);
@ -684,21 +679,7 @@ bool CLNF::Fit(const cv::Mat_<uchar>& im, const cv::Mat_<float>& depthImg, const
cv::Mat_<double> current_shape(2 * pdm.NumberOfPoints() , 1, 0.0);

int n = pdm.NumberOfPoints();

cv::Mat_<float> depth_img_no_background;

// Background elimination from the depth image
if(!depthImg.empty())
{
bool success = RemoveBackground(depth_img_no_background, depthImg);

// The attempted background removal can fail leading to tracking failure
if(!success)
{
return false;
}
}

int num_scales = patch_experts.patch_scaling.size();

// Storing the patch expert response maps
@ -720,15 +701,7 @@ bool CLNF::Fit(const cv::Mat_<uchar>& im, const cv::Mat_<float>& depthImg, const
continue;

// The patch expert response computation
if(scale != window_sizes.size() - 1)
{
patch_experts.Response(patch_expert_responses, sim_ref_to_img, sim_img_to_ref, im, depth_img_no_background, pdm, params_global, params_local, window_size, scale);
}
else
{
// Do not use depth for the final iteration as it is not as accurate
patch_experts.Response(patch_expert_responses, sim_ref_to_img, sim_img_to_ref, im, cv::Mat(), pdm, params_global, params_local, window_size, scale);
}
patch_experts.Response(patch_expert_responses, sim_ref_to_img, sim_img_to_ref, im, pdm, params_global, params_local, window_size, scale);

if(parameters.refine_parameters == true)
{
@ -1113,95 +1086,6 @@ double CLNF::NU_RLMS(cv::Vec6d& final_global, cv::Mat_<double>& final_local, con
}


bool CLNF::RemoveBackground(cv::Mat_<float>& out_depth_image, const cv::Mat_<float>& depth_image)
{
// use the current estimate of the face location to determine what is foreground and background
double tx = this->params_global[4];
double ty = this->params_global[5];

// if we are too close to the edge fail
if(tx - 9 <= 0 || ty - 9 <= 0 || tx + 9 >= depth_image.cols || ty + 9 >= depth_image.rows)
{
cout << "Face estimate is too close to the edge, tracking failed" << endl;
return false;
}

cv::Mat_<double> current_shape;

pdm.CalcShape2D(current_shape, params_local, params_global);

double min_x, max_x, min_y, max_y;

int n = this->pdm.NumberOfPoints();

cv::minMaxLoc(current_shape(cv::Range(0, n), cv::Range(0,1)), &min_x, &max_x);
cv::minMaxLoc(current_shape(cv::Range(n, n*2), cv::Range(0,1)), &min_y, &max_y);

// the area of interest: size of face with some scaling (these scalings are fairly ad hoc)
double width = 3 * (max_x - min_x);
double height = 2.5 * (max_y - min_y);

// getting the region of interest from the depth image,
// so we don't get other objects lying at the same depth as the head but away from it
cv::Rect_<int> roi((int)(tx-width/2), (int)(ty - height/2), (int)width, (int)height);

// clamp it if it does not lie fully in the image
if(roi.x < 0) roi.x = 0;
if(roi.y < 0) roi.y = 0;
if(roi.width + roi.x >= depth_image.cols) roi.x = depth_image.cols - roi.width;
if(roi.height + roi.y >= depth_image.rows) roi.y = depth_image.rows - roi.height;

if(width > depth_image.cols)
{
roi.x = 0; roi.width = depth_image.cols;
}
if(height > depth_image.rows)
{
roi.y = 0; roi.height = depth_image.rows;
}

if(roi.width == 0) roi.width = depth_image.cols;
if(roi.height == 0) roi.height = depth_image.rows;

if(roi.x >= depth_image.cols) roi.x = 0;
if(roi.y >= depth_image.rows) roi.y = 0;

// Initialise the mask
cv::Mat_<uchar> mask(depth_image.rows, depth_image.cols, (uchar)0);

cv::Mat_<uchar> valid_pixels = depth_image > 0;

// check if there is any depth near the estimate
if(cv::sum(valid_pixels(cv::Rect((int)tx - 8, (int)ty - 8, 16, 16))/255)[0] > 0)
{
double Z = cv::mean(depth_image(cv::Rect((int)tx - 8, (int)ty - 8, 16, 16)), valid_pixels(cv::Rect((int)tx - 8, (int)ty - 8, 16, 16)))[0]; // Z offset from the surface of the face

// Only operate within the region of interest of the depth image
cv::Mat dRoi = depth_image(roi);

cv::Mat mRoi = mask(roi);

// Filter out all pixels further than 20cm away from the current pose depth estimate
cv::inRange(dRoi, Z - 200, Z + 200, mRoi);

// Convert to be either 0 or 1
mask = mask / 255;

cv::Mat_<float> maskF;
mask.convertTo(maskF, CV_32F);

// Filter the depth image
out_depth_image = depth_image.mul(maskF);
}
else
{
cout << "No depth signal found in foreground, tracking failed" << endl;
return false;
}
return true;
}
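Note that the ±200 window passed to cv::inRange above matches the "20cm" in the comment only if the depth image is expressed in millimetres; that is the usual convention for consumer depth sensors, but it is an assumption of this (now removed) code path rather than something it checks.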

// Getting a 3D shape model from the current detected landmarks (in camera space)
cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) const
{
@ -95,8 +95,8 @@ void create_directories(string output_path)
}
}

// Extracting the following command line arguments -f, -fd, -op, -of, -ov (and possible ordered repetitions)
void get_video_input_output_params(vector<string> &input_video_files, vector<string> &depth_dirs, vector<string> &output_files,
// Extracting the following command line arguments -f, -op, -of, -ov (and possible ordered repetitions)
void get_video_input_output_params(vector<string> &input_video_files, vector<string> &output_files,
vector<string> &output_video_files, bool& world_coordinates_pose, string& output_codec, vector<string> &arguments)
{
bool* valid = new bool[arguments.size()];
@ -149,13 +149,6 @@ void get_video_input_output_params(vector<string> &input_video_files, vector<str
valid[i+1] = false;
i++;
}
else if (arguments[i].compare("-fd") == 0)
{
depth_dirs.push_back(input_root + arguments[i + 1]);
valid[i] = false;
valid[i+1] = false;
i++;
}
else if (arguments[i].compare("-of") == 0)
{
output_files.push_back(output_root + arguments[i + 1]);
@ -251,7 +244,7 @@ void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy,
}
}

void get_image_input_output_params(vector<string> &input_image_files, vector<string> &input_depth_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
void get_image_input_output_params(vector<string> &input_image_files, vector<string> &output_feature_files, vector<string> &output_pose_files, vector<string> &output_image_files,
vector<cv::Rect_<double>> &input_bounding_boxes, vector<string> &arguments)
{
bool* valid = new bool[arguments.size()];
@ -294,13 +287,6 @@ void get_image_input_output_params(vector<string> &input_image_files, vector<str
valid[i+1] = false;
i++;
}
else if (arguments[i].compare("-fd") == 0)
{
input_depth_files.push_back(input_root + arguments[i + 1]);
valid[i] = false;
valid[i+1] = false;
i++;
}
else if (arguments[i].compare("-fdir") == 0)
{
@ -56,7 +56,7 @@
using namespace LandmarkDetector;

// A copy constructor
Patch_experts::Patch_experts(const Patch_experts& other) : patch_scaling(other.patch_scaling), centers(other.centers), svr_expert_intensity(other.svr_expert_intensity), svr_expert_depth(other.svr_expert_depth), ccnf_expert_intensity(other.ccnf_expert_intensity)
Patch_experts::Patch_experts(const Patch_experts& other) : patch_scaling(other.patch_scaling), centers(other.centers), svr_expert_intensity(other.svr_expert_intensity), ccnf_expert_intensity(other.ccnf_expert_intensity)
{

// Make sure the matrices are allocated properly
@ -86,11 +86,11 @@ Patch_experts::Patch_experts(const Patch_experts& other) : patch_scaling(other.p
}
}

// Returns the patch expert responses given a grayscale and an optional depth image.
// Returns the patch expert responses given a grayscale image.
// Additionally returns the transform from the image coordinates to the response coordinates (and vice versa).
// The computation also requires the current landmark locations to compute the response around, the PDM corresponding to the desired model, and the parameters describing its instance
// Also need to provide the size of the area of interest and the desired scale of analysis
void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, cv::Matx22f& sim_ref_to_img, cv::Matx22d& sim_img_to_ref, const cv::Mat_<uchar>& grayscale_image, const cv::Mat_<float>& depth_image,
void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, cv::Matx22f& sim_ref_to_img, cv::Matx22d& sim_img_to_ref, const cv::Mat_<uchar>& grayscale_image,
const PDM& pdm, const cv::Vec6d& params_global, const cv::Mat_<double>& params_local, int window_size, int scale)
{

@ -126,15 +126,6 @@ void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, c
sim_ref_to_img(1,0) = (float)sim_ref_to_img_d(1,0);
sim_ref_to_img(1,1) = (float)sim_ref_to_img_d(1,1);

// Indicates the legal pixels in a depth image, if available (used for CLM-Z area of interest (window) interpolation)
cv::Mat_<uchar> mask;
if(!depth_image.empty())
{
mask = depth_image > 0;
mask = mask / 255;
}

bool use_ccnf = !this->ccnf_expert_intensity.empty();

// If using CCNF patch experts might need to precalculate Sigmas
@ -222,53 +213,6 @@ void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, c
svr_expert_intensity[scale][view_id][i].Response(area_of_interest, patch_expert_responses[i]);
}

// if we have a corresponding depth patch and it is visible
if(!svr_expert_depth.empty() && !depth_image.empty() && visibilities[scale][view_id].at<int>(i,0))
{

cv::Mat_<float> dProb = patch_expert_responses[i].clone();
cv::Mat_<float> depthWindow(area_of_interest_height, area_of_interest_width);

CvMat dimg_o = depthWindow;
cv::Mat maskWindow(area_of_interest_height, area_of_interest_width, CV_32F);
CvMat mimg_o = maskWindow;

IplImage d_o = depth_image;
IplImage m_o = mask;

cvGetQuadrangleSubPix(&d_o,&dimg_o,&sim_o);

cvGetQuadrangleSubPix(&m_o,&mimg_o,&sim_o);

depthWindow.setTo(0, maskWindow < 1);

svr_expert_depth[scale][view_id][i].ResponseDepth(depthWindow, dProb);

// Sum to one
double sum = cv::sum(patch_expert_responses[i])[0];

// To avoid division by 0 issues
if(sum == 0)
{
sum = 1;
}

patch_expert_responses[i] /= sum;

// Sum to one
sum = cv::sum(dProb)[0];
// To avoid division by 0 issues
if(sum == 0)
{
sum = 1;
}

dProb /= sum;

patch_expert_responses[i] = patch_expert_responses[i] + dProb;

}
}
}
}
@ -303,7 +247,7 @@ int Patch_experts::GetViewIdx(const cv::Vec6d& params_global, int scale) const

//===========================================================================
void Patch_experts::Read(vector<string> intensity_svr_expert_locations, vector<string> depth_svr_expert_locations, vector<string> intensity_ccnf_expert_locations)
void Patch_experts::Read(vector<string> intensity_svr_expert_locations, vector<string> intensity_ccnf_expert_locations)
{

// initialise the SVR intensity patch expert parameters
@ -341,61 +285,6 @@ void Patch_experts::Read(vector<string> intensity_svr_expert_locations, vector<s
Read_CCNF_patch_experts(location, centers[scale], visibilities[scale], ccnf_expert_intensity[scale], patch_scaling[scale]);
}

// initialise the SVR depth patch expert parameters
int num_depth_scales = depth_svr_expert_locations.size();
int num_intensity_scales = centers.size();

if(num_depth_scales > 0 && num_intensity_scales != num_depth_scales)
{
cout << "Intensity and depth patch experts have a different number of scales, can't read depth" << endl;
return;
}

// Have these to confirm that depth patch experts have the same number of views and scales and have the same visibilities
vector<vector<cv::Vec3d> > centers_depth(num_depth_scales);
vector<vector<cv::Mat_<int> > > visibilities_depth(num_depth_scales);
vector<double> patch_scaling_depth(num_depth_scales);

svr_expert_depth.resize(num_depth_scales);

// Reading in SVR depth patch experts for each scale they are defined in
for(int scale = 0; scale < num_depth_scales; ++scale)
{
string location = depth_svr_expert_locations[scale];
cout << "Reading the depth SVR patch experts from: " << location << "....";
Read_SVR_patch_experts(location, centers_depth[scale], visibilities_depth[scale], svr_expert_depth[scale], patch_scaling_depth[scale]);

// Check if the scales are identical
if(patch_scaling_depth[scale] != patch_scaling[scale])
{
cout << "Intensity and depth patch experts have a different scales, can't read depth" << endl;
svr_expert_depth.clear();
return;
}

int num_views_intensity = centers[scale].size();
int num_views_depth = centers_depth[scale].size();

// Check if the number of views is identical
if(num_views_intensity != num_views_depth)
{
cout << "Intensity and depth patch experts have a different number of scales, can't read depth" << endl;
svr_expert_depth.clear();
return;
}

for(int view = 0; view < num_views_depth; ++view)
{
if(cv::countNonZero(centers_depth[scale][view] != centers[scale][view]) || cv::countNonZero(visibilities[scale][view] != visibilities_depth[scale][view]))
{
cout << "Intensity and depth patch experts have different visibilities or centers" << endl;
svr_expert_depth.clear();
return;
}
}
}

}
//======================= Reading the SVR patch experts =========================================//
void Patch_experts::Read_SVR_patch_experts(string expert_location, std::vector<cv::Vec3d>& centers, std::vector<cv::Mat_<int> >& visibility, std::vector<std::vector<Multi_SVR_patch_expert> >& patches, double& scale)
@ -1,12 +1,12 @@
AU1 results - corr 0.825, rms 0.413, ccc - 0.803
AU2 results - corr 0.765, rms 0.444, ccc - 0.659
AU4 results - corr 0.863, rms 0.583, ccc - 0.838
AU5 results - corr 0.749, rms 0.179, ccc - 0.717
AU2 results - corr 0.758, rms 0.448, ccc - 0.652
AU4 results - corr 0.874, rms 0.565, ccc - 0.848
AU5 results - corr 0.747, rms 0.180, ccc - 0.717
AU6 results - corr 0.702, rms 0.604, ccc - 0.657
AU9 results - corr 0.742, rms 0.384, ccc - 0.689
AU12 results - corr 0.865, rms 0.510, ccc - 0.850
AU15 results - corr 0.747, rms 0.268, ccc - 0.714
AU17 results - corr 0.646, rms 0.515, ccc - 0.578
AU20 results - corr 0.637, rms 0.304, ccc - 0.595
AU25 results - corr 0.926, rms 0.499, ccc - 0.920
AU26 results - corr 0.805, rms 0.447, ccc - 0.764
AU9 results - corr 0.740, rms 0.384, ccc - 0.688
AU12 results - corr 0.864, rms 0.511, ccc - 0.850
AU15 results - corr 0.744, rms 0.269, ccc - 0.712
AU17 results - corr 0.641, rms 0.520, ccc - 0.572
AU20 results - corr 0.619, rms 0.311, ccc - 0.581
AU25 results - corr 0.926, rms 0.500, ccc - 0.920
AU26 results - corr 0.803, rms 0.449, ccc - 0.763
@ -1,11 +1,11 @@
AU1 class, Precision - 0.588, Recall - 0.708, F1 - 0.643
AU1 class, Precision - 0.590, Recall - 0.715, F1 - 0.647
AU2 class, Precision - 0.473, Recall - 0.749, F1 - 0.580
AU4 class, Precision - 0.509, Recall - 0.745, F1 - 0.605
AU6 class, Precision - 0.834, Recall - 0.667, F1 - 0.741
AU7 class, Precision - 0.685, Recall - 0.792, F1 - 0.735
AU7 class, Precision - 0.686, Recall - 0.792, F1 - 0.735
AU10 class, Precision - 0.520, Recall - 0.737, F1 - 0.610
AU12 class, Precision - 0.919, Recall - 0.654, F1 - 0.764
AU15 class, Precision - 0.362, Recall - 0.634, F1 - 0.461
AU17 class, Precision - 0.230, Recall - 0.279, F1 - 0.252
AU12 class, Precision - 0.919, Recall - 0.657, F1 - 0.766
AU15 class, Precision - 0.363, Recall - 0.638, F1 - 0.462
AU17 class, Precision - 0.231, Recall - 0.280, F1 - 0.253
AU25 class, Precision - 0.205, Recall - 0.871, F1 - 0.332
AU26 class, Precision - 0.122, Recall - 0.974, F1 - 0.217
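As a quick consistency check on the rows above, F1 is the harmonic mean of precision and recall, F1 = 2PR / (P + R); for the updated AU1 row, 2 * 0.590 * 0.715 / (0.590 + 0.715) ≈ 0.647, which matches the listed value.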
@ -1,6 +1,6 @@
AU2 class, Precision - 0.369, Recall - 0.744, F1 - 0.493
AU2 class, Precision - 0.360, Recall - 0.743, F1 - 0.485
AU12 class, Precision - 0.427, Recall - 0.782, F1 - 0.553
AU17 class, Precision - 0.126, Recall - 0.815, F1 - 0.219
AU25 class, Precision - 0.344, Recall - 0.574, F1 - 0.430
AU28 class, Precision - 0.486, Recall - 0.475, F1 - 0.481
AU45 class, Precision - 0.289, Recall - 0.621, F1 - 0.394
AU17 class, Precision - 0.114, Recall - 0.819, F1 - 0.201
AU25 class, Precision - 0.337, Recall - 0.523, F1 - 0.410
AU28 class, Precision - 0.443, Recall - 0.482, F1 - 0.462
AU45 class, Precision - 0.293, Recall - 0.615, F1 - 0.397

Binary file not shown.
@ -1,3 +1,3 @@
Model, mean, median
OpenFace (CLNF): 0.0562, 0.0515
CLM: 0.0683, 0.0602
OpenFace (CLNF): 0.0564, 0.0515
CLM: 0.0631, 0.0587

Binary file not shown.
Binary file not shown.
@ -25,7 +25,7 @@ run_AU_prediction_DISFA
assert(mean(au_res) > 0.7);

run_AU_prediction_SEMAINE
assert(mean(f1s) > 0.42);
assert(mean(f1s) > 0.41);

run_AU_prediction_FERA2011
assert(mean(au_res) > 0.5);

Binary file not shown.
Binary file not shown.
@ -1,4 +1,4 @@
Dataset and model, pitch, yaw, roll, mean, median
biwi error: 7.955, 5.583, 4.402, 5.980, 2.624
bu error: 2.762, 4.103, 2.568, 3.145, 2.118
ict error: 3.620, 3.608, 3.626, 3.618, 2.028
biwi error: 7.163, 5.314, 4.760, 5.746, 2.617
bu error: 2.769, 4.105, 2.569, 3.147, 2.118
ict error: 3.489, 3.632, 3.538, 3.553, 2.029
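In these rows the fourth value is the mean over all per-frame, per-axis errors, which coincides with the average of the three per-axis figures (for the updated biwi row, (7.163 + 5.314 + 4.760) / 3 ≈ 5.746), and the last value is the median over the same pooled errors; the pose evaluation script removed later in this diff prints exactly these five columns. The right-skewed error distribution is why the median can sit below every per-axis mean.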
@ -1,65 +0,0 @@
|
|||
clear;
|
||||
|
||||
%%
|
||||
% Run the BU test with CLM
|
||||
if(exist([getenv('USERPROFILE') '/Dropbox/AAM/test data/'], 'file'))
|
||||
database_root = [getenv('USERPROFILE') '/Dropbox/AAM/test data/'];
|
||||
else
|
||||
database_root = 'F:/Dropbox/Dropbox/AAM/test data/';
|
||||
end
|
||||
|
||||
buDir = [database_root, '/bu/uniform-light/'];
|
||||
|
||||
% The fast and accurate single light models
|
||||
%%
|
||||
v = 1;
|
||||
[fps_bu_general, resFolderBUCLM_general] = run_bu_experiment(buDir, false, v, 'model', 'model/main_clm_general.txt');
|
||||
[bu_error_clm_svr_general, ~, ~, all_errors_bu_svr_general] = calcBUerror(resFolderBUCLM_general, buDir);
|
||||
|
||||
%%
|
||||
% Run the CLM-Z and general Biwi test
|
||||
biwi_dir = '/biwi pose/';
|
||||
biwi_results_root = '/biwi pose results/';
|
||||
|
||||
% Intensity
|
||||
v = 1;
|
||||
[fps_biwi_clm, res_folder_clm_biwi] = run_biwi_experiment(database_root, biwi_dir, biwi_results_root, false, false, v, 'model', 'model/main_clm-z.txt');
|
||||
% Calculate the resulting errors
|
||||
[biwi_error_clm, ~, ~, ~, all_errors_biwi_clm] = calcBiwiError([database_root res_folder_clm_biwi], [database_root biwi_dir]);
|
||||
|
||||
% Intensity with depth
|
||||
v = 2;
|
||||
[fps_biwi_clmz, res_folder_clmz_biwi] = run_biwi_experiment(database_root, biwi_dir, biwi_results_root, false, true, v, 'model', 'model/main_clm-z.txt');
|
||||
% Calculate the resulting errors
|
||||
[biwi_error_clmz, ~, ~, ~, all_errors_biwi_clm_z] = calcBiwiError([database_root res_folder_clmz_biwi], [database_root biwi_dir]);
|
||||
|
||||
%% Run the CLM-Z and general ICT test
|
||||
ict_dir = ['ict/'];
|
||||
ict_results_root = ['ict results/'];
|
||||
|
||||
v = 1;
|
||||
% Intensity
|
||||
[fps_ict_clm, res_folder_ict_clm] = run_ict_experiment(database_root, ict_dir, ict_results_root, false, false, v, 'model', 'model/main_clm-z.txt');
|
||||
[ict_error_clm, ~, ~, ~, all_errors_ict_clm] = calcIctError([database_root res_folder_ict_clm], [database_root ict_dir]);
|
||||
|
||||
v = 2;
|
||||
% Intensity and depth
|
||||
[fps_ict_clmz, res_folder_ict_clmz] = run_ict_experiment(database_root, ict_dir, ict_results_root, false, true, v, 'model', 'model/main_clm-z.txt');
|
||||
[ict_error_clmz, ~, ~, ~, all_errors_ict_clm_z] = calcIctError([database_root res_folder_ict_clmz], [database_root ict_dir]);
|
||||
|
||||
%% Save the results
|
||||
v = 1;
|
||||
filename = 'results/Pose_clm';
|
||||
save(filename);
|
||||
%
|
||||
% Also save them in a reasonable .txt format for easy comparison
|
||||
f = fopen('results/Pose_clm.txt', 'w');
|
||||
fprintf(f, 'Dataset and model, pitch, yaw, roll, mean, median\n');
|
||||
fprintf(f, 'biwi error clm: %.3f, %.3f, %.3f, %.3f, %.3f\n', biwi_error_clm, mean(all_errors_biwi_clm(:)), median(all_errors_biwi_clm(:)));
|
||||
fprintf(f, 'biwi error clm-z: %.3f, %.3f, %.3f, %.3f, %.3f\n', biwi_error_clmz, mean(all_errors_biwi_clm_z(:)), median(all_errors_biwi_clm_z(:)));
|
||||
fprintf(f, 'bu error clm general: %.3f, %.3f, %.3f, %.3f, %.3f\n', bu_error_clm_svr_general, mean(all_errors_bu_svr_general(:)), median(all_errors_bu_svr_general(:)));
|
||||
fprintf(f, 'ict error clm: %.3f, %.3f, %.3f, %.3f, %.3f\n', ict_error_clm, mean(all_errors_ict_clm(:)), median(all_errors_ict_clm(:)));
|
||||
fprintf(f, 'ict error clm-z: %.3f, %.3f, %.3f, %.3f, %.3f\n', ict_error_clmz, mean(all_errors_ict_clm_z(:)), median(all_errors_ict_clm_z(:)));
|
||||
|
||||
fclose(f);
|
||||
clear 'f'
|
|
@ -27,8 +27,11 @@ record = true;
clmParams.multi_modal_types = patches(1).multi_modal_types;

% load the face validator and add its dependency
load('../face_validation/trained/face_check_cnn_68.mat', 'face_check_cnns');
load('../face_validation/trained/faceCheckers.mat', 'faceCheckers');
addpath(genpath('../face_validation'));
od = cd('../face_validation/');
setup;
cd(od);

%%
for v=1:numel(vids)
@ -126,7 +129,7 @@ for v=1:numel(vids)
% detection
shape_new = GetShapeOrtho(pdm.M, pdm.V, params, g_param_n);

dec = face_check_cnn(image, shape_new, g_param, face_check_cnns);
dec = face_check_cnn(image, shape_new, g_param, faceCheckers);

if(dec < 0.5)
det = true;
@ -153,7 +156,7 @@ for v=1:numel(vids)
all_local_params(i,:) = l_param;
all_global_params(i,:) = g_param;

dec = face_check_cnn(image, shape, g_param, face_check_cnns);
dec = face_check_cnn(image, shape, g_param, faceCheckers);

if(dec < 0.5)
clmParams.window_size = [19,19; 17,17;];
@ -1,221 +0,0 @@
|
|||
function [images, detections, labels] = Collect_wild_imgs(root_test_data)
|
||||
|
||||
use_afw = true;
|
||||
use_lfpw = true;
|
||||
use_helen = true;
|
||||
use_ibug = true;
|
||||
|
||||
use_68 = true;
|
||||
|
||||
images = [];
|
||||
labels = [];
|
||||
detections = [];
|
||||
|
||||
if(use_afw)
|
||||
[img, det, lbl] = Collect_AFW(root_test_data, use_68);
|
||||
images = cat(1, images, img');
|
||||
detections = cat(1, detections, det);
|
||||
labels = cat(1, labels, lbl);
|
||||
end
|
||||
|
||||
if(use_lfpw)
|
||||
[img, det, lbl] = Collect_LFPW(root_test_data, use_68);
|
||||
images = cat(1, images, img');
|
||||
detections = cat(1, detections, det);
|
||||
labels = cat(1, labels, lbl);
|
||||
end
|
||||
|
||||
if(use_ibug)
|
||||
[img, det, lbl] = Collect_ibug(root_test_data, use_68);
|
||||
images = cat(1, images, img');
|
||||
detections = cat(1, detections, det);
|
||||
labels = cat(1, labels, lbl);
|
||||
end
|
||||
|
||||
if(use_helen)
|
||||
[img, det, lbl] = Collect_helen(root_test_data, use_68);
|
||||
images = cat(1, images, img');
|
||||
detections = cat(1, detections, det);
|
||||
labels = cat(1, labels, lbl);
|
||||
end
|
||||
|
||||
% convert to format expected by the Fitting method
|
||||
detections(:,3) = detections(:,1) + detections(:,3);
|
||||
detections(:,4) = detections(:,2) + detections(:,4);
|
||||
|
||||
end
|
||||
|
||||
|
||||
function [images, detections, labels] = Collect_AFW(root_test_data, use_68)
|
||||
|
||||
dataset_loc = [root_test_data, '/AFW/'];
|
||||
|
||||
landmarkLabels = dir([dataset_loc '\*.pts']);
|
||||
|
||||
num_imgs = size(landmarkLabels,1);
|
||||
|
||||
images = struct;
|
||||
if(use_68)
|
||||
labels = zeros(num_imgs, 68, 2);
|
||||
else
|
||||
labels = zeros(num_imgs, 66, 2);
|
||||
end
|
||||
|
||||
detections = zeros(num_imgs, 4);
|
||||
|
||||
load([root_test_data, '/Bounding Boxes/bounding_boxes_afw.mat']);
|
||||
|
||||
for imgs = 1:num_imgs
|
||||
|
||||
[~,name,~] = fileparts(landmarkLabels(imgs).name);
|
||||
|
||||
landmarks = importdata([dataset_loc, landmarkLabels(imgs).name], ' ', 3);
|
||||
landmarks = landmarks.data;
|
||||
if(~use_68)
|
||||
inds_frontal = [1:60,62:64,66:68];
|
||||
landmarks = landmarks(inds_frontal,:);
|
||||
end
|
||||
|
||||
images(imgs).img = [dataset_loc, name '.jpg'];
|
||||
labels(imgs,:,:) = landmarks;
|
||||
|
||||
detections(imgs,:) = bounding_boxes{imgs}.bb_detector;
|
||||
|
||||
end
|
||||
|
||||
detections(:,3) = detections(:,3) - detections(:,1);
|
||||
detections(:,4) = detections(:,4) - detections(:,2);
|
||||
|
||||
end
|
||||
|
||||
function [images, detections, labels] = Collect_LFPW(root_test_data, use_68)
|
||||
|
||||
dataset_loc = [root_test_data, '/lfpw/testset/'];
|
||||
|
||||
landmarkLabels = dir([dataset_loc '\*.pts']);
|
||||
|
||||
num_imgs = size(landmarkLabels,1);
|
||||
|
||||
images = struct;
|
||||
if(use_68)
|
||||
labels = zeros(num_imgs, 68, 2);
|
||||
else
|
||||
labels = zeros(num_imgs, 66, 2);
|
||||
end
|
||||
|
||||
detections = zeros(num_imgs, 4);
|
||||
|
||||
load([root_test_data, '/Bounding Boxes/bounding_boxes_lfpw_testset.mat']);
|
||||
|
||||
for imgs = 1:num_imgs
|
||||
|
||||
[~,name,~] = fileparts(landmarkLabels(imgs).name);
|
||||
|
||||
landmarks = importdata([dataset_loc, landmarkLabels(imgs).name], ' ', 3);
|
||||
landmarks = landmarks.data;
|
||||
if(~use_68)
|
||||
inds_frontal = [1:60,62:64,66:68];
|
||||
landmarks = landmarks(inds_frontal,:);
|
||||
end
|
||||
|
||||
images(imgs).img = [dataset_loc, name '.png'];
|
||||
|
||||
labels(imgs,:,:) = landmarks;
|
||||
|
||||
detections(imgs,:) = bounding_boxes{imgs}.bb_detector;
|
||||
|
||||
end
|
||||
|
||||
detections(:,3) = detections(:,3) - detections(:,1);
|
||||
detections(:,4) = detections(:,4) - detections(:,2);
|
||||
|
||||
|
||||
end
|
||||
|
||||
function [images, detections, labels] = Collect_ibug(root_test_data, use_68)
|
||||
|
||||
dataset_loc = [root_test_data, '/ibug/'];
|
||||
|
||||
landmarkLabels = dir([dataset_loc '\*.pts']);
|
||||
|
||||
num_imgs = size(landmarkLabels,1);
|
||||
|
||||
images = struct;
|
||||
|
||||
if(use_68)
|
||||
labels = zeros(num_imgs, 68, 2);
|
||||
else
|
||||
labels = zeros(num_imgs, 66, 2);
|
||||
end
|
||||
|
||||
detections = zeros(num_imgs, 4);
|
||||
|
||||
load([root_test_data, '/Bounding Boxes/bounding_boxes_ibug.mat']);
|
||||
|
||||
for imgs = 1:num_imgs
|
||||
|
||||
[~,name,~] = fileparts(landmarkLabels(imgs).name);
|
||||
|
||||
landmarks = importdata([dataset_loc, landmarkLabels(imgs).name], ' ', 3);
|
||||
landmarks = landmarks.data;
|
||||
if(~use_68)
|
||||
inds_frontal = [1:60,62:64,66:68];
|
||||
landmarks = landmarks(inds_frontal,:);
|
||||
end
|
||||
|
||||
images(imgs).img = [dataset_loc, name '.jpg'];
|
||||
|
||||
labels(imgs,:,:) = landmarks;
|
||||
|
||||
detections(imgs,:) = bounding_boxes{imgs}.bb_detector;
|
||||
|
||||
end
|
||||
|
||||
detections(:,3) = detections(:,3) - detections(:,1);
|
||||
detections(:,4) = detections(:,4) - detections(:,2);
|
||||
|
||||
end
|
||||
|
||||
function [images, detections, labels] = Collect_helen(root_test_data, use_68)
|
||||
|
||||
dataset_loc = [root_test_data, '/helen/testset/'];
|
||||
|
||||
landmarkLabels = dir([dataset_loc '\*.pts']);
|
||||
|
||||
num_imgs = size(landmarkLabels,1);
|
||||
|
||||
images = struct;
|
||||
|
||||
if(use_68)
|
||||
labels = zeros(num_imgs, 68, 2);
|
||||
else
|
||||
labels = zeros(num_imgs, 66, 2);
|
||||
end
|
||||
|
||||
detections = zeros(num_imgs, 4);
|
||||
|
||||
load([root_test_data, '/Bounding Boxes/bounding_boxes_helen_testset.mat']);
|
||||
|
||||
for imgs = 1:num_imgs
|
||||
|
||||
[~,name,~] = fileparts(landmarkLabels(imgs).name);
|
||||
|
||||
landmarks = importdata([dataset_loc, landmarkLabels(imgs).name], ' ', 3);
|
||||
landmarks = landmarks.data;
|
||||
if(~use_68)
|
||||
inds_frontal = [1:60,62:64,66:68];
|
||||
landmarks = landmarks(inds_frontal,:);
|
||||
end
|
||||
|
||||
images(imgs).img = [dataset_loc, name '.jpg'];
|
||||
|
||||
labels(imgs,:,:) = landmarks;
|
||||
|
||||
detections(imgs,:) = bounding_boxes{imgs}.bb_detector;
|
||||
|
||||
end
|
||||
|
||||
detections(:,3) = detections(:,3) - detections(:,1);
|
||||
detections(:,4) = detections(:,4) - detections(:,2);
|
||||
|
||||
end
|
|
@ -1,337 +0,0 @@
|
|||
function Create_data_66()
|
||||
|
||||
load '../models/pdm/pdm_66_multi_pie';
|
||||
load '../models/tri_66.mat';
|
||||
|
||||
% This script uses the same format used for patch expert training, and
|
||||
% expects the data to be there
|
||||
dataset_loc = '../../../CCNF experiments/clnf/patch training/data_preparation/prepared_data/';
|
||||
addpath('../PDM_helpers/');
|
||||
|
||||
scale = '0.5';
|
||||
prefix= 'combined_';
|
||||
|
||||
% Find the available positive training data
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix, scale));
|
||||
centres_all = [];
|
||||
for i=1:numel(data_files)
|
||||
|
||||
% Load the orientation of the training data
|
||||
load([dataset_loc, '/', data_files(i).name], 'centres');
|
||||
centres_all = cat(1, centres_all, centres);
|
||||
|
||||
end
|
||||
|
||||
label_inds = [1:60,62:64,66:68];
|
||||
|
||||
% Construct mirror indices (which views need to be flipped to create other
|
||||
% profile training data)
|
||||
mirror_inds = zeros(size(centres_all,1), 1);
|
||||
for i=1:numel(data_files)
|
||||
|
||||
% mirrored image has inverse yaw
|
||||
mirrored_centre = centres_all(i,:);
|
||||
mirrored_centre(2) = -mirrored_centre(2);
|
||||
|
||||
% if mirrored version has same orientation, do not need mirroring
|
||||
if(~isequal(mirrored_centre, centres_all(i,:)))
|
||||
|
||||
centres_all = cat(1, centres_all, mirrored_centre);
|
||||
mirror_inds = cat(1, mirror_inds, i);
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
outputLocation = './prep_data/';
|
||||
|
||||
num_more_neg = 10;
|
||||
|
||||
% Make sure same data generated all the time
|
||||
rng(0);
|
||||
|
||||
neg_image_loc = './neg/';
|
||||
|
||||
neg_images = cat(1,dir([neg_image_loc, '/*.jpg']),dir([neg_image_loc, '/*.png']));
|
||||
|
||||
max_img_used = 1500;
|
||||
|
||||
%% do it separately for centers due to memory limitations
|
||||
for r=1:size(centres_all,1)
|
||||
|
||||
a_mod = 0.3;
|
||||
|
||||
mirror = false;
|
||||
|
||||
if(mirror_inds(r) ~= 0 )
|
||||
mirror = true;
|
||||
label_mirror_inds = [1,17;2,16;3,15;4,14;5,13;6,12;7,11;8,10;18,27;19,26;20,25;21,24;22,23;...
|
||||
32,36;33,35;37,46;38,45;39,44;40,43;41,48;42,47;49,55;50,54;51,53;60,56;59,57;...
|
||||
61,63;66,64];
|
||||
load([dataset_loc, '/', data_files(mirror_inds(r)).name]);
|
||||
else
|
||||
load([dataset_loc, '/', data_files(r).name]);
|
||||
end
|
||||
|
||||
% Convert to 66 point model
|
||||
landmark_locations = landmark_locations(:,label_inds,:);
|
||||
|
||||
visiCurrent = logical(visiIndex);
|
||||
|
||||
% Flip the orientation and indices for mirror data
|
||||
if(mirror)
|
||||
centres = [centres(1), -centres(2), -centres(3)];
|
||||
tmp1 = visiCurrent(label_mirror_inds(:,1));
|
||||
tmp2 = visiCurrent(label_mirror_inds(:,2));
|
||||
visiCurrent(label_mirror_inds(:,2)) = tmp1;
|
||||
visiCurrent(label_mirror_inds(:,1)) = tmp2;
|
||||
end
|
||||
|
||||
visibleVerts = 1:numel(visiCurrent);
|
||||
visibleVerts = visibleVerts(visiCurrent)-1;
|
||||
|
||||
% Correct the triangulation to take into account the vertex
|
||||
% visibilities
|
||||
triangulation = [];
|
||||
|
||||
shape = a_mod * Euler2Rot(centres * pi/180) * reshape(M, numel(M)/3, 3)';
|
||||
shape = shape';
|
||||
|
||||
for i=1:size(T,1)
|
||||
visib = 0;
|
||||
for j=1:numel(visibleVerts)
|
||||
if(T(i,1)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
if(T(i,2)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
if(T(i,3)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
end
|
||||
|
||||
% Only if all three of the vertices are visible
|
||||
if(visib == 3)
|
||||
|
||||
% Also want to remove triangles facing the wrong way (self occluded)
|
||||
v1 = [shape(T(i,1)+1,1), shape(T(i,1)+1,2), shape(T(i,1)+1,3)];
|
||||
v2 = [shape(T(i,2)+1,1), shape(T(i,2)+1,2), shape(T(i,2)+1,3)];
|
||||
v3 = [shape(T(i,3)+1,1), shape(T(i,3)+1,2), shape(T(i,3)+1,3)];
|
||||
normal = cross((v2-v1), v3 - v2);
|
||||
normal = normal / norm(normal);
|
||||
direction = normal * [0,0,1]';
|
||||
|
||||
% And only if the triangle is facing the camera
|
||||
if(direction > 0)
|
||||
triangulation = cat(1, triangulation, T(i,:));
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
% Initialise the warp
|
||||
[ alphas, betas, triX, mask, minX, minY, nPix ] = InitialisePieceWiseAffine(triangulation, shape);
|
||||
|
||||
imgs_to_use = randperm(size(landmark_locations, 1));
|
||||
|
||||
if(size(landmark_locations, 1) > max_img_used)
|
||||
imgs_to_use = imgs_to_use(1:max_img_used);
|
||||
end
|
||||
|
||||
% Extracting relevant filenames
|
||||
examples = zeros(numel(imgs_to_use) * (num_more_neg+1), nPix);
|
||||
errors = zeros(numel(imgs_to_use) * (num_more_neg+1), 1);
|
||||
|
||||
unused_pos = 0;
|
||||
|
||||
curr_filled = 0;
|
||||
|
||||
for j=imgs_to_use
|
||||
|
||||
labels = squeeze(landmark_locations(j,:,:));
|
||||
|
||||
img = squeeze(all_images(j,:,:));
|
||||
|
||||
if(mirror)
|
||||
img = fliplr(img);
|
||||
imgSize = size(img);
|
||||
flippedLbls = labels;
|
||||
flippedLbls(:,1) = imgSize(1) - flippedLbls(:,1);
|
||||
tmp1 = flippedLbls(label_mirror_inds(:,1),:);
|
||||
tmp2 = flippedLbls(label_mirror_inds(:,2),:);
|
||||
flippedLbls(label_mirror_inds(:,2),:) = tmp1;
|
||||
flippedLbls(label_mirror_inds(:,1),:) = tmp2;
|
||||
labels = flippedLbls;
|
||||
end
|
||||
|
||||
% If for some reason some of the labels are not visible in the
|
||||
% current sample skip this label
|
||||
non_existent_labels = labels(:,1)==0 | labels(:,2)==0;
|
||||
non_existent_inds = find(non_existent_labels)-1;
|
||||
if(numel(intersect(triangulation(:), non_existent_inds)) > 0)
|
||||
unused_pos = unused_pos + 1;
|
||||
continue;
|
||||
end
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
[features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Extract the correct PDM parameters for the model (we will perturb
|
||||
% them for some negative examples)
|
||||
[ a_orig, R_orig, trans_orig, ~, params_orig] = fit_PDM_ortho_proj_to_2D(M, E, V, labels);
|
||||
eul_orig = Rot2Euler(R_orig);
|
||||
|
||||
% a slightly perturbed example, too tight
|
||||
% from 0.3 to 0.9
|
||||
a_mod = a_orig * (0.6 + (randi(7) - 4)*0.1);
|
||||
p_global = [a_mod; eul_orig'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Compute the badness of fit
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% a slightly perturbed example, too broad
|
||||
% from 1.2 to 0.6
|
||||
a_mod = a_orig * (1.4 + (randi(5) - 3)*0.1);
|
||||
p_global = [a_mod; eul_orig'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A somewhat offset example
|
||||
|
||||
trans_mod = trans_orig + randn(2,1) * 10;
|
||||
p_global = [a_orig; eul_orig'; trans_mod];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A rotated sample
|
||||
eul_mod = eul_orig + randn(1,3)*0.2;
|
||||
p_global = [a_orig; eul_mod'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A sample with modified shape parameters
|
||||
p_global = [a_orig; eul_orig'; trans_orig];
|
||||
params_mod = params_orig + randn(size(params_orig)).*sqrt(E);
|
||||
labels_mod = GetShapeOrtho(M, V, params_mod, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% pick a random image from negative inriaperson dataset, use original location if
|
||||
% first, otherwhise resize it to fit
|
||||
for n=6:num_more_neg
|
||||
n_img = randi(numel(neg_images));
|
||||
|
||||
neg_image = imread([neg_image_loc, neg_images(n_img).name]);
|
||||
|
||||
if(size(neg_image,3) == 3)
|
||||
neg_image = rgb2gray(neg_image);
|
||||
end
|
||||
|
||||
[h_neg, w_neg] = size(neg_image);
|
||||
|
||||
% if the current labels fit just use them, if not, then resize
|
||||
% to fit
|
||||
max_x = max(labels(:,1));
|
||||
max_y = max(labels(:,2));
|
||||
|
||||
if(max_x > w_neg || max_y > h_neg)
|
||||
neg_image = imresize(neg_image, [max_y, max_x]);
|
||||
end
|
||||
|
||||
[features] = ExtractFaceFeatures(neg_image, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Set high error to 3
|
||||
errors(curr_filled,:) = 3;
|
||||
end
|
||||
|
||||
|
||||
if(mod(curr_filled, 10) == 0)
|
||||
fprintf('%d/%d done\n', curr_filled/(num_more_neg+1), numel(imgs_to_use));
|
||||
end
|
||||
% add the pos example to the background
|
||||
|
||||
end
|
||||
|
||||
examples = examples(1:curr_filled,:);
|
||||
errors = errors(1:curr_filled);
|
||||
|
||||
% svm training
|
||||
filename = sprintf('%s/face_checker_general_training_66_%d.mat', outputLocation, r);
|
||||
save(filename, 'examples', 'errors', 'alphas', 'betas', 'triangulation', 'minX', 'minY', 'nPix', 'shape', 'triX', 'mask', 'centres');
|
||||
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
function [features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY)
|
||||
|
||||
% Make sure labels are within range
|
||||
[hRes, wRes] = size(img);
|
||||
labels(labels(:,1) < 1,1) = 1;
|
||||
labels(labels(:,2) < 1,2) = 1;
|
||||
|
||||
labels(labels(:,1) > wRes-1,1) = wRes-1;
|
||||
labels(labels(:,2) > hRes-1,2) = hRes-1;
|
||||
|
||||
crop_img = Crop(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
crop_img(isnan(crop_img)) = 0;
|
||||
|
||||
% vectorised version
|
||||
features = reshape(crop_img(logical(mask)), 1, nPix);
|
||||
|
||||
% normalisations
|
||||
features = (features - mean(features));
|
||||
norms = std(features);
|
||||
if(norms==0)
|
||||
norms = 1;
|
||||
end
|
||||
features = features / norms;
|
||||
end
|
|
@ -1,334 +0,0 @@
|
|||
function Create_data_68()
|
||||
|
||||
load '../models/pdm/pdm_68_multi_pie';
|
||||
load '../models/tri_68.mat';
|
||||
|
||||
% This script uses the same format used for patch expert training, and
|
||||
% expects the data to be there
|
||||
dataset_loc = '../../../CCNF experiments/clnf/patch_training/data_preparation/prepared_data/';
|
||||
addpath('../PDM_helpers/');
|
||||
|
||||
scale = '0.5';
|
||||
prefix= 'combined_';
|
||||
|
||||
% Find the available positive training data
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix, scale));
|
||||
centres_all = [];
|
||||
for i=1:numel(data_files)
|
||||
|
||||
% Load the orientation of the training data
|
||||
load([dataset_loc, '/', data_files(i).name], 'centres');
|
||||
centres_all = cat(1, centres_all, centres);
|
||||
|
||||
end
|
||||
|
||||
% Construct mirror indices (which views need to be flipped to create other
|
||||
% profile training data)
|
||||
mirror_inds = zeros(size(centres_all,1), 1);
|
||||
for i=1:numel(data_files)
|
||||
|
||||
% mirrored image has inverse yaw
|
||||
mirrored_centre = centres_all(i,:);
|
||||
mirrored_centre(2) = -mirrored_centre(2);
|
||||
|
||||
% if mirrored version has same orientation, do not need mirroring
|
||||
if(~isequal(mirrored_centre, centres_all(i,:)))
|
||||
|
||||
centres_all = cat(1, centres_all, mirrored_centre);
|
||||
mirror_inds = cat(1, mirror_inds, i);
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
outputLocation = './prep_data/';
|
||||
|
||||
num_more_neg = 10;
|
||||
|
||||
% Make sure same data generated all the time
|
||||
rng(0);
|
||||
|
||||
neg_image_loc = './neg/';
|
||||
|
||||
neg_images = cat(1,dir([neg_image_loc, '/*.jpg']),dir([neg_image_loc, '/*.png']));
|
||||
|
||||
max_img_used = 1500;
|
||||
|
||||
% do it separately for centers due to memory limitations
|
||||
for r=1:size(centres_all,1)
|
||||
|
||||
a_mod = 0.3;
|
||||
|
||||
mirror = false;
|
||||
|
||||
if(mirror_inds(r) ~= 0 )
|
||||
mirror = true;
|
||||
label_mirror_inds = [1,17;2,16;3,15;4,14;5,13;6,12;7,11;8,10;18,27;19,26;20,25;21,24;22,23;...
|
||||
32,36;33,35;37,46;38,45;39,44;40,43;41,48;42,47;49,55;50,54;51,53;60,56;59,57;...
|
||||
61,65;62,64;68,66];
|
||||
load([dataset_loc, '/', data_files(mirror_inds(r)).name]);
|
||||
else
|
||||
load([dataset_loc, '/', data_files(r).name]);
|
||||
end
|
||||
|
||||
visiCurrent = logical(visiIndex);
|
||||
|
||||
if(mirror)
|
||||
centres = [centres(1), -centres(2), -centres(3)];
|
||||
tmp1 = visiCurrent(label_mirror_inds(:,1));
|
||||
tmp2 = visiCurrent(label_mirror_inds(:,2));
|
||||
visiCurrent(label_mirror_inds(:,2)) = tmp1;
|
||||
visiCurrent(label_mirror_inds(:,1)) = tmp2;
|
||||
end
|
||||
|
||||
visibleVerts = 1:numel(visiCurrent);
|
||||
visibleVerts = visibleVerts(visiCurrent)-1;
|
||||
|
||||
% Correct the triangulation to take into account the vertex
|
||||
% visibilities
|
||||
triangulation = [];
|
||||
|
||||
shape = a_mod * Euler2Rot(centres * pi/180) * reshape(M, numel(M)/3, 3)';
|
||||
shape = shape';
|
||||
|
||||
for i=1:size(T,1)
|
||||
visib = 0;
|
||||
for j=1:numel(visibleVerts)
|
||||
if(T(i,1)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
if(T(i,2)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
if(T(i,3)==visibleVerts(j))
|
||||
visib = visib+1;
|
||||
end
|
||||
end
|
||||
|
||||
% Only if all three of the vertices are visible
|
||||
if(visib == 3)
|
||||
|
||||
% Also want to remove triangles facing the wrong way (self occluded)
|
||||
v1 = [shape(T(i,1)+1,1), shape(T(i,1)+1,2), shape(T(i,1)+1,3)];
|
||||
v2 = [shape(T(i,2)+1,1), shape(T(i,2)+1,2), shape(T(i,2)+1,3)];
|
||||
v3 = [shape(T(i,3)+1,1), shape(T(i,3)+1,2), shape(T(i,3)+1,3)];
|
||||
normal = cross((v2-v1), v3 - v2);
|
||||
normal = normal / norm(normal);
|
||||
direction = normal * [0,0,1]';
|
||||
|
||||
% And only if the triangle is facing the camera
|
||||
if(direction > 0)
|
||||
triangulation = cat(1, triangulation, T(i,:));
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
% Initialise the warp
|
||||
[ alphas, betas, triX, mask, minX, minY, nPix ] = InitialisePieceWiseAffine(triangulation, shape);
|
||||
|
||||
mask = logical(mask);
|
||||
|
||||
imgs_to_use = randperm(size(landmark_locations, 1));
|
||||
|
||||
if(size(landmark_locations, 1) > max_img_used)
|
||||
imgs_to_use = imgs_to_use(1:max_img_used);
|
||||
end
|
||||
|
||||
% Extracting relevant filenames
|
||||
examples = zeros(numel(imgs_to_use) * (num_more_neg+1), nPix);
|
||||
errors = zeros(numel(imgs_to_use) * (num_more_neg+1), 1);
|
||||
|
||||
unused_pos = 0;
|
||||
|
||||
curr_filled = 0;
|
||||
|
||||
for j=imgs_to_use
|
||||
|
||||
labels = squeeze(landmark_locations(j,:,:));
|
||||
|
||||
img = squeeze(all_images(j,:,:));
|
||||
|
||||
if(mirror)
|
||||
img = fliplr(img);
|
||||
imgSize = size(img);
|
||||
flippedLbls = labels;
|
||||
flippedLbls(:,1) = imgSize(1) - flippedLbls(:,1);
|
||||
tmp1 = flippedLbls(label_mirror_inds(:,1),:);
|
||||
tmp2 = flippedLbls(label_mirror_inds(:,2),:);
|
||||
flippedLbls(label_mirror_inds(:,2),:) = tmp1;
|
||||
flippedLbls(label_mirror_inds(:,1),:) = tmp2;
|
||||
labels = flippedLbls;
|
||||
end
|
||||
|
||||
% If for some reason some of the labels are not visible in the
|
||||
% current sample skip this label
|
||||
non_existent_labels = labels(:,1)==0 | labels(:,2)==0;
|
||||
non_existent_inds = find(non_existent_labels)-1;
|
||||
if(numel(intersect(triangulation(:), non_existent_inds)) > 0)
|
||||
unused_pos = unused_pos + 1;
|
||||
continue;
|
||||
end
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
[features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Extract the correct PDM parameters for the model (we will perturb
|
||||
% them for some negative examples)
|
||||
[ a_orig, R_orig, trans_orig, ~, params_orig] = fit_PDM_ortho_proj_to_2D(M, E, V, labels);
|
||||
eul_orig = Rot2Euler(R_orig);
|
||||
|
||||
% a slightly perturbed example, too tight
|
||||
% from 0.3 to 0.9
|
||||
a_mod = a_orig * (0.6 + (randi(7) - 4)*0.1);
|
||||
p_global = [a_mod; eul_orig'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Compute the badness of fit
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% a slightly perturbed example, too broad
|
||||
% from 1.2 to 0.6
|
||||
a_mod = a_orig * (1.4 + (randi(5) - 3)*0.1);
|
||||
p_global = [a_mod; eul_orig'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A somewhat offset example
|
||||
|
||||
trans_mod = trans_orig + randn(2,1) * 10;
|
||||
p_global = [a_orig; eul_orig'; trans_mod];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A rotated sample
|
||||
eul_mod = eul_orig + randn(1,3)*0.2;
|
||||
p_global = [a_orig; eul_mod'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% A sample with modified shape parameters
|
||||
p_global = [a_orig; eul_orig'; trans_orig];
|
||||
params_mod = params_orig + randn(size(params_orig)).*sqrt(E);
|
||||
labels_mod = GetShapeOrtho(M, V, params_mod, p_global);
|
||||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
error = norm(labels_mod(:) - labels(:)) / (max(labels(:,2))-min(labels(:,2)));
|
||||
errors(curr_filled,:) = error;
|
||||
|
||||
% pick a random image from the negative INRIAPerson dataset, use original location if
|
||||
% first, otherwise resize it to fit
|
||||
for n=6:num_more_neg
|
||||
n_img = randi(numel(neg_images));
|
||||
|
||||
neg_image = imread([neg_image_loc, neg_images(n_img).name]);
|
||||
|
||||
if(size(neg_image,3) == 3)
|
||||
neg_image = rgb2gray(neg_image);
|
||||
end
|
||||
|
||||
[h_neg, w_neg] = size(neg_image);
|
||||
|
||||
% if the current labels fit just use them, if not, then resize
|
||||
% to fit
|
||||
max_x = max(labels(:,1));
|
||||
max_y = max(labels(:,2));
|
||||
|
||||
if(max_x > w_neg || max_y > h_neg)
|
||||
neg_image = imresize(neg_image, [max_y, max_x]);
|
||||
end
|
||||
|
||||
[features] = ExtractFaceFeatures(neg_image, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
% Set high error to 3
|
||||
errors(curr_filled,:) = 3;
|
||||
end
|
||||
|
||||
|
||||
if(mod(curr_filled, 10) == 0)
|
||||
fprintf('%d/%d done\n', curr_filled/(num_more_neg+1), numel(imgs_to_use));
|
||||
end
|
||||
% add the pos example to the background
|
||||
|
||||
end
|
||||
|
||||
examples = examples(1:curr_filled,:);
|
||||
errors = errors(1:curr_filled);
|
||||
|
||||
% svm training
|
||||
filename = sprintf('%s/face_checker_general_training_68_%d.mat', outputLocation, r);
|
||||
save(filename, 'examples', 'errors', 'alphas', 'betas', 'triangulation', 'minX', 'minY', 'nPix', 'shape', 'triX', 'mask', 'centres');
|
||||
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
function [features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY)
|
||||
|
||||
% Make sure labels are within range
|
||||
[hRes, wRes] = size(img);
|
||||
labels(labels(:,1) < 1,1) = 1;
|
||||
labels(labels(:,2) < 1,2) = 1;
|
||||
|
||||
labels(labels(:,1) > wRes-1,1) = wRes-1;
|
||||
labels(labels(:,2) > hRes-1,2) = hRes-1;
|
||||
|
||||
crop_img = Crop(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
crop_img(isnan(crop_img)) = 0;
|
||||
|
||||
% vectorised version
|
||||
features = reshape(crop_img(logical(mask)), 1, nPix);
|
||||
|
||||
% normalisations
|
||||
features = (features - mean(features));
|
||||
norms = std(features);
|
||||
if(norms==0)
|
||||
norms = 1;
|
||||
end
|
||||
features = features / norms;
|
||||
|
||||
end
|
|
@ -1,6 +1,6 @@
|
|||
function Create_data_68_large()
|
||||
function Create_data_test()
|
||||
|
||||
load '../models/pdm/pdm_68_aligned_wild';
|
||||
load '../models/pdm/pdm_68_aligned_menpo';
|
||||
load '../models/tri_68.mat';
|
||||
|
||||
% This script uses the same format used for patch expert training, and
|
||||
|
@ -8,14 +8,19 @@ load '../models/tri_68.mat';
|
|||
% https://github.com/TadasBaltrusaitis/CCNF)
|
||||
|
||||
% Replace with your location of training data
|
||||
dataset_loc = 'C:/Users/Tadas/Documents/CCNF/patch_experts/data_preparation/prepared_data/';
|
||||
dataset_loc = 'C:\Users\tbaltrus\Documents\CCNF\patch_experts\data_preparation/prepared_data/';
|
||||
addpath('../PDM_helpers/');
|
||||
addpath('./paw_helpers/');
|
||||
|
||||
% Collect Menpo, Multi-PIE and 300W data for training the validator
|
||||
scale = '0.5';
|
||||
prefix= 'combined_';
|
||||
prefix_menpo= 'menpo_valid_';
|
||||
prefix_mpie_300W = 'combined_';
|
||||
|
||||
% Find the available positive training data
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix, scale));
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix_menpo, scale));
|
||||
data_files_c = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix_mpie_300W, scale));
|
||||
|
||||
centres_all = [];
|
||||
for i=1:numel(data_files)
|
||||
|
||||
|
@ -25,9 +30,6 @@ for i=1:numel(data_files)
|
|||
|
||||
end
|
||||
|
||||
% Do not use extreme pose
|
||||
centres_all = centres_all(1:3,:);
|
||||
|
||||
% Construct mirror indices (which views need to be flipped to create other
|
||||
% profile training data)
|
||||
mirror_inds = zeros(size(centres_all,1), 1);
|
||||
|
@ -48,14 +50,14 @@ for i=1:numel(data_files)
|
|||
end
|
||||
|
||||
% Replace with your location of training data
|
||||
outputLocation = 'E:/datasets/detection_validation/prep_data/';
|
||||
outputLocation = 'D:\Datasets/detection_validation/prep_data/';
|
||||
|
||||
num_more_neg = 10;
|
||||
|
||||
% Make sure same data generated all the time
|
||||
rng(0);
|
||||
|
||||
neg_image_loc = 'E:/datasets/detection_validation/neg/';
|
||||
neg_image_loc = 'D:\Datasets\INRIAPerson\INRIAPerson\Train\neg/';
|
||||
|
||||
neg_images = cat(1,dir([neg_image_loc, '/*.jpg']),dir([neg_image_loc, '/*.png']));
|
||||
|
||||
|
@ -72,10 +74,21 @@ for r=1:size(centres_all,1)
|
|||
mirror = true;
|
||||
label_mirror_inds = [1,17;2,16;3,15;4,14;5,13;6,12;7,11;8,10;18,27;19,26;20,25;21,24;22,23;...
|
||||
32,36;33,35;37,46;38,45;39,44;40,43;41,48;42,47;49,55;50,54;51,53;60,56;59,57;...
|
||||
61,65;62,64;68,66];
|
||||
61,65;62,64;68,66];
|
||||
|
||||
% Make sure we take the subset of visibilities from all the
|
||||
% datasets
|
||||
load([dataset_loc, '/', data_files_c(mirror_inds(r)).name], 'visiIndex');
|
||||
visiIndex_t = visiIndex;
|
||||
|
||||
load([dataset_loc, '/', data_files(mirror_inds(r)).name]);
|
||||
visiIndex = visiIndex_t & visiIndex;
|
||||
else
|
||||
load([dataset_loc, '/', data_files_c(r).name], 'visiIndex');
|
||||
visiIndex_t = visiIndex;
|
||||
|
||||
load([dataset_loc, '/', data_files(r).name]);
|
||||
visiIndex = visiIndex_t & visiIndex;
|
||||
end
|
||||
|
||||
visiCurrent = logical(visiIndex);
|
||||
|
@ -312,8 +325,7 @@ for r=1:size(centres_all,1)
|
|||
examples = examples(1:curr_filled,:);
|
||||
errors = errors(1:curr_filled);
|
||||
|
||||
% svm training
|
||||
filename = sprintf('%s/face_checker_general_training_large_68_%d.mat', outputLocation, r);
|
||||
filename = sprintf('%s/face_validator_test_%d.mat', outputLocation, r);
|
||||
save(filename, 'examples', 'errors', 'alphas', 'betas', 'triangulation', 'minX', 'minY', 'nPix', 'shape', 'triX', 'mask', 'centres');
|
||||
|
||||
|
|
@ -1,22 +1,26 @@
|
|||
function Create_data_66_large()
|
||||
function Create_data_train()
|
||||
|
||||
load '../models/pdm/pdm_66_multi_pie';
|
||||
load '../models/tri_66.mat';
|
||||
load '../models/pdm/pdm_68_aligned_menpo';
|
||||
load '../models/tri_68.mat';
|
||||
|
||||
% This script uses the same format used for patch expert training, and
|
||||
% expects the data to be there (this can be found in
|
||||
% https://github.com/TadasBaltrusaitis/CCNF)
|
||||
|
||||
% Replace with your location of training data
|
||||
dataset_loc = 'C:/Users/Tadas/Documents/CCNF/patch_experts/data_preparation/prepared_data/';
|
||||
|
||||
dataset_loc = 'C:\Users\tbaltrus\Documents\CCNF\patch_experts\data_preparation/prepared_data/';
|
||||
addpath('../PDM_helpers/');
|
||||
addpath('./paw_helpers/');
|
||||
|
||||
% Collect Menpo, Multi-PIE and 300W data for training the validator
|
||||
scale = '0.5';
|
||||
prefix= 'combined_';
|
||||
prefix_menpo= 'menpo_train_';
|
||||
prefix_mpie_300W = 'combined_';
|
||||
|
||||
% Find the available positive training data
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix, scale));
|
||||
data_files = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix_menpo, scale));
|
||||
data_files_c = dir(sprintf('%s/%s%s*.mat', dataset_loc, prefix_mpie_300W, scale));
|
||||
|
||||
centres_all = [];
|
||||
for i=1:numel(data_files)
|
||||
|
||||
|
@ -26,8 +30,6 @@ for i=1:numel(data_files)
|
|||
|
||||
end
|
||||
|
||||
label_inds = [1:60,62:64,66:68];
|
||||
|
||||
% Construct mirror indices (which views need to be flipped to create other
|
||||
% profile training data)
|
||||
mirror_inds = zeros(size(centres_all,1), 1);
|
||||
|
@ -47,18 +49,20 @@ for i=1:numel(data_files)
|
|||
|
||||
end
|
||||
|
||||
outputLocation = 'F:/datasets/detection_validation/prep_data/';
|
||||
% Replace with your location of training data
|
||||
outputLocation = 'D:\Datasets/detection_validation/prep_data/';
|
||||
|
||||
num_more_neg = 10;
|
||||
|
||||
% Make sure same data generated all the time
|
||||
rng(0);
|
||||
|
||||
neg_image_loc = 'F:/datasets/detection_validation/neg/';
|
||||
% Negative samples from the INRIAPerson dataset
|
||||
neg_image_loc = 'D:\Datasets\INRIAPerson\INRIAPerson\Train\neg/';
|
||||
|
||||
neg_images = cat(1,dir([neg_image_loc, '/*.jpg']),dir([neg_image_loc, '/*.png']));
|
||||
|
||||
max_img_used = 2500;
|
||||
max_img_used = 8000;
|
||||
|
||||
% do it separately for centers due to memory limitations
|
||||
for r=1:size(centres_all,1)
|
||||
|
@ -71,15 +75,31 @@ for r=1:size(centres_all,1)
|
|||
mirror = true;
|
||||
label_mirror_inds = [1,17;2,16;3,15;4,14;5,13;6,12;7,11;8,10;18,27;19,26;20,25;21,24;22,23;...
|
||||
32,36;33,35;37,46;38,45;39,44;40,43;41,48;42,47;49,55;50,54;51,53;60,56;59,57;...
|
||||
61,63;66,64];
|
||||
61,65;62,64;68,66];
|
||||
load([dataset_loc, '/', data_files_c(mirror_inds(r)).name]);
|
||||
all_images_t = all_images;
|
||||
landmark_locations_t = landmark_locations;
|
||||
visiIndex_t = visiIndex;
|
||||
|
||||
load([dataset_loc, '/', data_files(mirror_inds(r)).name]);
|
||||
|
||||
% Combining Menpo + MPIE + 300W
|
||||
all_images = cat(1, all_images, all_images_t);
|
||||
landmark_locations = cat(1, landmark_locations, landmark_locations_t);
|
||||
% Taking a subset of visibilities from all the datasets
|
||||
visiIndex = visiIndex_t & visiIndex;
|
||||
else
|
||||
load([dataset_loc, '/', data_files_c(r).name]);
|
||||
all_images_t = all_images;
|
||||
landmark_locations_t = landmark_locations;
|
||||
visiIndex_t = visiIndex;
|
||||
|
||||
load([dataset_loc, '/', data_files(r).name]);
|
||||
all_images = cat(1, all_images, all_images_t);
|
||||
landmark_locations = cat(1, landmark_locations, landmark_locations_t);
|
||||
visiIndex = visiIndex_t & visiIndex;
|
||||
end
|
||||
|
||||
% Convert to 66 point model
|
||||
landmark_locations = landmark_locations(:,label_inds,:);
|
||||
|
||||
visiCurrent = logical(visiIndex);
|
||||
|
||||
if(mirror)
|
||||
|
@ -161,7 +181,7 @@ for r=1:size(centres_all,1)
|
|||
img = fliplr(img);
|
||||
imgSize = size(img);
|
||||
flippedLbls = labels;
|
||||
flippedLbls(:,1) = imgSize(1) - flippedLbls(:,1);
|
||||
flippedLbls(:,1) = imgSize(1) - flippedLbls(:,1) + 1;
|
||||
tmp1 = flippedLbls(label_mirror_inds(:,1),:);
|
||||
tmp2 = flippedLbls(label_mirror_inds(:,2),:);
|
||||
flippedLbls(label_mirror_inds(:,2),:) = tmp1;
|
||||
|
@ -178,8 +198,12 @@ for r=1:size(centres_all,1)
|
|||
continue;
|
||||
end
|
||||
|
||||
% Centering the pixel so that 0,0 is center of the top left pixel
|
||||
labels = labels - 1;
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
[features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
% sample_img = zeros(size(mask));sample_img(mask) = features;imagesc(sample_img)
|
||||
examples(curr_filled,:) = features;
|
||||
errors(curr_filled,:) = 0;
|
||||
|
||||
|
@ -197,6 +221,7 @@ for r=1:size(centres_all,1)
|
|||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
% sample_img = zeros(size(mask));sample_img(mask) = features;imagesc(sample_img)
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
@ -214,7 +239,8 @@ for r=1:size(centres_all,1)
|
|||
labels_mod = labels_mod(:,1:2);
|
||||
|
||||
[features] = ExtractFaceFeatures(img, labels_mod, triangulation, triX, mask, alphas, betas, nPix, minX, minY);
|
||||
|
||||
% sample_img = zeros(size(mask));sample_img(mask) = features;imagesc(sample_img)
|
||||
|
||||
curr_filled = curr_filled + 1;
|
||||
examples(curr_filled,:) = features;
|
||||
|
||||
|
@ -223,7 +249,7 @@ for r=1:size(centres_all,1)
|
|||
|
||||
% A somewhat offset example
|
||||
|
||||
trans_mod = trans_orig + randn(2,1) * 10;
|
||||
trans_mod = trans_orig + randn(2,1) * 20;
|
||||
p_global = [a_orig; eul_orig'; trans_mod];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
|
@ -238,7 +264,7 @@ for r=1:size(centres_all,1)
|
|||
errors(curr_filled,:) = error;
|
||||
|
||||
% A rotated sample
|
||||
eul_mod = eul_orig + randn(1,3)*0.2;
|
||||
eul_mod = eul_orig + randn(1,3)*0.3;
|
||||
p_global = [a_orig; eul_mod'; trans_orig];
|
||||
|
||||
labels_mod = GetShapeOrtho(M, V, params_orig, p_global);
|
||||
|
@ -308,8 +334,7 @@ for r=1:size(centres_all,1)
|
|||
examples = examples(1:curr_filled,:);
|
||||
errors = errors(1:curr_filled);
|
||||
|
||||
% svm training
|
||||
filename = sprintf('%s/face_checker_general_training_large_66_%d.mat', outputLocation, r);
|
||||
filename = sprintf('%s/face_validator_train_%d.mat', outputLocation, r);
|
||||
save(filename, 'examples', 'errors', 'alphas', 'betas', 'triangulation', 'minX', 'minY', 'nPix', 'shape', 'triX', 'mask', 'centres');
|
||||
|
||||
|
||||
|
@ -321,8 +346,8 @@ function [features] = ExtractFaceFeatures(img, labels, triangulation, triX, mask
|
|||
|
||||
% Make sure labels are within range
|
||||
[hRes, wRes] = size(img);
|
||||
labels(labels(:,1) < 1,1) = 1;
|
||||
labels(labels(:,2) < 1,2) = 1;
|
||||
labels(labels(:,1) < 0,1) = 0;
|
||||
labels(labels(:,2) < 0,2) = 0;
|
||||
|
||||
labels(labels(:,1) > wRes-1,1) = wRes-1;
|
||||
labels(labels(:,2) > hRes-1,2) = hRes-1;
|
|
@@ -1,10 +0,0 @@
before_script:
  - sudo apt-add-repository ppa:octave/stable --yes
  - sudo apt-get update -y
  - sudo apt-get install octave -y
  - sudo apt-get install liboctave-dev -y
script:
  - sh -c "octave tests/runalltests.m"

notifications:
  email: false
@@ -1,29 +0,0 @@
function cae = caeapplygrads(cae)
    cae.sv = 0;
    for j = 1 : numel(cae.a)
        for i = 1 : numel(cae.i)
            % cae.vik{i}{j} = cae.momentum * cae.vik{i}{j} + cae.alpha ./ (cae.sigma + cae.ddik{i}{j}) .* cae.dik{i}{j};
            % cae.vok{i}{j} = cae.momentum * cae.vok{i}{j} + cae.alpha ./ (cae.sigma + cae.ddok{i}{j}) .* cae.dok{i}{j};
            cae.vik{i}{j} = cae.alpha * cae.dik{i}{j};
            cae.vok{i}{j} = cae.alpha * cae.dok{i}{j};
            cae.sv = cae.sv + sum(cae.vik{i}{j}(:) .^ 2);
            cae.sv = cae.sv + sum(cae.vok{i}{j}(:) .^ 2);

            cae.ik{i}{j} = cae.ik{i}{j} - cae.vik{i}{j};
            cae.ok{i}{j} = cae.ok{i}{j} - cae.vok{i}{j};
        end
        % cae.vb{j} = cae.momentum * cae.vb{j} + cae.alpha / (cae.sigma + cae.ddb{j}) * cae.db{j};
        cae.vb{j} = cae.alpha * cae.db{j};
        cae.sv = cae.sv + sum(cae.vb{j} .^ 2);

        cae.b{j} = cae.b{j} - cae.vb{j};
    end

    for i = 1 : numel(cae.o)
        % cae.vc{i} = cae.momentum * cae.vc{i} + cae.alpha / (cae.sigma + cae.ddc{i}) * cae.dc{i};
        cae.vc{i} = cae.alpha * cae.dc{i};
        cae.sv = cae.sv + sum(cae.vc{i} .^ 2);

        cae.c{i} = cae.c{i} - cae.vc{i};
    end
end
@@ -1,29 +0,0 @@
function cae = caebbp(cae)

    %% backprop deltas
    for i = 1 : numel(cae.o)
        % output delta delta
        cae.odd{i} = (cae.o{i} .* (1 - cae.o{i}) .* cae.edgemask) .^ 2;
        % delta delta c
        cae.ddc{i} = sum(cae.odd{i}(:)) / size(cae.odd{i}, 1);
    end

    for j = 1 : numel(cae.a) % calc activation delta deltas
        z = 0;
        for i = 1 : numel(cae.o)
            z = z + convn(cae.odd{i}, flipall(cae.ok{i}{j} .^ 2), 'full');
        end
        cae.add{j} = (cae.a{j} .* (1 - cae.a{j})) .^ 2 .* z;
    end

    %% calc params delta deltas
    ns = size(cae.odd{1}, 1);
    for j = 1 : numel(cae.a)
        cae.ddb{j} = sum(cae.add{j}(:)) / ns;
        for i = 1 : numel(cae.o)
            cae.ddok{i}{j} = convn(flipall(cae.a{j} .^ 2), cae.odd{i}, 'valid') / ns;
            cae.ddik{i}{j} = convn(cae.add{j}, flipall(cae.i{i} .^ 2), 'valid') / ns;
        end
    end

end
@@ -1,34 +0,0 @@
function cae = caebp(cae, y)

    %% backprop deltas
    cae.L = 0;
    for i = 1 : numel(cae.o)
        % error
        cae.e{i} = (cae.o{i} - y{i}) .* cae.edgemask;
        % loss function
        cae.L = cae.L + 1/2 * sum(cae.e{i}(:) .^2 ) / size(cae.e{i}, 1);
        % output delta
        cae.od{i} = cae.e{i} .* (cae.o{i} .* (1 - cae.o{i}));

        cae.dc{i} = sum(cae.od{i}(:)) / size(cae.e{i}, 1);
    end

    for j = 1 : numel(cae.a) % calc activation deltas
        z = 0;
        for i = 1 : numel(cae.o)
            z = z + convn(cae.od{i}, flipall(cae.ok{i}{j}), 'full');
        end
        cae.ad{j} = cae.a{j} .* (1 - cae.a{j}) .* z;
    end

    %% calc gradients
    ns = size(cae.e{1}, 1);
    for j = 1 : numel(cae.a)
        cae.db{j} = sum(cae.ad{j}(:)) / ns;
        for i = 1 : numel(cae.o)
            cae.dok{i}{j} = convn(flipall(cae.a{j}), cae.od{i}, 'valid') / ns;
            cae.dik{i}{j} = convn(cae.ad{j}, flipall(cae.i{i}), 'valid') / ns;
        end
    end

end
@@ -1,13 +0,0 @@
function cae = caedown(cae)
    pa = cae.a;
    pok = cae.ok;

    for i = 1 : numel(cae.o)
        z = 0;
        for j = 1 : numel(cae.a)
            z = z + convn(pa{j}, pok{i}{j}, 'valid');
        end
        cae.o{i} = sigm(z + cae.c{i});

    end
end
@@ -1,32 +0,0 @@
%% mnist data
clear all; close all; clc;
load mnist_uint8;
x = cell(100, 1);
N = 600;
for i = 1 : 100
    x{i}{1} = reshape(train_x(((i - 1) * N + 1) : (i) * N, :), N, 28, 28) * 255;
end
%% ex 1
scae = {
    struct('outputmaps', 10, 'inputkernel', [1 5 5], 'outputkernel', [1 5 5], 'scale', [1 2 2], 'sigma', 0.1, 'momentum', 0.9, 'noise', 0)
};

opts.rounds = 1000;
opts.batchsize = 1;
opts.alpha = 0.01;
opts.ddinterval = 10;
opts.ddhist = 0.5;
scae = scaesetup(scae, x, opts);
scae = scaetrain(scae, x, opts);
cae = scae{1};

% Visualize the average reconstruction error
plot(cae.rL);

% Visualize the output kernels
ff=[];
for i=1:numel(cae.ok{1});
    mm = cae.ok{1}{i}(1,:,:);
    ff(i,:) = mm(:);
end;
figure;visualize(ff')
@ -1,107 +0,0 @@
|
|||
function cae = caenumgradcheck(cae, x, y)
|
||||
epsilon = 1e-4;
|
||||
er = 1e-6;
|
||||
disp('performing numerical gradient checking...')
|
||||
for i = 1 : numel(cae.o)
|
||||
p_cae = cae; p_cae.c{i} = p_cae.c{i} + epsilon;
|
||||
m_cae = cae; m_cae.c{i} = m_cae.c{i} - epsilon;
|
||||
|
||||
[m_cae, p_cae] = caerun(m_cae, p_cae, x, y);
|
||||
d = (p_cae.L - m_cae.L) / (2 * epsilon);
|
||||
|
||||
e = abs(d - cae.dc{i});
|
||||
if e > er
|
||||
disp('OUTPUT BIAS numerical gradient checking failed');
|
||||
disp(e);
|
||||
disp(d / cae.dc{i});
|
||||
keyboard
|
||||
end
|
||||
end
|
||||
|
||||
for a = 1 : numel(cae.a)
|
||||
|
||||
p_cae = cae; p_cae.b{a} = p_cae.b{a} + epsilon;
|
||||
m_cae = cae; m_cae.b{a} = m_cae.b{a} - epsilon;
|
||||
|
||||
[m_cae, p_cae] = caerun(m_cae, p_cae, x, y);
|
||||
d = (p_cae.L - m_cae.L) / (2 * epsilon);
|
||||
% cae.dok{i}{a}(u) = d;
|
||||
e = abs(d - cae.db{a});
|
||||
if e > er
|
||||
disp('BIAS numerical gradient checking failed');
|
||||
disp(e);
|
||||
disp(d / cae.db{a});
|
||||
keyboard
|
||||
end
|
||||
|
||||
for i = 1 : numel(cae.o)
|
||||
for u = 1 : numel(cae.ok{i}{a})
|
||||
p_cae = cae; p_cae.ok{i}{a}(u) = p_cae.ok{i}{a}(u) + epsilon;
|
||||
m_cae = cae; m_cae.ok{i}{a}(u) = m_cae.ok{i}{a}(u) - epsilon;
|
||||
|
||||
[m_cae, p_cae] = caerun(m_cae, p_cae, x, y);
|
||||
d = (p_cae.L - m_cae.L) / (2 * epsilon);
|
||||
% cae.dok{i}{a}(u) = d;
|
||||
e = abs(d - cae.dok{i}{a}(u));
|
||||
if e > er
|
||||
disp('OUTPUT KERNEL numerical gradient checking failed');
|
||||
disp(e);
|
||||
disp(d / cae.dok{i}{a}(u));
|
||||
% keyboard
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
for i = 1 : numel(cae.i)
|
||||
for u = 1 : numel(cae.ik{i}{a})
|
||||
p_cae = cae;
|
||||
m_cae = cae;
|
||||
p_cae.ik{i}{a}(u) = p_cae.ik{i}{a}(u) + epsilon;
|
||||
m_cae.ik{i}{a}(u) = m_cae.ik{i}{a}(u) - epsilon;
|
||||
[m_cae, p_cae] = caerun(m_cae, p_cae, x, y);
|
||||
d = (p_cae.L - m_cae.L) / (2 * epsilon);
|
||||
% cae.dik{i}{a}(u) = d;
|
||||
e = abs(d - cae.dik{i}{a}(u));
|
||||
if e > er
|
||||
disp('INPUT KERNEL numerical gradient checking failed');
|
||||
disp(e);
|
||||
disp(d / cae.dik{i}{a}(u));
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
disp('done')
|
||||
|
||||
end
|
||||
|
||||
function [m_cae, p_cae] = caerun(m_cae, p_cae, x, y)
|
||||
m_cae = caeup(m_cae, x); m_cae = caedown(m_cae); m_cae = caebp(m_cae, y);
|
||||
p_cae = caeup(p_cae, x); p_cae = caedown(p_cae); p_cae = caebp(p_cae, y);
|
||||
end
|
||||
|
||||
%function checknumgrad(cae,what,x,y)
|
||||
% epsilon = 1e-4;
|
||||
% er = 1e-9;
|
||||
%
|
||||
% for i = 1 : numel(eval(what))
|
||||
% if iscell(eval(['cae.' what]))
|
||||
% checknumgrad(cae,[what '{' num2str(i) '}'], x, y)
|
||||
% else
|
||||
% p_cae = cae;
|
||||
% m_cae = cae;
|
||||
% eval(['p_cae.' what '(' num2str(i) ')']) = eval([what '(' num2str(i) ')']) + epsilon;
|
||||
% eval(['m_cae.' what '(' num2str(i) ')']) = eval([what '(' num2str(i) ')']) - epsilon;
|
||||
%
|
||||
% m_cae = caeff(m_cae, x); m_cae = caedown(m_cae); m_cae = caebp(m_cae, y);
|
||||
% p_cae = caeff(p_cae, x); p_cae = caedown(p_cae); p_cae = caebp(p_cae, y);
|
||||
%
|
||||
% d = (p_cae.L - m_cae.L) / (2 * epsilon);
|
||||
% e = abs(d - eval(['cae.d' what '(' num2str(i) ')']));
|
||||
% if e > er
|
||||
% error('numerical gradient checking failed');
|
||||
% end
|
||||
% end
|
||||
% end
|
||||
%
|
||||
% end
|
|
@@ -1,26 +0,0 @@
function cae = caesdlm(cae, opts, m)
    % stochastic diagonal Levenberg-Marquardt

    % first round
    if isfield(cae,'ddok') == 0
        cae = caebbp(cae);
    end

    % recalculate double grads every opts.ddinterval
    if mod(m, opts.ddinterval) == 0
        cae_n = caebbp(cae);

        for ii = 1 : numel(cae.o)
            cae.ddc{ii} = opts.ddhist * cae.ddc{ii} + (1 - opts.ddhist) * cae_n.ddc{ii};
        end

        for jj = 1 : numel(cae.a)
            cae.ddb{jj} = opts.ddhist * cae.ddb{jj} + (1 - opts.ddhist) * cae_n.ddb{jj};
            for ii = 1 : numel(cae.o)
                cae.ddok{ii}{jj} = opts.ddhist * cae.ddok{ii}{jj} + (1 - opts.ddhist) * cae_n.ddok{ii}{jj};
                cae.ddik{ii}{jj} = opts.ddhist * cae.ddik{ii}{jj} + (1 - opts.ddhist) * cae_n.ddik{ii}{jj};
            end
        end

    end
end
@@ -1,38 +0,0 @@
function cae = caetrain(cae, x, opts)
    n = cae.inputkernel(1);
    cae.rL = [];
    for m = 1 : opts.rounds
        tic;
        disp([num2str(m) '/' num2str(opts.rounds) ' rounds']);
        i1 = randi(numel(x));
        l = randi(size(x{i1}{1},1) - opts.batchsize - n + 1);
        x1{1} = double(x{i1}{1}(l : l + opts.batchsize - 1, :, :)) / 255;

        if n == 1 % Auto Encoder
            x2{1} = x1{1};
        else % Predictive Encoder
            x2{1} = double(x{i1}{1}(l + n : l + n + opts.batchsize - 1, :, :)) / 255;
        end
        % Add noise to input, for denoising stacked autoencoder
        x1{1} = x1{1} .* (rand(size(x1{1})) > cae.noise);

        cae = caeup(cae, x1);
        cae = caedown(cae);
        cae = caebp(cae, x2);
        cae = caesdlm(cae, opts, m);
        % caenumgradcheck(cae,x1,x2);
        cae = caeapplygrads(cae);

        if m == 1
            cae.rL(1) = cae.L;
        end
        % cae.rL(m + 1) = 0.99 * cae.rL(m) + 0.01 * cae.L;
        cae.rL(m + 1) = cae.L;
        % if cae.sv < 1e-10
        %     disp('Converged');
        %     break;
        % end
        toc;
    end

end
@@ -1,25 +0,0 @@
function cae = caeup(cae, x)
    cae.i = x;

    % init temp vars for parallel processing
    pa = cell(size(cae.a));
    pi = cae.i;
    pik = cae.ik;
    pb = cae.b;

    for j = 1 : numel(cae.a)
        z = 0;
        for i = 1 : numel(pi)
            z = z + convn(pi{i}, pik{i}{j}, 'full');
        end
        pa{j} = sigm(z + pb{j});

        % Max pool.
        if ~isequal(cae.scale, [1 1 1])
            pa{j} = max3d(pa{j}, cae.M);
        end

    end
    cae.a = pa;

end
@@ -1,8 +0,0 @@
function X = max3d(X, M)
    ll = size(X);
    B=X(M);
    B=B+rand(size(B))*1e-12;
    B=(B.*(B==repmat(max(B,[],2),[1 size(B,2) 1])));
    X(M) = B;
    reshape(X,ll);
end
@ -1,58 +0,0 @@
|
|||
function scae = scaesetup(cae, x, opts)
|
||||
x = x{1};
|
||||
for l = 1 : numel(cae)
|
||||
cae = cae{l};
|
||||
ll= [opts.batchsize size(x{1}, 2) size(x{1}, 3)] + cae.inputkernel - 1;
|
||||
X = zeros(ll);
|
||||
cae.M = nbmap(X, cae.scale);
|
||||
bounds = cae.outputmaps * prod(cae.inputkernel) + numel(x) * prod(cae.outputkernel);
|
||||
for j = 1 : cae.outputmaps % activation maps
|
||||
cae.a{j} = zeros(size(x{1}) + cae.inputkernel - 1);
|
||||
for i = 1 : numel(x) % input map
|
||||
cae.ik{i}{j} = (rand(cae.inputkernel) - 0.5) * 2 * sqrt(6 / bounds);
|
||||
cae.ok{i}{j} = (rand(cae.outputkernel) - 0.5) * 2 * sqrt(6 / bounds);
|
||||
cae.vik{i}{j} = zeros(size(cae.ik{i}{j}));
|
||||
cae.vok{i}{j} = zeros(size(cae.ok{i}{j}));
|
||||
end
|
||||
cae.b{j} = 0;
|
||||
cae.vb{j} = zeros(size(cae.b{j}));
|
||||
end
|
||||
|
||||
cae.alpha = opts.alpha;
|
||||
|
||||
cae.i = cell(numel(x), 1);
|
||||
cae.o = cae.i;
|
||||
|
||||
for i = 1 : numel(cae.o)
|
||||
cae.c{i} = 0;
|
||||
cae.vc{i} = zeros(size(cae.c{i}));
|
||||
end
|
||||
|
||||
ss = cae.outputkernel;
|
||||
|
||||
cae.edgemask = zeros([opts.batchsize size(x{1}, 2) size(x{1}, 3)]);
|
||||
|
||||
cae.edgemask(ss(1) : end - ss(1) + 1, ...
|
||||
ss(2) : end - ss(2) + 1, ...
|
||||
ss(3) : end - ss(3) + 1) = 1;
|
||||
|
||||
scae{l} = cae;
|
||||
end
|
||||
|
||||
function B = nbmap(X,n)
|
||||
assert(numel(n)==3,'n should have 3 elements (x,y,z) scaling.');
|
||||
X = reshape(1:numel(X),size(X,1),size(X,2),size(X,3));
|
||||
B = zeros(size(X,1)/n(1),prod(n),size(X,2)*size(X,3)/prod(n(2:3)));
|
||||
u=1;
|
||||
p=1;
|
||||
for m=1:size(X,1)
|
||||
B(u,(p-1)*prod(n(2:3))+1:p*prod(n(2:3)),:) = im2col(squeeze(X(m,:,:)),n(2:3),'distinct');
|
||||
p=p+1;
|
||||
if(mod(m,n(1))==0)
|
||||
u=u+1;
|
||||
p=1;
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
end
|
|
@@ -1,8 +0,0 @@
function scae = scaetrain(scae, x, opts)
    %TODO: Transform x through scae{1} into new x. Only works for a single PAE.
    % for i=1:numel(scae)
    %     scae{i} = paetrain(scae{i}, x, opts);
    % end
    scae{1} = caetrain(scae{1}, x, opts);

end
@@ -1,15 +0,0 @@
function net = cnnapplygrads(net, opts)
    for l = 2 : numel(net.layers)
        if strcmp(net.layers{l}.type, 'c')
            for j = 1 : numel(net.layers{l}.a)
                for ii = 1 : numel(net.layers{l - 1}.a)
                    net.layers{l}.k{ii}{j} = net.layers{l}.k{ii}{j} - opts.alpha * net.layers{l}.dk{ii}{j};
                end
                net.layers{l}.b{j} = net.layers{l}.b{j} - opts.alpha * net.layers{l}.db{j};
            end
        end
    end

    net.ffW = net.ffW - opts.alpha * net.dffW;
    net.ffb = net.ffb - opts.alpha * net.dffb;
end
@ -1,56 +0,0 @@
|
|||
function net = cnnbp(net, y)
|
||||
n = numel(net.layers);
|
||||
|
||||
% error
|
||||
net.e = net.o - y;
|
||||
% loss function
|
||||
net.L = 1/2* sum(net.e(:) .^ 2) / size(net.e, 2);
|
||||
|
||||
%% backprop deltas
|
||||
net.od = net.e .* (net.o .* (1 - net.o)); % output delta
|
||||
net.fvd = (net.ffW' * net.od); % feature vector delta
|
||||
if strcmp(net.layers{n}.type, 'c') % only conv layers have a sigm function
|
||||
net.fvd = net.fvd .* (net.fv .* (1 - net.fv));
|
||||
end
|
||||
|
||||
% reshape feature vector deltas into output map style
|
||||
sa = size(net.layers{n}.a{1});
|
||||
fvnum = sa(1) * sa(2);
|
||||
for j = 1 : numel(net.layers{n}.a)
|
||||
net.layers{n}.d{j} = reshape(net.fvd(((j - 1) * fvnum + 1) : j * fvnum, :), sa(1), sa(2), sa(3));
|
||||
end
|
||||
|
||||
for l = (n - 1) : -1 : 1
|
||||
if strcmp(net.layers{l}.type, 'c')
|
||||
for j = 1 : numel(net.layers{l}.a)
|
||||
net.layers{l}.d{j} = net.layers{l}.a{j} .* (1 - net.layers{l}.a{j}) .* (expand(net.layers{l + 1}.d{j}, [net.layers{l + 1}.scale net.layers{l + 1}.scale 1]) / net.layers{l + 1}.scale ^ 2);
|
||||
end
|
||||
elseif strcmp(net.layers{l}.type, 's')
|
||||
for i = 1 : numel(net.layers{l}.a)
|
||||
z = zeros(size(net.layers{l}.a{1}));
|
||||
for j = 1 : numel(net.layers{l + 1}.a)
|
||||
z = z + convn(net.layers{l + 1}.d{j}, rot180(net.layers{l + 1}.k{i}{j}), 'full');
|
||||
end
|
||||
net.layers{l}.d{i} = z;
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
%% calc gradients
|
||||
for l = 2 : n
|
||||
if strcmp(net.layers{l}.type, 'c')
|
||||
for j = 1 : numel(net.layers{l}.a)
|
||||
for i = 1 : numel(net.layers{l - 1}.a)
|
||||
net.layers{l}.dk{i}{j} = convn(flipall(net.layers{l - 1}.a{i}), net.layers{l}.d{j}, 'valid') / size(net.layers{l}.d{j}, 3);
|
||||
end
|
||||
net.layers{l}.db{j} = sum(net.layers{l}.d{j}(:)) / size(net.layers{l}.d{j}, 3);
|
||||
end
|
||||
end
|
||||
end
|
||||
net.dffW = net.od * (net.fv)' / size(net.od, 2);
|
||||
net.dffb = mean(net.od, 2);
|
||||
|
||||
function X = rot180(X)
|
||||
X = flipdim(flipdim(X, 1), 2);
|
||||
end
|
||||
end
|
|
@ -1,47 +0,0 @@
|
|||
function net = cnnff(net, x)
|
||||
n = numel(net.layers);
|
||||
net.layers{1}.a{1} = x;
|
||||
inputmaps = 1;
|
||||
|
||||
for l = 2 : n % for each layer
|
||||
if strcmp(net.layers{l}.type, 'c')
|
||||
% !!below can probably be handled by insane matrix operations
|
||||
for j = 1 : net.layers{l}.outputmaps % for each output map
|
||||
% create temp output map
|
||||
if(size(x,3) > 1)
|
||||
z = zeros(size(net.layers{l - 1}.a{1}) - [net.layers{l}.kernelsize - 1 net.layers{l}.kernelsize - 1 0]);
|
||||
else
|
||||
z = zeros(size(net.layers{l - 1}.a{1}) - [net.layers{l}.kernelsize - 1 net.layers{l}.kernelsize - 1]);
|
||||
end
|
||||
for i = 1 : inputmaps % for each input map
|
||||
% convolve with corresponding kernel and add to temp output map
|
||||
z = z + convn(net.layers{l - 1}.a{i}, net.layers{l}.k{i}{j}, 'valid');
|
||||
end
|
||||
% add bias, pass through nonlinearity
|
||||
net.layers{l}.a{j} = sigm(z + net.layers{l}.b{j});
|
||||
end
|
||||
% set number of input maps to this layers number of outputmaps
|
||||
inputmaps = net.layers{l}.outputmaps;
|
||||
elseif strcmp(net.layers{l}.type, 's')
|
||||
% downsample
|
||||
for j = 1 : inputmaps
|
||||
z = convn(net.layers{l - 1}.a{j}, ones(net.layers{l}.scale) / (net.layers{l}.scale ^ 2), 'valid'); % !! replace with variable
|
||||
net.layers{l}.a{j} = z(1 : net.layers{l}.scale : end, 1 : net.layers{l}.scale : end, :);
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
% concatenate all end layer feature maps into vector
|
||||
net.fv = [];
|
||||
for j = 1 : numel(net.layers{n}.a)
|
||||
sa = size(net.layers{n}.a{j});
|
||||
if(numel(sa) == 3)
|
||||
net.fv = [net.fv; reshape(net.layers{n}.a{j}, sa(1) * sa(2), sa(3))];
|
||||
else
|
||||
net.fv = [net.fv; reshape(net.layers{n}.a{j}, sa(1) * sa(2), 1)];
|
||||
end
|
||||
end
|
||||
% feedforward into output perceptrons
|
||||
net.o = sigm(net.ffW * net.fv + repmat(net.ffb, 1, size(net.fv, 2)));
|
||||
|
||||
end
|
|
@ -1,79 +0,0 @@
|
|||
function cnnnumgradcheck(net, x, y)
|
||||
epsilon = 1e-4;
|
||||
er = 1e-8;
|
||||
n = numel(net.layers);
|
||||
for j = 1 : numel(net.ffb)
|
||||
net_m = net; net_p = net;
|
||||
net_p.ffb(j) = net_m.ffb(j) + epsilon;
|
||||
net_m.ffb(j) = net_m.ffb(j) - epsilon;
|
||||
net_m = cnnff(net_m, x); net_m = cnnbp(net_m, y);
|
||||
net_p = cnnff(net_p, x); net_p = cnnbp(net_p, y);
|
||||
d = (net_p.L - net_m.L) / (2 * epsilon);
|
||||
e = abs(d - net.dffb(j));
|
||||
if e > er
|
||||
error('numerical gradient checking failed');
|
||||
end
|
||||
end
|
||||
|
||||
for i = 1 : size(net.ffW, 1)
|
||||
for u = 1 : size(net.ffW, 2)
|
||||
net_m = net; net_p = net;
|
||||
net_p.ffW(i, u) = net_m.ffW(i, u) + epsilon;
|
||||
net_m.ffW(i, u) = net_m.ffW(i, u) - epsilon;
|
||||
net_m = cnnff(net_m, x); net_m = cnnbp(net_m, y);
|
||||
net_p = cnnff(net_p, x); net_p = cnnbp(net_p, y);
|
||||
d = (net_p.L - net_m.L) / (2 * epsilon);
|
||||
e = abs(d - net.dffW(i, u));
|
||||
if e > er
|
||||
error('numerical gradient checking failed');
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
for l = n : -1 : 2
|
||||
if strcmp(net.layers{l}.type, 'c')
|
||||
for j = 1 : numel(net.layers{l}.a)
|
||||
net_m = net; net_p = net;
|
||||
net_p.layers{l}.b{j} = net_m.layers{l}.b{j} + epsilon;
|
||||
net_m.layers{l}.b{j} = net_m.layers{l}.b{j} - epsilon;
|
||||
net_m = cnnff(net_m, x); net_m = cnnbp(net_m, y);
|
||||
net_p = cnnff(net_p, x); net_p = cnnbp(net_p, y);
|
||||
d = (net_p.L - net_m.L) / (2 * epsilon);
|
||||
e = abs(d - net.layers{l}.db{j});
|
||||
if e > er
|
||||
error('numerical gradient checking failed');
|
||||
end
|
||||
for i = 1 : numel(net.layers{l - 1}.a)
|
||||
for u = 1 : size(net.layers{l}.k{i}{j}, 1)
|
||||
for v = 1 : size(net.layers{l}.k{i}{j}, 2)
|
||||
net_m = net; net_p = net;
|
||||
net_p.layers{l}.k{i}{j}(u, v) = net_p.layers{l}.k{i}{j}(u, v) + epsilon;
|
||||
net_m.layers{l}.k{i}{j}(u, v) = net_m.layers{l}.k{i}{j}(u, v) - epsilon;
|
||||
net_m = cnnff(net_m, x); net_m = cnnbp(net_m, y);
|
||||
net_p = cnnff(net_p, x); net_p = cnnbp(net_p, y);
|
||||
d = (net_p.L - net_m.L) / (2 * epsilon);
|
||||
e = abs(d - net.layers{l}.dk{i}{j}(u, v));
|
||||
if e > er
|
||||
error('numerical gradient checking failed');
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
elseif strcmp(net.layers{l}.type, 's')
|
||||
% for j = 1 : numel(net.layers{l}.a)
|
||||
% net_m = net; net_p = net;
|
||||
% net_p.layers{l}.b{j} = net_m.layers{l}.b{j} + epsilon;
|
||||
% net_m.layers{l}.b{j} = net_m.layers{l}.b{j} - epsilon;
|
||||
% net_m = cnnff(net_m, x); net_m = cnnbp(net_m, y);
|
||||
% net_p = cnnff(net_p, x); net_p = cnnbp(net_p, y);
|
||||
% d = (net_p.L - net_m.L) / (2 * epsilon);
|
||||
% e = abs(d - net.layers{l}.db{j});
|
||||
% if e > er
|
||||
% error('numerical gradient checking failed');
|
||||
% end
|
||||
% end
|
||||
end
|
||||
end
|
||||
% keyboard
|
||||
end
|
|
@ -1,36 +0,0 @@
|
|||
function net = cnnsetup(net, x, y)
|
||||
assert(~isOctave() || compare_versions(OCTAVE_VERSION, '3.8.0', '>='), ['Octave 3.8.0 or greater is required for CNNs as there is a bug in convolution in previous versions. See http://savannah.gnu.org/bugs/?39314. Your version is ' myOctaveVersion]);
|
||||
inputmaps = 1;
|
||||
mapsize = size(squeeze(x(:, :, 1)));
|
||||
|
||||
for l = 1 : numel(net.layers) % layer
|
||||
if strcmp(net.layers{l}.type, 's')
|
||||
mapsize = mapsize / net.layers{l}.scale;
|
||||
assert(all(floor(mapsize)==mapsize), ['Layer ' num2str(l) ' size must be integer. Actual: ' num2str(mapsize)]);
|
||||
for j = 1 : inputmaps
|
||||
net.layers{l}.b{j} = 0;
|
||||
end
|
||||
end
|
||||
if strcmp(net.layers{l}.type, 'c')
|
||||
mapsize = mapsize - net.layers{l}.kernelsize + 1;
|
||||
fan_out = net.layers{l}.outputmaps * net.layers{l}.kernelsize ^ 2;
|
||||
for j = 1 : net.layers{l}.outputmaps % output map
|
||||
fan_in = inputmaps * net.layers{l}.kernelsize ^ 2;
|
||||
for i = 1 : inputmaps % input map
|
||||
net.layers{l}.k{i}{j} = (rand(net.layers{l}.kernelsize) - 0.5) * 2 * sqrt(6 / (fan_in + fan_out));
|
||||
end
|
||||
net.layers{l}.b{j} = 0;
|
||||
end
|
||||
inputmaps = net.layers{l}.outputmaps;
|
||||
end
|
||||
end
|
||||
% 'onum' is the number of labels, that's why it is calculated using size(y, 1). If you have 20 labels so the output of the network will be 20 neurons.
|
||||
% 'fvnum' is the number of output neurons at the last layer, the layer just before the output layer.
|
||||
% 'ffb' is the biases of the output neurons.
|
||||
% 'ffW' is the weights between the last layer and the output neurons. Note that the last layer is fully connected to the output layer, that's why the size of the weights is (onum * fvnum)
|
||||
fvnum = prod(mapsize) * inputmaps;
|
||||
onum = size(y, 1);
|
||||
|
||||
net.ffb = zeros(onum, 1);
|
||||
net.ffW = (rand(onum, fvnum) - 0.5) * 2 * sqrt(6 / (onum + fvnum));
|
||||
end
|
|
@@ -1,9 +0,0 @@
function [er, bad] = cnntest(net, x, y)
    % feedforward
    net = cnnff(net, x);
    [~, h] = max(net.o);
    [~, a] = max(y);
    bad = find(h ~= a);

    er = numel(bad) / size(y, 2);
end
@ -1,29 +0,0 @@
|
|||
function net = cnntrain(net, x, y, opts)
|
||||
m = size(x, 3);
|
||||
numbatches = floor(m / opts.batchsize);
|
||||
if rem(numbatches, 1) ~= 0
|
||||
error('numbatches not integer');
|
||||
end
|
||||
net.rL = [];
|
||||
for i = 1 : opts.numepochs
|
||||
net = cnnff(net, x);
|
||||
error_curr = sqrt(mean((net.o - y).^2));
|
||||
disp(['epoch ' num2str(i) '/' num2str(opts.numepochs), ' RMSE-', num2str(error_curr)]);
|
||||
tic;
|
||||
kk = randperm(m);
|
||||
for l = 1 : numbatches
|
||||
batch_x = x(:, :, kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize));
|
||||
batch_y = y(:, kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize));
|
||||
|
||||
net = cnnff(net, batch_x);
|
||||
net = cnnbp(net, batch_y);
|
||||
net = cnnapplygrads(net, opts);
|
||||
if isempty(net.rL)
|
||||
net.rL(1) = net.L;
|
||||
end
|
||||
net.rL(end + 1) = 0.99 * net.rL(end) + 0.01 * net.L;
|
||||
end
|
||||
toc;
|
||||
end
|
||||
|
||||
end
|
|
@@ -1,20 +0,0 @@
Thank you so much for wanting to give back to the toolbox. Here's some info on how to contribute:

# General:

Don't bunch up changes, e.g. if you have bug-fixes, new features and style changes, rather make 3 separate pull requests.

Ensure that you introduce tests/examples for any new functionality.

# Guide
1. Fork repository
2. Create a new branch, e.g. `checkout -b my-stuff`
3. Commit and push your changes to that branch
4. Make sure that the test works (!) (see known errors)
5. Create a pull request
6. I accept your pull request

@@ -1,19 +0,0 @@
function dbn = dbnsetup(dbn, x, opts)
    n = size(x, 2);
    dbn.sizes = [n, dbn.sizes];

    for u = 1 : numel(dbn.sizes) - 1
        dbn.rbm{u}.alpha = opts.alpha;
        dbn.rbm{u}.momentum = opts.momentum;

        dbn.rbm{u}.W = zeros(dbn.sizes(u + 1), dbn.sizes(u));
        dbn.rbm{u}.vW = zeros(dbn.sizes(u + 1), dbn.sizes(u));

        dbn.rbm{u}.b = zeros(dbn.sizes(u), 1);
        dbn.rbm{u}.vb = zeros(dbn.sizes(u), 1);

        dbn.rbm{u}.c = zeros(dbn.sizes(u + 1), 1);
        dbn.rbm{u}.vc = zeros(dbn.sizes(u + 1), 1);
    end

end
@@ -1,10 +0,0 @@
function dbn = dbntrain(dbn, x, opts)
    n = numel(dbn.rbm);

    dbn.rbm{1} = rbmtrain(dbn.rbm{1}, x, opts);
    for i = 2 : n
        x = rbmup(dbn.rbm{i - 1}, x);
        dbn.rbm{i} = rbmtrain(dbn.rbm{i}, x, opts);
    end

end
@@ -1,15 +0,0 @@
function nn = dbnunfoldtonn(dbn, outputsize)
%DBNUNFOLDTONN Unfolds a DBN to a NN
% dbnunfoldtonn(dbn, outputsize) returns the unfolded dbn with a final
% layer of size outputsize added.
    if(exist('outputsize','var'))
        size = [dbn.sizes outputsize];
    else
        size = [dbn.sizes];
    end
    nn = nnsetup(size);
    for i = 1 : numel(dbn.rbm)
        nn.W{i} = [dbn.rbm{i}.c dbn.rbm{i}.W];
    end
end

@@ -1,3 +0,0 @@
function x = rbmdown(rbm, x)
    x = sigm(repmat(rbm.b', size(x, 1), 1) + x * rbm.W);
end
@@ -1,37 +0,0 @@
function rbm = rbmtrain(rbm, x, opts)
    assert(isfloat(x), 'x must be a float');
    assert(all(x(:)>=0) && all(x(:)<=1), 'all data in x must be in [0:1]');
    m = size(x, 1);
    numbatches = m / opts.batchsize;

    assert(rem(numbatches, 1) == 0, 'numbatches not integer');

    for i = 1 : opts.numepochs
        kk = randperm(m);
        err = 0;
        for l = 1 : numbatches
            batch = x(kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize), :);

            v1 = batch;
            h1 = sigmrnd(repmat(rbm.c', opts.batchsize, 1) + v1 * rbm.W');
            v2 = sigmrnd(repmat(rbm.b', opts.batchsize, 1) + h1 * rbm.W);
            h2 = sigm(repmat(rbm.c', opts.batchsize, 1) + v2 * rbm.W');

            c1 = h1' * v1;
            c2 = h2' * v2;

            rbm.vW = rbm.momentum * rbm.vW + rbm.alpha * (c1 - c2) / opts.batchsize;
            rbm.vb = rbm.momentum * rbm.vb + rbm.alpha * sum(v1 - v2)' / opts.batchsize;
            rbm.vc = rbm.momentum * rbm.vc + rbm.alpha * sum(h1 - h2)' / opts.batchsize;

            rbm.W = rbm.W + rbm.vW;
            rbm.b = rbm.b + rbm.vb;
            rbm.c = rbm.c + rbm.vc;

            err = err + sum(sum((v1 - v2) .^ 2)) / opts.batchsize;
        end

        disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Average reconstruction error is: ' num2str(err / numbatches)]);

    end
end
@@ -1,3 +0,0 @@
function x = rbmup(rbm, x)
    x = sigm(repmat(rbm.c', size(x, 1), 1) + x * rbm.W');
end
@@ -1,8 +0,0 @@
Copyright (c) 2012, Rasmus Berg Palm (rasmusbergpalm@gmail.com)
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,22 +0,0 @@
function nn = nnapplygrads(nn)
%NNAPPLYGRADS updates weights and biases with calculated gradients
% nn = nnapplygrads(nn) returns a neural network structure with updated
% weights and biases

    for i = 1 : (nn.n - 1)
        if(nn.weightPenaltyL2>0)
            dW = nn.dW{i} + nn.weightPenaltyL2 * [zeros(size(nn.W{i},1),1) nn.W{i}(:,2:end)];
        else
            dW = nn.dW{i};
        end

        dW = nn.learningRate * dW;

        if(nn.momentum>0)
            nn.vW{i} = nn.momentum*nn.vW{i} + dW;
            dW = nn.vW{i};
        end

        nn.W{i} = nn.W{i} - dW;
    end
end
@ -1,47 +0,0 @@
|
|||
function nn = nnbp(nn)
|
||||
%NNBP performs backpropagation
|
||||
% nn = nnbp(nn) returns a neural network structure with updated weights
|
||||
|
||||
n = nn.n;
|
||||
sparsityError = 0;
|
||||
switch nn.output
|
||||
case 'sigm'
|
||||
d{n} = - nn.e .* (nn.a{n} .* (1 - nn.a{n}));
|
||||
case {'softmax','linear'}
|
||||
d{n} = - nn.e;
|
||||
end
|
||||
for i = (n - 1) : -1 : 2
|
||||
% Derivative of the activation function
|
||||
switch nn.activation_function
|
||||
case 'sigm'
|
||||
d_act = nn.a{i} .* (1 - nn.a{i});
|
||||
case 'tanh_opt'
|
||||
d_act = 1.7159 * 2/3 * (1 - 1/(1.7159)^2 * nn.a{i}.^2);
|
||||
end
|
||||
|
||||
if(nn.nonSparsityPenalty>0)
|
||||
pi = repmat(nn.p{i}, size(nn.a{i}, 1), 1);
|
||||
sparsityError = [zeros(size(nn.a{i},1),1) nn.nonSparsityPenalty * (-nn.sparsityTarget ./ pi + (1 - nn.sparsityTarget) ./ (1 - pi))];
|
||||
end
|
||||
|
||||
% Backpropagate first derivatives
|
||||
if i+1==n % in this case in d{n} there is not the bias term to be removed
|
||||
d{i} = (d{i + 1} * nn.W{i} + sparsityError) .* d_act; % Bishop (5.56)
|
||||
else % in this case in d{i} the bias term has to be removed
|
||||
d{i} = (d{i + 1}(:,2:end) * nn.W{i} + sparsityError) .* d_act;
|
||||
end
|
||||
|
||||
if(nn.dropoutFraction>0)
|
||||
d{i} = d{i} .* [ones(size(d{i},1),1) nn.dropOutMask{i}];
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
for i = 1 : (n - 1)
|
||||
if i+1==n
|
||||
nn.dW{i} = (d{i + 1}' * nn.a{i}) / size(d{i + 1}, 1);
|
||||
else
|
||||
nn.dW{i} = (d{i + 1}(:,2:end)' * nn.a{i}) / size(d{i + 1}, 1);
|
||||
end
|
||||
end
|
||||
end
|
|
@@ -1,22 +0,0 @@
function nnchecknumgrad(nn, x, y)
    epsilon = 1e-6;
    er = 1e-7;
    n = nn.n;
    for l = 1 : (n - 1)
        for i = 1 : size(nn.W{l}, 1)
            for j = 1 : size(nn.W{l}, 2)
                nn_m = nn; nn_p = nn;
                nn_m.W{l}(i, j) = nn.W{l}(i, j) - epsilon;
                nn_p.W{l}(i, j) = nn.W{l}(i, j) + epsilon;
                rand('state',0)
                nn_m = nnff(nn_m, x, y);
                rand('state',0)
                nn_p = nnff(nn_p, x, y);
                dW = (nn_p.L - nn_m.L) / (2 * epsilon);
                e = abs(dW - nn.dW{l}(i, j));

                assert(e < er, 'numerical gradient checking failed');
            end
        end
    end
end
@@ -1,28 +0,0 @@
function [loss] = nneval(nn, loss, train_x, train_y, val_x, val_y)
%NNEVAL evaluates performance of neural network
% Returns an updated loss struct
    assert(nargin == 4 || nargin == 6, 'Wrong number of arguments');

    nn.testing = 1;
    % training performance
    nn = nnff(nn, train_x, train_y);
    loss.train.e(end + 1) = nn.L;

    % validation performance
    if nargin == 6
        nn = nnff(nn, val_x, val_y);
        loss.val.e(end + 1) = nn.L;
    end
    nn.testing = 0;
    % calc misclassification rate if softmax
    if strcmp(nn.output,'softmax')
        [er_train, dummy] = nntest(nn, train_x, train_y);
        loss.train.e_frac(end+1) = er_train;

        if nargin == 6
            [er_val, dummy] = nntest(nn, val_x, val_y);
            loss.val.e_frac(end+1) = er_val;
        end
    end

end
@ -1,60 +0,0 @@
|
|||
function nn = nnff(nn, x, y)
|
||||
%NNFF performs a feedforward pass
|
||||
% nn = nnff(nn, x, y) returns a neural network structure with updated
|
||||
% layer activations, error and loss (nn.a, nn.e and nn.L)
|
||||
|
||||
n = nn.n;
|
||||
m = size(x, 1);
|
||||
|
||||
x = [ones(m,1) x];
|
||||
nn.a{1} = x;
|
||||
|
||||
%feedforward pass
|
||||
for i = 2 : n-1
|
||||
switch nn.activation_function
|
||||
case 'sigm'
|
||||
% Calculate the unit's outputs (including the bias term)
|
||||
nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
|
||||
case 'tanh_opt'
|
||||
nn.a{i} = tanh_opt(nn.a{i - 1} * nn.W{i - 1}');
|
||||
end
|
||||
|
||||
%dropout
|
||||
if(nn.dropoutFraction > 0)
|
||||
if(nn.testing)
|
||||
nn.a{i} = nn.a{i}.*(1 - nn.dropoutFraction);
|
||||
else
|
||||
nn.dropOutMask{i} = (rand(size(nn.a{i}))>nn.dropoutFraction);
|
||||
nn.a{i} = nn.a{i}.*nn.dropOutMask{i};
|
||||
end
|
||||
end
|
||||
|
||||
%calculate running exponential activations for use with sparsity
|
||||
if(nn.nonSparsityPenalty>0)
|
||||
nn.p{i} = 0.99 * nn.p{i} + 0.01 * mean(nn.a{i}, 1);
|
||||
end
|
||||
|
||||
%Add the bias term
|
||||
nn.a{i} = [ones(m,1) nn.a{i}];
|
||||
end
|
||||
switch nn.output
|
||||
case 'sigm'
|
||||
nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
|
||||
case 'linear'
|
||||
nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
|
||||
case 'softmax'
|
||||
nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
|
||||
nn.a{n} = exp(bsxfun(@minus, nn.a{n}, max(nn.a{n},[],2)));
|
||||
nn.a{n} = bsxfun(@rdivide, nn.a{n}, sum(nn.a{n}, 2));
|
||||
end
|
||||
|
||||
%error and loss
|
||||
nn.e = y - nn.a{n};
|
||||
|
||||
switch nn.output
|
||||
case {'sigm', 'linear'}
|
||||
nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;
|
||||
case 'softmax'
|
||||
nn.L = -sum(sum(y .* log(nn.a{n}))) / m;
|
||||
end
|
||||
end
|
|
@@ -1,8 +0,0 @@
function labels = nnpredict(nn, x)
    nn.testing = 1;
    nn = nnff(nn, x, zeros(size(x,1), nn.size(end)));
    nn.testing = 0;

    [dummy, i] = max(nn.a{end},[],2);
    labels = i;
end
@ -1,29 +0,0 @@
|
|||
function nn = nnsetup(architecture)
|
||||
%NNSETUP creates a Feedforward Backpropagate Neural Network
|
||||
% nn = nnsetup(architecture) returns a neural network structure with n=numel(architecture)
|
||||
% layers, architecture being a n x 1 vector of layer sizes e.g. [784 100 10]
|
||||
|
||||
nn.size = architecture;
|
||||
nn.n = numel(nn.size);
|
||||
|
||||
nn.activation_function = 'tanh_opt'; % Activation functions of hidden layers: 'sigm' (sigmoid) or 'tanh_opt' (optimal tanh).
|
||||
nn.learningRate = 2; % learning rate Note: typically needs to be lower when using 'sigm' activation function and non-normalized inputs.
|
||||
nn.momentum = 0.5; % Momentum
|
||||
nn.scaling_learningRate = 1; % Scaling factor for the learning rate (each epoch)
|
||||
nn.weightPenaltyL2 = 0; % L2 regularization
|
||||
nn.nonSparsityPenalty = 0; % Non sparsity penalty
|
||||
nn.sparsityTarget = 0.05; % Sparsity target
|
||||
nn.inputZeroMaskedFraction = 0; % Used for Denoising AutoEncoders
|
||||
nn.dropoutFraction = 0; % Dropout level (http://www.cs.toronto.edu/~hinton/absps/dropout.pdf)
|
||||
nn.testing = 0; % Internal variable. nntest sets this to one.
|
||||
nn.output = 'sigm'; % output unit 'sigm' (=logistic), 'softmax' and 'linear'
|
||||
|
||||
for i = 2 : nn.n
|
||||
% weights and weight momentum
|
||||
nn.W{i - 1} = (rand(nn.size(i), nn.size(i - 1)+1) - 0.5) * 2 * 4 * sqrt(6 / (nn.size(i) + nn.size(i - 1)));
|
||||
nn.vW{i - 1} = zeros(size(nn.W{i - 1}));
|
||||
|
||||
% average activations (for use with sparsity)
|
||||
nn.p{i} = zeros(1, nn.size(i));
|
||||
end
|
||||
end
|
|
@@ -1,6 +0,0 @@
function [er, bad] = nntest(nn, x, y)
    labels = nnpredict(nn, x);
    [dummy, expected] = max(y,[],2);
    bad = find(labels ~= expected);
    er = numel(bad) / size(x, 1);
end
@ -1,77 +0,0 @@
|
|||
function [nn, L] = nntrain(nn, train_x, train_y, opts, val_x, val_y)
|
||||
%NNTRAIN trains a neural net
|
||||
% [nn, L] = nntrain(nn, x, y, opts) trains the neural network nn with input x and
|
||||
% output y for opts.numepochs epochs, with minibatches of size
|
||||
% opts.batchsize. Returns a neural network nn with updated activations,
|
||||
% errors, weights and biases, (nn.a, nn.e, nn.W, nn.b) and L, the sum
|
||||
% squared error for each training minibatch.
|
||||
|
||||
assert(isfloat(train_x), 'train_x must be a float');
|
||||
assert(nargin == 4 || nargin == 6, 'number of input arguments must be 4 or 6')
|
||||
|
||||
loss.train.e = [];
|
||||
loss.train.e_frac = [];
|
||||
loss.val.e = [];
|
||||
loss.val.e_frac = [];
|
||||
opts.validation = 0;
|
||||
if nargin == 6
|
||||
opts.validation = 1;
|
||||
end
|
||||
|
||||
fhandle = [];
|
||||
if isfield(opts,'plot') && opts.plot == 1
|
||||
fhandle = figure();
|
||||
end
|
||||
|
||||
m = size(train_x, 1);
|
||||
|
||||
batchsize = opts.batchsize;
|
||||
numepochs = opts.numepochs;
|
||||
|
||||
numbatches = floor(m / batchsize);
|
||||
|
||||
assert(rem(numbatches, 1) == 0, 'numbatches must be a integer');
|
||||
|
||||
L = zeros(numepochs*numbatches,1);
|
||||
n = 1;
|
||||
for i = 1 : numepochs
|
||||
tic;
|
||||
|
||||
kk = randperm(m);
|
||||
for l = 1 : numbatches
|
||||
batch_x = train_x(kk((l - 1) * batchsize + 1 : l * batchsize), :);
|
||||
|
||||
%Add noise to input (for use in denoising autoencoder)
|
||||
if(nn.inputZeroMaskedFraction ~= 0)
|
||||
batch_x = batch_x.*(rand(size(batch_x))>nn.inputZeroMaskedFraction);
|
||||
end
|
||||
|
||||
batch_y = train_y(kk((l - 1) * batchsize + 1 : l * batchsize), :);
|
||||
|
||||
nn = nnff(nn, batch_x, batch_y);
|
||||
nn = nnbp(nn);
|
||||
nn = nnapplygrads(nn);
|
||||
|
||||
L(n) = nn.L;
|
||||
|
||||
n = n + 1;
|
||||
end
|
||||
|
||||
t = toc;
|
||||
|
||||
if opts.validation == 1
|
||||
loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
|
||||
str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
|
||||
else
|
||||
loss = nneval(nn, loss, train_x, train_y);
|
||||
str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
|
||||
end
|
||||
if ishandle(fhandle)
|
||||
nnupdatefigures(nn, fhandle, loss, opts, i);
|
||||
end
|
||||
|
||||
% disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
|
||||
nn.learningRate = nn.learningRate * nn.scaling_learningRate;
|
||||
end
|
||||
end
|
||||
|
|
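A hedged usage sketch of the interface documented above, including the optional validation arguments (field names come from nnsetup/nntrain; the data variables tr_x, tr_y, va_x, va_y are placeholders):

```matlab
% Sketch: minibatch training with a held-out validation set and plotting.
opts.numepochs = 5;      % full sweeps through the data
opts.batchsize = 100;    % samples per mean-gradient step
opts.plot      = 1;      % update the training/validation error figure each epoch
nn = nnsetup([784 100 10]);
[nn, L] = nntrain(nn, tr_x, tr_y, opts, va_x, va_y);  % last two args are optional
```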
@@ -1,63 +0,0 @@
function nnupdatefigures(nn,fhandle,L,opts,i)
%NNUPDATEFIGURES updates figures during training
if i > 1 %don't plot the first point, it's only a point
    x_ax = 1:i;
    % create legend
    if opts.validation == 1
        M = {'Training','Validation'};
    else
        M = {'Training'};
    end

    %create data for plots
    if strcmp(nn.output,'softmax')
        plot_x = x_ax';
        plot_ye = L.train.e';
        plot_yfrac = L.train.e_frac';
    else
        plot_x = x_ax';
        plot_ye = L.train.e';
    end

    %add error on validation data if present
    if opts.validation == 1
        plot_x = [plot_x, x_ax'];
        plot_ye = [plot_ye,L.val.e'];
    end

    %add classification error on validation data if present
    if opts.validation == 1 && strcmp(nn.output,'softmax')
        plot_yfrac = [plot_yfrac, L.val.e_frac'];
    end

    % plotting
    figure(fhandle);
    if strcmp(nn.output,'softmax') %also plot classification error
        p1 = subplot(1,2,1);
        plot(plot_x,plot_ye);
        xlabel('Number of epochs'); ylabel('Error'); title('Error');
        legend(p1, M,'Location','NorthEast');
        set(p1, 'Xlim',[0,opts.numepochs + 1])

        p2 = subplot(1,2,2);
        plot(plot_x,plot_yfrac);
        xlabel('Number of epochs'); ylabel('Misclassification rate');
        title('Misclassification rate')
        legend(p2, M,'Location','NorthEast');
        set(p2, 'Xlim',[0,opts.numepochs + 1])
    else
        p = plot(plot_x,plot_ye);
        xlabel('Number of epochs'); ylabel('Error'); title('Error');
        legend(p, M,'Location','NorthEast');
        set(gca, 'Xlim',[0,opts.numepochs + 1])
    end
    drawnow;
end
end
@@ -1,298 +0,0 @@
DeepLearnToolbox
================

A Matlab toolbox for Deep Learning.

Deep Learning is a new subfield of machine learning that focuses on learning deep hierarchical models of data.
It is inspired by the human brain's apparent deep (layered, hierarchical) architecture.
A good overview of the theory of Deep Learning is
[Learning Deep Architectures for AI](http://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf)

For a more informal introduction, see the following videos by Geoffrey Hinton and Andrew Ng.

* [The Next Generation of Neural Networks](http://www.youtube.com/watch?v=AyzOUbkUf3M) (Hinton, 2007)
* [Recent Developments in Deep Learning](http://www.youtube.com/watch?v=VdIURAu1-aU) (Hinton, 2010)
* [Unsupervised Feature Learning and Deep Learning](http://www.youtube.com/watch?v=ZmNOAtZIgIk) (Ng, 2011)

If you use this toolbox in your research please cite [Prediction as a candidate for learning deep hierarchical models of data](http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=6284)

```
@MASTERSTHESIS\{IMM2012-06284,
    author = "R. B. Palm",
    title  = "Prediction as a candidate for learning deep hierarchical models of data",
    year   = "2012",
}
```

Contact: rasmusbergpalm at gmail dot com

Directories included in the toolbox
-----------------------------------

`NN/` - A library for Feedforward Backpropagation Neural Networks

`CNN/` - A library for Convolutional Neural Networks

`DBN/` - A library for Deep Belief Networks

`SAE/` - A library for Stacked Auto-Encoders

`CAE/` - A library for Convolutional Auto-Encoders

`util/` - Utility functions used by the libraries

`data/` - Data used by the examples

`tests/` - unit tests to verify toolbox is working

For references on each library check REFS.md

Setup
-----

1. Download.
2. addpath(genpath('DeepLearnToolbox'));

Known errors
------------------------------

`test_cnn_gradients_are_numerically_correct` fails on Octave because of a bug in Octave's convn implementation. See http://savannah.gnu.org/bugs/?39314

`test_example_CNN` fails in Octave for the same reason.

Example: Deep Belief Network
---------------------
```matlab

function test_example_DBN
load mnist_uint8;

train_x = double(train_x) / 255;
test_x  = double(test_x) / 255;
train_y = double(train_y);
test_y  = double(test_y);

%% ex1 train a 100 hidden unit RBM and visualize its weights
rand('state',0)
dbn.sizes = [100];
opts.numepochs = 1;
opts.batchsize = 100;
opts.momentum  = 0;
opts.alpha     = 1;
dbn = dbnsetup(dbn, train_x, opts);
dbn = dbntrain(dbn, train_x, opts);
figure; visualize(dbn.rbm{1}.W'); % Visualize the RBM weights

%% ex2 train a 100-100 hidden unit DBN and use its weights to initialize a NN
rand('state',0)
%train dbn
dbn.sizes = [100 100];
opts.numepochs = 1;
opts.batchsize = 100;
opts.momentum  = 0;
opts.alpha     = 1;
dbn = dbnsetup(dbn, train_x, opts);
dbn = dbntrain(dbn, train_x, opts);

%unfold dbn to nn
nn = dbnunfoldtonn(dbn, 10);
nn.activation_function = 'sigm';

%train nn
opts.numepochs = 1;
opts.batchsize = 100;
nn = nntrain(nn, train_x, train_y, opts);
[er, bad] = nntest(nn, test_x, test_y);

assert(er < 0.10, 'Too big error');

```


Example: Stacked Auto-Encoders
---------------------
```matlab

function test_example_SAE
load mnist_uint8;

train_x = double(train_x)/255;
test_x  = double(test_x)/255;
train_y = double(train_y);
test_y  = double(test_y);

%% ex1 train a 100 hidden unit SDAE and use it to initialize a FFNN
% Setup and train a stacked denoising autoencoder (SDAE)
rand('state',0)
sae = saesetup([784 100]);
sae.ae{1}.activation_function     = 'sigm';
sae.ae{1}.learningRate            = 1;
sae.ae{1}.inputZeroMaskedFraction = 0.5;
opts.numepochs = 1;
opts.batchsize = 100;
sae = saetrain(sae, train_x, opts);
visualize(sae.ae{1}.W{1}(:,2:end)')

% Use the SDAE to initialize a FFNN
nn = nnsetup([784 100 10]);
nn.activation_function = 'sigm';
nn.learningRate        = 1;
nn.W{1} = sae.ae{1}.W{1};

% Train the FFNN
opts.numepochs = 1;
opts.batchsize = 100;
nn = nntrain(nn, train_x, train_y, opts);
[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.16, 'Too big error');

```


Example: Convolutional Neural Nets
---------------------
```matlab

function test_example_CNN
load mnist_uint8;

train_x = double(reshape(train_x',28,28,60000))/255;
test_x  = double(reshape(test_x',28,28,10000))/255;
train_y = double(train_y');
test_y  = double(test_y');

%% ex1 Train a 6c-2s-12c-2s Convolutional neural network
%will run 1 epoch in about 200 seconds and get around 11% error.
%With 100 epochs you'll get around 1.2% error
rand('state',0)
cnn.layers = {
    struct('type', 'i') %input layer
    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %sub sampling layer
    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %subsampling layer
};
cnn = cnnsetup(cnn, train_x, train_y);

opts.alpha = 1;
opts.batchsize = 50;
opts.numepochs = 1;

cnn = cnntrain(cnn, train_x, train_y, opts);

[er, bad] = cnntest(cnn, test_x, test_y);

%plot mean squared error
figure; plot(cnn.rL);

assert(er<0.12, 'Too big error');

```


Example: Neural Networks
---------------------
```matlab

function test_example_NN
load mnist_uint8;

train_x = double(train_x) / 255;
test_x  = double(test_x) / 255;
train_y = double(train_y);
test_y  = double(test_y);

% normalize
[train_x, mu, sigma] = zscore(train_x);
test_x = normalize(test_x, mu, sigma);

%% ex1 vanilla neural net
rand('state',0)
nn = nnsetup([784 100 10]);
opts.numepochs = 1;   % Number of full sweeps through data
opts.batchsize = 100; % Take a mean gradient step over this many samples
[nn, L] = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);

assert(er < 0.08, 'Too big error');

%% ex2 neural net with L2 weight decay
rand('state',0)
nn = nnsetup([784 100 10]);

nn.weightPenaltyL2 = 1e-4; % L2 weight decay
opts.numepochs = 1;        % Number of full sweeps through data
opts.batchsize = 100;      % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');


%% ex3 neural net with dropout
rand('state',0)
nn = nnsetup([784 100 10]);

nn.dropoutFraction = 0.5; % Dropout fraction
opts.numepochs = 1;       % Number of full sweeps through data
opts.batchsize = 100;     % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex4 neural net with sigmoid activation function
rand('state',0)
nn = nnsetup([784 100 10]);

nn.activation_function = 'sigm'; % Sigmoid activation function
nn.learningRate = 1;             % Sigmoid requires a lower learning rate
opts.numepochs = 1;              % Number of full sweeps through data
opts.batchsize = 100;            % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex5 plotting functionality
rand('state',0)
nn = nnsetup([784 20 10]);
opts.numepochs = 5;    % Number of full sweeps through data
nn.output = 'softmax'; % use softmax output
opts.batchsize = 1000; % Take a mean gradient step over this many samples
opts.plot = 1;         % enable plotting

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex6 neural net with sigmoid activation and plotting of validation and training error
% split training data into training and validation data
vx = train_x(1:10000,:);
tx = train_x(10001:end,:);
vy = train_y(1:10000,:);
ty = train_y(10001:end,:);

rand('state',0)
nn = nnsetup([784 20 10]);
nn.output = 'softmax';  % use softmax output
opts.numepochs = 5;     % Number of full sweeps through data
opts.batchsize = 1000;  % Take a mean gradient step over this many samples
opts.plot = 1;          % enable plotting
nn = nntrain(nn, tx, ty, opts, vx, vy); % nntrain takes a validation set as the last two (optional) arguments

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

```




[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/rasmusbergpalm/deeplearntoolbox/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
@@ -1,63 +0,0 @@
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/rasmusbergpalm/deeplearntoolbox/trend.png)](https://bitdeli.com/free "Bitdeli Badge")

DeepLearnToolbox
================

A Matlab toolbox for Deep Learning.

Deep Learning is a new subfield of machine learning that focuses on learning deep hierarchical models of data.
It is inspired by the human brain's apparent deep (layered, hierarchical) architecture.
A good overview of the theory of Deep Learning is
[Learning Deep Architectures for AI](http://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf)

For a more informal introduction, see the following videos by Geoffrey Hinton and Andrew Ng.

* [The Next Generation of Neural Networks](http://www.youtube.com/watch?v=AyzOUbkUf3M) (Hinton, 2007)
* [Recent Developments in Deep Learning](http://www.youtube.com/watch?v=VdIURAu1-aU) (Hinton, 2010)
* [Unsupervised Feature Learning and Deep Learning](http://www.youtube.com/watch?v=ZmNOAtZIgIk) (Ng, 2011)

If you use this toolbox in your research please cite [Prediction as a candidate for learning deep hierarchical models of data](http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=6284)

```
@MASTERSTHESIS\{IMM2012-06284,
    author = "R. B. Palm",
    title  = "Prediction as a candidate for learning deep hierarchical models of data",
    year   = "2012",
}
```

Contact: rasmusbergpalm at gmail dot com

Directories included in the toolbox
-----------------------------------

`NN/` - A library for Feedforward Backpropagation Neural Networks

`CNN/` - A library for Convolutional Neural Networks

`DBN/` - A library for Deep Belief Networks

`SAE/` - A library for Stacked Auto-Encoders

`CAE/` - A library for Convolutional Auto-Encoders

`util/` - Utility functions used by the libraries

`data/` - Data used by the examples

`tests/` - unit tests to verify toolbox is working

For references on each library check REFS.md

Setup
-----

1. Download.
2. addpath(genpath('DeepLearnToolbox'));

Known errors
------------------------------

`test_cnn_gradients_are_numerically_correct` fails on Octave because of a bug in Octave's convn implementation. See http://savannah.gnu.org/bugs/?39314

`test_example_CNN` fails in Octave for the same reason.
@@ -1,16 +0,0 @@
Deep Belief Nets
----------------

* ["A Fast Learning Algorithm for Deep Belief Nets"](http://www.cs.toronto.edu/~hinton/absps/ncfast.pdf) Geoffrey Hinton 2006 - Introduces contrastive divergence and DBNs
* ["A Practical Guide to Training Restricted Boltzmann Machines"](http://www.cs.toronto.edu/~hinton/absps/guideTR.pdf) Geoffrey Hinton 2010 - How to implement DBNs

Convolutional Neural Nets
-------------------------

* ["Handwritten Digit Recognition with a Back-Propagation Network"](http://yann.lecun.com/exdb/publis/pdf/lecun-90c.pdf) Yann LeCun 1990 - Introduces CNNs
* ["Notes on Convolutional Neural Networks"](http://cogprints.org/5869/1/cnn_tutorial.pdf) Jake Bouvrie 2006 - How to implement CNNs

Auto Encoders
-------------

* ["Extracting and Composing Robust Features with Denoising Autoencoders"](http://www.iro.umontreal.ca/~vincentp/Publications/vincent_icml_2008.pdf) Pascal Vincent 2008 - Introduces the Denoising Autoencoder
@@ -1,5 +0,0 @@
function sae = saesetup(size)
    for u = 2 : numel(size)
        sae.ae{u-1} = nnsetup([size(u-1) size(u) size(u-1)]);
    end
end
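Each autoencoder created above is symmetric (input, hidden, input). A hedged illustration of what saesetup would build for a hypothetical two-layer stack:

```matlab
% Sketch: saesetup([784 100 50]) creates two autoencoders, each an ordinary
% feedforward net from nnsetup:
%   sae.ae{1} = nnsetup([784 100 784]);  % reconstructs the raw input
%   sae.ae{2} = nnsetup([100  50 100]);  % reconstructs layer-1 activations
sae = saesetup([784 100 50]);
disp(sae.ae{1}.size)   % expected: 784 100 784
disp(sae.ae{2}.size)   % expected: 100  50 100
```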
@@ -1,10 +0,0 @@
function sae = saetrain(sae, x, opts)
    for i = 1 : numel(sae.ae)
        disp(['Training AE ' num2str(i) '/' num2str(numel(sae.ae))]);
        sae.ae{i} = nntrain(sae.ae{i}, x, x, opts);
        t = nnff(sae.ae{i}, x, x);
        x = t.a{2};
        %remove bias term
        x = x(:,2:end);
    end
end
@@ -1,18 +0,0 @@
echo "" > README.md
cat README_header.md >> README.md

echo -e "Example: Deep Belief Network\n---------------------\n\`\`\`matlab\n" >> README.md
cat ./tests/test_example_DBN.m >> README.md
echo -e "\n\`\`\`\n\n" >> README.md

echo -e "Example: Stacked Auto-Encoders\n---------------------\n\`\`\`matlab\n" >> README.md
cat ./tests/test_example_SAE.m >> README.md
echo -e "\n\`\`\`\n\n" >> README.md

echo -e "Example: Convolutional Neural Nets\n---------------------\n\`\`\`matlab\n" >> README.md
cat ./tests/test_example_CNN.m >> README.md
echo -e "\n\`\`\`\n\n" >> README.md

echo -e "Example: Neural Networks\n---------------------\n\`\`\`matlab\n" >> README.md
cat ./tests/test_example_NN.m >> README.md
echo -e "\n\`\`\`\n\n" >> README.md
@@ -1,8 +0,0 @@
clear all; close all; clc;

addpath(genpath('.'));
dirlist = dir('tests/test_*');
for i = 1:length(dirlist)
    name = dirlist(i).name(1:end-2);
    feval(name)
end
@@ -1,15 +0,0 @@
function test_cnn_gradients_are_numerically_correct
batch_x = rand(28,28,5);
batch_y = rand(10,5);
cnn.layers = {
    struct('type', 'i') %input layer
    struct('type', 'c', 'outputmaps', 2, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %sub sampling layer
    struct('type', 'c', 'outputmaps', 2, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %subsampling layer
};
cnn = cnnsetup(cnn, batch_x, batch_y);

cnn = cnnff(cnn, batch_x);
cnn = cnnbp(cnn, batch_y);
cnnnumgradcheck(cnn, batch_x, batch_y);
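The test above compares analytic backprop gradients against numerical ones. A generic, self-contained sketch of that idea using central differences (independent of cnnnumgradcheck, whose internals are not shown in this diff):

```matlab
% Central-difference gradient check on a toy loss f(w) = sum(w.^2)/2,
% whose analytic gradient is simply w.
f  = @(w) sum(w(:).^2) / 2;
w  = randn(5,1);
g  = w;                         % analytic gradient
ng = zeros(size(w));            % numerical gradient
h  = 1e-5;                      % finite-difference step
for k = 1:numel(w)
    e = zeros(size(w)); e(k) = h;
    ng(k) = (f(w + e) - f(w - e)) / (2 * h);
end
fprintf('max abs difference: %g\n', max(abs(g - ng)));   % should be tiny
```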
@@ -1,35 +0,0 @@
%function test_example_CNN
load mnist_uint8;

train_x = double(reshape(train_x',28,28,60000))/255;
test_x  = double(reshape(test_x',28,28,10000))/255;
train_y = double(train_y');
test_y  = double(test_y');

%% ex1 Train a 6c-2s-12c-2s Convolutional neural network
%will run 1 epoch in about 200 seconds and get around 11% error.
%With 100 epochs you'll get around 1.2% error

rand('state',0)

cnn.layers = {
    struct('type', 'i') %input layer
    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %sub sampling layer
    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) %convolution layer
    struct('type', 's', 'scale', 2) %subsampling layer
};


opts.alpha = 1;
opts.batchsize = 50;
opts.numepochs = 5;

cnn = cnnsetup(cnn, train_x, train_y);
cnn = cnntrain(cnn, train_x, train_y, opts);

[er, bad] = cnntest(cnn, test_x, test_y);

%plot mean squared error
figure; plot(cnn.rL);
assert(er<0.12, 'Too big error');
@@ -1,41 +0,0 @@
function test_example_DBN
load mnist_uint8;

train_x = double(train_x) / 255;
test_x  = double(test_x) / 255;
train_y = double(train_y);
test_y  = double(test_y);

%% ex1 train a 100 hidden unit RBM and visualize its weights
rand('state',0)
dbn.sizes = [100];
opts.numepochs = 1;
opts.batchsize = 100;
opts.momentum  = 0;
opts.alpha     = 1;
dbn = dbnsetup(dbn, train_x, opts);
dbn = dbntrain(dbn, train_x, opts);
figure; visualize(dbn.rbm{1}.W'); % Visualize the RBM weights

%% ex2 train a 100-100 hidden unit DBN and use its weights to initialize a NN
rand('state',0)
%train dbn
dbn.sizes = [100 100];
opts.numepochs = 1;
opts.batchsize = 100;
opts.momentum  = 0;
opts.alpha     = 1;
dbn = dbnsetup(dbn, train_x, opts);
dbn = dbntrain(dbn, train_x, opts);

%unfold dbn to nn
nn = dbnunfoldtonn(dbn, 10);
nn.activation_function = 'sigm';

%train nn
opts.numepochs = 1;
opts.batchsize = 100;
nn = nntrain(nn, train_x, train_y, opts);
[er, bad] = nntest(nn, test_x, test_y);

assert(er < 0.10, 'Too big error');
@@ -1,94 +0,0 @@
%function test_example_NN
load mnist_uint8;

train_x = double(train_x) / 255;
test_x  = double(test_x) / 255;
train_y = double(train_y);
test_y  = double(test_y);

% normalize
[train_x, mu, sigma] = zscore(train_x);
test_x = normalize(test_x, mu, sigma);

%% ex1 vanilla neural net
rand('state',0)
nn = nnsetup([784 100 10]);
opts.numepochs = 10;  % Number of full sweeps through data
opts.batchsize = 100; % Take a mean gradient step over this many samples
[nn, L] = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);

assert(er < 0.08, 'Too big error');

%% ex2 neural net with L2 weight decay
rand('state',0)
nn = nnsetup([784 100 10]);

nn.weightPenaltyL2 = 1e-4; % L2 weight decay
opts.numepochs = 1;        % Number of full sweeps through data
opts.batchsize = 100;      % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');


%% ex3 neural net with dropout
rand('state',0)
nn = nnsetup([784 100 10]);

nn.dropoutFraction = 0.5; % Dropout fraction
opts.numepochs = 1;       % Number of full sweeps through data
opts.batchsize = 100;     % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex4 neural net with sigmoid activation function
rand('state',0)
nn = nnsetup([784 100 10]);

nn.activation_function = 'sigm'; % Sigmoid activation function
nn.learningRate = 1;             % Sigmoid requires a lower learning rate
opts.numepochs = 1;              % Number of full sweeps through data
opts.batchsize = 100;            % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex5 plotting functionality
rand('state',0)
nn = nnsetup([784 20 10]);
opts.numepochs = 5;    % Number of full sweeps through data
nn.output = 'softmax'; % use softmax output
opts.batchsize = 1000; % Take a mean gradient step over this many samples
opts.plot = 1;         % enable plotting

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex6 neural net with sigmoid activation and plotting of validation and training error
% split training data into training and validation data
vx = train_x(1:10000,:);
tx = train_x(10001:end,:);
vy = train_y(1:10000,:);
ty = train_y(10001:end,:);

rand('state',0)
nn = nnsetup([784 20 10]);
nn.output = 'softmax';  % use softmax output
opts.numepochs = 5;     % Number of full sweeps through data
opts.batchsize = 1000;  % Take a mean gradient step over this many samples
opts.plot = 1;          % enable plotting
nn = nntrain(nn, tx, ty, opts, vx, vy); % nntrain takes a validation set as the last two (optional) arguments

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');
@@ -1,102 +0,0 @@
%function test_example_NN
load mnist_uint8;

train_x = double(train_x) / 255;
test_x  = double(test_x) / 255;
train_y = double(train_y);
test_y  = double(test_y);

train_y = train_y(:,1);
test_y  = test_y(:,1);

% normalize
[train_x, mu, sigma] = zscore(train_x);
test_x = normalize(test_x, mu, sigma);

%% ex1 vanilla neural net
rand('state',0)
nn = nnsetup([784 100 1]);
nn.output = 'sigm';
opts.numepochs = 5;   % Number of full sweeps through data
opts.batchsize = 100; % Take a mean gradient step over this many samples
[nn, L] = nntrain(nn, train_x, train_y, opts);

% [er, bad] = nntest(nn, test_x, test_y);
nn = nnff(nn, test_x, zeros(size(test_x,1), nn.size(end)));
% pred_y = nnpredict(nn, test_x);
pred_y = nn.a{end};
fprintf('Prediction error %f\n', sqrt(mean((pred_y - test_y).^2)));

% assert(er < 0.08, 'Too big error');

%% ex2 neural net with L2 weight decay
rand('state',0)
nn = nnsetup([784 100 10]);

nn.weightPenaltyL2 = 1e-4; % L2 weight decay
opts.numepochs = 1;        % Number of full sweeps through data
opts.batchsize = 100;      % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');


%% ex3 neural net with dropout
rand('state',0)
nn = nnsetup([784 100 10]);

nn.dropoutFraction = 0.5; % Dropout fraction
opts.numepochs = 1;       % Number of full sweeps through data
opts.batchsize = 100;     % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex4 neural net with sigmoid activation function
rand('state',0)
nn = nnsetup([784 100 10]);

nn.activation_function = 'sigm'; % Sigmoid activation function
nn.learningRate = 1;             % Sigmoid requires a lower learning rate
opts.numepochs = 1;              % Number of full sweeps through data
opts.batchsize = 100;            % Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex5 plotting functionality
rand('state',0)
nn = nnsetup([784 20 10]);
opts.numepochs = 5;    % Number of full sweeps through data
nn.output = 'softmax'; % use softmax output
opts.batchsize = 1000; % Take a mean gradient step over this many samples
opts.plot = 1;         % enable plotting

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex6 neural net with sigmoid activation and plotting of validation and training error
% split training data into training and validation data
vx = train_x(1:10000,:);
tx = train_x(10001:end,:);
vy = train_y(1:10000,:);
ty = train_y(10001:end,:);

rand('state',0)
nn = nnsetup([784 20 10]);
nn.output = 'softmax';  % use softmax output
opts.numepochs = 5;     % Number of full sweeps through data
opts.batchsize = 1000;  % Take a mean gradient step over this many samples
opts.plot = 1;          % enable plotting
nn = nntrain(nn, tx, ty, opts, vx, vy); % nntrain takes a validation set as the last two (optional) arguments

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');
Some files were not shown because too many files have changed in this diff.