Cleaning up and adding AU training - WIP
parent 803575d5b7
commit 27641add77
441 changed files with 11859 additions and 249 deletions
.gitignore (vendored, 1 addition)
@@ -26,3 +26,4 @@ matlab_runners/Action Unit Experiments/out_SEMAINE/
 *.ipch
 exe/FeatureExtraction/out_bp4d/
 x64/Debug/
+matlab_runners/Action Unit Experiments/out_unbc/
@@ -688,7 +688,7 @@ int main (int argc, char **argv)
 {
 // If the video is long enough post-process it for AUs
-if (output_AUs && frame_count > 100)
+if (output_AUs && frame_count > 1000)
 {
 cout << "Postprocessing the Action Unit predictions" << endl;
@@ -118,6 +118,11 @@ public:
 std::vector<std::string> GetAUClassNames() const; // Presence
 std::vector<std::string> GetAURegNames() const; // Intensity

+// Identify if models are static or dynamic (useful for correction and shifting)
+std::vector<bool> GetDynamicAUClass() const; // Presence
+std::vector<bool> GetDynamicAUReg() const; // Intensity
+
 void ExtractAllPredictionsOfflineReg(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps);
 void ExtractAllPredictionsOfflineClass(vector<std::pair<std::string, vector<double>>>& au_predictions, vector<double>& confidences, vector<bool>& successes, vector<double>& timestamps);
@@ -168,6 +168,42 @@ std::vector<std::string> FaceAnalyser::GetAURegNames() const
 return au_reg_names_all;
 }

+std::vector<bool> FaceAnalyser::GetDynamicAUClass() const
+{
+std::vector<bool> au_dynamic_class;
+std::vector<std::string> au_class_names_stat = AU_SVM_static_appearance_lin.GetAUNames();
+std::vector<std::string> au_class_names_dyn = AU_SVM_dynamic_appearance_lin.GetAUNames();
+
+for (size_t i = 0; i < au_class_names_stat.size(); ++i)
+{
+au_dynamic_class.push_back(false);
+}
+for (size_t i = 0; i < au_class_names_dyn.size(); ++i)
+{
+au_dynamic_class.push_back(true);
+}
+
+return au_dynamic_class;
+}
+
+std::vector<bool> FaceAnalyser::GetDynamicAUReg() const
+{
+std::vector<bool> au_dynamic_reg;
+std::vector<std::string> au_reg_names_stat = AU_SVR_static_appearance_lin_regressors.GetAUNames();
+std::vector<std::string> au_reg_names_dyn = AU_SVR_dynamic_appearance_lin_regressors.GetAUNames();
+
+for (size_t i = 0; i < au_reg_names_stat.size(); ++i)
+{
+au_dynamic_reg.push_back(false);
+}
+for (size_t i = 0; i < au_reg_names_dyn.size(); ++i)
+{
+au_dynamic_reg.push_back(true);
+}
+
+return au_dynamic_reg;
+}
+
 cv::Mat_<int> FaceAnalyser::GetTriangulation()
 {
 return triangulation.clone();

@@ -569,6 +605,7 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
 confidences = this->confidences;
 successes = this->valid_preds;

+// TODO only if the video is long enough or there is enough range? Compare stdev of BP4D and this
 for(auto au_iter = AU_predictions_reg_all_hist.begin(); au_iter != AU_predictions_reg_all_hist.end(); ++au_iter)
 {
 vector<double> au_good;

@@ -599,9 +636,10 @@ void FaceAnalyser::ExtractAllPredictionsOfflineReg(vector<std::pair<std::string,
 aus_valid.push_back(au_good);
 }

-// sort each of the aus
+// sort each of the aus and adjust the dynamic ones
 for(size_t au = 0; au < au_predictions.size(); ++au)
 {
 for(size_t frame = 0; frame < au_predictions[au].second.size(); ++frame)
 {
@@ -75,7 +75,7 @@ void SVM_dynamic_lin::Read(std::ifstream& stream, const std::vector<std::string>
 LandmarkDetector::ReadMatBin(stream, m_tmp);
 if(cv::norm(m_tmp - this->means > 0.00001))
 {
-cout << "Something went wrong with the SVR dynamic regressors" << endl;
+cout << "Something went wrong with the SVM dynamic classifiers" << endl;
 }
 }
@@ -75,7 +75,7 @@ void SVM_static_lin::Read(std::ifstream& stream, const std::vector<std::string>&
 LandmarkDetector::ReadMatBin(stream, m_tmp);
 if(cv::norm(m_tmp - this->means > 0.00001))
 {
-cout << "Something went wrong with the SVR dynamic regressors" << endl;
+cout << "Something went wrong with the SVM static classifiers" << endl;
 }
 }
@@ -75,7 +75,7 @@ void SVR_static_lin_regressors::Read(std::ifstream& stream, const std::vector<st
 LandmarkDetector::ReadMatBin(stream, m_tmp);
 if(cv::norm(m_tmp - this->means > 0.00001))
 {
-cout << "Something went wrong with the SVR dynamic regressors" << endl;
+cout << "Something went wrong with the SVR static regressors" << endl;
 }
 }
@@ -1,6 +1,16 @@
-AU6 intensity, Precision - 0.182, Recall - 0.050, F1 - 0.079
-AU10 intensity, Precision - 0.200, Recall - 0.058, F1 - 0.090
-AU12 intensity, Precision - 0.361, Recall - 0.081, F1 - 0.132
-AU12 class, Precision - 0.494, Recall - 0.551, F1 - 0.521
-AU14 intensity, Precision - 0.098, Recall - 0.059, F1 - 0.074
-AU17 intensity, Precision - 0.358, Recall - 0.048, F1 - 0.085
+AU1 class, Precision - 0.296, Recall - 0.637, F1 - 0.404
+AU2 class, Precision - 0.189, Recall - 0.875, F1 - 0.311
+AU4 class, Precision - 0.372, Recall - 0.533, F1 - 0.438
+AU6 intensity, Precision - 0.843, Recall - 0.730, F1 - 0.782
+AU6 class, Precision - 0.812, Recall - 0.735, F1 - 0.772
+AU7 class, Precision - 0.767, Recall - 0.695, F1 - 0.730
+AU10 intensity, Precision - 0.864, Recall - 0.761, F1 - 0.809
+AU10 class, Precision - 0.867, Recall - 0.843, F1 - 0.855
+AU12 intensity, Precision - 0.920, Recall - 0.775, F1 - 0.841
+AU12 class, Precision - 0.878, Recall - 0.862, F1 - 0.870
+AU14 intensity, Precision - 0.620, Recall - 0.700, F1 - 0.658
+AU14 class, Precision - 0.635, Recall - 0.660, F1 - 0.647
+AU15 class, Precision - 0.369, Recall - 0.556, F1 - 0.444
+AU17 intensity, Precision - 0.554, Recall - 0.689, F1 - 0.614
+AU17 class, Precision - 0.515, Recall - 0.802, F1 - 0.627
+AU23 class, Precision - 0.445, Recall - 0.450, F1 - 0.447
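Note: the precision, recall and F1 figures in these result files are produced by the runner scripts further down, which threshold the per-frame predictions and count true/false positives against the ground-truth labels. A minimal MATLAB sketch of that computation (gt and pred are illustrative names; the runners use labels_gt and preds_all_class / preds_all_int):

    % gt and pred are per-frame 0/1 vectors for one AU
    tp = sum(gt == 1 & pred == 1);
    fp = sum(gt == 0 & pred == 1);
    fn = sum(gt == 1 & pred == 0);
    precision = tp / (tp + fp);
    recall = tp / (tp + fn);
    f1 = 2 * precision * recall / (precision + recall);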
@@ -1,5 +1,5 @@
-AU6 results - corr 0.548, ccc - 0.319
-AU10 results - corr 0.415, ccc - 0.216
-AU12 results - corr 0.495, ccc - 0.297
-AU14 results - corr 0.241, ccc - 0.122
-AU17 results - corr 0.456, ccc - 0.253
+AU6 results - corr 0.774, ccc - 0.744
+AU10 results - corr 0.716, ccc - 0.681
+AU12 results - corr 0.736, ccc - 0.686
+AU14 results - corr 0.466, ccc - 0.451
+AU17 results - corr 0.477, ccc - 0.448
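Note: these intensity results report Pearson correlation and the concordance correlation coefficient (CCC) between predicted and ground-truth AU intensities; in the runner below they come from the evaluate_au_prediction_results helper. A rough sketch of the standard definitions (not that helper's actual code):

    % x = predicted intensities, y = ground-truth intensities (column vectors)
    r = corrcoef(x, y);
    corr_val = r(1, 2);                  % Pearson correlation
    c = cov(x, y, 1);                    % population covariance matrix
    ccc = 2 * c(1, 2) / (c(1, 1) + c(2, 2) + (mean(x) - mean(y))^2);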
@@ -1,7 +1,6 @@
-AU2 intensity, Precision - 0.454, Recall - 0.462, F1 - 0.458
-AU12 intensity, Precision - 0.510, Recall - 0.393, F1 - 0.444
-AU12 class, Precision - 0.406, Recall - 0.828, F1 - 0.545
-AU17 intensity, Precision - 0.394, Recall - 0.392, F1 - 0.393
-AU25 intensity, Precision - 0.342, Recall - 0.768, F1 - 0.473
-AU28 class, Precision - 0.613, Recall - 0.500, F1 - 0.551
-AU45 class, Precision - 0.300, Recall - 0.636, F1 - 0.408
+AU2 class, Precision - 0.489, Recall - 0.536, F1 - 0.511
+AU12 class, Precision - 0.521, Recall - 0.751, F1 - 0.615
+AU17 class, Precision - 0.408, Recall - 0.486, F1 - 0.444
+AU25 class, Precision - 0.410, Recall - 0.542, F1 - 0.467
+AU28 class, Precision - 0.493, Recall - 0.398, F1 - 0.441
+AU45 class, Precision - 0.223, Recall - 0.723, F1 - 0.341
@@ -0,0 +1,6 @@
AU6 class, Precision - 0.277, Recall - 0.464, F1 - 0.347
AU7 class, Precision - 0.189, Recall - 0.495, F1 - 0.274
AU10 class, Precision - 0.143, Recall - 0.322, F1 - 0.198
AU12 class, Precision - 0.306, Recall - 0.939, F1 - 0.462
AU25 class, Precision - 0.073, Recall - 0.251, F1 - 0.112
AU26 class, Precision - 0.052, Recall - 0.730, F1 - 0.096
matlab_runners/Action Unit Experiments/extract_UNBC_labels.m (new file, 70 lines)
@@ -0,0 +1,70 @@
function [ labels, valid_ids, filenames ] = extract_UNBC_labels( UNBC_dir, recs, aus )
%EXTRACT_SEMAINE_LABELS Summary of this function goes here
% Detailed explanation goes here

UNBC_dir = [UNBC_dir, '/Frame_Labels/FACS/'];

aus_UNBC = [4, 6, 7, 9, 10, 12, 20, 25, 26, 43];

inds_to_use = [];

for i=1:numel(aus)
inds_to_use = cat(1, inds_to_use, find(aus_UNBC == aus(i)));
end
aus_UNBC = aus_UNBC(inds_to_use);
labels_all = {};
valid_ids_all = {};
filenames_all = {};

for i=1:numel(recs)

% get all the dirs, etc.
sessions = dir([UNBC_dir, recs{i}]);
sessions = sessions(3:end);

num_sessions = numel(sessions);

labels = cell(num_sessions, 1);
valid_ids = cell(num_sessions, 1);
filenames = cell(num_sessions, 1);

for s=1:numel(sessions)

frames = dir([UNBC_dir, '/', recs{i}, '/', sessions(s).name, '/*.txt']);

labels_c = zeros(numel(frames), numel(aus));

for f=1:numel(frames)

file = [UNBC_dir, '/', recs{i}, '/', sessions(s).name, '/', frames(f).name];

fileID = fopen(file);
C = textscan(fileID,'%d %d %d %d\n');
fclose(fileID);

% OCC = csvread(file); %import annotations for one video file
for au = 1:numel(C{1})
labels_c(f, aus_UNBC == C{1}(au)) = C{2}(au);
end

end
labels{s} = labels_c;
filenames(s) = {sessions(s).name};
valid_ids{s} = true(size(labels_c,1),1);
end

labels_all = cat(1, labels_all, labels);
valid_ids_all = cat(1, valid_ids_all, valid_ids);
filenames_all = cat(1, filenames_all, filenames);

end

labels = labels_all;
valid_ids = valid_ids_all;
filenames = filenames_all;

end
matlab_runners/Action Unit Experiments/helpers/find_UNBC.m (new file, 17 lines)
@@ -0,0 +1,17 @@
if(exist('D:\Datasets\UNBC/', 'file'))
UNBC_dir = 'D:\Datasets\UNBC/';
hog_data_dir = 'D:\Datasets\face_datasets\hog_aligned_rigid';
else
fprintf('UNBC location not found (or not defined)\n');
end

all_recs = {'042-ll042', '043-jh043', '047-jl047', '048-aa048', '049-bm049',...
'052-dr052', '059-fn059', '064-ak064', '066-mg066', '080-bn080',...
'092-ch092', '095-tv095', '096-bg096', '097-gf097', '101-mg101',...
'103-jk103', '106-nm106', '107-hs107', '108-th108', '109-ib109',...
'115-jy115', '120-kz120', '121-vw121', '123-jh123', '124-dn124'};

devel_recs = all_recs(1:5:25);
train_recs = setdiff(all_recs, devel_recs);

all_aus = [4, 6, 7, 9, 10, 12, 20, 25, 26, 43];
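Note: together, the two helpers above are consumed by run_AU_prediction_UNBC.m (further down) roughly as follows; find_UNBC defines UNBC_dir, all_recs, devel_recs and all_aus, and the runner keeps its own copy of the same recording list under the name unbc_dirs:

    addpath('./helpers/');
    find_UNBC;

    aus_UNBC = [6, 7, 10, 12, 25, 26];
    [labels_gt, valid_ids, filenames] = extract_UNBC_labels(UNBC_dir, all_recs, aus_UNBC);
    labels_gt = cat(1, labels_gt{:});   % one row per labelled frame, one column per AU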
@@ -1,226 +0,0 @@
clear

bp4d_loc = 'D:/Datasets/FERA_2015/BP4D/BP4D-training/';

out_loc = './out_bp4d/';

if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end

%%
executable = '"../../x64/Release/FeatureExtraction.exe"';

bp4d_dirs = {'F002', 'F004', 'F006', 'F008', 'F010', 'F012', 'F014', 'F016', 'F018', 'F020', 'F022', 'M002', 'M004', 'M006', 'M008', 'M010', 'M012', 'M014', 'M016', 'M018'};

parfor f1=1:numel(bp4d_dirs)

if(isdir([bp4d_loc, bp4d_dirs{f1}]))

bp4d_2_dirs = dir([bp4d_loc, bp4d_dirs{f1}]);
bp4d_2_dirs = bp4d_2_dirs(3:end);

f1_dir = bp4d_dirs{f1};

command = [executable ' -asvid -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze '];

for f2=1:numel(bp4d_2_dirs)
f2_dir = bp4d_2_dirs(f2).name;
if(isdir([bp4d_loc, bp4d_dirs{f1}]))

curr_vid = [bp4d_loc, f1_dir, '/', f2_dir, '/'];

name = [f1_dir '_' f2_dir];
output_file = [out_loc name '.au.txt'];

command = cat(2, command, [' -fdir "' curr_vid '" -of "' output_file '"']);
end
end

dos(command);
end
end

%%
addpath('./helpers/');

find_BP4D;

aus_BP4D = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23];

[ labels_gt, valid_ids, vid_ids, filenames] = extract_BP4D_labels(BP4D_dir, bp4d_dirs, aus_BP4D);
labels_gt = cat(1, labels_gt{:});

%% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, bp4d_dirs{1}, '_T1.au.txt']);
column_names = tab.Properties.VariableNames;

% As there are both classes and intensities list and evaluate both of them
aus_pred_int = [];
aus_pred_class = [];

inds_int_in_file = [];
inds_class_in_file = [];

for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
inds_int_in_file = cat(1, inds_int_in_file, c);
end
if(strfind(column_names{c}, '_c') > 0)
aus_pred_class = cat(1, aus_pred_class, int32(str2num(column_names{c}(3:end-2))));
inds_class_in_file = cat(1, inds_class_in_file, c);
end
end

%%
inds_au_int = zeros(size(aus_BP4D));
inds_au_class = zeros(size(aus_BP4D));

for ind=1:numel(aus_BP4D)
if(~isempty(find(aus_pred_int==aus_BP4D(ind), 1)))
inds_au_int(ind) = find(aus_pred_int==aus_BP4D(ind));
end
end

for ind=1:numel(aus_BP4D)
if(~isempty(find(aus_pred_class==aus_BP4D(ind), 1)))
inds_au_class(ind) = find(aus_pred_class==aus_BP4D(ind));
end
end

preds_all_class = [];
preds_all_int = [];

for i=1:numel(filenames)

fname = [out_loc, filenames{i}, '.au.txt'];
preds = dlmread(fname, ',', 1, 0);

% Read all of the intensity AUs
preds_int = preds(:, inds_int_in_file);

% Read all of the classification AUs
preds_class = preds(:, inds_class_in_file);

preds_all_class = cat(1, preds_all_class, preds_class);
preds_all_int = cat(1, preds_all_int, preds_int);
end

%%
f = fopen('BP4D_valid_res_class.txt', 'w');
for au = 1:numel(aus_BP4D)

if(inds_au_int(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_int(:, inds_au_int(au)) >= 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_int(:, inds_au_int(au)) >= 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_int(:, inds_au_int(au)) < 1);
tn = sum(labels_gt(:,au) == 0 & preds_all_int(:, inds_au_int(au)) < 1);

precision = tp./(tp+fp);
recall = tp./(tp+fn);

f1 = 2 * precision .* recall ./ (precision + recall);

fprintf(f, 'AU%d intensity, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_BP4D(au), precision, recall, f1);
end

if(inds_au_class(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 0);
tn = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 0);

precision = tp./(tp+fp);
recall = tp./(tp+fn);

f1 = 2 * precision .* recall ./ (precision + recall);

fprintf(f, 'AU%d class, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_BP4D(au), precision, recall, f1);
end

end
fclose(f);

%%
addpath('./helpers/');

find_BP4D;

aus_BP4D = [6, 10, 12, 14, 17];
[ labels_gt, valid_ids, vid_ids, filenames] = extract_BP4D_labels_intensity(BP4D_dir_int, devel_recs, aus_BP4D);
labels_gt = cat(1, labels_gt{:});

%% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, bp4d_dirs{1}, '_T1.au.txt']);
column_names = tab.Properties.VariableNames;

% As there are both classes and intensities list and evaluate both of them
aus_pred_int = [];
inds_int_in_file = [];

for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
inds_int_in_file = cat(1, inds_int_in_file, c);
end
end

%%
inds_au_int = zeros(size(aus_BP4D));

for ind=1:numel(aus_BP4D)
if(~isempty(find(aus_pred_int==aus_BP4D(ind), 1)))
inds_au_int(ind) = find(aus_pred_int==aus_BP4D(ind));
end
end

preds_all_class = [];

for i=1:numel(filenames)

fname = [out_loc, filenames{i}, '.au.txt'];
preds = dlmread(fname, ',', 1, 0);

% Read all of the intensity AUs
preds_int = preds(:, inds_int_in_file);

preds_all_class = cat(1, preds_all_class, preds_class);
preds_all_int = cat(1, preds_all_int, preds_int);
end

%%
f = fopen('BP4D_valid_res_class.txt', 'w');
for au = 1:numel(aus_BP4D)

if(inds_au_int(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_int(:, inds_au_int(au)) >= 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_int(:, inds_au_int(au)) >= 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_int(:, inds_au_int(au)) < 1);
tn = sum(labels_gt(:,au) == 0 & preds_all_int(:, inds_au_int(au)) < 1);

precision = tp./(tp+fp);
recall = tp./(tp+fn);

f1 = 2 * precision .* recall ./ (precision + recall);

fprintf(f, 'AU%d intensity, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_BP4D(au), precision, recall, f1);
end

if(inds_au_class(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 0);
tn = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 0);

precision = tp./(tp+fp);
recall = tp./(tp+fn);

f1 = 2 * precision .* recall ./ (precision + recall);

fprintf(f, 'AU%d class, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_BP4D(au), precision, recall, f1);
end

end
fclose(f);
matlab_runners/Action Unit Experiments/run_AU_prediction_UNBC.m (new file, 188 lines)
@@ -0,0 +1,188 @@
clear

unbc_loc = 'D:/Datasets/UNBC/Images/';

out_loc = './out_unbc/';

if(~exist(out_loc, 'dir'))
mkdir(out_loc);
end

%%
executable = '"../../x64/Release/FeatureExtraction.exe"';

unbc_dirs = {'042-ll042', '043-jh043', '047-jl047', '048-aa048', '049-bm049',...
'052-dr052', '059-fn059', '064-ak064', '066-mg066', '080-bn080',...
'092-ch092', '095-tv095', '096-bg096', '097-gf097', '101-mg101',...
'103-jk103', '106-nm106', '107-hs107', '108-th108', '109-ib109',...
'115-jy115', '120-kz120', '121-vw121', '123-jh123', '124-dn124'};

parfor f1=1:numel(unbc_dirs)

if(isdir([unbc_loc, unbc_dirs{f1}]))

unbc_2_dirs = dir([unbc_loc, unbc_dirs{f1}]);
unbc_2_dirs = unbc_2_dirs(3:end);

f1_dir = unbc_dirs{f1};

command = [executable ' -asvid -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze '];

for f2=1:numel(unbc_2_dirs)
f2_dir = unbc_2_dirs(f2).name;
if(isdir([unbc_loc, unbc_dirs{f1}]))

curr_vid = [unbc_loc, f1_dir, '/', f2_dir, '/'];

name = [f1_dir '_' f2_dir];
output_file = [out_loc name '.au.txt'];

command = cat(2, command, [' -fdir "' curr_vid '" -of "' output_file '"']);
end
end

dos(command);
end
end

%%
addpath('./helpers/');

find_UNBC;

aus_UNBC = [6, 7, 10, 12, 25, 26];

[ labels_gt, valid_ids, filenames] = extract_UNBC_labels(UNBC_dir, unbc_dirs, aus_UNBC);
labels_gt = cat(1, labels_gt{:});

%% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, '042-ll042_ll042t1aaaff.au.txt']);
column_names = tab.Properties.VariableNames;

% As there are both classes and intensities list and evaluate both of them
aus_pred_int = [];
aus_pred_class = [];

inds_int_in_file = [];
inds_class_in_file = [];

for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
inds_int_in_file = cat(1, inds_int_in_file, c);
end
if(strfind(column_names{c}, '_c') > 0)
aus_pred_class = cat(1, aus_pred_class, int32(str2num(column_names{c}(3:end-2))));
inds_class_in_file = cat(1, inds_class_in_file, c);
end
end

%%
inds_au_int = zeros(size(aus_UNBC));
inds_au_class = zeros(size(aus_UNBC));

for ind=1:numel(aus_UNBC)
if(~isempty(find(aus_pred_int==aus_UNBC(ind), 1)))
inds_au_int(ind) = find(aus_pred_int==aus_UNBC(ind));
end
end

for ind=1:numel(aus_UNBC)
if(~isempty(find(aus_pred_class==aus_UNBC(ind), 1)))
inds_au_class(ind) = find(aus_pred_class==aus_UNBC(ind));
end
end

preds_all_class = [];
preds_all_int = [];

for i=1:numel(filenames)

fname = dir([out_loc, '/*', filenames{i}, '.au.txt']);
fname = fname(1).name;

preds = dlmread([out_loc '/' fname], ',', 1, 0);

% Read all of the intensity AUs
preds_int = preds(:, inds_int_in_file);

% Read all of the classification AUs
preds_class = preds(:, inds_class_in_file);

preds_all_class = cat(1, preds_all_class, preds_class);
preds_all_int = cat(1, preds_all_int, preds_int);
end

%%
f = fopen('UNBC_valid_res_class.txt', 'w');
for au = 1:numel(aus_UNBC)

if(inds_au_class(au) ~= 0)
tp = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 1);
fp = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 1);
fn = sum(labels_gt(:,au) == 1 & preds_all_class(:, inds_au_class(au)) == 0);
tn = sum(labels_gt(:,au) == 0 & preds_all_class(:, inds_au_class(au)) == 0);

precision = tp./(tp+fp);
recall = tp./(tp+fn);

f1 = 2 * precision .* recall ./ (precision + recall);

fprintf(f, 'AU%d class, Precision - %.3f, Recall - %.3f, F1 - %.3f\n', aus_UNBC(au), precision, recall, f1);
end

end
fclose(f);

%%
addpath('./helpers/');

find_BP4D;

aus_UNBC = [6, 10, 12, 14, 17];
[ labels_gt, valid_ids, vid_ids, filenames] = extract_BP4D_labels_intensity(BP4D_dir_int, devel_recs, aus_UNBC);
labels_gt = cat(1, labels_gt{:});

%% Identifying which column IDs correspond to which AU
tab = readtable([out_loc, bp4d_dirs{1}, '_T1.au.txt']);
column_names = tab.Properties.VariableNames;

% As there are both classes and intensities list and evaluate both of them
aus_pred_int = [];
inds_int_in_file = [];

for c=1:numel(column_names)
if(strfind(column_names{c}, '_r') > 0)
aus_pred_int = cat(1, aus_pred_int, int32(str2num(column_names{c}(3:end-2))));
inds_int_in_file = cat(1, inds_int_in_file, c);
end
end

%%
inds_au_int = zeros(size(aus_UNBC));

for ind=1:numel(aus_UNBC)
if(~isempty(find(aus_pred_int==aus_UNBC(ind), 1)))
inds_au_int(ind) = find(aus_pred_int==aus_UNBC(ind));
end
end

preds_all_int = [];

for i=1:numel(filenames)

fname = [out_loc, filenames{i}, '.au.txt'];
preds = dlmread(fname, ',', 1, 0);

% Read all of the intensity AUs
preds_int = preds(:, inds_int_in_file);
preds_all_int = cat(1, preds_all_int, preds_int);
end

%%
f = fopen('BP4D_valid_res_int.txt', 'w');
for au = 1:numel(aus_UNBC)
[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_au_prediction_results( preds_all_int(:, inds_au_int(au)), labels_gt(:,au));
fprintf(f, 'AU%d results - corr %.3f, ccc - %.3f\n', aus_UNBC(au), corrs, ccc);
end
fclose(f);
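Note: for one UNBC recording the first loop in the script above assembles a FeatureExtraction command roughly like the one below (the session name ll042t1aaaff is the one referenced by the readtable call in the script; the real command appends one -fdir/-of pair per session directory):

    command = ['"../../x64/Release/FeatureExtraction.exe"', ...
        ' -asvid -q -no2Dfp -no3Dfp -noMparams -noPose -noGaze', ...
        ' -fdir "D:/Datasets/UNBC/Images/042-ll042/ll042t1aaaff/"', ...
        ' -of "./out_unbc/042-ll042_ll042t1aaaff.au.txt"'];
    dos(command);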
matlab_runners/Head Pose Experiments/find_paired_matches.m (new file, 51 lines)
@@ -0,0 +1,51 @@
% Working out corrections for head pose and model correlations
clear
%%
% first need to run run_clm_head_pose_tests_clnf
if(exist([getenv('USERPROFILE') '/Dropbox/AAM/test data/'], 'file'))
database_root = [getenv('USERPROFILE') '/Dropbox/AAM/test data/'];
else
database_root = 'F:/Dropbox/Dropbox/AAM/test data/';
end
buDir = [database_root, '/bu/uniform-light/'];
resFolderBUclnf_general = [database_root, '/bu/uniform-light/CLMr3/'];
[~, pred_hp_bu, gt_hp_bu, ~, rels_bu] = calcBUerror(resFolderBUclnf_general, buDir);

biwi_dir = '/biwi pose/';
biwi_results_root = '/biwi pose results/';
res_folder_clnf_general = '/biwi pose results//CLMr4/';
[~, pred_hp_biwi, gt_hp_biwi, ~, ~, rels_biwi] = calcBiwiError([database_root res_folder_clnf_general], [database_root biwi_dir]);

ict_dir = ['ict/'];
ict_results_root = ['ict results/'];
res_folder_ict_clnf_general = 'ict results//CLMr4/';
[~, pred_hp_ict, gt_hp_ict, ~, ~, rel_ict] = calcIctError([database_root res_folder_ict_clnf_general], [database_root ict_dir]);

% Finding matching pairs to make sure they are independently distributed?

%
%%
all_hps = cat(1, pred_hp_bu, pred_hp_biwi, pred_hp_ict);
all_gts = cat(1, gt_hp_bu, gt_hp_biwi, gt_hp_ict);
all_rels = cat(1, rels_bu, rels_biwi, rel_ict);

rel_frames = all_rels > 0.8;

all_err = mean(abs(all_gts - all_hps), 2);

all_hps = all_hps(rel_frames, :);
all_gts = all_gts(rel_frames, :);

% Variation along pitch when others are close to 0
pitch_bins = [-40:5:40];
for p = pitch_bins
rel_frames = find(abs(all_gts(:,2))<3 & abs(all_gts(:,3))<3 & abs(all_gts(:,1) - p)<3);
if ~isempty(rel_frames)
corr_coeff = corr(all_hps(rel_frames,1), all_gts(rel_frames,1));
fprintf('%d, %.3f\n', numel(rel_frames), corr_coeff);
end
end

plot(find(abs(all_gts(:,1))<1 & abs(all_gts(:,3))<1));

plot(find(abs(all_gts(:,1))<1 & abs(all_gts(:,2))<1));
matlab_runners/Head Pose Experiments/run_f2f_videos.m (new file, 75 lines)
@@ -0,0 +1,75 @@
executable = '"../../x64/Release/FeatureExtraction.exe"';

outputDir = 'D:\Datasets\face2face\2007_processed/';

% First collect the filenames of the data to be used
input_label_dir = 'D:\Datasets\face2face\f2f-2007-all-transcriptions/';

folds = dir([input_label_dir, '*Rapport*']);

listener_file_labels = {};
speaker_file_labels = {};

listener_vid_files = {};
speaker_vid_files = {};

speaker_vid_dir = 'D:\Datasets\face2face\f2f-2007-all-movie-speaker/';
listener_vid_dir = 'D:\Datasets\face2face\f2f-2007-all-movie-listener/';
verbose = true;
for i=1:numel(folds)

listener_file = dir([input_label_dir, folds(i).name, '/*.L.nod.eaf']);
speaker_file = dir([input_label_dir, folds(i).name, '/*.S.nod.eaf']);

if(~isempty(listener_file))
% Need to find the appropriate video file if it exists
num = listener_file.name(end-15:end-10);
vid_file_dir = dir([listener_vid_dir, '/*', num, '*']);
vid_file = dir([listener_vid_dir, '/', vid_file_dir.name, '/*.mp4']);
if(~isempty(vid_file))
listener_vid_files = cat(1, listener_vid_files, [listener_vid_dir, '/', vid_file_dir.name, '/', vid_file.name]);
listener_file_labels = cat(1, listener_file_labels, [input_label_dir, '/' folds(i).name, '/' listener_file.name]);
end
end

if(~isempty(speaker_file))
num = speaker_file.name(end-15:end-10);
vid_file_dir = dir([speaker_vid_dir, '/*', num, '*']);
vid_file = dir([speaker_vid_dir, '/', vid_file_dir.name, '/*.mp4']);
if(~isempty(vid_file))
speaker_vid_files = cat(1, speaker_vid_files, [speaker_vid_dir, '/', vid_file_dir.name, '/', vid_file.name]);
speaker_file_labels = cat(1, speaker_file_labels, [input_label_dir, '/' folds(i).name, '/' speaker_file.name]);
end
end

end

file_labels = cat(1, listener_file_labels, speaker_file_labels);
video_files = cat(1, listener_vid_files, speaker_vid_files);

parfor i=1:numel(file_labels)

[~,short_name,vid_ext] = fileparts(video_files{i});

command = executable;

inputFile = video_files{i};
outputFile = [outputDir short_name '.txt'];

outputEaf = [outputDir short_name '.eaf'];

command = cat(2, command, [' -f "' inputFile '" -of "' outputFile '"']);

if(verbose)
outputVideo = [outputDir short_name '.track.avi'];
command = cat(2, command, [' -ov "' outputVideo '"']);
end

command = cat(2, command, [' -no2Dfp -no3Dfp -noMparams -noAUs -noGaze']);

dos(command);

copyfile(file_labels{i}, outputEaf);
copyfile(video_files{i}, [outputDir '/' short_name, '.mp4']);

end
matlab_runners/Head Pose Experiments/run_f2f_videos_2006.m (new file, 78 lines)
@@ -0,0 +1,78 @@
executable = '"../../x64/Release/FeatureExtraction.exe"';

outputDir = 'D:\Datasets\face2face\2006_processed/';

% First collect the filenames of the data to be used
input_label_dir = 'D:\Datasets\face2face\rapport-oct-2006-all-transcriptions/';

folds = dir([input_label_dir, '*SES*']);

listener_file_labels = {};
speaker_file_labels = {};

listener_vid_files = {};
speaker_vid_files = {};

speaker_vid_dir = 'D:\Datasets\face2face\rapport-oct-2006-all-movie-speaker/';
listener_vid_dir = 'D:\Datasets\face2face\rapport-oct-2006-all-movie-listener/';
verbose = true;
for i=1:numel(folds)

listener_file = dir([input_label_dir, folds(i).name, '/*.L.nod.eaf']);
speaker_file = dir([input_label_dir, folds(i).name, '/*.S.nod.eaf']);

if(~isempty(listener_file))
% Need to find the appropriate video file if it exists
num = listener_file.name(end-13:end-10);
vid_file_dir = dir([listener_vid_dir, '/*', num, '*']);
vid_file = dir([listener_vid_dir, '/', vid_file_dir.name, '/*.mp4']);
if(~isempty(vid_file))
listener_vid_files = cat(1, listener_vid_files, [listener_vid_dir, '/', vid_file_dir.name, '/', vid_file.name]);
listener_file_labels = cat(1, listener_file_labels, [input_label_dir, '/' folds(i).name, '/' listener_file.name]);
end
end

if(~isempty(speaker_file))
num = speaker_file.name(end-13:end-10);
vid_file_dir = dir([speaker_vid_dir, '/*', num, '*']);
vid_file = dir([speaker_vid_dir, '/', vid_file_dir.name, '/*.mp4']);
if(~isempty(vid_file))
speaker_vid_files = cat(1, speaker_vid_files, [speaker_vid_dir, '/', vid_file_dir.name, '/', vid_file.name]);
speaker_file_labels = cat(1, speaker_file_labels, [input_label_dir, '/' folds(i).name, '/' speaker_file.name]);
end
end

end

% file_labels = cat(1, listener_file_labels, speaker_file_labels);
% video_files = cat(1, listener_vid_files, speaker_vid_files);

file_labels = listener_file_labels;
video_files = listener_vid_files;

parfor i=1:numel(file_labels)

[~,short_name,vid_ext] = fileparts(video_files{i});

command = executable;

inputFile = video_files{i};
outputFile = [outputDir short_name '.txt'];

outputEaf = [outputDir short_name '.eaf'];

command = cat(2, command, [' -f "' inputFile '" -of "' outputFile '"']);

if(verbose)
outputVideo = [outputDir short_name '.track.avi'];
command = cat(2, command, [' -ov "' outputVideo '"']);
end

command = cat(2, command, [' -no2Dfp -no3Dfp -noMparams -noAUs -noGaze']);

dos(command);

copyfile(file_labels{i}, outputEaf);
copyfile(video_files{i}, [outputDir '/' short_name, '.mp4']);

end
@@ -0,0 +1,181 @@
% Function ParseSEMAINEAnnotations is intended to demonstrate example usage
% of SEMAINE Action Unit annotations made with ELAN annotation toolbox.
% This function loads the XML structure from an ELAN annotation file with
% ".eaf" extension, parses it and returns a numerical matrix called
% "activations" of size NUMBER OF FRAMES X NUMBER OF ACTION UNITS. The
% matrix holds binary activation status for each frame / AU combination.
% The matrix also has a row header showing which AU corresponds to which
% row as well as a column header displaying original frame indexes.

% The function takes 1 compulsory and 2 optional arguments:

% - "filepath" (compulsory) - complete path to an annotation file to parse.
% For example, "/matlab/annotation.eaf" or "C:\matlab\annotation.eaf" on
% Windows.

% - "startFrame" (optional) - ignore all annotations before "startFrame".
% Default is 1.

% - "endFrame" (optional) - ignore all annotations after "endFrame".
% Default is the last frame of a video.

% The function requires XML IO Toolbox
% (http://www.mathworks.com/matlabcentral/fileexchange/12907-xml-io-tools)
% to run properly (supplied).

function activations = ParseSEMAINEAnnotations (filepath, startFrame, endFrame)
activations = [];

% Framerate value used to convert ELAN millisecond time slots to more
% usual frames. 50 is a valid framerate for all SEMAINE videos.
framerate = 50;

% A fixed set of 6 Action Units selected for the challenge from the
% SEMAINE annotations
aus = [2 12 17 25 28 45];

% Total number of AUs.
naus = length(aus);

% Load XML structure from the file, return in case of a problem.
[success, XML] = OpenXML(filepath);
if ~success
return
end

% Parse annotation time slots
tslots = ParseTimeSlots(XML);

% Init start and end frames with default values
if nargin < 2
startFrame = 1;
end

if nargin < 3
% Get total number of time slots
ntslots = length(tslots);
% Get last slot ID
lastID = strcat('ts', num2str(ntslots));
% Get last time slot value in ms
lastValue = tslots(lastID);
% Convert last time slot value in ms to frames
endFrame = floor((lastValue / 1000) * framerate);
end

% Get total number of tiers. There are 65 of them, 1 for speech, 32 for
% activations (1 per AU) and 32 for intensities. We are going to ignore
% intensity tiers.
ntiers = length(XML.TIER);

% Compose vector of frame indexes to extract annotations from
frames = (startFrame:endFrame);

% Preallocate activations matrix
activations = zeros(length(frames), naus);

indx = 1;
% Go through all tiers skipping the first one (speech) as well as every
% intensity tier. A single activation tier is processed at every
% iteration.
for k = 2:2:ntiers
tier = XML.TIER(k);
% Only extract annotations of selected AUs, skip the rest
au = strcat('AU', num2str(aus(indx)));
if strcmp(au, tier.ATTRIBUTE.TIER_ID)
% Read all activation periods from the current tier
activationTier = ParseActivationTier(tier, tslots);
% Convert of all activation periods into frame level numerical
% representation
activations(:, indx) = ParseOccurrences(activationTier, frames, framerate);

indx = indx + 1;
end

if indx > naus
break
end
end

activations = [frames' activations];
activations = [[0 aus]; activations];
end

function occurrences = ParseOccurrences (activations, frames, framerate)
% Preallocate activations vector
occurrences = zeros(length(frames), 1);
% Go through all activation periods, convert ms into frames and init
% corresponding values of activations vector with 1 leaving the rest be 0
for i = 1:length(activations)
% Convert ms into frames
sframe = floor((activations(i).start / 1000) * framerate);
eframe = floor((activations(i).end / 1000) * framerate);

% Determine indexes of frames vector corresponding to the above
% time frame
sindx = find(frames == sframe);
eindx = find(frames == eframe);

% Mark active set of frames with 1
occurrences(sindx:eindx) = 1;
end
end

function activationTier = ParseActivationTier (tier, tslots)
% Get total number of activation periods
nactivations = length(tier.ANNOTATION);
% Preallocate activation tier structure holding start and end time
% stamps of all activation periods for the given AU
activationTier = repmat(struct('start', 0, 'end', 0), nactivations, 1);
% Go through all activation periods and init activation tier
% structure array
for i = 1:nactivations
% Read start time slot ID of the current activation period
t = tier.ANNOTATION(i).ALIGNABLE_ANNOTATION.ATTRIBUTE.TIME_SLOT_REF1;
% Read time in ms corresponding to the time slot ID
activationTier(i).start = tslots(t);

% Read end time slot ID of the current activation period
t = tier.ANNOTATION(i).ALIGNABLE_ANNOTATION.ATTRIBUTE.TIME_SLOT_REF2;
% Read time in ms corresponding to the time slot ID
activationTier(i).end = tslots(t);
end
end

function tslots = ParseTimeSlots (xmlObject)
% Get total number of time slots
nslots = length(xmlObject.TIME_ORDER.TIME_SLOT);
% Preallocate cell arrays of time slot IDs and values
tids = cell(nslots, 1);
tvalues = zeros(nslots, 1);
% Read all time slot IDs and numerical values (in ms)
for i = 1:nslots
tids{i} = xmlObject.TIME_ORDER.TIME_SLOT(i).ATTRIBUTE.TIME_SLOT_ID;
tvalues(i) = xmlObject.TIME_ORDER.TIME_SLOT(i).ATTRIBUTE.TIME_VALUE;
end
% Map time slot IDs and values together so that values are accessible
% by their IDs
tslots = containers.Map(tids, tvalues);
end

function [success, xmlObject] = OpenXML (xmlPath)
fprintf(' *** Attempting to load \"%s\" ... ', xmlPath);
xmlObject = [];
success = false;
% Check if the specified file exists and return error otherwise
if exist(xmlPath, 'file')
% Load XML structure
xmlObject = xml_read(xmlPath);
% Check if XML object loaded correctly, return error otherwise
if isempty(xmlObject)
fprintf(' ERROR - unable to read xml tree *** \n');
return
else
success = true;
end
else
fprintf(' ERROR - specified path does not exist *** \n');
return
end
fprintf(' Done *** \n');
end
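Note: a usage sketch for ParseSEMAINEAnnotations based on its header comments (the path and frame range are placeholders):

    % Parse AU activations for frames 1..2500 of one SEMAINE session
    activations = ParseSEMAINEAnnotations('C:\matlab\annotation.eaf', 1, 2500);

    % First row is a header [0, AU numbers]; first column holds frame indexes
    aus    = activations(1, 2:end);
    frames = activations(2:end, 1);
    au_on  = activations(2:end, 2:end);   % binary activation per frame / AU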
@@ -0,0 +1,98 @@
AU_dir = 'D:/Databases/DISFA/ActionUnit_Labels/';

aus = [1,2,4,5,6,9,12,15,17,20,25,26];

subjects = dir([AU_dir, 'SN*']);

% Store all of the AU directories in a cell
input_label_dirs = cell(numel(subjects), 1);
for i=1:numel(subjects)
input_label_dirs{i} = [AU_dir, subjects(i).name, '/', subjects(i).name];
end

for user=1:numel(subjects)

testing_label_files = input_label_dirs(user);

training_label_files = setdiff(input_label_dirs, testing_label_files);

training_labels_all = [];

testing_labels_all = [];

% First extract AU information
for au=aus

% Extract all of the AUs from the current user
[training_labels, training_vid_inds_all, training_frame_inds_all] = extract_au_labels(training_label_files, au);
[testing_labels, testing_vid_inds_all, testing_frame_inds_all] = extract_au_labels(testing_label_files, au);

training_labels_all = cat(2, training_labels_all, training_labels);
testing_labels_all = cat(2, testing_labels_all, testing_labels);

end

% File lists for each of the AUs
for au_ind=1:numel(aus)

% extract the interesting frames for training, the interesting ones
% are the AU

positive_samples = training_labels_all(:,au_ind) > 0;

active_samples = sum(training_labels_all,2) > 10;

% Remove neighboring images as they are not very informative
negative_samples = sum(training_labels_all,2) == 0;
neg_inds = find(negative_samples);
neg_to_use = randperm(numel(neg_inds));
% taking a number of neutral samples that bring the positive and
% negative samples to a balanced level
neg_to_use = neg_inds(neg_to_use(1:(2*sum(positive_samples) - sum(active_samples | positive_samples))));
negative_samples(:) = false;
negative_samples(neg_to_use) = true;

% Collect all the data for training now
training_samples = positive_samples | active_samples | negative_samples;

% Create a training file list file
f_train_file_list = fopen(sprintf('%s/%s_au%02d_filelist_train.txt', 'single_au_class', subjects(user).name, aus(au_ind)), 'w');

sample_inds_train = find(training_samples);

for sample_ind = sample_inds_train'

img_file_l = sprintf('../../LeftVideo%s_comp/frame_det_%06d.png', training_vid_inds_all{sample_ind}, training_frame_inds_all(sample_ind));
img_file_r = sprintf('../../RightVideo%s_comp/frame_det_%06d.png', training_vid_inds_all{sample_ind}, training_frame_inds_all(sample_ind));

au_class = training_labels_all(sample_ind, au_ind) > 1;

fprintf(f_train_file_list, '%s %d\r\n', img_file_l, au_class);
fprintf(f_train_file_list, '%s %d\r\n', img_file_r, au_class);

end
fclose(f_train_file_list);

% Create a testing file list file
f_train_file_list = fopen(sprintf('%s/%s_au%02d_filelist_test.txt', 'single_au_class', subjects(user).name, aus(au_ind)), 'w');

testing_samples = true(size(testing_labels_all,1),1);
sample_inds_test = find(testing_samples);

for sample_ind = sample_inds_test'

img_file_l = sprintf('../../LeftVideo%s_comp/frame_det_%06d.png', testing_vid_inds_all{sample_ind}, testing_frame_inds_all(sample_ind));
img_file_r = sprintf('../../RightVideo%s_comp/frame_det_%06d.png', testing_vid_inds_all{sample_ind}, testing_frame_inds_all(sample_ind));

au_class = testing_labels_all(sample_ind, au_ind) > 1;

fprintf(f_train_file_list, '%s %d\r\n', img_file_l, au_class);
fprintf(f_train_file_list, '%s %d\r\n', img_file_r, au_class);

end
fclose(f_train_file_list);

end

end
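Note: the neutral-frame sampling in the script above balances positives and negatives per AU; a worked example of the arithmetic with invented numbers:

    % Suppose one AU has 500 positive frames, and 320 frames are either positive
    % or 'active' (summed label intensity across AUs greater than 10).
    % The script then draws 2*500 - 320 = 680 extra neutral frames, so that
    % positives + active frames + sampled neutrals come to roughly 2x the positives.
    n_neutral_to_sample = 2 * 500 - 320;   % = 680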
@@ -0,0 +1,67 @@
function [ labels, valid_ids, vid_ids, filenames ] = extract_BP4D_labels( BP4D_dir, recs, aus )
%EXTRACT_SEMAINE_LABELS Summary of this function goes here
% Detailed explanation goes here

aus_BP4D = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23];

inds_to_use = [];

for i=1:numel(aus)
inds_to_use = cat(1, inds_to_use, find(aus_BP4D == aus(i)));
end
num_files = numel(dir([BP4D_dir, '/*.csv']));

labels = cell(num_files, 1);
valid_ids = cell(num_files, 1);
vid_ids = zeros(num_files, 2);
filenames = cell(num_files, 1);

file_id = 1;

for i=1:numel(recs)

csvs = dir([BP4D_dir, '/', recs{i}, '*.csv']);

for f=1:numel(csvs)

file = [BP4D_dir, '/', csvs(f).name];

[~, filename,~] = fileparts(file);
filenames{file_id} = filename;

OCC = csvread(file); %import annotations for one video file
frame_nums = OCC(2:end,1); %get all frame numbers
codes = OCC(2:end,2:end); %get codes for all action units
occlusions = OCC(2:end,end);

codes = codes(:, aus_BP4D);

% Finding the invalid regions
valid = occlusions ~= 1;

for s=1:size(codes,2)
valid = valid & codes(:,s) ~= 9;
end

vid_ids(file_id,:) = [frame_nums(1), frame_nums(end)];

labels{file_id} = codes(:, inds_to_use);

% all indices in SEMAINE are valid
valid_ids{file_id} = valid;

file_id = file_id + 1;
end
end

labels = labels(1:file_id-1);
valid_ids = valid_ids(1:file_id-1);
vid_ids = vid_ids(1:file_id-1, :);
filenames = filenames(1:file_id-1);

end
@@ -0,0 +1,63 @@
function [ labels, valid_ids, vid_ids, filenames ] = extract_BP4D_labels_intensity( BP4D_dir, recs, aus )
%EXTRACT_BP4D_LABELS_INTENSITY Summary of this function goes here
%   Detailed explanation goes here

    files_all = dir(sprintf('%s/AU%02d/%s', BP4D_dir, aus(1), '/*.csv'));
    num_files = numel(files_all);

    labels = cell(num_files, 1);
    valid_ids = cell(num_files, 1);
    vid_ids = zeros(num_files, 2);
    filenames = cell(num_files, 1);

    file_id = 1;

    for r=1:numel(recs)

        files_root = sprintf('%s/AU%02d/', BP4D_dir, aus(1));
        files_all = dir([files_root, recs{r}, '*.csv']);

        for f=1:numel(files_all)
            for au=aus

                % Need to find relevant files for the relevant user and for the
                % relevant AU
                files_root = sprintf('%s/AU%02d/', BP4D_dir, au);
                files_all = dir([files_root, recs{r}, '*.csv']);

                file = [files_root, '/', files_all(f).name];

                [~, filename,~] = fileparts(file);
                filenames{file_id} = filename(1:7);

                intensities = csvread(file); % import annotations for one session

                frame_nums = intensities(:,1); % get all frame numbers

                codes = intensities(:,2);

                % Finding the invalid regions
                valid = codes ~= 9;

                vid_ids(file_id,:) = [frame_nums(1), frame_nums(end)];

                if(au == aus(1))
                    valid_ids{file_id} = valid;
                    labels{file_id} = codes;
                else
                    valid_ids{file_id} = valid_ids{file_id} & valid;
                    labels{file_id} = cat(2, labels{file_id}, codes);
                end

            end
            file_id = file_id + 1;
        end
    end

    labels = labels(1:file_id-1);
    valid_ids = valid_ids(1:file_id-1);
    vid_ids = vid_ids(1:file_id-1, :);
    filenames = filenames(1:file_id-1);

end
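A similar hedged sketch for the intensity loader; it assumes, as find_BP4D.m below suggests, that BP4D_dir_int contains one AU%02d folder of per-session csv files for each coded AU, and the AU list is again illustrative.

find_BP4D;                    % defines BP4D_dir_int and devel_recs
[int_labels, int_valid, ~, names] = extract_BP4D_labels_intensity(BP4D_dir_int, devel_recs, [6, 12, 17]);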
@@ -0,0 +1,45 @@
function [ labels, valid_ids, filenames ] = extract_FERA2011_labels( FERA2011_dir, recs, aus )
%EXTRACT_FERA2011_LABELS Summary of this function goes here
%   Detailed explanation goes here

    num_files = numel(recs);

    % speech invalidates lower face AUs
    labels = cell(num_files, 1);
    valid_ids = cell(num_files, 1);
    filenames = cell(num_files, 1);

    file_id = 1;

    for i=1:numel(recs)

        file = [FERA2011_dir, '/', recs{i}, '/', recs{i}, '-au.dat'];

        [~, filename,~] = fileparts(file);
        filenames{file_id} = filename;

        data = csvread(file); %import annotations for one video file

        speech = data(:,end);

        labels{file_id} = data(:, aus);

        % Finding the invalid regions
        if(aus(1) >= 10)
            valid = speech == 0;
        else
            valid = true(size(speech,1), 1);
        end

        % store the per-frame validity mask (speech frames removed for lower-face AUs)
        valid_ids{file_id} = valid;

        file_id = file_id + 1;
    end

    labels = labels(1:file_id-1);
    valid_ids = valid_ids(1:file_id-1);
    filenames = filenames(1:file_id-1);

end
@@ -0,0 +1,52 @@
function [ labels, valid_ids, vid_ids ] = extract_SEMAINE_labels( SEMAINE_dir, recs, aus )
%EXTRACT_SEMAINE_LABELS Summary of this function goes here
%   Detailed explanation goes here

    % Get the right eaf file

    aus_SEMAINE = [2 12 17 25 28 45];

    inds_to_use = [];

    for i=1:numel(aus)
        inds_to_use = cat(1, inds_to_use, find(aus_SEMAINE == aus(i)));
    end

    labels = cell(numel(recs), 1);
    valid_ids = cell(numel(recs), 1);
    vid_ids = zeros(numel(recs), 2);

    for i=1:numel(recs)

        file = dir([SEMAINE_dir, '/', recs{i}, '/*.eaf']);

        vid_ids(i,:) = dlmread([SEMAINE_dir, '/', recs{i}, '.txt'], ' ');

        xml_file = [SEMAINE_dir, recs{i}, '\' file.name];
        [root_xml, name_xml, ~] = fileparts(xml_file);
        m_file = [root_xml, name_xml, '.mat'];

        if(~exist(m_file, 'file'))
            activations = ParseSEMAINEAnnotations([SEMAINE_dir, recs{i}, '\' file.name]);
            save(m_file, 'activations');
        else
            load(m_file);
        end
        if(size(activations,1) < vid_ids(i,2))
            vid_ids(i,2) = size(activations,1);
            if(vid_ids(i,2) > 2999)
                vid_ids(i,1) = vid_ids(i,2) - 2999;
            end
        end

        labels{i} = activations(vid_ids(i,1)+1:vid_ids(i,2), 1 + inds_to_use);

        % all indices in SEMAINE are valid
        valid_ids{i} = ones(size(labels{i},1),1);

    end

end
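Usage sketch (assumptions: the find_SEMAINE.m script from this commit has already run and the chosen AUs are among the SEMAINE-coded ones listed above):

find_SEMAINE;                 % defines SEMAINE_dir and train_recs
[labels, valid_ids, vid_ids] = extract_SEMAINE_labels(SEMAINE_dir, train_recs, [12, 25]);
fprintf('Loaded %d recordings; the first one spans frames %d to %d\n', numel(labels), vid_ids(1,1), vid_ids(1,2));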
@@ -0,0 +1,70 @@
function [ labels, valid_ids, filenames ] = extract_UNBC_labels( UNBC_dir, recs, aus )
%EXTRACT_UNBC_LABELS Summary of this function goes here
%   Detailed explanation goes here

    UNBC_dir = [UNBC_dir, '/Frame_Labels/FACS/'];

    aus_UNBC = [4, 6, 7, 9, 10, 12, 20, 25, 26, 43];

    inds_to_use = [];

    for i=1:numel(aus)
        inds_to_use = cat(1, inds_to_use, find(aus_UNBC == aus(i)));
    end
    aus_UNBC = aus_UNBC(inds_to_use);
    labels_all = {};
    valid_ids_all = {};
    filenames_all = {};

    for i=1:numel(recs)

        % get all the dirs, etc.
        sessions = dir([UNBC_dir, recs{i}]);
        sessions = sessions(3:end);

        num_sessions = numel(sessions);

        labels = cell(num_sessions, 1);
        valid_ids = cell(num_sessions, 1);
        filenames = cell(num_sessions, 1);

        for s=1:numel(sessions)

            frames = dir([UNBC_dir, '/', recs{i}, '/', sessions(s).name, '/*.txt']);

            labels_c = zeros(numel(frames), numel(aus));

            for f=1:numel(frames)

                file = [UNBC_dir, '/', recs{i}, '/', sessions(s).name, '/', frames(f).name];

                fileID = fopen(file);
                C = textscan(fileID,'%d %d %d %d\n');
                fclose(fileID);

                % OCC = csvread(file); %import annotations for one video file
                for au = 1:numel(C{1})
                    labels_c(f, aus_UNBC == C{1}(au)) = C{2}(au);
                end

            end
            labels{s} = labels_c;
            filenames(s) = {sessions(s).name};
            valid_ids{s} = true(size(labels_c,1),1);
        end

        labels_all = cat(1, labels_all, labels);
        valid_ids_all = cat(1, valid_ids_all, valid_ids);
        filenames_all = cat(1, filenames_all, filenames);

    end

    labels = labels_all;
    valid_ids = valid_ids_all;
    filenames = filenames_all;
end
@@ -0,0 +1,29 @@
function [ labels, vid_inds, frame_inds ] = extract_au_labels( input_folders, au_id)
%EXTRACT_AU_LABELS Summary of this function goes here
%   Detailed explanation goes here

    labels = [];
    vid_inds = [];
    frame_inds = [];
    for i=1:numel(input_folders)

        in_file = sprintf('%s_au%d.txt', input_folders{i}, au_id);

        A = dlmread(in_file, ',');

        vid_inds_curr = cell(numel(A(:,2)), 1);

        labels = cat(1, labels, A(:,2));

        [~,curr_name,~] = fileparts(input_folders{i});

        frame_inds_curr = 0:numel(A(:,2))-1;
        frame_inds = cat(1, frame_inds, frame_inds_curr');

        vid_inds_curr(:) = {curr_name};
        vid_inds = cat(1, vid_inds, vid_inds_curr);

    end

end
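A short sketch of how this helper might be called; the folder names below are hypothetical placeholders for per-video prediction outputs, and the files are expected to be comma-separated '<prefix>_au<N>.txt' dumps as read by dlmread above.

input_folders = {'out_bp4d/F001_T1', 'out_bp4d/F001_T2'};   % hypothetical prefixes
[labels, vid_inds, frame_inds] = extract_au_labels(input_folders, 12);
% labels(i) is the AU12 label of frame frame_inds(i) in video vid_inds{i}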
@@ -0,0 +1,73 @@
clear

features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

find_BP4D;
BP4D_dir = [BP4D_dir '\..\BP4D-training\'];

bp4d_dirs = train_recs;
out_loc = [BP4D_dir '\..\processed_data\train\'];

parfor f1=1:numel(bp4d_dirs)

    if(isdir([BP4D_dir, bp4d_dirs{f1}]))

        bp4d_2_dirs = dir([BP4D_dir, bp4d_dirs{f1}]);
        bp4d_2_dirs = bp4d_2_dirs(3:end);

        f1_dir = bp4d_dirs{f1};

        for f2=1:numel(bp4d_2_dirs)
            f2_dir = bp4d_2_dirs(f2).name;
            if(isdir([BP4D_dir, bp4d_dirs{f1}]))
                command = features_exe;

                curr_vid = [BP4D_dir, f1_dir, '/', f2_dir, '/'];

                name = [f1_dir '_' f2_dir];
                output_file = [out_loc name '/'];

                output_hog = [out_loc name '.hog'];
                output_params = [out_loc name '.params.txt'];

                command = cat(2, command, [' -fx 2000 -fy 2000 -rigid -q -asvid -fdir "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
                command = cat(2, command, [' -hogalign "' output_hog '"']);
                command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze']);
                dos(command);
            end
        end
    end
end

bp4d_dirs = devel_recs;
out_loc = [BP4D_dir '\..\processed_data\devel\'];
parfor f1=1:numel(bp4d_dirs)

    if(isdir([BP4D_dir, bp4d_dirs{f1}]))

        bp4d_2_dirs = dir([BP4D_dir, bp4d_dirs{f1}]);
        bp4d_2_dirs = bp4d_2_dirs(3:end);

        f1_dir = bp4d_dirs{f1};

        for f2=1:numel(bp4d_2_dirs)
            f2_dir = bp4d_2_dirs(f2).name;
            if(isdir([BP4D_dir, bp4d_dirs{f1}]))
                command = features_exe;

                curr_vid = [BP4D_dir, f1_dir, '/', f2_dir, '/'];

                name = [f1_dir '_' f2_dir];
                output_file = [out_loc name '/'];

                output_hog = [out_loc name '.hog'];
                output_params = [out_loc name '.params.txt'];

                command = cat(2, command, [' -fx 2000 -fy 2000 -rigid -q -asvid -fdir "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
                command = cat(2, command, [' -hogalign "' output_hog '"']);
                command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze']);
                dos(command);
            end
        end
    end
end
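The loops above only concatenate a command string; a dry-run sketch for a single (placeholder) sequence, printing the command instead of executing it, can help sanity-check paths before a long parfor run:

features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';
curr_vid = 'D:\example\BP4D-training\F001\T1\';   % placeholder input directory
out_loc  = 'D:\example\processed_data\train\';    % placeholder output root
name     = 'F001_T1';
command = features_exe;
command = cat(2, command, [' -fx 2000 -fy 2000 -rigid -q -asvid -fdir "' curr_vid '"']);
command = cat(2, command, [' -simalign "' out_loc name '/" -simscale 0.7 -simsize 112']);
command = cat(2, command, [' -hogalign "' out_loc name '.hog"']);
command = cat(2, command, [' -of "' out_loc name '.params.txt" -no2Dfp -no3Dfp -noAUs -noPose -noGaze']);
disp(command);   % swap disp for dos(command) to actually run the extractor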
@@ -0,0 +1,42 @@
clear
features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

ck_loc = 'D:\Datasets\ck+\cohn-kanade-images\';

out_loc = 'D:\datasets\face_datasets/hog_aligned_rigid\';
out_loc_params = 'D:\datasets\face_datasets/clm_params\';

% Go two levels deep
ck_dirs = dir(ck_loc);
ck_dirs = ck_dirs(3:end);

parfor f1=1:numel(ck_dirs)

    ck_dirs_level_2 = dir([ck_loc, ck_dirs(f1).name]);
    ck_dirs_level_2 = ck_dirs_level_2(3:end);

    for f2=1:numel(ck_dirs_level_2)

        if(~isdir([ck_loc, ck_dirs(f1).name, '/', ck_dirs_level_2(f2).name]))
            continue;
        end

        command = features_exe;

        curr_vid = [ck_loc, ck_dirs(f1).name, '/', ck_dirs_level_2(f2).name];

        name = [ck_dirs(f1).name, '_', ck_dirs_level_2(f2).name];

        output_file = [out_loc name '/'];

        output_hog = [out_loc name '.hog'];
        output_params = [out_loc_params name '.txt'];

        command = cat(2, command, [' -rigid -asvid -fdir "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112 -g -q ']);
        command = cat(2, command, [' -hogalign "' output_hog, '"' ]);

        dos(command);

    end

end
@@ -0,0 +1,84 @@
% DISFA dataset experiment

features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

if(exist('D:/Databases/DISFA/', 'dir'))
    DISFA_loc = 'D:/Databases/DISFA/';
elseif(exist('D:/Datasets/DISFA/', 'dir'))
    DISFA_loc = 'D:/Datasets/DISFA/';
elseif(exist('E:/datasets/DISFA/', 'dir'))
    DISFA_loc = 'E:/datasets/DISFA/';
elseif(exist('C:/tadas/DISFA', 'dir'))
    DISFA_loc = 'C:/tadas/DISFA/';
elseif(exist('D:\datasets\face_datasets\DISFA/', 'dir'))
    DISFA_loc = 'D:\datasets\face_datasets\DISFA/';
else
    fprintf('DISFA not found\n');
end

output = [DISFA_loc, '/aligned_rigid/'];
output_hog_root = [DISFA_loc '/hog_aligned_rigid/'];
output_params_root = [DISFA_loc '/model_params/'];

DISFA_loc_1 = [DISFA_loc, 'Videos_LeftCamera/'];
DISFA_loc_2 = [DISFA_loc, 'Video_RightCamera/'];

if(~exist(output, 'dir'))
    mkdir(output);
end

if(~exist(output_hog_root, 'dir'))
    mkdir(output_hog_root);
end

if(~exist(output_params_root, 'dir'))
    mkdir(output_params_root);
end

disfa_loc_1_files = dir([DISFA_loc_1, '/*.avi']);
disfa_loc_2_files = dir([DISFA_loc_2, '/*.avi']);

%%
tic;

parfor i=1:numel(disfa_loc_1_files)

    command = features_exe;

    input_file = [DISFA_loc_1 disfa_loc_1_files(i).name];

    [~,name,~] = fileparts(disfa_loc_1_files(i).name);
    output_file = [output name '/'];

    output_hog = [output_hog_root name '.hog'];
    output_params = [output_params_root '/' name '.txt'];

    command = cat(2, command, [' -rigid -f "' input_file '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
    command = cat(2, command, [' -hogalign "' output_hog '"' ]);
    command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);

    dos(command);
end

%%
parfor i=1:numel(disfa_loc_2_files)

    command = features_exe;

    input_file = [DISFA_loc_2 disfa_loc_2_files(i).name];

    [~,name,~] = fileparts(disfa_loc_2_files(i).name);
    output_file = [output name '/'];

    output_hog = [output_hog_root name '.hog'];

    output_params = [output_params_root '/' name '.txt'];

    command = cat(2, command, [' -rigid -f "' input_file '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
    command = cat(2, command, [' -hogalign "' output_hog '"']);
    command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);

    dos(command);
end

timeTaken = toc;
@@ -0,0 +1,46 @@
clear
features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

fera_loc = 'D:\Datasets\fera\';

out_loc = 'D:\Datasets\face_datasets\hog_aligned_rigid\';
out_loc_params = 'D:\Datasets\face_datasets\model_params\';

% Go two levels deep
fera_dirs = dir(fera_loc);
fera_dirs = fera_dirs(3:end);

for f1=1:numel(fera_dirs)

    fera_dirs_level_2 = dir([fera_loc, fera_dirs(f1).name]);
    fera_dirs_level_2 = fera_dirs_level_2(3:end);

    for f2=1:numel(fera_dirs_level_2)

        vid_files = dir([fera_loc, fera_dirs(f1).name, '/', fera_dirs_level_2(f2).name, '/*.avi']);

        parfor v=1:numel(vid_files)

            command = features_exe;

            curr_vid = [fera_loc, fera_dirs(f1).name, '/', fera_dirs_level_2(f2).name, '/', vid_files(v).name];

            [~,name,~] = fileparts(curr_vid);
            output_file = [out_loc fera_dirs(f1).name '_' name '/'];

            output_hog = [out_loc fera_dirs(f1).name '_' name '.hog'];

            output_params = [out_loc_params fera_dirs(f1).name '_' name '.txt'];

            command = cat(2, command, [' -rigid -f "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
            command = cat(2, command, [' -hogalign "' output_hog '"']);

            command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);

            dos(command);

        end

    end

end
@@ -0,0 +1,71 @@
features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

find_SEMAINE;

% Go two levels deep
semaine_dirs = train_recs;
out_loc = [SEMAINE_dir, '../processed_data/train/'];

parfor f1=1:numel(semaine_dirs)

    if(isdir([SEMAINE_dir, semaine_dirs{f1}]))

        vid_files = dir([SEMAINE_dir, semaine_dirs{f1}, '/*.avi']);

        f1_dir = semaine_dirs{f1};

        for v=1:numel(vid_files)

            command = features_exe;

            curr_vid = [SEMAINE_dir, f1_dir, '/', vid_files(v).name];

            name = f1_dir;
            output_file = [out_loc name '/'];

            output_hog = [out_loc name '.hog'];
            output_params = [out_loc name '.params.txt'];

            command = cat(2, command, [' -rigid -f "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
            command = cat(2, command, [' -hogalign "' output_hog '"']);
            command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);
            dos(command);

        end
    end
end

%%
semaine_dirs = devel_recs;
out_loc = [SEMAINE_dir, '../processed_data/devel/'];

parfor f1=1:numel(semaine_dirs)

    if(isdir([SEMAINE_dir, semaine_dirs{f1}]))

        vid_files = dir([SEMAINE_dir, semaine_dirs{f1}, '/*.avi']);

        f1_dir = semaine_dirs{f1};

        for v=1:numel(vid_files)

            command = features_exe;

            curr_vid = [SEMAINE_dir, f1_dir, '/', vid_files(v).name];

            name = f1_dir;
            output_file = [out_loc name '/'];

            output_hog = [out_loc name '.hog'];
            output_params = [out_loc name '.params.txt'];

            command = cat(2, command, [' -rigid -f "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112']);
            command = cat(2, command, [' -hogalign "' output_hog '"']);
            command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);
            dos(command);

        end
    end
end
@@ -0,0 +1,47 @@
clear
features_exe = '"..\..\..\x64\Release\FeatureExtraction.exe"';

unbc_loc = 'D:\Datasets\UNBC\Images/';

out_loc = 'D:\Datasets\face_datasets/';

% Go two levels deep
unbc_dirs = dir(unbc_loc);
unbc_dirs = unbc_dirs(3:end);

if(~exist([out_loc, '/clm_params/'], 'file'))
    mkdir([out_loc, '/clm_params/']);
end

parfor f1=1:numel(unbc_dirs)

    unbc_dirs_level_2 = dir([unbc_loc, unbc_dirs(f1).name]);
    unbc_dirs_level_2 = unbc_dirs_level_2(3:end);

    for f2=1:numel(unbc_dirs_level_2)

        if(~isdir([unbc_loc, unbc_dirs(f1).name, '/', unbc_dirs_level_2(f2).name]))
            continue;
        end

        command = features_exe;

        curr_vid = [unbc_loc, unbc_dirs(f1).name, '/', unbc_dirs_level_2(f2).name];

        name = [unbc_dirs(f1).name, '_', unbc_dirs_level_2(f2).name];

        output_file = [out_loc, '/hog_aligned_rigid/', name '/'];

        output_hog = [out_loc, '/hog_aligned_rigid/', name '.hog'];
        output_params = [out_loc, '/model_params/', name '.txt'];

        command = cat(2, command, [' -rigid -asvid -fdir "' curr_vid '" -simalign "' output_file '" -simscale 0.7 -simsize 112 -g']);
        command = cat(2, command, [' -hogalign "' output_hog '"']);

        command = cat(2, command, [' -of "' output_params '" -no2Dfp -no3Dfp -noAUs -noPose -noGaze -q']);

        dos(command);

    end

end
28  matlab_version/AU_training/data extraction/find_BP4D.m  Normal file
@@ -0,0 +1,28 @@
if(exist('C:\tadas\face_datasets\fera_2015\bp4d\AUCoding/', 'file'))
    BP4D_dir = 'C:\tadas\face_datasets\fera_2015\bp4d\AUCoding/';
    BP4D_dir_int = 'C:\tadas\face_datasets\fera_2015\bp4d\AU Intensity Codes3.0/';
elseif(exist('E:\datasets\FERA_2015\BP4D\AUCoding/', 'file'))
    BP4D_dir = 'E:\datasets\FERA_2015\BP4D\AUCoding/';
    BP4D_dir_int = 'E:\datasets\FERA_2015\BP4D\AU Intensity Codes3.0/';
elseif(exist('D:\datasets\face_datasets\fera_2015\bp4d\AUCoding/','file'))
    BP4D_dir = 'D:\datasets\face_datasets\fera_2015\bp4d\AUCoding/';
    BP4D_dir_int = 'D:\datasets\face_datasets\fera_2015\bp4d\AU Intensity Codes3.0/';
elseif(exist('D:\Datasets\FERA_2015\BP4D\AUCoding/','file'))
    BP4D_dir = 'D:\Datasets\FERA_2015\BP4D\AUCoding/';
    BP4D_dir_int = 'D:\Datasets\FERA_2015\BP4D\AU Intensity Codes3.0/';
elseif(exist('I:\datasets\FERA_2015\BP4D\AUCoding/', 'file'))
    BP4D_dir = 'I:\datasets\FERA_2015\BP4D\AUCoding/';
    BP4D_dir_int = 'I:\datasets\FERA_2015\BP4D\AU Intensity Codes3.0/';
elseif(exist('D:/fera_2015/bp4d/AUCoding/', 'file'))
    BP4D_dir = 'D:/fera_2015/bp4d/AUCoding/';
    BP4D_dir_int = 'D:/fera_2015/bp4d/AU Intensity Codes3.0/';
else
    fprintf('BP4D location not found (or not defined)\n');
end

hog_data_dir = [BP4D_dir, '../processed_data'];

train_recs = {'F001', 'F003', 'F005', 'F007', 'F009', 'F011', 'F013', 'F015', 'F017', 'F019', 'F021', 'F023', 'M001', 'M003', 'M005', 'M007', 'M009', 'M011', 'M013', 'M015' 'M017'};
devel_recs = {'F002', 'F004', 'F006', 'F008', 'F010', 'F012', 'F014', 'F016', 'F018', 'F020', 'F022', 'M002', 'M004', 'M006', 'M008', 'M010', 'M012', 'M014', 'M016', 'M018'};
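The script is a plain chain of exist() checks, so running it on a new machine just means adding one more branch; a sketch with a purely hypothetical path:

if(exist('/data/BP4D/AUCoding/', 'file'))                 % hypothetical location
    BP4D_dir = '/data/BP4D/AUCoding/';
    BP4D_dir_int = '/data/BP4D/AU Intensity Codes3.0/';
else
    fprintf('BP4D location not found (or not defined)\n');
end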
49  matlab_version/AU_training/data extraction/find_DISFA.m  Normal file
@@ -0,0 +1,49 @@
% load all of the data together (for efficiency)
% it will be split up accordingly at later stages
if(exist('F:/datasets/DISFA/', 'file'))
    DISFA_dir = 'F:/datasets/DISFA/';
elseif(exist('D:/Databases/DISFA/', 'file'))
    DISFA_dir = 'D:/Databases/DISFA/';
elseif(exist('D:\datasets\face_datasets\DISFA/', 'file'))
    DISFA_dir = 'D:\datasets\face_datasets\DISFA/';
elseif(exist('D:\Datasets\DISFA/', 'file'))
    DISFA_dir = 'D:\Datasets\DISFA/';
elseif(exist('Z:/datasets/DISFA/', 'file'))
    DISFA_dir = 'Z:/Databases/DISFA/';
elseif(exist('E:/datasets/DISFA/', 'file'))
    DISFA_dir = 'E:/datasets/DISFA/';
elseif(exist('C:/tadas/DISFA/', 'file'))
    DISFA_dir = 'C:/tadas/DISFA/';
else
    fprintf('DISFA location not found (or not defined)\n');
end

hog_data_dir = [DISFA_dir, '/hog_aligned_rigid/'];

users = {'SN001';
    'SN002';
    'SN003';
    'SN004';
    'SN005';
    'SN006';
    'SN007';
    'SN008';
    'SN009';
    'SN010';
    'SN011';
    'SN012';
    'SN016';
    'SN017';
    'SN018';
    'SN021';
    'SN023';
    'SN024';
    'SN025';
    'SN026';
    'SN027';
    'SN028';
    'SN029';
    'SN030';
    'SN031';
    'SN032';
    'SN013'};
31  matlab_version/AU_training/data extraction/find_FERA2011.m  Normal file
@@ -0,0 +1,31 @@
if(exist('D:\Datasets\fera/au_training', 'file'))
    FERA2011_dir = 'D:\Datasets\fera/au_training/';
    hog_data_dir = 'D:\Datasets\face_datasets\hog_aligned_rigid/';
else
    fprintf('FERA2011 location not found (or not defined)\n');
end

all_recs = {'train_001', 'train_002', 'train_003', 'train_004', 'train_005',...
    'train_006', 'train_007', 'train_008', 'train_009', 'train_010',...
    'train_011', 'train_012', 'train_013', 'train_014', 'train_015',...
    'train_016', 'train_017', 'train_018', 'train_019', 'train_020',...
    'train_021', 'train_022', 'train_023', 'train_024', 'train_025',...
    'train_026', 'train_027', 'train_028', 'train_029', 'train_030',...
    'train_031', 'train_032', 'train_033', 'train_034', 'train_035',...
    'train_036', 'train_037', 'train_038', 'train_039', 'train_040',...
    'train_041', 'train_042', 'train_043', 'train_044', 'train_045',...
    'train_046', 'train_047', 'train_048', 'train_049', 'train_050',...
    'train_051', 'train_052', 'train_053', 'train_054', 'train_055',...
    'train_056', 'train_057', 'train_058', 'train_059', 'train_060',...
    'train_061', 'train_062', 'train_063', 'train_064', 'train_065',...
    'train_066', 'train_067', 'train_068', 'train_069', 'train_070',...
    'train_071', 'train_072', 'train_073', 'train_074', 'train_075',...
    'train_076', 'train_077', 'train_078', 'train_079', 'train_080',...
    'train_081', 'train_082', 'train_083', 'train_084', 'train_085',...
    'train_086', 'train_087'};

% Making them person independent
train_recs = all_recs(1:56);
devel_recs = setdiff(all_recs, train_recs);

all_aus = [1, 2, 4, 6, 7, 10, 12, 15, 17, 18, 25, 26];
22  matlab_version/AU_training/data extraction/find_SEMAINE.m  Normal file
@@ -0,0 +1,22 @@
if(exist('E:\datasets\FERA_2015\semaine/SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'E:\datasets\FERA_2015\semaine/SEMAINE-Sessions/';
elseif(exist('I:\datasets\FERA_2015\Semaine\SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'I:\datasets\FERA_2015\Semaine\SEMAINE-Sessions/';
elseif(exist('C:\tadas\face_datasets\fera_2015\semaine/SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'C:\tadas\face_datasets\fera_2015\semaine/SEMAINE-Sessions/';
elseif(exist('D:\datasets\face_datasets\fera_2015\semaine\SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'D:\datasets\face_datasets\fera_2015\semaine\SEMAINE-Sessions/';
elseif(exist('D:\Datasets\FERA_2015\semaine\SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'D:\Datasets\FERA_2015\semaine\SEMAINE-Sessions/';
elseif(exist('D:/fera_2015/semaine/SEMAINE-Sessions/', 'file'))
    SEMAINE_dir = 'D:/fera_2015/semaine/SEMAINE-Sessions/';
else
    fprintf('SEMAINE location not found (or not defined)\n');
end

if(exist('SEMAINE_dir', 'var'))
    hog_data_dir = [SEMAINE_dir, '../processed_data/'];
end

train_recs = {'rec1', 'rec12', 'rec14', 'rec19', 'rec23', 'rec25', 'rec37', 'rec39', 'rec43', 'rec45', 'rec48', 'rec50', 'rec52', 'rec54', 'rec56', 'rec60'};
devel_recs = {'rec9', 'rec13', 'rec15', 'rec20', 'rec24', 'rec26', 'rec38', 'rec42', 'rec44', 'rec46', 'rec49', 'rec51', 'rec53', 'rec55', 'rec58'};
17  matlab_version/AU_training/data extraction/find_UNBC.m  Normal file
@@ -0,0 +1,17 @@
if(exist('D:\Datasets\UNBC/', 'file'))
    UNBC_dir = 'D:\Datasets\UNBC/';
    hog_data_dir = 'D:\Datasets\face_datasets\hog_aligned_rigid';
else
    fprintf('UNBC location not found (or not defined)\n');
end

all_recs = {'042-ll042', '043-jh043', '047-jl047', '048-aa048', '049-bm049',...
    '052-dr052', '059-fn059', '064-ak064', '066-mg066', '080-bn080',...
    '092-ch092', '095-tv095', '096-bg096', '097-gf097', '101-mg101',...
    '103-jk103', '106-nm106', '107-hs107', '108-th108', '109-ib109',...
    '115-jy115', '120-kz120', '121-vw121', '123-jh123', '124-dn124'};

devel_recs = all_recs(1:5:25);
train_recs = setdiff(all_recs, devel_recs);

all_aus = [4, 6, 7, 9, 10, 12, 20, 25, 26, 43];
BIN  matlab_version/AU_training/data extraction/xml_io_tools_2010_11_05/.DS_Store  vendored  Normal file
Binary file not shown.
@@ -0,0 +1,117 @@
function y = base64decode(x, outfname, alg)
%BASE64DECODE Perform base64 decoding on a string.
%
% INPUT:
%   x    - block of data to be decoded. Can be a string or a numeric
%          vector containing integers in the range 0-255. Any character
%          not part of the 65-character base64 subset set is silently
%          ignored. Characters occuring after a '=' padding character are
%          never decoded. If the length of the string to decode (after
%          ignoring non-base64 chars) is not a multiple of 4, then a
%          warning is generated.
%
%   outfname - if provided the binary date from decoded string will be
%          saved into a file. Since Base64 coding is often used to embbed
%          binary data in xml files, this option can be used to extract and
%          save them.
%
%   alg  - Algorithm to use: can take values 'java' or 'matlab'. Optional
%          variable defaulting to 'java' which is a little faster. If
%          'java' is chosen than core of the code is performed by a call to
%          a java library. Optionally all operations can be performed using
%          matleb code.
%
% OUTPUT:
%   y    - array of binary data returned as uint8
%
% This function is used to decode strings from the Base64 encoding specified
% in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The Base64
% encoding is designed to represent arbitrary sequences of octets in a form
% that need not be humanly readable. A 65-character subset ([A-Za-z0-9+/=])
% of US-ASCII is used, enabling 6 bits to be represented per printable
% character.
%
% See also BASE64ENCODE.
%
% Written by Jarek Tuszynski, SAIC, jaroslaw.w.tuszynski_at_saic.com
%
% Matlab version based on 2004 code by Peter J. Acklam
% E-mail:      pjacklam@online.no
% URL:         http://home.online.no/~pjacklam
% http://home.online.no/~pjacklam/matlab/software/util/datautil/base64encode.m

if nargin<3, alg='java'; end
if nargin<2, outfname=''; end

%% if x happen to be a filename than read the file
if (numel(x)<256)
    if (exist(x, 'file')==2)
        fid = fopen(x,'rb');
        x = fread(fid, 'uint8');
        fclose(fid);
    end
end
x = uint8(x(:)); % unify format

%% Perform conversion
switch (alg)
    case 'java'
        base64 = org.apache.commons.codec.binary.Base64;
        y = base64.decode(x);
        y = mod(int16(y),256); % convert from int8 to uint8
    case 'matlab'
        %% Perform the mapping
        %   A-Z  ->  0 - 25
        %   a-z  -> 26 - 51
        %   0-9  -> 52 - 61
        %   + -  -> 62     '-' is URL_SAFE alternative
        %   / _  -> 63     '_' is URL_SAFE alternative
        map = uint8(zeros(1,256)+65);
        map(uint8(['A':'Z', 'a':'z', '0':'9', '+/=']))= 0:64;
        map(uint8('-_'))= 62:63;  % URL_SAFE alternatives
        x = map(x);  % mapping

        x(x>64)=[]; % remove non-base64 chars
        if rem(numel(x), 4)
            warning('Length of base64 data not a multiple of 4; padding input.');
        end
        x(x==64)=[]; % remove padding characters

        %% add padding and reshape
        nebytes = length(x);         % number of encoded bytes
        nchunks = ceil(nebytes/4);   % number of chunks/groups
        if rem(nebytes, 4)>0
            x(end+1 : 4*nchunks) = 0; % add padding
        end
        x = reshape(uint8(x), 4, nchunks);
        y = repmat(uint8(0), 3, nchunks);   % for the decoded data

        %% Rearrange every 4 bytes into 3 bytes
        %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
        % to form
        %    aaaaaabb bbbbcccc ccdddddd
        y(1,:) = bitshift(x(1,:), 2);                 % 6 highest bits of y(1,:)
        y(1,:) = bitor(y(1,:), bitshift(x(2,:), -4)); % 2 lowest bits of y(1,:)
        y(2,:) = bitshift(x(2,:), 4);                 % 4 highest bits of y(2,:)
        y(2,:) = bitor(y(2,:), bitshift(x(3,:), -2)); % 4 lowest bits of y(2,:)
        y(3,:) = bitshift(x(3,:), 6);                 % 2 highest bits of y(3,:)
        y(3,:) = bitor(y(3,:), x(4,:));               % 6 lowest bits of y(3,:)

        %% remove extra padding
        switch rem(nebytes, 4)
            case 2
                y = y(1:end-2);
            case 3
                y = y(1:end-1);
        end
end

%% reshape to a row vector and make it a character array
y = uint8(reshape(y, 1, numel(y)));

%% save to file if needed
if ~isempty(outfname)
    fid = fopen(outfname,'wb');
    fwrite(fid, y, 'uint8');
    fclose(fid);
end
@@ -0,0 +1,138 @@
function y = base64encode(x, alg, isChunked, url_safe)
%BASE64ENCODE Perform base64 encoding on a string.
% INPUT:
%   x    - block of data to be encoded. Can be a string or a numeric
%          vector containing integers in the range 0-255.
%   alg  - Algorithm to use: can take values 'java' or 'matlab'. Optional
%          variable defaulting to 'java' which is a little faster. If
%          'java' is chosen than core of the code is performed by a call to
%          a java library. Optionally all operations can be performed using
%          matleb code.
%   isChunked - encode output into 76 character blocks. The returned
%          encoded string is broken into lines of no more than
%          76 characters each, and each line will end with EOL. Notice that
%          if resulting string is saved as part of an xml file, those EOL's
%          are often stripped by xmlwrite funtrion prior to saving.
%   url_safe - use Modified Base64 for URL applications ('base64url'
%          encoding) "Base64 alphabet" ([A-Za-z0-9-_=]).
%
%
% OUTPUT:
%   y    - character array using only "Base64 alphabet" characters
%
% This function may be used to encode strings into the Base64 encoding
% specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions).
% The Base64 encoding is designed to represent arbitrary sequences of
% octets in a form that need not be humanly readable. A 65-character
% subset ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be
% represented per printable character.
%
% See also BASE64DECODE.
%
% Written by Jarek Tuszynski, SAIC, jaroslaw.w.tuszynski_at_saic.com
%
% Matlab version based on 2004 code by Peter J. Acklam
% E-mail:      pjacklam@online.no
% URL:         http://home.online.no/~pjacklam
% http://home.online.no/~pjacklam/matlab/software/util/datautil/base64encode.m

if nargin<2, alg='java'; end
if nargin<3, isChunked=false; end
if ~islogical(isChunked)
    if isnumeric(isChunked)
        isChunked=(isChunked>0);
    else
        isChunked=false;
    end
end
if nargin<4, url_safe=false; end
if ~islogical(url_safe)
    if isnumeric(url_safe)
        url_safe=(url_safe>0);
    else
        url_safe=false;
    end
end


%% if x happen to be a filename than read the file
if (numel(x)<256)
    if (exist(x, 'file')==2)
        fid = fopen(x,'rb');
        x = fread(fid, 'uint8');   % read image file as a raw binary
        fclose(fid);
    end
end

%% Perform conversion
switch (alg)
    case 'java'
        base64 = org.apache.commons.codec.binary.Base64;
        y = base64.encodeBase64(x, isChunked);
        if url_safe
            y = strrep(y,'=','-');
            y = strrep(y,'/','_');
        end

    case 'matlab'

        %% add padding if necessary, to make the length of x a multiple of 3
        x = uint8(x(:));
        ndbytes = length(x);                 % number of decoded bytes
        nchunks = ceil(ndbytes / 3);         % number of chunks/groups
        if rem(ndbytes, 3)>0
            x(end+1 : 3*nchunks) = 0;        % add padding
        end
        x = reshape(x, [3, nchunks]);        % reshape the data
        y = repmat(uint8(0), 4, nchunks);    % for the encoded data

        %% Split up every 3 bytes into 4 pieces
        %    aaaaaabb bbbbcccc ccdddddd
        % to form
        %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
        y(1,:) = bitshift(x(1,:), -2);                  % 6 highest bits of x(1,:)
        y(2,:) = bitshift(bitand(x(1,:), 3), 4);        % 2 lowest bits of x(1,:)
        y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4));   % 4 highest bits of x(2,:)
        y(3,:) = bitshift(bitand(x(2,:), 15), 2);       % 4 lowest bits of x(2,:)
        y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6));   % 2 highest bits of x(3,:)
        y(4,:) = bitand(x(3,:), 63);                    % 6 lowest bits of x(3,:)

        %% Perform the mapping
        %   0  - 25  ->   A-Z
        %   26 - 51  ->   a-z
        %   52 - 61  ->   0-9
        %   62       ->   +
        %   63       ->   /
        map = ['A':'Z', 'a':'z', '0':'9', '+/'];
        if (url_safe), map(63:64)='-_'; end
        y = map(y(:)+1);

        %% Add padding if necessary.
        npbytes = 3 * nchunks - ndbytes;    % number of padding bytes
        if npbytes>0
            y(end-npbytes+1 : end) = '=';   % '=' is used for padding
        end

        %% break into lines with length LineLength
        if (isChunked)
            eol = sprintf('\n');
            nebytes = numel(y);
            nlines  = ceil(nebytes / 76);   % number of lines
            neolbytes = length(eol);        % number of bytes in eol string

            % pad data so it becomes a multiple of 76 elements
            y(nebytes + 1 : 76 * nlines) = 0;
            y = reshape(y, 76, nlines);

            % insert eol strings
            y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));

            % remove padding, but keep the last eol string
            m = nebytes + neolbytes * (nlines - 1);
            n = (76+neolbytes)*nlines - neolbytes;
            y(m+1 : n) = [];
        end
end

%% reshape to a row vector and make it a character array
y = char(reshape(y, 1, numel(y)));
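A small round-trip sanity check for the two helpers above, using the 'matlab' code path so the Java codec is not required; the test string is arbitrary.

txt = 'OpenFace AU training';
enc = base64encode(txt, 'matlab');
dec = base64decode(enc, '', 'matlab');
assert(strcmp(char(dec), txt), 'base64 round trip failed');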
@@ -0,0 +1,143 @@
function gen_object_display( obj_struct,indent )
%
% gen_object_display - general function to display an object's content
%
% format:   gen_object_display( obj_struct,indent )
%
% input:    obj_struct - a copy of the object stored inside a structure
%           indent - amount of "indent" when printing to the screen
%
% output:   to the screen
%
% example:  gen_object_display( struct( my_object_handle) );
%           gen_object_display( ny_structure );
%
% Correction History:
%  2006-11-01 - Jarek Tuszynski - added support for struct arrays

%% handle insufficient input
if ( nargin == 0 )
    help gen_object_display;
    return;
elseif (nargin == 1)
    indent = 1;
end

%% check input for errors
% if ~isstruct( obj_struct )
%   fprintf( '\n\n\tMake sure that ''obj_struct'' is a struct type\n' );
%   return
% end

% if (iscell( obj_struct ))
%   for i =1:length(obj_struct)
%     gen_object_display( obj_struct{i},indent + 2 );
%   end
%   return
% end
if ~isstruct( obj_struct )
    space = sprintf( sprintf( '%%%ds',indent ),' ' );
    fprintf( ' %s', space);
    disp(obj_struct);
    return
end

% find the longest name
field_list = fieldnames( obj_struct );
max_strlen = 0;
for idx = 1:length( field_list )
    max_strlen = max( max_strlen,length(field_list{idx}) );
end

%% setup the display format (spacing)
space = sprintf( sprintf( '%%%ds',indent ),' ' );
name_format = sprintf( ' %s%%%ds: ', space, max_strlen );
name_format2= sprintf( ' %s%%%ds', space, max_strlen );
max_displen = 110 - max_strlen - indent;

%% display each field, if it is not too long
for iItem = 1:length( obj_struct )   % loop added by JT
    for idx = 1:length( field_list )

        % prepare field name to be displayed
        name = sprintf( name_format,field_list{idx} );
        %temp = getfield( obj_struct,field_list{idx} ); % original by OG
        temp = obj_struct(iItem).(field_list{idx}); % modification by JT

        % proceed according the variable's type
        switch (1)
            case islogical( temp ),  % case added by JT
                if isscalar(temp)
                    if (temp)
                        fprintf( '%strue\n',name );
                    else
                        fprintf( '%sfalse\n',name );
                    end
                else
                    fprintf( '%s[%dx%d logical]\n',name,size(temp,1),size(temp,2) );
                end
            case ischar( temp ),
                if (length(temp)<max_displen )
                    fprintf( '%s''%s''\n',name,temp' );
                else
                    fprintf( '%s[%dx%d char]\n',name,size(temp,1),size(temp,2) );
                end
            case isnumeric( temp ),
                if (size( temp,1 )==1 )
                    temp_b = num2str( temp );
                    if (length(temp_b)<max_displen )
                        fprintf( '%s[%s]\n',name,temp_b );
                    else
                        fprintf( '%s[%dx%d double]\n',name,size(temp,1),size(temp,2) );
                    end
                else
                    fprintf( '%s[%dx%d double]\n',name,size(temp,1),size(temp,2) );
                end
            case iscell( temp ),
                if (numel(temp)<10 && (isvector(temp) || isscalar(temp)))
                    fprintf( '%s[%dx%d cell] = \n',name,size(temp,1),size(temp,2) );
                    %disp(temp)
                    for r =1:numel(temp)
                        gen_object_display( temp{r},indent + max_strlen + 2 );
                        fprintf('\n');
                    end
                elseif (numel(temp)<10)
                    fprintf( '%s[%dx%d cell] = \n',name,size(temp,1),size(temp,2) );
                    for r =1:size(temp,1)
                        gen_object_display( temp(r,:),indent + max_strlen + 2 );
                    end
                else
                    fprintf( '%s[%dx%d cell]\n',name,size(temp,1),size(temp,2) );
                end
            case isstruct( temp ),
                fprintf( '%s[%dx%d struct]\n',name,size(temp,1),size(temp,2) );
                if (indent<80)
                    if (numel(temp)<10 && (isvector(temp) || isscalar(temp)))
                        gen_object_display( temp,indent + max_strlen + 2 );
                    elseif (numel(temp)<10)
                        name2 = sprintf( name_format2,field_list{idx} );
                        for r =1:size(temp,1)
                            for c =1:size(temp,2)
                                fprintf( '%s(%d,%d) =\n',name2,r,c );
                                gen_object_display( temp(r,c),indent + max_strlen + 3 );
                            end
                        end
                    end
                end
            case isobject( temp ),   fprintf( '%s[inherent object]\n',name );
                if (indent<80)
                    cmd = sprintf( 'display( obj_struct.%s,%d );',field_list{idx},indent + max_strlen + 2 );
                    eval( cmd );
                end
            otherwise,
                fprintf( '%s',name );
                try
                    fprintf( temp );
                catch %#ok<CTCH>
                    fprintf( '[No method to display type]' );
                end
                fprintf( '\n' );
        end
    end
    if (length(obj_struct)>1), fprintf('\n'); end % added by JT
end % added by JT
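A tiny usage sketch for gen_object_display on a hand-made nested struct (field names are arbitrary):

s.name = 'AU12';
s.threshold = 0.5;
s.nested.values = [1 2 3];
gen_object_display(s);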
@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="utf-8"?>
<MyTree>
  <table border="1">
    <tr>
      <td>Apples</td>
      <td>44%</td>
    </tr>
    <tr>
      <td>Bannanas</td>
      <td>23%</td>
    </tr>
    <tr>
      <td>Oranges</td>
      <td>13%</td>
    </tr>
    <tr>
      <td>Other</td>
      <td>10%</td>
    </tr>
  </table>
</MyTree>
File diff suppressed because one or more lines are too long
Binary file not shown.
After Width: | Height: | Size: 4.3 KiB |
Binary file not shown.
After Width: | Height: | Size: 156 KiB |
@@ -0,0 +1,24 @@
Copyright (c) 2007, Jaroslaw Tuszynski
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="utf-8" ?>
<?xml-stylesheet type="text/css" href="foo.css"?>
<!-- This is a Global Comment -->
<aaa xmlns:xsi="http://www.foo.org">
  <?ProcInst type="local processing instruction"?>
  <!-- local comment 1 -->
  bbb
  <!-- local comment 2 -->
  ccc
  <matrix bad-name='fff'>
    5e3+2*i, Inf
    NaN, pi
  </matrix>
  <ee_e> ee_e </ee_e>
  <ff-f> ff-f </ff-f>
  <ggög> ggög </ggög>
  <![CDATA[
    Here <ddd>xml</ddd> tags are treated as ...
    ... text
  ]]>
</aaa>
@ -0,0 +1,550 @@
|
||||||
|
function [tree, RootName, DOMnode] = xml_read(xmlfile, Pref)
|
||||||
|
%XML_READ reads xml files and converts them into Matlab's struct tree.
|
||||||
|
%
|
||||||
|
% DESCRIPTION
|
||||||
|
% tree = xml_read(xmlfile) reads 'xmlfile' into data structure 'tree'
|
||||||
|
%
|
||||||
|
% tree = xml_read(xmlfile, Pref) reads 'xmlfile' into data structure 'tree'
|
||||||
|
% according to your preferences
|
||||||
|
%
|
||||||
|
% [tree, RootName, DOMnode] = xml_read(xmlfile) get additional information
|
||||||
|
% about XML file
|
||||||
|
%
|
||||||
|
% INPUT:
|
||||||
|
% xmlfile URL or filename of xml file to read
|
||||||
|
% Pref Preferences:
|
||||||
|
% Pref.ItemName - default 'item' - name of a special tag used to itemize
|
||||||
|
% cell arrays
|
||||||
|
% Pref.ReadAttr - default true - allow reading attributes
|
||||||
|
% Pref.ReadSpec - default true - allow reading special nodes
|
||||||
|
% Pref.Str2Num - default 'smart' - convert strings that look like numbers
|
||||||
|
% to numbers. Options: "always", "never", and "smart"
|
||||||
|
% Pref.KeepNS - default true - keep or strip namespace info
|
||||||
|
% Pref.NoCells - default true - force output to have no cell arrays
|
||||||
|
% Pref.Debug - default false - show mode specific error messages
|
||||||
|
% Pref.NumLevels- default infinity - how many recursive levels are
|
||||||
|
% allowed. Can be used to speed up the function by prunning the tree.
|
||||||
|
% Pref.RootOnly - default true - output variable 'tree' corresponds to
|
||||||
|
% xml file root element, otherwise it correspond to the whole file.
|
||||||
|
% Pref.CellItem - default 'true' - leave 'item' nodes in cell notation.
|
||||||
|
% OUTPUT:
|
||||||
|
% tree tree of structs and/or cell arrays corresponding to xml file
|
||||||
|
% RootName XML tag name used for root (top level) node.
|
||||||
|
% Optionally it can be a string cell array storing: Name of
|
||||||
|
% root node, document "Processing Instructions" data and
|
||||||
|
% document "comment" string
|
||||||
|
% DOMnode output of xmlread
|
||||||
|
%
|
||||||
|
% DETAILS:
|
||||||
|
% Function xml_read first calls MATLAB's xmlread function and than
|
||||||
|
% converts its output ('Document Object Model' tree of Java objects)
|
||||||
|
% to tree of MATLAB struct's. The output is in format of nested structs
|
||||||
|
% and cells. In the output data structure field names are based on
|
||||||
|
% XML tags, except in cases when tags produce illegal variable names.
|
||||||
|
%
|
||||||
|
% Several special xml node types result in special tags for fields of
|
||||||
|
% 'tree' nodes:
|
||||||
|
% - node.CONTENT - stores data section of the node if other fields are
|
||||||
|
% present. Usually data section is stored directly in 'node'.
|
||||||
|
% - node.ATTRIBUTE.name - stores node's attribute called 'name'.
|
||||||
|
% - node.COMMENT - stores node's comment section (string). For global
|
||||||
|
% comments see "RootName" output variable.
|
||||||
|
% - node.CDATA_SECTION - stores node's CDATA section (string).
|
||||||
|
% - node.PROCESSING_INSTRUCTIONS - stores "processing instruction" child
|
||||||
|
% node. For global "processing instructions" see "RootName" output variable.
|
||||||
|
% - other special node types like: document fragment nodes, document type
|
||||||
|
% nodes, entity nodes, notation nodes and processing instruction nodes
|
||||||
|
% will be treated like regular nodes
|
||||||
|
%
|
||||||
|
% EXAMPLES:
|
||||||
|
% MyTree=[];
|
||||||
|
% MyTree.MyNumber = 13;
|
||||||
|
% MyTree.MyString = 'Hello World';
|
||||||
|
% xml_write('test.xml', MyTree);
|
||||||
|
% [tree treeName] = xml_read ('test.xml');
|
||||||
|
% disp(treeName)
|
||||||
|
% gen_object_display()
|
||||||
|
% % See also xml_examples.m
|
||||||
|
%
|
||||||
|
% See also:
|
||||||
|
% xml_write, xmlread, xmlwrite
|
||||||
|
%
|
||||||
|
% Written by Jarek Tuszynski, SAIC, jaroslaw.w.tuszynski_at_saic.com
|
||||||
|
% References:
|
||||||
|
% - Function inspired by Example 3 found in xmlread function.
|
||||||
|
% - Output data structures inspired by xml_toolbox structures.
|
||||||
|
|
||||||
|
%% default preferences
|
||||||
|
DPref.TableName = {'tr','td'}; % name of a special tags used to itemize 2D cell arrays
|
||||||
|
DPref.ItemName = 'item'; % name of a special tag used to itemize 1D cell arrays
|
||||||
|
DPref.CellItem = false; % leave 'item' nodes in cell notation
|
||||||
|
DPref.ReadAttr = true; % allow reading attributes
|
||||||
|
DPref.ReadSpec = true; % allow reading special nodes: comments, CData, etc.
|
||||||
|
DPref.KeepNS = true; % Keep or strip namespace info
|
||||||
|
DPref.Str2Num = 'smart';% convert strings that look like numbers to numbers
|
||||||
|
DPref.NoCells = true; % force output to have no cell arrays
|
||||||
|
DPref.NumLevels = 1e10; % number of recurence levels
|
||||||
|
DPref.PreserveSpace = false; % Preserve or delete spaces at the beggining and the end of stings?
|
||||||
|
RootOnly = true; % return root node with no top level special nodes
|
||||||
|
Debug = false; % show specific errors (true) or general (false)?
|
||||||
|
tree = [];
|
||||||
|
RootName = [];
|
||||||
|
|
||||||
|
%% Check Matlab Version
|
||||||
|
v = ver('MATLAB');
|
||||||
|
version = str2double(regexp(v.Version, '\d.\d','match','once'));
|
||||||
|
if (version<7.1)
|
||||||
|
error('Your MATLAB version is too old. You need version 7.1 or newer.');
|
||||||
|
end
|
||||||
|
|
||||||
|
%% read user preferences
|
||||||
|
if (nargin>1)
|
||||||
|
if (isfield(Pref, 'TableName')), DPref.TableName = Pref.TableName; end
|
||||||
|
if (isfield(Pref, 'ItemName' )), DPref.ItemName = Pref.ItemName; end
|
||||||
|
if (isfield(Pref, 'CellItem' )), DPref.CellItem = Pref.CellItem; end
|
||||||
|
if (isfield(Pref, 'Str2Num' )), DPref.Str2Num = Pref.Str2Num ; end
|
||||||
|
if (isfield(Pref, 'NoCells' )), DPref.NoCells = Pref.NoCells ; end
|
||||||
|
if (isfield(Pref, 'NumLevels')), DPref.NumLevels = Pref.NumLevels; end
|
||||||
|
if (isfield(Pref, 'ReadAttr' )), DPref.ReadAttr = Pref.ReadAttr; end
|
||||||
|
if (isfield(Pref, 'ReadSpec' )), DPref.ReadSpec = Pref.ReadSpec; end
|
||||||
|
if (isfield(Pref, 'KeepNS' )), DPref.KeepNS = Pref.KeepNS; end
|
||||||
|
if (isfield(Pref, 'RootOnly' )), RootOnly = Pref.RootOnly; end
|
||||||
|
if (isfield(Pref, 'Debug' )), Debug = Pref.Debug ; end
|
||||||
|
if (isfield(Pref, 'PreserveSpace')), DPref.PreserveSpace = Pref.PreserveSpace; end
|
||||||
|
end
|
||||||
|
if ischar(DPref.Str2Num), % convert from character description to numbers
|
||||||
|
DPref.Str2Num = find(strcmpi(DPref.Str2Num, {'never', 'smart', 'always'}))-1;
|
||||||
|
if isempty(DPref.Str2Num), DPref.Str2Num=1; end % 1-smart by default
|
||||||
|
end
|
||||||
|
|
||||||
|
%% read xml file using Matlab function
|
||||||
|
if isa(xmlfile, 'org.apache.xerces.dom.DeferredDocumentImpl');
|
||||||
|
% if xmlfile is a DOMnode than skip the call to xmlread
|
||||||
|
try
|
||||||
|
try
|
||||||
|
DOMnode = xmlfile;
|
||||||
|
catch ME
|
||||||
|
error('Invalid DOM node: \n%s.', getReport(ME));
|
||||||
|
end
|
||||||
|
catch %#ok<CTCH> catch for mablab versions prior to 7.5
|
||||||
|
error('Invalid DOM node. \n');
|
||||||
|
end
|
||||||
|
else % we assume xmlfile is a filename
|
||||||
|
if (Debug) % in debuging mode crashes are allowed
|
||||||
|
DOMnode = xmlread(xmlfile);
|
||||||
|
else % in normal mode crashes are not allowed
|
||||||
|
try
|
||||||
|
try
|
||||||
|
DOMnode = xmlread(xmlfile);
|
||||||
|
catch ME
|
||||||
|
error('Failed to read XML file %s: \n%s',xmlfile, getReport(ME));
|
||||||
|
end
|
||||||
|
catch %#ok<CTCH> catch for mablab versions prior to 7.5
|
||||||
|
error('Failed to read XML file %s\n',xmlfile);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
Node = DOMnode.getFirstChild;
|
||||||
|
|
||||||
|
%% Find the Root node. Also store data from Global Comment and Processing
|
||||||
|
% Instruction nodes, if any.
|
||||||
|
GlobalTextNodes = cell(1,3);
|
||||||
|
GlobalProcInst = [];
|
||||||
|
GlobalComment = [];
|
||||||
|
GlobalDocType = [];
|
||||||
|
while (~isempty(Node))
|
||||||
|
if (Node.getNodeType==Node.ELEMENT_NODE)
|
||||||
|
RootNode=Node;
|
||||||
|
elseif (Node.getNodeType==Node.PROCESSING_INSTRUCTION_NODE)
|
||||||
|
data = strtrim(char(Node.getData));
|
||||||
|
target = strtrim(char(Node.getTarget));
|
||||||
|
GlobalProcInst = [target, ' ', data];
|
||||||
|
GlobalTextNodes{2} = GlobalProcInst;
|
||||||
|
elseif (Node.getNodeType==Node.COMMENT_NODE)
|
||||||
|
GlobalComment = strtrim(char(Node.getData));
|
||||||
|
GlobalTextNodes{3} = GlobalComment;
|
||||||
|
% elseif (Node.getNodeType==Node.DOCUMENT_TYPE_NODE)
|
||||||
|
% GlobalTextNodes{4} = GlobalDocType;
|
||||||
|
end
|
||||||
|
Node = Node.getNextSibling;
|
||||||
|
end
|
||||||
|
|
||||||
|
%% parse xml file through calls to recursive DOMnode2struct function
|
||||||
|
if (Debug) % in debugging mode crashes are allowed
|
||||||
|
[tree RootName] = DOMnode2struct(RootNode, DPref, 1);
|
||||||
|
else % in normal mode crashes are not allowed
|
||||||
|
try
|
||||||
|
try
|
||||||
|
[tree RootName] = DOMnode2struct(RootNode, DPref, 1);
|
||||||
|
catch ME
|
||||||
|
error('Unable to parse XML file %s: \n %s.',xmlfile, getReport(ME));
|
||||||
|
end
|
||||||
|
catch %#ok<CTCH> catch for matlab versions prior to 7.5
|
||||||
|
error('Unable to parse XML file %s.',xmlfile);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%% If there were any Global Text nodes then return them
|
||||||
|
if (~RootOnly)
|
||||||
|
if (~isempty(GlobalProcInst) && DPref.ReadSpec)
|
||||||
|
t.PROCESSING_INSTRUCTION = GlobalProcInst;
|
||||||
|
end
|
||||||
|
if (~isempty(GlobalComment) && DPref.ReadSpec)
|
||||||
|
t.COMMENT = GlobalComment;
|
||||||
|
end
|
||||||
|
if (~isempty(GlobalDocType) && DPref.ReadSpec)
|
||||||
|
t.DOCUMENT_TYPE = GlobalDocType;
|
||||||
|
end
|
||||||
|
t.(RootName) = tree;
|
||||||
|
tree=t;
|
||||||
|
end
|
||||||
|
if (~isempty(GlobalTextNodes))
|
||||||
|
GlobalTextNodes{1} = RootName;
|
||||||
|
RootName = GlobalTextNodes;
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === DOMnode2struct Function ===========================================
|
||||||
|
% =======================================================================
|
||||||
|
function [s TagName LeafNode] = DOMnode2struct(node, Pref, level)
|
||||||
|
|
||||||
|
%% === Step 1: Get node name and check if it is a leaf node ==============
|
||||||
|
[TagName LeafNode] = NodeName(node, Pref.KeepNS);
|
||||||
|
s = []; % initialize output structure
|
||||||
|
|
||||||
|
%% === Step 2: Process Leaf Nodes (nodes with no children) ===============
|
||||||
|
if (LeafNode)
|
||||||
|
if (LeafNode>1 && ~Pref.ReadSpec), LeafNode=-1; end % tags only so ignore special nodes
|
||||||
|
if (LeafNode>0) % supported leaf node types
|
||||||
|
try
|
||||||
|
try % use try-catch: errors here are often due to VERY large fields (like images) that overflow java memory
|
||||||
|
s = char(node.getData);
|
||||||
|
if (isempty(s)), s = ' '; end % make it a string
|
||||||
|
% for some reason current xmlread 'creates' a lot of empty text
|
||||||
|
% fields with first character=10 - those will be deleted.
|
||||||
|
if (~Pref.PreserveSpace || s(1)==10)
|
||||||
|
if (isspace(s(1)) || isspace(s(end))), s = strtrim(s); end % trim spaces if any
|
||||||
|
end
|
||||||
|
if (LeafNode==1), s=str2var(s, Pref.Str2Num, 0); end % convert to number(s) if needed
|
||||||
|
catch ME % catch for matlab versions 7.5 and higher
|
||||||
|
warning('xml_io_tools:read:LeafRead', ...
|
||||||
|
'This leaf node could not be read and was ignored. ');
|
||||||
|
getReport(ME)
|
||||||
|
end
|
||||||
|
catch %#ok<CTCH> catch for matlab versions prior to 7.5
|
||||||
|
warning('xml_io_tools:read:LeafRead', ...
|
||||||
|
'This leaf node could not be read and was ignored. ');
|
||||||
|
end
|
||||||
|
end
|
||||||
|
if (LeafNode==3) % ProcessingInstructions need special treatment
|
||||||
|
target = strtrim(char(node.getTarget));
|
||||||
|
s = [target, ' ', s];
|
||||||
|
end
|
||||||
|
return % We are done; the rest of the function deals with nodes with children
|
||||||
|
end
|
||||||
|
if (level>Pref.NumLevels+1), return; end % if Pref.NumLevels is reached then we are done
|
||||||
|
|
||||||
|
%% === Step 3: Process nodes with children ===============================
|
||||||
|
if (node.hasChildNodes) % children present
|
||||||
|
Child = node.getChildNodes; % create array of children nodes
|
||||||
|
nChild = Child.getLength; % number of children
|
||||||
|
|
||||||
|
% --- pass 1: how many children with each name -----------------------
|
||||||
|
f = [];
|
||||||
|
for iChild = 1:nChild % read in each child
|
||||||
|
[cname cLeaf] = NodeName(Child.item(iChild-1), Pref.KeepNS);
|
||||||
|
if (cLeaf<0), continue; end % unsupported leaf node types
|
||||||
|
if (~isfield(f,cname)),
|
||||||
|
f.(cname)=0; % initialize first time I see this name
|
||||||
|
end
|
||||||
|
f.(cname) = f.(cname)+1; % add to the counter
|
||||||
|
end % end for iChild
|
||||||
|
% text_nodes become CONTENT & for some reason current xmlread 'creates' a
|
||||||
|
% lot of empty text fields so f.CONTENT value should not be trusted
|
||||||
|
if (isfield(f,'CONTENT') && f.CONTENT>2), f.CONTENT=2; end
|
||||||
|
|
||||||
|
% --- pass 2: store all the children as struct of cell arrays ----------
|
||||||
|
for iChild = 1:nChild % read in each child
|
||||||
|
[c cname cLeaf] = DOMnode2struct(Child.item(iChild-1), Pref, level+1);
|
||||||
|
if (cLeaf && isempty(c)) % if empty leaf node then skip
|
||||||
|
continue; % usually empty text node or one of unhandled node types
|
||||||
|
elseif (nChild==1 && cLeaf==1)
|
||||||
|
s=c; % shortcut for a common case
|
||||||
|
else % if normal node
|
||||||
|
if (level>Pref.NumLevels), continue; end
|
||||||
|
n = f.(cname); % how many of them in the array so far?
|
||||||
|
if (~isfield(s,cname)) % encountered this name for the first time
|
||||||
|
if (n==1) % if there will be only one of them ...
|
||||||
|
s.(cname) = c; % then save it in the format it came in
|
||||||
|
else % if there will be many of them ...
|
||||||
|
s.(cname) = cell(1,n);
|
||||||
|
s.(cname){1} = c; % then save as cell array
|
||||||
|
end
|
||||||
|
f.(cname) = 1; % initialize the counter
|
||||||
|
else % already have seen this name
|
||||||
|
s.(cname){n+1} = c; % add to the array
|
||||||
|
f.(cname) = n+1; % add to the array counter
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end % for iChild
|
||||||
|
end % end if (node.hasChildNodes)
|
||||||
|
|
||||||
|
%% === Step 4: Post-process struct's created for nodes with children =====
|
||||||
|
if (isstruct(s))
|
||||||
|
fields = fieldnames(s);
|
||||||
|
nField = length(fields);
|
||||||
|
|
||||||
|
% Detect structure that looks like Html table and store it in cell Matrix
|
||||||
|
if (nField==1 && strcmpi(fields{1},Pref.TableName{1}))
|
||||||
|
tr = s.(Pref.TableName{1});
|
||||||
|
fields2 = fieldnames(tr{1});
|
||||||
|
if (length(fields2)==1 && strcmpi(fields2{1},Pref.TableName{2}))
|
||||||
|
% This seems to be a special structure such that for
|
||||||
|
% Pref.TableName = {'tr','td'} 's' corresponds to
|
||||||
|
% <tr> <td>M11</td> <td>M12</td> </tr>
|
||||||
|
% <tr> <td>M12</td> <td>M22</td> </tr>
|
||||||
|
% Recognize it as encoding for 2D struct
|
||||||
|
nr = length(tr);
|
||||||
|
for r = 1:nr
|
||||||
|
row = tr{r}.(Pref.TableName{2});
|
||||||
|
Table(r,1:length(row)) = row; %#ok<AGROW>
|
||||||
|
end
|
||||||
|
s = Table;
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
% --- Post-processing: convert 'struct of cell-arrays' to 'array of structs'
|
||||||
|
% Example: let's say s has 3 fields s.a, s.b & s.c and each field is a
|
||||||
|
% cell-array with more than one cell-element and all 3 have the same length.
|
||||||
|
% Then change it to array of structs, each with single cell.
|
||||||
|
% This way element s.a{1} will be now accessed through s(1).a
|
||||||
|
vec = zeros(size(fields));
|
||||||
|
for i=1:nField, vec(i) = f.(fields{i}); end
|
||||||
|
if (numel(vec)>1 && vec(1)>1 && var(vec)==0) % convert from struct of
|
||||||
|
s = cell2struct(struct2cell(s), fields, 1); % arrays to array of struct
|
||||||
|
end % if anyone knows better way to do above conversion please let me know.
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
%% === Step 5: Process nodes with attributes =============================
|
||||||
|
if (node.hasAttributes && Pref.ReadAttr)
|
||||||
|
if (~isstruct(s)), % make into struct if is not already
|
||||||
|
ss.CONTENT=s;
|
||||||
|
s=ss;
|
||||||
|
end
|
||||||
|
Attr = node.getAttributes; % list of all attributes
|
||||||
|
for iAttr = 1:Attr.getLength % for each attribute
|
||||||
|
name = char(Attr.item(iAttr-1).getName); % attribute name
|
||||||
|
name = str2varName(name, Pref.KeepNS); % fix name if needed
|
||||||
|
value = char(Attr.item(iAttr-1).getValue); % attribute value
|
||||||
|
value = str2var(value, Pref.Str2Num, 1); % convert to number if possible
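% (the third argument = 1 marks attribute mode: plain numeric strings then go through str2double rather than str2num)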
|
||||||
|
s.ATTRIBUTE.(name) = value; % save again
|
||||||
|
end % end iAttr loop
|
||||||
|
end % done with attributes
|
||||||
|
if (~isstruct(s)), return; end %The rest of the code deals with struct's
|
||||||
|
|
||||||
|
%% === Post-processing: fields of "s"
|
||||||
|
% convert 'cell-array of structs' to 'arrays of structs'
|
||||||
|
fields = fieldnames(s); % get field names
|
||||||
|
nField = length(fields);
|
||||||
|
for iItem=1:length(s) % for each struct in the array - usually one
|
||||||
|
for iField=1:length(fields)
|
||||||
|
field = fields{iField}; % get field name
|
||||||
|
% if this is an 'item' field and the user wants to leave those as cells
|
||||||
|
% then skip this one
|
||||||
|
if (strcmpi(field, Pref.ItemName) && Pref.CellItem), continue; end
|
||||||
|
x = s(iItem).(field);
|
||||||
|
if (iscell(x) && all(cellfun(@isstruct,x(:))) && numel(x)>1) % it's cell-array of structs
|
||||||
|
% numel(x)>1 check is to keep 1-element cell-arrays created when Pref.CellItem=1
|
||||||
|
try % this operation fails sometimes
|
||||||
|
% example: change s(1).a{1}.b='jack'; s(1).a{2}.b='john'; to
|
||||||
|
% more convenient s(1).a(1).b='jack'; s(1).a(2).b='john';
|
||||||
|
s(iItem).(field) = [x{:}]'; %#ok<AGROW> % converted to arrays of structs
|
||||||
|
catch %#ok<CTCH>
|
||||||
|
% above operation will fail if s(1).a{1} and s(1).a{2} have
|
||||||
|
% different fields. If desired, function forceCell2Struct can force
|
||||||
|
% them to the same field structure by adding empty fields.
|
||||||
|
if (Pref.NoCells)
|
||||||
|
s(iItem).(field) = forceCell2Struct(x); %#ok<AGROW>
|
||||||
|
end
|
||||||
|
end % end catch
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%% === Step 4: Post-process struct's created for nodes with children =====
|
||||||
|
|
||||||
|
% --- Post-processing: remove special 'item' tags ---------------------
|
||||||
|
% many xml writers (including xml_write) use a special keyword to mark
|
||||||
|
% arrays of nodes (see xml_write for examples). The code below converts
|
||||||
|
% s.item to s.CONTENT
|
||||||
|
ItemContent = false;
|
||||||
|
if (isfield(s,Pref.ItemName))
|
||||||
|
s.CONTENT = s.(Pref.ItemName);
|
||||||
|
s = rmfield(s,Pref.ItemName);
|
||||||
|
ItemContent = Pref.CellItem; % if CellItem than keep s.CONTENT as cells
|
||||||
|
end
|
||||||
|
|
||||||
|
% --- Post-processing: clean up CONTENT tags ---------------------
|
||||||
|
% if s.CONTENT is a cell-array with empty elements at the end then trim
|
||||||
|
% the length of this cell-array. Also if s.CONTENT is the only field then
|
||||||
|
% remove .CONTENT part and store it as s.
|
||||||
|
if (isfield(s,'CONTENT'))
|
||||||
|
if (iscell(s.CONTENT) && isvector(s.CONTENT))
|
||||||
|
x = s.CONTENT;
|
||||||
|
for i=numel(x):-1:1, if ~isempty(x{i}), break; end; end
|
||||||
|
if (i==1 && ~ItemContent)
|
||||||
|
s.CONTENT = x{1}; % delete cell structure
|
||||||
|
else
|
||||||
|
s.CONTENT = x(1:i); % delete empty cells
|
||||||
|
end
|
||||||
|
end
|
||||||
|
if (nField==1)
|
||||||
|
if (ItemContent)
|
||||||
|
ss = s.CONTENT; % only child: remove a level but ensure output is a cell-array
|
||||||
|
s=[]; s{1}=ss;
|
||||||
|
else
|
||||||
|
s = s.CONTENT; % only child: remove a level
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === forceCell2Struct Function =========================================
|
||||||
|
% =======================================================================
|
||||||
|
function s = forceCell2Struct(x)
|
||||||
|
% Convert cell-array of structs, where not all of structs have the same
|
||||||
|
% fields, to a single array of structs
|
||||||
|
|
||||||
|
%% Convert 1D cell array of structs to 2D cell array, where each row
|
||||||
|
% represents item in original array and each column corresponds to a unique
|
||||||
|
% field name. Array "AllFields" stores the fieldnames for each column
|
||||||
|
AllFields = fieldnames(x{1}); % get field names of the first struct
|
||||||
|
CellMat = cell(length(x), length(AllFields));
|
||||||
|
for iItem=1:length(x)
|
||||||
|
fields = fieldnames(x{iItem}); % get field names of the next struct
|
||||||
|
for iField=1:length(fields) % inspect all fieldnames and find those
|
||||||
|
field = fields{iField}; % get field name
|
||||||
|
col = find(strcmp(field,AllFields),1);
|
||||||
|
if isempty(col) % no column for such fieldname yet
|
||||||
|
AllFields = [AllFields; field]; %#ok<AGROW>
|
||||||
|
col = length(AllFields); % create a new column for it
|
||||||
|
end
|
||||||
|
CellMat{iItem,col} = x{iItem}.(field); % store rearranged data
|
||||||
|
end
|
||||||
|
end
|
||||||
|
%% Convert 2D cell array to array of structs
|
||||||
|
s = cell2struct(CellMat, AllFields, 2);
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === str2var Function ==================================================
|
||||||
|
% =======================================================================
|
||||||
|
function val=str2var(str, option, attribute)
|
||||||
|
% Can this string 'str' be converted to a number? If so, then do it.
|
||||||
|
val = str;
|
||||||
|
len = numel(str);
|
||||||
|
if (len==0 || option==0), return; end % Str2Num="never" or empty string -> do not do anything
|
||||||
|
if (len>10000 && option==1), return; end % Str2Num="smart" and string is very long -> probably base64 encoded binary
|
||||||
|
digits = '(Inf)|(NaN)|(pi)|[\t\n\d\+\-\*\.ei EI\[\]\;\,]';
|
||||||
|
s = regexprep(str, digits, ''); % remove all the digits and other allowed characters
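% e.g. '1.5e-3, 2' or '[Inf NaN]' is reduced to an empty string here, so it is treated as a numeric candidate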
|
||||||
|
if (~all(~isempty(s))) % if nothing left then this is probably a number
|
||||||
|
if (~isempty(strfind(str, ' '))), option=2; end %if str has white-spaces assume by default that it is not a date string
|
||||||
|
if (~isempty(strfind(str, '['))), option=2; end % same with brackets
|
||||||
|
str(strfind(str, '\n')) = ';';% parse data tables into 2D arrays, if any
|
||||||
|
if (option==1) % the 'smart' option
|
||||||
|
try % try to convert to a date, like 2007-12-05
|
||||||
|
datenum(str); % if successful than leave it as string
|
||||||
|
catch %#ok<CTCH> % if this is not a date than ...
|
||||||
|
option=2; % ... try converting to a number
|
||||||
|
end
|
||||||
|
end
|
||||||
|
if (option==2)
|
||||||
|
if (attribute)
|
||||||
|
num = str2double(str); % try converting to a single number using sscanf function
|
||||||
|
if isnan(num), return; end % So, it wasn't really a number after all
|
||||||
|
else
|
||||||
|
num = str2num(str); %#ok<ST2NM> % try converting to a single number or array using eval function
|
||||||
|
end
|
||||||
|
if(isnumeric(num) && numel(num)>0), val=num; end % if conversion to a number was successful then save it
|
||||||
|
end
|
||||||
|
elseif ((str(1)=='[' && str(end)==']') || (str(1)=='{' && str(end)=='}')) % this looks like a (cell) array encoded as a string
|
||||||
|
try
|
||||||
|
val = eval(str);
|
||||||
|
catch %#ok<CTCH>
|
||||||
|
val = str;
|
||||||
|
end
|
||||||
|
elseif (~attribute) % see if it is a boolean array with no [] brackets
|
||||||
|
str1 = lower(str);
|
||||||
|
str1 = strrep(str1, 'false', '0');
|
||||||
|
str1 = strrep(str1, 'true' , '1');
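% e.g. 'False False True True' becomes '0 0 1 1' and is returned below as logical([0 0 1 1])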
|
||||||
|
s = regexprep(str1, '[01 \;\,]', ''); % remove all 0/1, spaces, commas and semicolons
|
||||||
|
if (~all(~isempty(s))) % if nothing left then this is probably a boolean array
|
||||||
|
num = str2num(str1); %#ok<ST2NM>
|
||||||
|
if(isnumeric(num) && numel(num)>0), val = (num>0); end % if conversion was successful then save as logical
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === str2varName Function ==============================================
|
||||||
|
% =======================================================================
|
||||||
|
function str = str2varName(str, KeepNS)
|
||||||
|
% convert a string to a valid MATLAB variable name
|
||||||
|
if(KeepNS)
|
||||||
|
str = regexprep(str,':','_COLON_', 'once', 'ignorecase');
|
||||||
|
else
|
||||||
|
k = strfind(str,':');
|
||||||
|
if (~isempty(k))
|
||||||
|
str = str(k+1:end);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
str = regexprep(str,'-','_DASH_' ,'once', 'ignorecase');
|
||||||
|
if (~isvarname(str)) && (~iskeyword(str))
|
||||||
|
str = genvarname(str);
|
||||||
|
end
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === NodeName Function =================================================
|
||||||
|
% =======================================================================
|
||||||
|
function [Name LeafNode] = NodeName(node, KeepNS)
|
||||||
|
% get node name and make sure it is a valid variable name in Matlab.
|
||||||
|
% also get node type:
|
||||||
|
% LeafNode=0 - normal element node,
|
||||||
|
% LeafNode=1 - text node
|
||||||
|
% LeafNode=2 - supported non-text leaf node,
|
||||||
|
% LeafNode=3 - supported processing instructions leaf node,
|
||||||
|
% LeafNode=-1 - unsupported non-text leaf node
|
||||||
|
switch (node.getNodeType)
|
||||||
|
case node.ELEMENT_NODE
|
||||||
|
Name = char(node.getNodeName);% capture name of the node
|
||||||
|
Name = str2varName(Name, KeepNS); % if Name is not a good variable name - fix it
|
||||||
|
LeafNode = 0;
|
||||||
|
case node.TEXT_NODE
|
||||||
|
Name = 'CONTENT';
|
||||||
|
LeafNode = 1;
|
||||||
|
case node.COMMENT_NODE
|
||||||
|
Name = 'COMMENT';
|
||||||
|
LeafNode = 2;
|
||||||
|
case node.CDATA_SECTION_NODE
|
||||||
|
Name = 'CDATA_SECTION';
|
||||||
|
LeafNode = 2;
|
||||||
|
case node.DOCUMENT_TYPE_NODE
|
||||||
|
Name = 'DOCUMENT_TYPE';
|
||||||
|
LeafNode = 2;
|
||||||
|
case node.PROCESSING_INSTRUCTION_NODE
|
||||||
|
Name = 'PROCESSING_INSTRUCTION';
|
||||||
|
LeafNode = 3;
|
||||||
|
otherwise
|
||||||
|
NodeType = {'ELEMENT','ATTRIBUTE','TEXT','CDATA_SECTION', ...
|
||||||
|
'ENTITY_REFERENCE', 'ENTITY', 'PROCESSING_INSTRUCTION', 'COMMENT',...
|
||||||
|
'DOCUMENT', 'DOCUMENT_TYPE', 'DOCUMENT_FRAGMENT', 'NOTATION'};
|
||||||
|
Name = char(node.getNodeName);% capture name of the node
|
||||||
|
warning('xml_io_tools:read:unkNode', ...
|
||||||
|
'Unknown node type encountered: %s_NODE (%s)', NodeType{node.getNodeType}, Name);
|
||||||
|
LeafNode = -1;
|
||||||
|
end
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,914 @@
|
||||||
|
%% Tutorial for xml_io_tools Package
|
||||||
|
% *By Jarek Tuszynski*
|
||||||
|
%
|
||||||
|
% Package xml_io_tools can read XML files into MATLAB struct and writes
|
||||||
|
% MATLAB data types to XML files with help of simple interface to
|
||||||
|
% MATLAB's xmlwrite and xmlread functions.
|
||||||
|
%
|
||||||
|
% Two functions to simplify reading and writing XML files from MATLAB:
|
||||||
|
%
|
||||||
|
% * Function xml_read first calls MATLAB's xmlread function and then
|
||||||
|
% converts its output ('Document Object Model' tree of Java objects)
|
||||||
|
% to tree of MATLAB struct's. The output is in the format of nested
|
||||||
|
% structs and cells. In the output data structure field names are based on
|
||||||
|
% XML tags.
|
||||||
|
%
|
||||||
|
% * Function xml_write first converts the input tree of MATLAB structs and cells
|
||||||
|
% and other types to tree of 'Document Object Model' nodes, and then writes
|
||||||
|
% resulting object to an XML file using MATLAB's xmlwrite function.
|
||||||
|
%
|
||||||
|
%% This package can:
|
||||||
|
% * Read most XML files, created inside and outside of MATLAB environment,
|
||||||
|
% and convert them to MATLAB data structures.
|
||||||
|
% * Write any MATLAB's struct tree to XML file
|
||||||
|
% * Handle XML attributes and special XML nodes like comments, processing
|
||||||
|
% instructions and CDATA sections
|
||||||
|
% * Support base64 encoding and decoding to allow handling of embedded binary
|
||||||
|
% data
|
||||||
|
% * Be studied, modified, customized, rewritten and used in other packages
|
||||||
|
% without any limitations. All code is included and documented. Software
|
||||||
|
% is distributed under BSD Licence (included).
|
||||||
|
%
|
||||||
|
%% This package does not:
|
||||||
|
% * Guarantee to recover the same Matlab objects that were saved. If you
|
||||||
|
% need to be able to recover a carbon copy of the structure that was saved
|
||||||
|
% then you will have to use one of the packages that use a special set of
|
||||||
|
% tags saved as xml attributes that help to guide the parsing of XML code.
|
||||||
|
% This package does not use those tags.
|
||||||
|
% * Guarantee to work with older versions of MATLAB. Functions do not work
|
||||||
|
% with versions of MATLAB prior to 7.1 (26-Jul-2005).
|
||||||
|
%
|
||||||
|
%% Change History
|
||||||
|
% * 2006-11-06 - original version
|
||||||
|
% * 2006-11-26 - corrected xml_write to handle writing Matlab's column
|
||||||
|
% arrays to xml files. Bug discovered and diagnosed by Kalyan Dutta.
|
||||||
|
% * 2006-11-28 - made changes to handle special node types like:
|
||||||
|
% COMMENTS and CDATA sections
|
||||||
|
% * 2007-03-12 - Writing CDATA sections still did not work. The problem
|
||||||
|
% was diagnosed and fixed by Alberto Amaro. The fix involved rewriting
|
||||||
|
% xmlwrite to use Apache Xerces java files directly instead of MATLAB's
|
||||||
|
% XMLUtils java class.
|
||||||
|
% * 2007-06-21 - Fixed problem reported by Anna Kelbert in Reviews about
|
||||||
|
% not writing attributes of ROOT node. Also: added support for Processing
|
||||||
|
% Instructions, added support for global text nodes: Processing
|
||||||
|
% Instructions and comments, allowed writing tag names with special
|
||||||
|
% characters
|
||||||
|
% * 2007-07-20 - Added tutorial script file. Extended support for global
|
||||||
|
% text nodes. Added more Preference fields.
|
||||||
|
% * 2008-01-23 - Fixed problem reported by Anna Krewet of converting dates
|
||||||
|
% in format '2007-01-01' to numbers. Improved and added warning messages.
|
||||||
|
% Added detection of old Matlab versions incompatible with the library.
|
||||||
|
% Expanded documentation.
|
||||||
|
% * 2008-06-23 - Fixed problem with writing 1D array reported by Mark Neil.
|
||||||
|
% Extended xml_read's Pref.Str2Num to 3 settings (never, smart and always)
|
||||||
|
% for better control. Added parameter Pref.KeepNS for keeping or ignoring
|
||||||
|
% namespace data when reading. Fixed a bug related to writing 2D cell
|
||||||
|
% arrays brought up by Andrej Mosat's review.
|
||||||
|
% * 2008-09-11 - Resubmitting last upload - zip file is still old
|
||||||
|
% * 2009-02-26 - Small changes. More error handling. More robust in case of
|
||||||
|
% large binary objects. Added support for Base64 encoding/decoding of
|
||||||
|
% binary objects (using functions by Peter J. Acklam).
|
||||||
|
% * 2009-06-26 - changes to xml_read: added CellItem parameter to allow
|
||||||
|
% better control of reading files with 'item' notation (see comment by
|
||||||
|
% Shlomi); changed try-catch statements so xml_read would work for matlab
|
||||||
|
% versions prior to 7.5 (see Thomas Pilutti comment)
|
||||||
|
% * 2009-12-03 - added PreserveSpace parameter for controlling empty string
|
||||||
|
% handling as suggested by Sebastiaan. Fix suggested by Michael Murphy.
|
||||||
|
% Fixed number recognition code as suggested by Yuan Ren.
|
||||||
|
% * 2010-05-04 - implemented fixes suggested by Dylan Reynolds from Airbus.
|
||||||
|
% * 2010-07-28 - implemented support for 2D arrays of cells and structs
|
||||||
|
% suggested by Rodney Behn from MIT Lincoln Laboratory. Also attempted
|
||||||
|
% large scale cleanup of xml_write function
|
||||||
|
% * 2010-08-18 - minor extension to allow better handling of logical
|
||||||
|
% scalars and arrays and function handles suggested by Andreas Richter
|
||||||
|
% and others
|
||||||
|
% * 2010-09-20 - allow reading and writing of sparse matrices. Improve
|
||||||
|
% reading of 1D boolean arrays.
|
||||||
|
% * 2010-11-05 - Fix problem with empty cells reported by Richard Cotton;
|
||||||
|
% fixed issues with reading boolean arrays reported by Zohar Bar-Yehuda;
|
||||||
|
% Improved speed of base64 coding and decoding by switching to java based
|
||||||
|
% code.
|
||||||
|
%% Licence
|
||||||
|
% The package is distributed under BSD License
|
||||||
|
format compact; % viewing preference
|
||||||
|
clear variables;
|
||||||
|
type('license.txt')
|
||||||
|
|
||||||
|
%% Write XML file based on a Struct using "xml_write"
|
||||||
|
% Any MATLAB data struct can be saved to XML file.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Read XML file producing a Struct using "xml_read"
|
||||||
|
[tree treeName] = xml_read ('test.xml');
|
||||||
|
disp([treeName{1} ' ='])
|
||||||
|
gen_object_display(tree)
|
||||||
|
|
||||||
|
%% "Pref.XmlEngine" flag in "xml_write"
|
||||||
|
% Occasionally some operations are performed better by the Apache Xerces XML
|
||||||
|
% engine than by the default xmlread function. That is why xml_write provides an
|
||||||
|
% option for choosing the underlying XML engine. The code below performs the
|
||||||
|
% same operation as the previous section but using Apache Xerces XML engine.
|
||||||
|
% Notice that in this case name of root element
|
||||||
|
% was passed as variable and not extracted from the variable name.
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, 'TreeOfMine', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Writing Struct with different type MATLAB arrays
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.Empty = []; % Empty variable
|
||||||
|
MyTree.Num_1x1 = 13; % simple scalar
|
||||||
|
MyTree.Vec_1x3 = [1 2 3]; % horizontal vector
|
||||||
|
MyTree.Vec_4x1 = [1; 2; 3; 4]; % vertical vector
|
||||||
|
MyTree.Mat_2x2 = [1, 2; 3, 4]; % 2D matrix
|
||||||
|
MyTree.Cube_3D = reshape(1:8,[2 2 2]); % 3D array
|
||||||
|
MyTree.String1 = '[2003 10 30]'; % number string with [] brackets
|
||||||
|
MyTree.String2 = ' 2003 10 30 '; % number string without [] brackets
|
||||||
|
MyTree.Logical_1x1 = false; % single logical
|
||||||
|
MyTree.Logical_2x2 = [false, true; true, false]; % 2D matrix of logicals
|
||||||
|
MyTree.Logical_Str = 'False False True True';
|
||||||
|
MyTree.Int_2x2 = uint8([1 2;3 4]); % 2D matrix of uint8 integers
|
||||||
|
MyTree.Complex_1x1 = complex(1, 7); % complex scalar
|
||||||
|
MyTree.Complex_2x2 = complex([1 2;3 4],[2 2;7 7]); % 2D matrix of complex numbers
|
||||||
|
MyTree.Sparse_9x9 = sparse(1:9,1:9,1); % sparse 9x9 matrix
|
||||||
|
MyTree.Function = @sum; % function handle
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Read Struct with MATLAB arrays
|
||||||
|
% Notice that 'Cube_3D' did not preserve its original dimensions
|
||||||
|
[tree treeName] = xml_read ('test.xml');
|
||||||
|
disp([treeName{1} ' ='])
|
||||||
|
gen_object_display(tree)
|
||||||
|
|
||||||
|
%% "Pref.StructItem" flag in "xml_write" (controls 1D arrays of structs)
|
||||||
|
% *Create a simple structure with 1D array of struct's*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a(1).b = 'jack';
|
||||||
|
MyTree.a(2).b = 'john';
|
||||||
|
gen_object_display(MyTree)
|
||||||
|
%%
|
||||||
|
% *Write XML with "StructItem = true" (default). Notice single 'a'
|
||||||
|
% section and multiple 'item' sub-sections. Those subsections are used
|
||||||
|
% to store array elements*
|
||||||
|
wPref.StructItem = true;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
gen_object_display(xml_read ('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Write XML with "StructItem = false". Notice multiple 'a' sections*
|
||||||
|
wPref.StructItem = false;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
gen_object_display(xml_read ('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Notice that the xml_read function produced the same struct when reading both files*
|
||||||
|
%%
|
||||||
|
% *Potential problems with "StructItem = true":*
|
||||||
|
wPref.StructItem = true;
|
||||||
|
MyTree1 = []; MyTree1.a.b = 'jack';
|
||||||
|
MyTree2 = []; MyTree2.a(1).b = 'jack';
|
||||||
|
MyTree3 = []; MyTree3.a(2).b = 'jack';
|
||||||
|
xml_write('test.xml', MyTree1, [], wPref); type('test.xml');
|
||||||
|
xml_write('test.xml', MyTree2, [], wPref); type('test.xml');
|
||||||
|
xml_write('test.xml', MyTree3, [], wPref); type('test.xml');
|
||||||
|
%%
|
||||||
|
% *Notice that MyTree1 and MyTree2 produce identical files with no 'items',
|
||||||
|
% while MyTree2 and MyTree3 produce very different file structures. It was
|
||||||
|
% pointed out to me that files produced from MyTree2 and MyTree3 cannot
|
||||||
|
% belong to the same schema, which can be a problem. The solution is to use
|
||||||
|
% cells.*
|
||||||
|
wPref.CellItem = true;
|
||||||
|
wPref.NoCells = true;
|
||||||
|
MyTree2 = []; MyTree2.a{1}.b = 'jack';
|
||||||
|
MyTree3 = []; MyTree3.a{2}.b = 'jack';
|
||||||
|
xml_write('test.xml', MyTree2, [], wPref); type('test.xml');
|
||||||
|
xml_write('test.xml', MyTree3, [], wPref); type('test.xml');
|
||||||
|
|
||||||
|
|
||||||
|
%% "Pref.CellItem" flag in "xml_write" (controls 1D arrays of cells)
|
||||||
|
% *Create a simple structure with cell arrays*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a = {'jack', 'john'};
|
||||||
|
disp(MyTree)
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellItem = true" (default). Notice single 'a'
|
||||||
|
% section and multiple 'item' sections*
|
||||||
|
Pref=[]; Pref.CellItem = true;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n');
|
||||||
|
disp(xml_read ('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellItem = false". Notice multiple 'a' sections*
|
||||||
|
Pref=[]; Pref.CellItem = false;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n');
|
||||||
|
disp(xml_read ('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Notice that the xml_read function produced the same struct when reading both files*
|
||||||
|
|
||||||
|
%% "Pref.NoCells" flag in "xml_read"
|
||||||
|
% *Create a cell/struct mixture object*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a{1}.b = 'jack';
|
||||||
|
MyTree.a{2}.b = [];
|
||||||
|
MyTree.a{2}.c = 'john';
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
%%
|
||||||
|
% *Save it to xml file*
|
||||||
|
Pref=[]; Pref.CellItem = false;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.NoCells=true" (default) - output is quite different then input*
|
||||||
|
% By default the program tries to convert everything to structs and arrays
|
||||||
|
% of structs. For arrays of structs, all the structs in the array need to have the
|
||||||
|
% same fields; if they do not, MATLAB creates empty fields.
|
||||||
|
Pref=[]; Pref.NoCells=true;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.NoCells=false" - now input and output are the same*
|
||||||
|
% Cell arrays of structs allow structs in array to have different fields.
|
||||||
|
Pref=[]; Pref.NoCells=false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
|
||||||
|
%% "Pref.ItemName" flag in "xml_write" (customize 1D arrays of structs and cells)
|
||||||
|
% *Create a cell/struct mixture object*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a{1}.b = 'jack';
|
||||||
|
MyTree.a{2}.c = 'john';
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
%%
|
||||||
|
% *Save it to xml file, using 'item' notation but with different name*
|
||||||
|
Pref=[];
|
||||||
|
Pref.CellItem = true;
|
||||||
|
Pref.ItemName = 'MyItem';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.ItemName" flag in "xml_read"
|
||||||
|
% *Read above file with default settings ("Pref.ItemName = 'item'")*
|
||||||
|
% The results do not match the original structure
|
||||||
|
Pref=[]; Pref.NoCells = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.ItemName = 'MyItem'" - now saved and read
|
||||||
|
% MATLAB structures are the same*
|
||||||
|
Pref=[];
|
||||||
|
Pref.ItemName = 'MyItem';
|
||||||
|
Pref.NoCells = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
|
||||||
|
%% "Pref.CellItem" flag in "xml_read"
|
||||||
|
% "Pref.ItemName" is used to create xml files with clearly marked arrays
|
||||||
|
% "Pref.CellItem" flag in "xml_read" ensures that they are always read as
|
||||||
|
% arrays by forcing output to stay in cell format. In cell format s{1} is
|
||||||
|
% different than s, while s(1) is indistinguishable from s.
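%%
% *A minimal illustration of that difference (plain MATLAB, independent of this package):*
s = struct('b','jack');
isequal(s, s(1))          % true  - indexing a scalar struct returns the same struct
c = {struct('b','jack')};
isequal(c, c{1})          % false - c is a cell array, c{1} is the struct stored inside it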
|
||||||
|
%%
|
||||||
|
% *Create a test file*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a1{1}.b = 'jack'; % a1 - single struct
|
||||||
|
MyTree.a2{1}.b = 'jack'; % a2 - cell array of structs with the same fields
|
||||||
|
MyTree.a2{2}.b = 'john';
|
||||||
|
MyTree.a3{1}.b = 'jack'; % a3 - cell array of structs with the different fields
|
||||||
|
MyTree.a3{2}.c = 'john';
|
||||||
|
Pref=[];
|
||||||
|
Pref.CellItem = true;
|
||||||
|
Pref.Debug = true;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.CellItem = true" (default)*
|
||||||
|
% All outputs are in cell format
|
||||||
|
Pref=[];
|
||||||
|
Pref.NoCells = false; % allow cell output
|
||||||
|
Pref.CellItem = true; % keep 'item' arrays as cells
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.CellItem = false"*
|
||||||
|
% Output format is determined by the content
|
||||||
|
Pref=[];
|
||||||
|
Pref.NoCells = false; % allow cell output
|
||||||
|
Pref.CellItem = false; % allow 'item' arrays to behave like other fields
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.CellItem = false" and "Pref.NoCells = true"*
|
||||||
|
% All outputs are in struct format
|
||||||
|
Pref=[];
|
||||||
|
Pref.NoCells = true; % don't allow cell output
|
||||||
|
Pref.CellItem = false; % allow 'item' arrays to behave like other fields
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
|
||||||
|
%% "Pref.CellTable" flag in "xml_write" (controls 2D arrays of cells)
|
||||||
|
% *Create a structure with 2D arrays of cells*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.M = {[1,2;3,4], 'M12'; struct('a','jack'), {11, 'N12'; 21, 'N22'}};
|
||||||
|
gen_object_display(MyTree)
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellTable = 'Html" (default). This option mimics use of
|
||||||
|
% HTML "tr" and "td" tags to encode 2D tables. Tag names can
|
||||||
|
% be changed using TableName parameter (see below)*
|
||||||
|
wPref = [];
|
||||||
|
wPref.CellTable = 'Html';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
rPref=[]; rPref.NoCells=false;
|
||||||
|
gen_object_display(xml_read('test.xml', rPref))
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellTable = 'Vector'".*
|
||||||
|
% Converts 2D arrays to a 1D array with 'item' or regular notation. This option
|
||||||
|
% is mostly provided for backward compatibility since this was the
|
||||||
|
% behavior in prior versions of the code
|
||||||
|
wPref = [];
|
||||||
|
wPref.CellTable = 'Vector';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
rPref=[]; rPref.NoCells=false;
|
||||||
|
gen_object_display(xml_read('test.xml', rPref))
|
||||||
|
%%
|
||||||
|
% *Create a simpler structure without struct's*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.M = {[1,2;3,4], 'M12'; 'M21', {11, 'N12'; 21, 'N22'}};
|
||||||
|
gen_object_display(MyTree)
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellTable = 'Matlab". This option encodes tables
|
||||||
|
% consisting of numbers, strings and other cell arrays as MATLAB command
|
||||||
|
% string. Unlike 'Html' option it does not work if one of the cells is
|
||||||
|
% a struct*
|
||||||
|
wPref = [];
|
||||||
|
wPref.CellTable = 'Matlab';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
rPref=[]; rPref.NoCells=false;
|
||||||
|
gen_object_display(xml_read('test.xml', rPref))
|
||||||
|
|
||||||
|
%% Write 2D cell array in HTML format
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.table.ATTRIBUTE.border=1;
|
||||||
|
MyTree.table.CONTENT = {'Apples', '44%'; 'Bananas', '23%'; 'Oranges', '13%'; 'Other', '10%'};
|
||||||
|
xml_write('html/test.html', MyTree);
|
||||||
|
type('html/test.html')
|
||||||
|
%%
|
||||||
|
% Click on <test.html> to open this file in a web browser
|
||||||
|
|
||||||
|
%% "Pref.StructTable" flag in "xml_write" (controls 2D arrays of structs)
|
||||||
|
% *Create a simple structure with arrays of struct's*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.a(1,1).b = 'jack';
|
||||||
|
MyTree.a(1,2).b = 'john';
|
||||||
|
MyTree.a(2,1).b = 'jim';
|
||||||
|
MyTree.a(2,2).b = 'jill';
|
||||||
|
gen_object_display(MyTree)
|
||||||
|
%%
|
||||||
|
% *Write XML with "StructTable = 'Html" (default). This option mimics use of
|
||||||
|
% HTML "tr" and "td" tags to encode 2D tables. Tag names can
|
||||||
|
% be changed using TableName parameter (see below)*
|
||||||
|
wPref = [];
|
||||||
|
wPref.StructTable = 'Html';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
gen_object_display(xml_read ('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Write XML with "CellTable = 'Vector'".*
|
||||||
|
% Converts 2D arrays to a 1D array with 'item' or regular notation. This option
|
||||||
|
% is mostly provided for backward compatibility since this was the
|
||||||
|
% behavior in prior versions of the code
|
||||||
|
wPref = [];
|
||||||
|
wPref.StructTable = 'Vector';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',wPref);
|
||||||
|
type('test.xml')
|
||||||
|
fprintf('\nxml_read output:\n')
|
||||||
|
gen_object_display(xml_read ('test.xml'))
|
||||||
|
|
||||||
|
%% "Pref.TableName" flag in "xml_write" (controls encoding tags used for 2D arrays)
|
||||||
|
% *Create a cell object*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.M = {[1,2;3,4], 'M12'; 21, {11, 'N12'; 21, 'N22'}};
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
%%
|
||||||
|
% *Save it to xml file, using 'Html' notation but with different names for
|
||||||
|
% rows and cells*
|
||||||
|
Pref=[]; Pref.TableName = {'row','cell'};
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree',Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.TableName" flag in "xml_read"
|
||||||
|
% *Read above file with default settings ("Pref.TableName = {'tr','td'}")*
|
||||||
|
% The results do not match the original structure
|
||||||
|
Pref=[]; Pref.NoCells = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.TableName = {'row','cell'}" - now saved and read
|
||||||
|
% MATLAB structures are the same*
|
||||||
|
Pref=[];
|
||||||
|
Pref.TableName = {'row','cell'};
|
||||||
|
Pref.NoCells = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
|
||||||
|
%% "Pref.Str2Num" flag in xml_read (control conversion to numbers while reading)
|
||||||
|
% *Create a cell/struct mixture object*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.str = 'sphere';
|
||||||
|
MyTree.num1 = 123;
|
||||||
|
MyTree.num2 = '123';
|
||||||
|
MyTree.num3 = '[Inf,NaN]';
|
||||||
|
MyTree.calc = '1+2+3+4';
|
||||||
|
MyTree.func = 'sin(pi)/2';
|
||||||
|
MyTree.String1 = '[2003 10 30]';
|
||||||
|
MyTree.String2 = '2003 10 30'; % array resembling date
|
||||||
|
MyTree.ISO8601 = '2003-10-30'; % date in ISO 8601 format
|
||||||
|
MyTree.US_date = '2003/10/30'; % US style date format
|
||||||
|
MyTree.complex = '2003i-10e-30'; % complex number resembling a date
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
%%
|
||||||
|
% *Save it to xml file*
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Read above file with default settings*
|
||||||
|
% ("Pref.Str2Num = true" or "Pref.Str2Num = 'smart'"). Under this setting all
|
||||||
|
% strings that look like numbers are converted to numbers, except for
|
||||||
|
% strings that are recognized by MATLAB 'datenum' function as dates
|
||||||
|
gen_object_display(xml_read('test.xml'))
|
||||||
|
%%
|
||||||
|
% *Note that all the fields of 'MyTree' can be converted to numbers (even
|
||||||
|
% 'sphere') but by default the function is trying to 'judge' if a string
|
||||||
|
% should be converted to a number or not*
|
||||||
|
MyCell = {'sphere','1+2+3+4','sin(pi)/2','2003 10 30','2003-10-30','2003/10/30','2003i-10e-30'};
|
||||||
|
cellfun(@str2num, MyCell, 'UniformOutput', false)
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.Str2Num = false" or "Pref.Str2Num = 'never'"
|
||||||
|
% to keep all the fields in string format*
|
||||||
|
Pref=[]; Pref.Str2Num = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.Str2Num = always"
|
||||||
|
% to convert all strings that look like numbers to numbers*. Note the likely
|
||||||
|
% unintended conversion of 'ISO8601'.
|
||||||
|
Pref=[]; Pref.Str2Num = 'always';
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Notice that all three settings will produce the same output for "num1" and
|
||||||
|
% "num2" and there is no way to reproduce the original "MyTree" structure.*
|
||||||
|
|
||||||
|
%% "Pref.PreserveSpace" flag in xml_write (control handling of strings with leading/trailing spaces)
|
||||||
|
% *Create a struct with strings*
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.Empty = '';
|
||||||
|
MyTree.OneSpace = ' ';
|
||||||
|
MyTree.TwoSpaces = ' ';
|
||||||
|
MyTree.String1 = ' Hello World ';
|
||||||
|
%%
|
||||||
|
% *Write XML with "PreserveSpace = false" (default).*
|
||||||
|
Pref=[]; Pref.PreserveSpace = false; % (default setting)
|
||||||
|
xml_write('test.xml', MyTree, [], Pref);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Write XML with "PreserveSpace = true".*
|
||||||
|
Pref=[]; Pref.PreserveSpace = true;
|
||||||
|
xml_write('test.xml', MyTree, [], Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.PreserveSpace" flag in xml_read
|
||||||
|
% *Read file while using "PreserveSpace = false" (default).*
|
||||||
|
Pref=[]; Pref.PreserveSpace = false; % (default setting)
|
||||||
|
gen_object_display(xml_read('test.xml',Pref))
|
||||||
|
%%
|
||||||
|
% *Read file while using "PreserveSpace = true".*
|
||||||
|
Pref=[]; Pref.PreserveSpace = true;
|
||||||
|
gen_object_display(xml_read('test.xml',Pref))
|
||||||
|
|
||||||
|
|
||||||
|
%% Write XML files with ATTRIBUTEs
|
||||||
|
% In order to add node attributes a special ATTRIBUTE field is used.
|
||||||
|
% ATTRIBUTEs have to be of simple types like numbers or strings (not
|
||||||
|
% structs or cells). Attributes are easy to attach to struct nodes like
|
||||||
|
% MyTree below.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World'; % simple case
|
||||||
|
MyTree.ATTRIBUTE.Num = 2;
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% In case when one needs to attach attributes to nodes which are not
|
||||||
|
% structs (for example strings, numbers or cells) then a special CONTENT
|
||||||
|
% field needs to be used to make the node a struct node.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World'; % simple case
|
||||||
|
MyTree.MyString.ATTRIBUTE.Num = 2;
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.Str2Num" flag in file with ATTRIBUTEs
|
||||||
|
% *Create a cell/struct mixture object*
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.X.ATTRIBUTE.str = 'sphere';
|
||||||
|
MyTree.X.ATTRIBUTE.num1 = 123;
|
||||||
|
MyTree.X.ATTRIBUTE.num2 = '123';
|
||||||
|
MyTree.X.ATTRIBUTE.num3 = '[Inf,NaN]';
|
||||||
|
MyTree.X.ATTRIBUTE.calc = '1+2+3+4';
|
||||||
|
MyTree.X.ATTRIBUTE.func = 'sin(pi)/2';
|
||||||
|
MyTree.X.ATTRIBUTE.String1 = '[2003 10 30]';
|
||||||
|
MyTree.X.ATTRIBUTE.String2 = '2003 10 30'; % array resembling date
|
||||||
|
MyTree.X.ATTRIBUTE.ISO8601 = '2003-10-30'; % date in ISO 8601 format
|
||||||
|
MyTree.X.ATTRIBUTE.US_date = '2003/10/30'; % US style date format
|
||||||
|
MyTree.X.ATTRIBUTE.complex = '2003i-10e-30'; % complex number resembling a date
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
%%
|
||||||
|
% *Save it to xml file*
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Read above file with default settings*
|
||||||
|
% ("Pref.Str2Num = true" or "Pref.Str2Num = 'smart'"). Under this setting all
|
||||||
|
% strings that look like numbers are converted to numbers, except for
|
||||||
|
% strings that are recognized by MATLAB 'datenum' function as dates
|
||||||
|
gen_object_display(xml_read('test.xml'))
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.Str2Num = false" or "Pref.Str2Num = 'never'"
|
||||||
|
% to keep all the fields in string format*
|
||||||
|
Pref=[]; Pref.Str2Num = false;
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Read above file with "Pref.Str2Num = always"
|
||||||
|
% to convert all strings that look like numbers to numbers*
|
||||||
|
Pref=[]; Pref.Str2Num = 'always';
|
||||||
|
gen_object_display(xml_read('test.xml', Pref))
|
||||||
|
%%
|
||||||
|
% *Notice that all three settings will produce the same output for "num1" and
|
||||||
|
% "num2" and there is no way to reproduce the original "MyTree" structure.*
|
||||||
|
|
||||||
|
|
||||||
|
%% Write XML files with COMMENTs
|
||||||
|
% Insertion of Comments is done with help of special COMMENT field.
|
||||||
|
% Note that MATLAB's xmlwrite output is less readable due to the lack of end-of-line
|
||||||
|
% characters around comment section.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.COMMENT = 'This is a comment';
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
% gives the same result
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Comments in XML top level (method #1)*
|
||||||
|
% This method uses cell array
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree, {'MyTree', [], 'This is a global comment'});
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
% gives even nicer results.
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, {'MyTree', [], 'This is a global comment'}, Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Comments in XML top level (method #2)*
|
||||||
|
% This method adds an extra top layer to the struct 'tree' and sets
|
||||||
|
% "Pref.RootOnly = false", which informs the function about the extra
|
||||||
|
% layer. Notice that RootName is also saved as a part of
|
||||||
|
% the 'tree', and does not have to be passed in separately.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.COMMENT = 'This is a global comment';
|
||||||
|
MyTree.MyTest.MyNumber = 13;
|
||||||
|
MyTree.MyTest.MyString = 'Hello World';
|
||||||
|
Pref=[]; Pref.RootOnly = false;
|
||||||
|
xml_write('test.xml', MyTree, [], Pref);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
Pref.RootOnly = false;
|
||||||
|
xml_write('test.xml', MyTree, [], Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Write XML files with PROCESSING_INSTRUCTIONs
|
||||||
|
% Insertion of Processing Instructions is done through use of special
|
||||||
|
% PROCESSING_INSTRUCTION field, which stores the instruction string. The
|
||||||
|
% string has to be in 'target data' format separated by space.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.PROCESSING_INSTRUCTION = 'xml-stylesheet type="a" href="foo"';
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *PROCESSING_INSTRUCTIONs in XML top level (method #1)*
|
||||||
|
% This method uses cell array
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree, {'MyTree', 'xml-stylesheet type="a" href="foo"'});
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, {'MyTree', 'xml-stylesheet type="a" href="foo"'}, Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *PROCESSING_INSTRUCTIONs in XML top level (method #2)*
|
||||||
|
% This method adds an extra top layer to the struct 'tree' and sets
|
||||||
|
% pref.RootOnly=false, which informs the function about the extra
|
||||||
|
% layer. Notice that RootName is also saved as a part of
|
||||||
|
% the 'tree', and does not have to be passed in separately.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.PROCESSING_INSTRUCTION = 'xml-stylesheet type="a" href="foo"';
|
||||||
|
MyTree.MyTest.MyNumber = 13;
|
||||||
|
MyTree.MyTest.MyString = 'Hello World';
|
||||||
|
Pref=[]; Pref.RootOnly = false;
|
||||||
|
xml_write('test.xml', MyTree, [], Pref);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
Pref.RootOnly = false;
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Write XML files with CDATA Sections
|
||||||
|
% "In an XML document a CDATA (Character DATA) section is a section of
|
||||||
|
% element content that is marked for the parser to interpret as only
|
||||||
|
% character data, not markup." (from Wikipedia)
|
||||||
|
% To insert CDATA Sections one uses the special CDATA_SECTION field,
|
||||||
|
% which stores the CDATA string. Note that MATLAB's xmlwrite created
|
||||||
|
% wrong xml code for CDATA section
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.CDATA_SECTION = '<A>txt</A>';
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine produces correct results*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Write XML files with special characters in TAG names
|
||||||
|
% The input to xml_write requires that all tags one wants in the XML document
|
||||||
|
% be encoded as field names of MATLAB structs. MATLAB has a lot
|
||||||
|
% of restrictions on variable names. This section is about XML tags with
|
||||||
|
% names not allowed as MATLAB variables, or more specifically with
|
||||||
|
% characters allowed as xml tag names but not allowed as MATLAB variable
|
||||||
|
% names. Characters like that can be replaced by their hexadecimal
|
||||||
|
% representation, just as it is done by the genvarname function. An alternative way
|
||||||
|
% of writing the first example is:
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.('MyNumber') = 13; % same as MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World';
|
||||||
|
MyTree.MyString.ATTRIBUTE.('Num') = 2; % same as MyTree.MyString.ATTRIBUTE.Num = 2;
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *This approach fails for some characters like dash '-', colon ':', and
|
||||||
|
% international characters.*
|
||||||
|
MyTree=[];
|
||||||
|
try
|
||||||
|
MyTree.('My-Number') = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World';
|
||||||
|
MyTree.MyString.ATTRIBUTE.('Num_ö') = 2;
|
||||||
|
catch %#ok<CTCH>
|
||||||
|
err = lasterror; %#ok<LERR>
|
||||||
|
disp(err.message);
|
||||||
|
end
|
||||||
|
|
||||||
|
%%
|
||||||
|
% It can be overcome by replacing offending characters with their
|
||||||
|
% hexadecimal representation. That can be done manually or with the use of the
|
||||||
|
% genvarname function. Note that MATLAB's 'type' function does not show
|
||||||
|
% the 'ö' letter correctly in the xml file, but opening the file in an editor shows
|
||||||
|
% that it is correct.
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.(genvarname('My-Number')) = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World';
|
||||||
|
MyTree.MyString.ATTRIBUTE.Num_0xF6 = 2;
|
||||||
|
gen_object_display(MyTree);
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%%
|
||||||
|
% *Also two of the characters, '-' and ':', can be encoded by special strings:
|
||||||
|
% '_DASH_' and '_COLON_' respectively*
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.My_DASH_Number = 13;
|
||||||
|
MyTree.MyString.CONTENT = 'Hello World';
|
||||||
|
MyTree.MyString.ATTRIBUTE.Num0xF6 = 2;
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Write XML files with Namespaces
|
||||||
|
% No extra special fields are needed to define XML namespaces, only the colon
|
||||||
|
% character written using '0x3A' or '_COLON_'. Below is an
|
||||||
|
% example of a namespace definition
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.f_COLON_child.ATTRIBUTE.xmlns_COLON_f = 'http://www.foo.com';
|
||||||
|
MyTree.f_COLON_child.f_COLON_MyNumber = 13;
|
||||||
|
MyTree.f_COLON_child.f_COLON_MyString = 'Hello World';
|
||||||
|
xml_write('test.xml', MyTree, 'MyTree');
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Same operation using Apache Xerces XML engine*
|
||||||
|
Pref=[]; Pref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
xml_write('test.xml', MyTree, 'f_COLON_MyTree', Pref);
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.KeepNS" flag in "xml_read"
|
||||||
|
% This option allows keeping or excluding namespaces in tag names.
|
||||||
|
% By default the namespace data is kept but it produces much longer field
|
||||||
|
% names in the output structure. Ignoring namespace will produce more
|
||||||
|
% readable output.
|
||||||
|
% Perform default read of file with namespace
|
||||||
|
tree = xml_read('test.xml');
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%%
|
||||||
|
% Now the same operation with KeepNS = false.
|
||||||
|
Pref=[]; Pref.KeepNS = false; % ignore namespace prefixes in tag names
|
||||||
|
tree = xml_read('test.xml', Pref);
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%% Read XML files with special node types
|
||||||
|
% Display and read the file, then show the data structure. Note that
|
||||||
|
% MATLAB 'type' function shows 'ö' letter incorrectly as 'A¶' in xml file,
|
||||||
|
% but opening the file in editor shows that it is correct.
|
||||||
|
fprintf('Test xml file:\n');
|
||||||
|
type('test_file.xml')
|
||||||
|
%%
|
||||||
|
% Read only the Root Element (default)
|
||||||
|
[tree GlobalTextNodes] = xml_read('test_file.xml');
|
||||||
|
fprintf('Global Data (Root name, Global Processing Instructions and Global Comments):\n');
|
||||||
|
disp(GlobalTextNodes')
|
||||||
|
fprintf('\nStructure read from the file (includes COMMENT and CDATA sections):\n');
|
||||||
|
gen_object_display(tree);
|
||||||
|
%%
|
||||||
|
% Read the whole tree including global Comments and Processing Instructions
|
||||||
|
Pref=[]; Pref.RootOnly = false;
|
||||||
|
[tree GlobalTextNodes] = xml_read('test_file.xml', Pref);
|
||||||
|
fprintf('Global Data (Root name, Global Processing Instructions and Global Comments):\n');
|
||||||
|
disp(GlobalTextNodes')
|
||||||
|
fprintf('\nStructure read from the file (includes COMMENT and CDATA sections):\n');
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%% "Pref.ReadAttr" flag in "xml_read" (control handling of nodes with attributes)
|
||||||
|
% This option allows exclusion of attributes
|
||||||
|
Pref=[]; Pref.ReadAttr = false; % do not read attributes
|
||||||
|
tree = xml_read('test_file.xml', Pref);
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%% "Pref.ReadSpec" flag in "xml_read"
|
||||||
|
% This option allows exclusion of special nodes, such as
|
||||||
|
% comments, processing instructions, CData sections, etc.
|
||||||
|
Pref=[]; Pref.ReadSpec = false; % do not read special node types
|
||||||
|
tree = xml_read('test_file.xml', Pref);
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%% "Pref.RootOnly" flag in "xml_read"
|
||||||
|
% As it was shown in previous examples RootOnly parameter can be used to
|
||||||
|
% capture global (top level) special nodes (like COMMENTs and
|
||||||
|
% PROCESSING_INSTRUCTIONs) which are ignored by default
|
||||||
|
Pref=[]; Pref.RootOnly = false; % read the whole document, not just the root element
|
||||||
|
tree = xml_read('test_file.xml', Pref);
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
%% "Pref.RootOnly" flag in "xml_write"
|
||||||
|
% Writing previously read tree with default "Pref.RootOnly = true" gives
|
||||||
|
% wrong output file
|
||||||
|
Pref=[]; Pref.RootOnly = true; % treat 'tree' as the root element only
|
||||||
|
xml_write('test.xml', tree, [], Pref);
|
||||||
|
fprintf('Test xml file:\n');
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% Writing the same tree with "Pref.RootOnly = false" gives correct output
|
||||||
|
Pref=[]; Pref.RootOnly = false; % 'tree' represents the whole document, including global nodes
|
||||||
|
xml_write('test.xml', tree, [], Pref);
|
||||||
|
fprintf('Test xml file:\n');
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% "Pref.NumLevels" flag in "xml_read"
|
||||||
|
% This parameter allows user to skip parts of the tree in order to save
|
||||||
|
% time and memory. Useful only in the rare case when a small portion of a
|
||||||
|
% large XML file is needed.
|
||||||
|
%
|
||||||
|
% Create test file
|
||||||
|
MyTree = [];
|
||||||
|
MyTree.Level1 = 1;
|
||||||
|
MyTree.Level1_.Level2 = 2;
|
||||||
|
MyTree.Level1_.Level2_.Level3 = 3;
|
||||||
|
MyTree.Level1_.Level2_.Level3_.Level4 = 4;
|
||||||
|
xml_write('test.xml', MyTree);
|
||||||
|
fprintf('Test xml file:\n');
|
||||||
|
type('test.xml')
|
||||||
|
%%
|
||||||
|
% *Use Default ("Pref.NumLevels = infinity") setting*
|
||||||
|
tree = xml_read('test.xml');
|
||||||
|
gen_object_display(tree);
|
||||||
|
%%
|
||||||
|
% *Limit the read to only 2 levels*
|
||||||
|
Pref=[]; Pref.NumLevels = 2;
|
||||||
|
tree = xml_read('test.xml', Pref);
|
||||||
|
gen_object_display(tree);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
%% Create DOM object based on a Struct using "xml_write"
|
||||||
|
% *Create Struct tree*
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.MyNumber = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
%%
|
||||||
|
% *Convert Struct to DOM object using xml_write*
|
||||||
|
DOM = xml_write([], MyTree);
|
||||||
|
xmlwrite('test.xml', DOM); % Save DOM object using MATLAB function
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Convert DOM object to Struct using "xml_read"
|
||||||
|
DOM = xmlread('test.xml'); % Read DOM object using MATLAB function
|
||||||
|
[tree treeName] = xml_read(DOM); % Convert DOM object to Struct
|
||||||
|
disp([treeName{1} ' ='])
|
||||||
|
gen_object_display(tree)
|
||||||
|
|
||||||
|
%% Write XML file based on a DOM using "xmlwrite_xerces"
|
||||||
|
xmlwrite_xerces('test.xml', DOM); % Save DOM object using Xerces library
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Write XML to string instead of a file
|
||||||
|
DOM = xml_write([], MyTree);
|
||||||
|
str = xmlwrite(DOM);
|
||||||
|
disp(str)
|
||||||
|
|
||||||
|
%% Write XML file with embedded binary data encoded as Base64 (using java version)
|
||||||
|
fid = fopen('football.jpg', 'rb');
|
||||||
|
raw1 = uint8(fread(fid, 'uint8')); % read image file as a raw binary
|
||||||
|
fclose(fid);
|
||||||
|
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.Size = 13;
|
||||||
|
MyTree.MyString = 'Hello World'; % simple case
|
||||||
|
MyTree.MyImage.ATTRIBUTE.EncodingMIMEType = 'base64';
|
||||||
|
MyTree.MyImage.CONTENT = base64encode(raw1,'java');% perform base64 encoding of the binary data
|
||||||
|
xml_write('test.xml', MyTree); % write xml file
|
||||||
|
|
||||||
|
%% Read XML file with embedded binary data encoded as Base64 (using java version)
|
||||||
|
tree = xml_read('test.xml', Pref); % read xml file
|
||||||
|
raw = base64decode(tree.MyImage.CONTENT, '', 'java'); % convert xml image to raw binary
|
||||||
|
fid = fopen('MyFootball.jpg', 'wb');
|
||||||
|
fwrite(fid, raw, 'uint8'); % dump the raw binary to disk
|
||||||
|
fclose(fid);
|
||||||
|
I = imread('MyFootball.jpg'); % read it as an image
|
||||||
|
imshow(I);
|
||||||
|
|
||||||
|
%% Write XML file with embedded binary data encoded as Base64 (simpler version using only matlab code)
|
||||||
|
% Notice that the process of writing to xml strips all end-of-line characters
|
||||||
|
% from base64 code.
|
||||||
|
isChunked = true; % break into chunks 76 characters long
|
||||||
|
url_safe = true; % 'base64url' encoding
|
||||||
|
code = base64encode('license.txt', 'matlab', isChunked, url_safe);
|
||||||
|
disp(code)
|
||||||
|
MyTree=[];
|
||||||
|
MyTree.Size = 13;
|
||||||
|
MyTree.MyString = 'Hello World';
|
||||||
|
MyTree.MyImage.ATTRIBUTE.EncodingMIMEType = 'base64';
|
||||||
|
MyTree.MyImage.CONTENT = code; % the data was already base64 encoded above
|
||||||
|
xml_write('test.xml', MyTree); % write xml file
|
||||||
|
type('test.xml')
|
||||||
|
|
||||||
|
%% Read XML file with embedded binary data encoded as Base64 (simpler version using only matlab code)
|
||||||
|
tree = xml_read('test.xml', Pref); % read xml file
|
||||||
|
base64decode(tree.MyImage.CONTENT, 'license2.txt', 'matlab'); % save xml image as raw binary
|
||||||
|
type('license2.txt')
|
|
@@ -0,0 +1,447 @@
|
||||||
|
function DOMnode = xml_write(filename, tree, RootName, Pref)
|
||||||
|
%XML_WRITE Writes Matlab data structures to XML file
|
||||||
|
%
|
||||||
|
% DESCRIPTION
|
||||||
|
% xml_write( filename, tree) Converts Matlab data structure 'tree' containing
|
||||||
|
% cells, structs, numbers and strings to Document Object Model (DOM) node
|
||||||
|
% tree, then saves it to XML file 'filename' using Matlab's xmlwrite
|
||||||
|
% function. Optionally one can also use alternative version of xmlwrite
|
||||||
|
% function which directly calls JAVA functions for XML writing without
|
||||||
|
% MATLAB middleware. This function is provided as a patch to existing
|
||||||
|
% bugs in xmlwrite (in R2006b).
|
||||||
|
%
|
||||||
|
% xml_write(filename, tree, RootName, Pref) allows you to specify
|
||||||
|
% additional preferences about file format
|
||||||
|
%
|
||||||
|
% DOMnode = xml_write([], tree) same as above except that DOM node is
|
||||||
|
% not saved to the file but returned.
|
||||||
|
%
|
||||||
|
% INPUT
|
||||||
|
% filename file name
|
||||||
|
% tree Matlab structure tree to store in xml file.
|
||||||
|
% RootName String with XML tag name used for root (top level) node
|
||||||
|
% Optionally it can be a string cell array storing: Name of
|
||||||
|
% root node, document "Processing Instructions" data and
|
||||||
|
% document "comment" string
|
||||||
|
% Pref Other preferences:
|
||||||
|
% Pref.ItemName - default 'item' - name of a special tag used to
|
||||||
|
% itemize cell or struct arrays
|
||||||
|
% Pref.XmlEngine - let you choose the XML engine. Currently default is
|
||||||
|
% 'Xerces', which is using directly the apache xerces java file.
|
||||||
|
% Other option is 'Matlab' which uses MATLAB's xmlwrite and its
|
||||||
|
% XMLUtils java file. Both options create identical results except in
|
||||||
|
% case of CDATA sections where xmlwrite fails.
|
||||||
|
% Pref.CellItem - default 'true' - allow cell arrays to use 'item'
|
||||||
|
% notation. See below.
|
||||||
|
% Pref.RootOnly - default true - output variable 'tree' corresponds to
|
||||||
|
%      xml file root element, otherwise it corresponds to the whole file.
|
||||||
|
% Pref.StructItem - default 'true' - allow arrays of structs to use
|
||||||
|
% 'item' notation. For example "Pref.StructItem = true" gives:
|
||||||
|
% <a>
|
||||||
|
% <b>
|
||||||
|
%         <item> ... </item>
|
||||||
|
%         <item> ... </item>
|
||||||
|
%       </b>
|
||||||
|
%     </a>
|
||||||
|
% while "Pref.StructItem = false" gives:
|
||||||
|
% <a>
|
||||||
|
%       <b> ... </b>
|
||||||
|
%       <b> ... </b>
|
||||||
|
%     </a>
|
||||||
|
%
|
||||||
|
%
|
||||||
|
% Several special xml node types can be created if special tags are used
|
||||||
|
% for field names of 'tree' nodes:
|
||||||
|
% - node.CONTENT - stores data section of the node if other fields
|
||||||
|
%     (usually ATTRIBUTE) are present. Usually the data section is stored
|
||||||
|
% directly in 'node'.
|
||||||
|
% - node.ATTRIBUTE.name - stores node's attribute called 'name'.
|
||||||
|
% - node.COMMENT - create comment child node from the string. For global
|
||||||
|
% comments see "RootName" input variable.
|
||||||
|
% - node.PROCESSING_INSTRUCTIONS - create "processing instruction" child
|
||||||
|
% node from the string. For global "processing instructions" see
|
||||||
|
% "RootName" input variable.
|
||||||
|
% - node.CDATA_SECTION - stores node's CDATA section (string). Only works
|
||||||
|
% if Pref.XmlEngine='Xerces'. For more info, see comments of F_xmlwrite.
|
||||||
|
% - other special node types like: document fragment nodes, document type
|
||||||
|
% nodes, entity nodes and notation nodes are not being handled by
|
||||||
|
% 'xml_write' at the moment.
|
||||||
|
%
|
||||||
|
% OUTPUT
|
||||||
|
% DOMnode Document Object Model (DOM) node tree in the format
|
||||||
|
% required as input to xmlwrite. (optional)
|
||||||
|
%
|
||||||
|
% EXAMPLES:
|
||||||
|
% MyTree=[];
|
||||||
|
% MyTree.MyNumber = 13;
|
||||||
|
% MyTree.MyString = 'Hello World';
|
||||||
|
% xml_write('test.xml', MyTree);
|
||||||
|
% type('test.xml')
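%
%   A slightly richer sketch (field and attribute values here are only
%   illustrative) using the special ATTRIBUTE and CONTENT fields and a
%   cell-array RootName that adds a global processing instruction:
%   MyTree=[];
%   MyTree.MyString.CONTENT = 'Hello World';
%   MyTree.MyString.ATTRIBUTE.Lang = 'en';
%   xml_write('test.xml', MyTree, ...
%      {'MyTree', 'xml-stylesheet type="text/css" href="myStyleSheet.css"'});
%   type('test.xml')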
|
||||||
|
% %See also xml_tutorial.m
|
||||||
|
%
|
||||||
|
% See also
|
||||||
|
% xml_read, xmlread, xmlwrite
|
||||||
|
%
|
||||||
|
% Written by Jarek Tuszynski, SAIC, jaroslaw.w.tuszynski_at_saic.com
|
||||||
|
|
||||||
|
%% Check Matlab Version
|
||||||
|
v = ver('MATLAB');
|
||||||
|
v = str2double(regexp(v.Version, '\d+\.\d+','match','once'));
|
||||||
|
if (v<7)
|
||||||
|
error('Your MATLAB version is too old. You need version 7.0 or newer.');
|
||||||
|
end
|
||||||
|
|
||||||
|
%% default preferences
|
||||||
|
DPref.TableName = {'tr','td'}; % name of a special tags used to itemize 2D cell arrays
|
||||||
|
DPref.ItemName = 'item'; % name of a special tag used to itemize 1D cell arrays
|
||||||
|
DPref.StructItem = true; % allow arrays of structs to use 'item' notation
|
||||||
|
DPref.CellItem = true; % allow cell arrays to use 'item' notation
|
||||||
|
DPref.StructTable= 'Html';
|
||||||
|
DPref.CellTable = 'Html';
|
||||||
|
DPref.XmlEngine = 'Matlab'; % use matlab provided XMLUtils
|
||||||
|
%DPref.XmlEngine = 'Xerces'; % use Xerces xml generator directly
|
||||||
|
DPref.PreserveSpace = false; % Preserve or delete spaces at the beginning and the end of strings?
|
||||||
|
RootOnly = true; % Input is root node only
|
||||||
|
GlobalProcInst = [];
|
||||||
|
GlobalComment = [];
|
||||||
|
GlobalDocType = [];
|
||||||
|
|
||||||
|
%% read user preferences
|
||||||
|
if (nargin>3)
|
||||||
|
if (isfield(Pref, 'TableName' )), DPref.TableName = Pref.TableName; end
|
||||||
|
if (isfield(Pref, 'ItemName' )), DPref.ItemName = Pref.ItemName; end
|
||||||
|
if (isfield(Pref, 'StructItem')), DPref.StructItem = Pref.StructItem; end
|
||||||
|
if (isfield(Pref, 'CellItem' )), DPref.CellItem = Pref.CellItem; end
|
||||||
|
if (isfield(Pref, 'CellTable')), DPref.CellTable = Pref.CellTable; end
|
||||||
|
if (isfield(Pref, 'StructTable')), DPref.StructTable= Pref.StructTable; end
|
||||||
|
if (isfield(Pref, 'XmlEngine' )), DPref.XmlEngine = Pref.XmlEngine; end
|
||||||
|
if (isfield(Pref, 'RootOnly' )), RootOnly = Pref.RootOnly; end
|
||||||
|
if (isfield(Pref, 'PreserveSpace')), DPref.PreserveSpace = Pref.PreserveSpace; end
|
||||||
|
end
|
||||||
|
if (nargin<3 || isempty(RootName)), RootName=inputname(2); end
|
||||||
|
if (isempty(RootName)), RootName='ROOT'; end
|
||||||
|
if (iscell(RootName)) % RootName also stores global text node data
|
||||||
|
rName = RootName;
|
||||||
|
RootName = char(rName{1});
|
||||||
|
if (length(rName)>1), GlobalProcInst = char(rName{2}); end
|
||||||
|
if (length(rName)>2), GlobalComment = char(rName{3}); end
|
||||||
|
if (length(rName)>3), GlobalDocType = char(rName{4}); end
|
||||||
|
end
|
||||||
|
if(~RootOnly && isstruct(tree)) % if struct then deal with each field separately
|
||||||
|
fields = fieldnames(tree);
|
||||||
|
for i=1:length(fields)
|
||||||
|
field = fields{i};
|
||||||
|
x = tree(1).(field);
|
||||||
|
if (strcmp(field, 'COMMENT'))
|
||||||
|
GlobalComment = x;
|
||||||
|
elseif (strcmp(field, 'PROCESSING_INSTRUCTION'))
|
||||||
|
GlobalProcInst = x;
|
||||||
|
elseif (strcmp(field, 'DOCUMENT_TYPE'))
|
||||||
|
GlobalDocType = x;
|
||||||
|
else
|
||||||
|
RootName = field;
|
||||||
|
t = x;
|
||||||
|
end
|
||||||
|
end
|
||||||
|
tree = t;
|
||||||
|
end
|
||||||
|
|
||||||
|
%% Initialize java object that will store the xml data structure
|
||||||
|
RootName = varName2str(RootName);
|
||||||
|
if (~isempty(GlobalDocType))
|
||||||
|
% n = strfind(GlobalDocType, ' ');
|
||||||
|
% if (~isempty(n))
|
||||||
|
% dtype = com.mathworks.xml.XMLUtils.createDocumentType(GlobalDocType);
|
||||||
|
% end
|
||||||
|
% DOMnode = com.mathworks.xml.XMLUtils.createDocument(RootName, dtype);
|
||||||
|
warning('xml_io_tools:write:docType', ...
|
||||||
|
'DOCUMENT_TYPE node was encountered which is not supported yet. Ignoring.');
|
||||||
|
end
|
||||||
|
DOMnode = com.mathworks.xml.XMLUtils.createDocument(RootName);
|
||||||
|
|
||||||
|
|
||||||
|
%% Use recursive function to convert matlab data structure to XML
|
||||||
|
root = DOMnode.getDocumentElement;
|
||||||
|
struct2DOMnode(DOMnode, root, tree, DPref.ItemName, DPref);
|
||||||
|
|
||||||
|
%% If the root has exactly one child, remove it and move its children and attributes up to the root
|
||||||
|
root = DOMnode.getDocumentElement;
|
||||||
|
Child = root.getChildNodes; % create array of children nodes
|
||||||
|
nChild = Child.getLength; % number of children
|
||||||
|
if (nChild==1)
|
||||||
|
node = root.removeChild(root.getFirstChild);
|
||||||
|
while(node.hasChildNodes)
|
||||||
|
root.appendChild(node.removeChild(node.getFirstChild));
|
||||||
|
end
|
||||||
|
while(node.hasAttributes) % copy all attributes
|
||||||
|
root.setAttributeNode(node.removeAttributeNode(node.getAttributes.item(0)));
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%% Save exotic Global nodes
|
||||||
|
if (~isempty(GlobalComment))
|
||||||
|
DOMnode.insertBefore(DOMnode.createComment(GlobalComment), DOMnode.getFirstChild());
|
||||||
|
end
|
||||||
|
if (~isempty(GlobalProcInst))
|
||||||
|
n = strfind(GlobalProcInst, ' ');
|
||||||
|
if (~isempty(n))
|
||||||
|
proc = DOMnode.createProcessingInstruction(GlobalProcInst(1:(n(1)-1)),...
|
||||||
|
GlobalProcInst((n(1)+1):end));
|
||||||
|
DOMnode.insertBefore(proc, DOMnode.getFirstChild());
|
||||||
|
end
|
||||||
|
end
|
||||||
|
% Not supported yet as the code below does not work
|
||||||
|
% if (~isempty(GlobalDocType))
|
||||||
|
% n = strfind(GlobalDocType, ' ');
|
||||||
|
% if (~isempty(n))
|
||||||
|
% dtype = DOMnode.createDocumentType(GlobalDocType);
|
||||||
|
% DOMnode.insertBefore(dtype, DOMnode.getFirstChild());
|
||||||
|
% end
|
||||||
|
% end
|
||||||
|
|
||||||
|
%% save java DOM tree to XML file
|
||||||
|
if (~isempty(filename))
|
||||||
|
if (strcmpi(DPref.XmlEngine, 'Xerces'))
|
||||||
|
xmlwrite_xerces(filename, DOMnode);
|
||||||
|
else
|
||||||
|
xmlwrite(filename, DOMnode);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === struct2DOMnode Function ===========================================
|
||||||
|
% =======================================================================
|
||||||
|
function [] = struct2DOMnode(xml, parent, s, TagName, Pref)
|
||||||
|
% struct2DOMnode is a recursive function that converts matlab's structs to
|
||||||
|
% DOM nodes.
|
||||||
|
% INPUTS:
|
||||||
|
% xml - java object that will store the xml data structure
|
||||||
|
% parent - parent DOM Element
|
||||||
|
% s - Matlab data structure to save
|
||||||
|
% TagName - name to be used in xml tags describing 's'
|
||||||
|
% Pref - preferences
|
||||||
|
% OUTPUT:
|
||||||
|
% parent - modified 'parent'
|
||||||
|
|
||||||
|
% perform some conversions
|
||||||
|
if (ischar(s) && min(size(s))>1) % if 2D array of characters
|
||||||
|
s=cellstr(s); % then convert to cell array
|
||||||
|
end
|
||||||
|
% if (strcmp(TagName, 'CONTENT'))
|
||||||
|
% while (iscell(s) && length(s)==1), s = s{1}; end % unwrap cell arrays of length 1
|
||||||
|
% end
|
||||||
|
TagName = varName2str(TagName);
|
||||||
|
|
||||||
|
%% == node is a 2D cell array ==
|
||||||
|
% convert to some other format prior to further processing
|
||||||
|
nDim = nnz(size(s)>1); % is it a scalar, vector, 2D array, 3D cube, etc?
|
||||||
|
if (iscell(s) && nDim==2 && strcmpi(Pref.CellTable, 'Matlab'))
|
||||||
|
s = var2str(s, Pref.PreserveSpace);
|
||||||
|
end
|
||||||
|
if (nDim==2 && (iscell (s) && strcmpi(Pref.CellTable, 'Vector')) || ...
|
||||||
|
(isstruct(s) && strcmpi(Pref.StructTable, 'Vector')))
|
||||||
|
s = s(:);
|
||||||
|
end
|
||||||
|
if (nDim>2), s = s(:); end % can not handle this case well
|
||||||
|
nItem = numel(s);
|
||||||
|
nDim = nnz(size(s)>1); % is it a scalar, vector, 2D array, 3D cube, etc?
|
||||||
|
|
||||||
|
%% == node is a cell ==
|
||||||
|
if (iscell(s)) % if this is a cell or cell array
|
||||||
|
if ((nDim==2 && strcmpi(Pref.CellTable,'Html')) || (nDim< 2 && Pref.CellItem))
|
||||||
|
% if 2D array of cells then can use HTML-like notation or if 1D array
|
||||||
|
% then can use item notation
|
||||||
|
if (strcmp(TagName, 'CONTENT')) % CONTENT nodes already have <TagName> ... </TagName>
|
||||||
|
array2DOMnode(xml, parent, s, Pref.ItemName, Pref ); % recursive call
|
||||||
|
else
|
||||||
|
node = xml.createElement(TagName); % <TagName> ... </TagName>
|
||||||
|
array2DOMnode(xml, node, s, Pref.ItemName, Pref ); % recursive call
|
||||||
|
parent.appendChild(node);
|
||||||
|
end
|
||||||
|
else % use <TagName>...</TagName> <TagName>...</TagName> notation
|
||||||
|
array2DOMnode(xml, parent, s, TagName, Pref ); % recursive call
|
||||||
|
end
|
||||||
|
%% == node is a struct ==
|
||||||
|
elseif (isstruct(s)) % if struct then deal with each field separately
|
||||||
|
if ((nDim==2 && strcmpi(Pref.StructTable,'Html')) || (nItem>1 && Pref.StructItem))
|
||||||
|
% if 2D array of structs then can use HTML-like notation or
|
||||||
|
% if 1D array of structs then can use 'items' notation
|
||||||
|
node = xml.createElement(TagName);
|
||||||
|
array2DOMnode(xml, node, s, Pref.ItemName, Pref ); % recursive call
|
||||||
|
parent.appendChild(node);
|
||||||
|
elseif (nItem>1) % use <TagName>...</TagName> <TagName>...</TagName> notation
|
||||||
|
array2DOMnode(xml, parent, s, TagName, Pref ); % recursive call
|
||||||
|
else % otherwise save each struct separately
|
||||||
|
fields = fieldnames(s);
|
||||||
|
node = xml.createElement(TagName);
|
||||||
|
for i=1:length(fields) % add field by field to the node
|
||||||
|
field = fields{i};
|
||||||
|
x = s.(field);
|
||||||
|
switch field
|
||||||
|
case {'COMMENT', 'CDATA_SECTION', 'PROCESSING_INSTRUCTION'}
|
||||||
|
if iscellstr(x) % cell array of strings -> add them one by one
|
||||||
|
array2DOMnode(xml, node, x(:), field, Pref ); % recursive call will modify 'node'
|
||||||
|
elseif ischar(x) % single string -> add it
|
||||||
|
struct2DOMnode(xml, node, x, field, Pref ); % recursive call will modify 'node'
|
||||||
|
else % not a string - Ignore
|
||||||
|
warning('xml_io_tools:write:badSpecialNode', ...
|
||||||
|
['Struct field named ',field,' encountered which was not a string. Ignoring.']);
|
||||||
|
end
|
||||||
|
case 'ATTRIBUTE' % set attributes of the node
|
||||||
|
if (isempty(x)), continue; end
|
||||||
|
if (isstruct(x))
|
||||||
|
attName = fieldnames(x); % get names of all the attributes
|
||||||
|
for k=1:length(attName) % attach them to the node
|
||||||
|
att = xml.createAttribute(varName2str(attName(k)));
|
||||||
|
att.setValue(var2str(x.(attName{k}),Pref.PreserveSpace));
|
||||||
|
node.setAttributeNode(att);
|
||||||
|
end
|
||||||
|
else
|
||||||
|
warning('xml_io_tools:write:badAttribute', ...
|
||||||
|
'Struct field named ATTRIBUTE encountered which was not a struct. Ignoring.');
|
||||||
|
end
|
||||||
|
otherwise % set children of the node
|
||||||
|
struct2DOMnode(xml, node, x, field, Pref ); % recursive call will modify 'node'
|
||||||
|
end
|
||||||
|
end % end for i=1:nFields
|
||||||
|
parent.appendChild(node);
|
||||||
|
end
|
||||||
|
%% == node is a leaf node ==
|
||||||
|
else % if not a struct and not a cell then it is a leaf node
|
||||||
|
switch TagName % different processing depending on desired type of the node
|
||||||
|
case 'COMMENT' % create comment node
|
||||||
|
com = xml.createComment(s);
|
||||||
|
parent.appendChild(com);
|
||||||
|
case 'CDATA_SECTION' % create CDATA Section
|
||||||
|
cdt = xml.createCDATASection(s);
|
||||||
|
parent.appendChild(cdt);
|
||||||
|
case 'PROCESSING_INSTRUCTION' % set attributes of the node
|
||||||
|
OK = false;
|
||||||
|
if (ischar(s))
|
||||||
|
n = strfind(s, ' ');
|
||||||
|
if (~isempty(n))
|
||||||
|
proc = xml.createProcessingInstruction(s(1:(n(1)-1)),s((n(1)+1):end));
|
||||||
|
parent.insertBefore(proc, parent.getFirstChild());
|
||||||
|
OK = true;
|
||||||
|
end
|
||||||
|
end
|
||||||
|
if (~OK)
|
||||||
|
warning('xml_io_tools:write:badProcInst', ...
|
||||||
|
['Struct field named PROCESSING_INSTRUCTION needs to be',...
|
||||||
|
' a string, for example: xml-stylesheet type="text/css" ', ...
|
||||||
|
'href="myStyleSheet.css". Ignoring.']);
|
||||||
|
end
|
||||||
|
case 'CONTENT' % this is text part of already existing node
|
||||||
|
txt = xml.createTextNode(var2str(s, Pref.PreserveSpace)); % convert to text
|
||||||
|
parent.appendChild(txt);
|
||||||
|
otherwise % I guess it is a regular text leaf node
|
||||||
|
txt = xml.createTextNode(var2str(s, Pref.PreserveSpace));
|
||||||
|
node = xml.createElement(TagName);
|
||||||
|
node.appendChild(txt);
|
||||||
|
parent.appendChild(node);
|
||||||
|
end
|
||||||
|
end % of struct2DOMnode function
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === array2DOMnode Function ============================================
|
||||||
|
% =======================================================================
|
||||||
|
function [] = array2DOMnode(xml, parent, s, TagName, Pref)
|
||||||
|
% Deal with 1D and 2D arrays of cell or struct. Will modify 'parent'.
|
||||||
|
nDim = nnz(size(s)>1); % is it a scalar, vector, 2D array, 3D cube, etc?
|
||||||
|
switch nDim
|
||||||
|
case 2 % 2D array
|
||||||
|
for r=1:size(s,1)
|
||||||
|
subnode = xml.createElement(Pref.TableName{1});
|
||||||
|
for c=1:size(s,2)
|
||||||
|
v = s(r,c);
|
||||||
|
if iscell(v), v = v{1}; end
|
||||||
|
struct2DOMnode(xml, subnode, v, Pref.TableName{2}, Pref ); % recursive call
|
||||||
|
end
|
||||||
|
parent.appendChild(subnode);
|
||||||
|
end
|
||||||
|
case 1 %1D array
|
||||||
|
for iItem=1:numel(s)
|
||||||
|
v = s(iItem);
|
||||||
|
if iscell(v), v = v{1}; end
|
||||||
|
struct2DOMnode(xml, parent, v, TagName, Pref ); % recursive call
|
||||||
|
end
|
||||||
|
case 0 % scalar -> this case should never be called
|
||||||
|
if ~isempty(s)
|
||||||
|
if iscell(s), s = s{1}; end
|
||||||
|
struct2DOMnode(xml, parent, s, TagName, Pref );
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === var2str Function ==================================================
|
||||||
|
% =======================================================================
|
||||||
|
function str = var2str(object, PreserveSpace)
|
||||||
|
% convert matlab variables to a string
|
||||||
|
switch (1)
|
||||||
|
case isempty(object)
|
||||||
|
str = '';
|
||||||
|
case (isnumeric(object) || islogical(object))
|
||||||
|
if ndims(object)>2, object=object(:); end % can't handle arrays with dimension > 2
|
||||||
|
str=mat2str(object); % convert matrix to a string
|
||||||
|
% mark logical scalars with [] (logical arrays already have them) so the xml_read
|
||||||
|
% recognizes them as MATLAB objects instead of strings. Same with sparse
|
||||||
|
% matrices
|
||||||
|
if ((islogical(object) && isscalar(object)) || issparse(object)),
|
||||||
|
str = ['[' str ']'];
|
||||||
|
end
|
||||||
|
if (isinteger(object)),
|
||||||
|
str = ['[', class(object), '(', str ')]'];
|
||||||
|
end
|
||||||
|
case iscell(object)
|
||||||
|
if ndims(object)>2, object=object(:); end % can't handle cell arrays with dimension > 2
|
||||||
|
[nr nc] = size(object);
|
||||||
|
obj2 = object;
|
||||||
|
for i=1:length(object(:))
|
||||||
|
str = var2str(object{i}, PreserveSpace);
|
||||||
|
if (ischar(object{i})), object{i} = ['''' object{i} '''']; else object{i}=str; end
|
||||||
|
obj2{i} = [object{i} ','];
|
||||||
|
end
|
||||||
|
for r = 1:nr, obj2{r,nc} = [object{r,nc} ';']; end
|
||||||
|
obj2 = obj2.';
|
||||||
|
str = ['{' obj2{:} '}'];
|
||||||
|
case isstruct(object)
|
||||||
|
str='';
|
||||||
|
warning('xml_io_tools:write:var2str', ...
|
||||||
|
'Struct was encountered where string was expected. Ignoring.');
|
||||||
|
case isa(object, 'function_handle')
|
||||||
|
str = ['[@' char(object) ']'];
|
||||||
|
case ischar(object)
|
||||||
|
str = object;
|
||||||
|
otherwise
|
||||||
|
str = char(object);
|
||||||
|
end
|
||||||
|
|
||||||
|
%% string clean-up
|
||||||
|
str=str(:); str=str.'; % make sure this is a row vector of char's
|
||||||
|
if (~isempty(str))
|
||||||
|
str(str<32|str==127)=' '; % convert non-printable characters to spaces
|
||||||
|
if (~PreserveSpace)
|
||||||
|
str = strtrim(str); % remove spaces from the beginning and the end
|
||||||
|
str = regexprep(str,'\s+',' '); % remove multiple spaces
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%% =======================================================================
|
||||||
|
% === varName2str Function ==============================================
|
||||||
|
% =======================================================================
|
||||||
|
function str = varName2str(str)
|
||||||
|
% convert matlab variable names to a string
|
||||||
|
str = char(str);
|
||||||
|
p = strfind(str,'0x');
|
||||||
|
if (~isempty(p))
|
||||||
|
for i=1:length(p)
|
||||||
|
before = str( p(i)+(0:3) ); % string to replace
|
||||||
|
after = char(hex2dec(before(3:4))); % string to replace with
|
||||||
|
str = regexprep(str,before,after, 'once', 'ignorecase');
|
||||||
|
p=p-3; % since 4 characters were replaced with one - compensate
|
||||||
|
end
|
||||||
|
end
|
||||||
|
str = regexprep(str,'_COLON_',':', 'once', 'ignorecase');
|
||||||
|
str = regexprep(str,'_DASH_' ,'-', 'once', 'ignorecase');
|
||||||
|
|
|
@@ -0,0 +1,109 @@
|
||||||
|
function varargout=xmlwrite_xerces(varargin)
|
||||||
|
%XMLWRITE_XERCES Serialize an XML Document Object Model node using Xerces parser.
|
||||||
|
% xmlwrite_xerces(FILENAME,DOMNODE) serializes the DOMNODE to file FILENAME.
|
||||||
|
%
|
||||||
|
% The function xmlwrite_xerces is very similar to the Matlab function xmlwrite
|
||||||
|
% but works directly with the XERCES java classes (written by Apache XML
|
||||||
|
% Project) instead of the XMLUtils class created by Mathworks. Xerces files
|
||||||
|
% are provided in a standard MATLAB installation and live in root\java\jarext
|
||||||
|
% directory.
|
||||||
|
%
|
||||||
|
% Written by A.Amaro (02-22-2007) and generously donated to xml_io_tools.
|
||||||
|
% This function is needed as a work-around for a bug in XMLUtils library
|
||||||
|
% which can not write CDATA SECTION nodes correctly. Also Xerces and
|
||||||
|
% XMLUtils libraries handle namespaces differently.
|
||||||
|
%
|
||||||
|
% Examples:
|
||||||
|
% % See the xmlwrite examples; this function has almost identical behavior.
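% % A minimal usage sketch (file names here are only illustrative):
% %   DOMnode = xmlread('in.xml');          % build a DOM with MATLAB's reader
% %   xmlwrite_xerces('out.xml', DOMnode);  % serialize it through Xerces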
|
||||||
|
%
|
||||||
|
% Advanced use:
|
||||||
|
% FILENAME can also be a URN, java.io.OutputStream or java.io.Writer object
|
||||||
|
% SOURCE can also be a SAX InputSource, JAXP Source, InputStream, or
|
||||||
|
% Reader object
|
||||||
|
|
||||||
|
returnString = false;
|
||||||
|
if length(varargin)==1
|
||||||
|
returnString = true;
|
||||||
|
result = java.io.StringWriter;
|
||||||
|
source = varargin{1};
|
||||||
|
else
|
||||||
|
result = varargin{1};
|
||||||
|
if ischar(result)
|
||||||
|
% When using the XERCES classes directly there is no need to modify the
|
||||||
|
% filename string, so the next line is commented out:
|
||||||
|
% result = F_xmlstringinput(result,false);
|
||||||
|
end
|
||||||
|
|
||||||
|
source = varargin{2};
|
||||||
|
if ischar(source)
|
||||||
|
source = F_xmlstringinput(source,true);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
% SERIALIZATION OF THE DOM DOCUMENT USING XERCES CLASSES DIRECTLY
|
||||||
|
|
||||||
|
% 1) create the output format according to the document definitions
|
||||||
|
% and type
|
||||||
|
objOutputFormat = org.apache.xml.serialize.OutputFormat(source);
|
||||||
|
set(objOutputFormat,'Indenting','on');
|
||||||
|
|
||||||
|
% 2) create the output stream. In this case: an XML file
|
||||||
|
objFile = java.io.File(result);
|
||||||
|
objOutputStream = java.io.FileOutputStream(objFile);
|
||||||
|
|
||||||
|
% 3) Create the Xerces Serializer object
|
||||||
|
objSerializer= org.apache.xml.serialize.XMLSerializer(objOutputStream,objOutputFormat);
|
||||||
|
|
||||||
|
% 4) Serialize to the XML files
|
||||||
|
javaMethod('serialize',objSerializer,source);
|
||||||
|
|
||||||
|
% 5) IMPORTANT! Close the output stream to release the XML file just created
|
||||||
|
objOutputStream.close;
|
||||||
|
|
||||||
|
if returnString
|
||||||
|
varargout{1}=char(result.toString);
|
||||||
|
end
|
||||||
|
|
||||||
|
%% ========================================================================
|
||||||
|
function out = F_xmlstringinput(xString,isFullSearch,varargin)
|
||||||
|
% The function F_xmlstringinput is a copy of the private function:
|
||||||
|
% 'xmlstringinput' that the original xmlwrite function uses.
|
||||||
|
|
||||||
|
if isempty(xString)
|
||||||
|
error('Filename is empty');
|
||||||
|
elseif ~isempty(findstr(xString,'://'))
|
||||||
|
%xString is already a URL, most likely prefaced by file:// or http://
|
||||||
|
out = xString;
|
||||||
|
return;
|
||||||
|
end
|
||||||
|
|
||||||
|
xPath=fileparts(xString);
|
||||||
|
if isempty(xPath)
|
||||||
|
if nargin<2 || isFullSearch
|
||||||
|
out = which(xString);
|
||||||
|
if isempty(out)
|
||||||
|
error('xml:FileNotFound','File %s not found',xString);
|
||||||
|
end
|
||||||
|
else
|
||||||
|
out = fullfile(pwd,xString);
|
||||||
|
end
|
||||||
|
else
|
||||||
|
out = xString;
|
||||||
|
if (nargin<2 || isFullSearch) && ~exist(xString,'file')
|
||||||
|
%search to see if xString exists when isFullSearch
|
||||||
|
error('xml:FileNotFound','File %s not found',xString);
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
%Return as a URN
|
||||||
|
if strncmp(out,'\\',2)
|
||||||
|
% SAXON UNC filepaths need to look like file:///\\\server-name\
|
||||||
|
out = ['file:///\',out];
|
||||||
|
elseif strncmp(out,'/',1)
|
||||||
|
% SAXON UNIX filepaths need to look like file:///root/dir/dir
|
||||||
|
out = ['file://',out];
|
||||||
|
else
|
||||||
|
% DOS filepaths need to look like file:///d:/foo/bar
|
||||||
|
out = ['file:///',strrep(out,'\','/')];
|
||||||
|
end
|
||||||
|
|
|
@@ -0,0 +1,98 @@
|
||||||
|
function [data_train, labels_train, data_devel, labels_devel, raw_devel, PC, means_norm, stds_norm, valid_ids_test] = ...
|
||||||
|
Prepare_HOG_AU_data_generic(train_users, devel_users, au_train, bp4d_dir, hog_data_dir, pca_file)
|
||||||
|
|
||||||
|
%%
|
||||||
|
addpath(genpath('../../data extraction/'));
|
||||||
|
|
||||||
|
au_other = setdiff([1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23], au_train);
|
||||||
|
[ labels_other, ~, ~ ] = extract_BP4D_labels(bp4d_dir, train_users, au_other);
|
||||||
|
labels_other = cat(1, labels_other{:});
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_train, valid_ids_train, vid_ids_train ] = extract_BP4D_labels(bp4d_dir, train_users, au_train);
|
||||||
|
|
||||||
|
train_geom_data = Read_geom_files(train_users, hog_data_dir);
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
[train_appearance_data, valid_ids_train_hog, vid_ids_train_string] = Read_HOG_files(train_users, hog_data_dir);
|
||||||
|
train_appearance_data = cat(2, train_appearance_data, train_geom_data);
|
||||||
|
|
||||||
|
% Subsample the data to make training quicker
|
||||||
|
labels_train = cat(1, labels_train{:});
|
||||||
|
valid_ids_train = logical(cat(1, valid_ids_train{:}));
|
||||||
|
reduced_inds = false(size(labels_train,1),1);
|
||||||
|
|
||||||
|
if(numel(au_train) == 1)
|
||||||
|
reduced_inds(labels_train == 1) = true;
|
||||||
|
else
|
||||||
|
reduced_inds(:) = true;
|
||||||
|
end
|
||||||
|
|
||||||
|
% make sure the same number of positive and negative samples is taken
|
||||||
|
pos_count = sum(labels_train == 1);
|
||||||
|
neg_count = sum(labels_train == 0);
|
||||||
|
|
||||||
|
num_other = floor(pos_count / (size(labels_other, 2)));
|
||||||
|
|
||||||
|
inds_all = 1:size(labels_train,1);
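% Subsampling strategy (as implemented below): every frame where the target AU
% is present is kept, and roughly the same total number of negative frames is
% added, drawn evenly from frames where each of the other AUs is active plus
% purely neutral frames.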
|
||||||
|
|
||||||
|
if(numel(au_train) == 1)
|
||||||
|
for i=1:size(labels_other, 2)+1
|
||||||
|
|
||||||
|
if(i > size(labels_other, 2))
|
||||||
|
% fill the rest with a proportion of neutral
|
||||||
|
inds_other = inds_all(sum(labels_other,2)==0 & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), pos_count - sum(labels_train(reduced_inds,:)==0));
|
||||||
|
else
|
||||||
|
% take a proportion of each other AU
|
||||||
|
inds_other = inds_all(labels_other(:, i) & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), num_other);
|
||||||
|
end
|
||||||
|
inds_other_to_keep = inds_other(round(linspace(1, numel(inds_other), num_other_i)));
|
||||||
|
reduced_inds(inds_other_to_keep) = true;
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
|
% Remove invalid ids based on CLM failing or AU not being labelled
|
||||||
|
reduced_inds(~valid_ids_train) = false;
|
||||||
|
reduced_inds(~valid_ids_train_hog) = false;
|
||||||
|
|
||||||
|
% labels_other = labels_other(reduced_inds, :);
|
||||||
|
labels_train = labels_train(reduced_inds,:);
|
||||||
|
train_appearance_data = train_appearance_data(reduced_inds,:);
|
||||||
|
vid_ids_train_string = vid_ids_train_string(reduced_inds,:);
|
||||||
|
|
||||||
|
%% Extract devel data
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_devel, valid_ids_devel, vid_ids_devel ] = extract_BP4D_labels(bp4d_dir, devel_users, au_train);
|
||||||
|
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
devel_geom_data = Read_geom_files(devel_users, hog_data_dir);
|
||||||
|
[devel_appearance_data, valid_ids_devel_hog, vid_ids_devel_string] = Read_HOG_files(devel_users, hog_data_dir);
|
||||||
|
devel_appearance_data = cat(2, devel_appearance_data, devel_geom_data);
|
||||||
|
|
||||||
|
labels_devel = cat(1, labels_devel{:});
|
||||||
|
|
||||||
|
valid_ids_test = valid_ids_devel_hog;
|
||||||
|
|
||||||
|
% normalise the data
|
||||||
|
load(pca_file);
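% The loaded PCA basis covers only the appearance (HOG) features, so pad it
% with an identity block for the appended geometry features (which pass through
% unchanged) and extend the normalisation means/stds with zeros and ones.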
|
||||||
|
|
||||||
|
PC_n = zeros(size(PC)+size(train_geom_data, 2));
|
||||||
|
PC_n(1:size(PC,1), 1:size(PC,2)) = PC;
|
||||||
|
PC_n(size(PC,1)+1:end, size(PC,2)+1:end) = eye(size(train_geom_data, 2));
|
||||||
|
PC = PC_n;
|
||||||
|
|
||||||
|
means_norm = cat(2, means_norm, zeros(1, size(train_geom_data,2)));
|
||||||
|
stds_norm = cat(2, stds_norm, ones(1, size(train_geom_data,2)));
|
||||||
|
|
||||||
|
% Grab all the data for validation, as we want good parameters for all of the data
|
||||||
|
raw_devel = devel_appearance_data;
|
||||||
|
|
||||||
|
devel_appearance_data = bsxfun(@times, bsxfun(@plus, devel_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
train_appearance_data = bsxfun(@times, bsxfun(@plus, train_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
|
||||||
|
data_train = train_appearance_data * PC;
|
||||||
|
data_devel = devel_appearance_data * PC;
|
||||||
|
|
||||||
|
end
|
|
@@ -0,0 +1,94 @@
|
||||||
|
function [data_train, labels_train, data_devel, labels_devel, raw_devel, PC, means_norm, stds_norm, valid_ids_devel] = ...
|
||||||
|
Prepare_HOG_AU_data_generic_dynamic(train_users, devel_users, au_train, bp4d_dir, hog_data_dir, pca_file)
|
||||||
|
|
||||||
|
%%
|
||||||
|
addpath(genpath('../../data extraction/'));
|
||||||
|
|
||||||
|
au_other = setdiff([1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23], au_train);
|
||||||
|
[ labels_other, ~, ~ ] = extract_BP4D_labels(bp4d_dir, train_users, au_other);
|
||||||
|
labels_other = cat(1, labels_other{:});
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_train, valid_ids_train, vid_ids_train ] = extract_BP4D_labels(bp4d_dir, train_users, au_train);
|
||||||
|
|
||||||
|
train_geom_data = Read_geom_files_dynamic(train_users, hog_data_dir);
|
||||||
|
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
[train_appearance_data, valid_ids_train_hog, vid_ids_train_string] = Read_HOG_files_dynamic_pp(train_users, hog_data_dir);
|
||||||
|
|
||||||
|
train_appearance_data = cat(2, train_appearance_data, train_geom_data);
|
||||||
|
|
||||||
|
% Subsample the data to make training quicker
|
||||||
|
labels_train = cat(1, labels_train{:});
|
||||||
|
valid_ids_train = logical(cat(1, valid_ids_train{:}));
|
||||||
|
|
||||||
|
reduced_inds = false(size(labels_train,1),1);
|
||||||
|
reduced_inds(labels_train == 1) = true;
|
||||||
|
|
||||||
|
% make sure the same number of positive and negative samples is taken
|
||||||
|
pos_count = sum(labels_train == 1);
|
||||||
|
neg_count = sum(labels_train == 0);
|
||||||
|
|
||||||
|
num_other = floor(pos_count / (size(labels_other, 2)));
|
||||||
|
|
||||||
|
inds_all = 1:size(labels_train,1);
|
||||||
|
|
||||||
|
for i=1:size(labels_other, 2)+1
|
||||||
|
|
||||||
|
if(i > size(labels_other, 2))
|
||||||
|
% fill the rest with a proportion of neutral
|
||||||
|
inds_other = inds_all(sum(labels_other,2)==0 & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), pos_count - sum(labels_train(reduced_inds,:)==0));
|
||||||
|
else
|
||||||
|
% take a proportion of each other AU
|
||||||
|
inds_other = inds_all(labels_other(:, i) & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), num_other);
|
||||||
|
end
|
||||||
|
inds_other_to_keep = inds_other(round(linspace(1, numel(inds_other), num_other_i)));
|
||||||
|
reduced_inds(inds_other_to_keep) = true;
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
% Remove invalid ids based on CLM failing or AU not being labelled
|
||||||
|
reduced_inds(~valid_ids_train) = false;
|
||||||
|
reduced_inds(~valid_ids_train_hog) = false;
|
||||||
|
|
||||||
|
labels_other = labels_other(reduced_inds, :);
|
||||||
|
labels_train = labels_train(reduced_inds,:);
|
||||||
|
train_appearance_data = train_appearance_data(reduced_inds,:);
|
||||||
|
vid_ids_train_string = vid_ids_train_string(reduced_inds,:);
|
||||||
|
|
||||||
|
%% Extract devel data
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_devel, valid_ids_devel, vid_ids_devel ] = extract_BP4D_labels(bp4d_dir, devel_users, au_train);
|
||||||
|
labels_devel = cat(1, labels_devel{:});
|
||||||
|
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
devel_geom_data = Read_geom_files_dynamic(devel_users, hog_data_dir);
|
||||||
|
[devel_appearance_data, valid_ids_devel_hog, vid_ids_devel_string] = Read_HOG_files_dynamic_pp(devel_users, hog_data_dir);
|
||||||
|
devel_appearance_data = cat(2, devel_appearance_data, devel_geom_data);
|
||||||
|
|
||||||
|
valid_ids_devel = valid_ids_devel_hog;
|
||||||
|
|
||||||
|
% normalise the data
|
||||||
|
load(pca_file);
|
||||||
|
|
||||||
|
PC_n = zeros(size(PC)+size(train_geom_data, 2));
|
||||||
|
PC_n(1:size(PC,1), 1:size(PC,2)) = PC;
|
||||||
|
PC_n(size(PC,1)+1:end, size(PC,2)+1:end) = eye(size(train_geom_data, 2));
|
||||||
|
PC = PC_n;
|
||||||
|
|
||||||
|
means_norm = cat(2, means_norm, zeros(1, size(train_geom_data,2)));
|
||||||
|
stds_norm = cat(2, stds_norm, ones(1, size(train_geom_data,2)));
|
||||||
|
|
||||||
|
% Grab all the data for validation, as we want good parameters for all of the data
|
||||||
|
raw_devel = devel_appearance_data;
|
||||||
|
|
||||||
|
devel_appearance_data = bsxfun(@times, bsxfun(@plus, devel_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
train_appearance_data = bsxfun(@times, bsxfun(@plus, train_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
|
||||||
|
data_train = train_appearance_data * PC;
|
||||||
|
data_devel = devel_appearance_data * PC;
|
||||||
|
|
||||||
|
end
|
|
@@ -0,0 +1,103 @@
|
||||||
|
function [data_train, labels_train, vid_ids_train_string, data_devel, labels_devel, vid_ids_devel_string, raw_devel, PC, means_norm, stds_norm, success_devel] = ...
|
||||||
|
Prepare_HOG_AU_data_generic_intensity(train_users, devel_users, au_train, bp4d_dir, hog_data_dir, pca_file)
|
||||||
|
|
||||||
|
%%
|
||||||
|
addpath(genpath('../data extraction/'));
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_train, valid_ids_train, vid_ids_train ] = extract_BP4D_labels_intensity(bp4d_dir, train_users, au_train);
|
||||||
|
au_other = setdiff([6, 10, 12, 14, 17], au_train);
|
||||||
|
[ labels_other, ~, ~ ] = extract_BP4D_labels_intensity(bp4d_dir, train_users, au_other);
|
||||||
|
labels_other = cat(1, labels_other{:});
|
||||||
|
|
||||||
|
train_geom_data = Read_geom_files(train_users, hog_data_dir);
|
||||||
|
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
[train_appearance_data, valid_ids_train_hog, vid_ids_train_string] = Read_HOG_files(train_users, hog_data_dir);
|
||||||
|
train_appearance_data = cat(2, train_appearance_data, train_geom_data);
|
||||||
|
|
||||||
|
% Subsample the data to make training quicker
|
||||||
|
labels_train = cat(1, labels_train{:});
|
||||||
|
valid_ids_train = logical(cat(1, valid_ids_train{:}));
|
||||||
|
|
||||||
|
reduced_inds = false(size(labels_train,1),1);
|
||||||
|
reduced_inds(labels_train > 0) = true;
|
||||||
|
|
||||||
|
% make sure the same number of positive and negative samples is taken
|
||||||
|
pos_count = sum(labels_train > 0);
|
||||||
|
neg_count = sum(labels_train == 0);
|
||||||
|
|
||||||
|
num_other = floor(pos_count / (size(labels_other, 2)));
|
||||||
|
|
||||||
|
inds_all = 1:size(labels_train,1);
|
||||||
|
|
||||||
|
if(numel(train_users) > 0)
|
||||||
|
if(numel(au_train) == 1)
|
||||||
|
for i=1:size(labels_other, 2)+1
|
||||||
|
|
||||||
|
if(i > size(labels_other, 2))
|
||||||
|
% fill the rest with a proportion of neutral
|
||||||
|
inds_other = inds_all(sum(labels_other,2)==0 & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), pos_count - sum(labels_train(reduced_inds,:)==0));
|
||||||
|
else
|
||||||
|
% take a proportion of each other AU
|
||||||
|
inds_other = inds_all(labels_other(:, i) & ~labels_train );
|
||||||
|
num_other_i = min(numel(inds_other), num_other);
|
||||||
|
end
|
||||||
|
inds_other_to_keep = inds_other(round(linspace(1, numel(inds_other), num_other_i)));
|
||||||
|
reduced_inds(inds_other_to_keep) = true;
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
|
% Remove invalid ids based on CLM failing or AU not being labelled
|
||||||
|
reduced_inds(~valid_ids_train) = false;
|
||||||
|
reduced_inds(~valid_ids_train_hog) = false;
|
||||||
|
|
||||||
|
labels_other = labels_other(reduced_inds, :);
|
||||||
|
labels_train = labels_train(reduced_inds,:);
|
||||||
|
train_appearance_data = train_appearance_data(reduced_inds,:);
|
||||||
|
vid_ids_train_string = vid_ids_train_string(reduced_inds,:);
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
%% Extract devel data
|
||||||
|
|
||||||
|
% First extracting the labels
|
||||||
|
[ labels_devel, valid_ids_devel, vid_ids_devel ] = extract_BP4D_labels_intensity(bp4d_dir, devel_users, au_train);
|
||||||
|
|
||||||
|
devel_geom_data = Read_geom_files(devel_users, hog_data_dir);
|
||||||
|
% Reading in the HOG data (of only relevant frames)
|
||||||
|
[devel_appearance_data, valid_ids_devel_hog, vid_ids_devel_string] = Read_HOG_files(devel_users, hog_data_dir);
|
||||||
|
devel_appearance_data = cat(2, devel_appearance_data, devel_geom_data);
|
||||||
|
|
||||||
|
valid_ids_devel = logical(cat(1, valid_ids_devel{:}));
|
||||||
|
|
||||||
|
labels_devel = cat(1, labels_devel{:});
|
||||||
|
|
||||||
|
success_devel = valid_ids_devel;
|
||||||
|
|
||||||
|
% normalise the data
|
||||||
|
load(pca_file);
|
||||||
|
|
||||||
|
PC_n = zeros(size(PC)+size(devel_geom_data, 2));
|
||||||
|
PC_n(1:size(PC,1), 1:size(PC,2)) = PC;
|
||||||
|
PC_n(size(PC,1)+1:end, size(PC,2)+1:end) = eye(size(devel_geom_data, 2));
|
||||||
|
PC = PC_n;
|
||||||
|
|
||||||
|
means_norm = cat(2, means_norm, zeros(1, size(devel_geom_data,2)));
|
||||||
|
stds_norm = cat(2, stds_norm, ones(1, size(devel_geom_data,2)));
|
||||||
|
|
||||||
|
% Grab all the data for validation, as we want good parameters for all of the data
|
||||||
|
raw_devel = devel_appearance_data;
|
||||||
|
|
||||||
|
devel_appearance_data = bsxfun(@times, bsxfun(@plus, devel_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
data_devel = devel_appearance_data * PC;
|
||||||
|
|
||||||
|
if(numel(train_users) > 0)
|
||||||
|
train_appearance_data = bsxfun(@times, bsxfun(@plus, train_appearance_data, -means_norm), 1./stds_norm);
|
||||||
|
data_train = train_appearance_data * PC;
|
||||||
|
else
|
||||||
|
data_train = [];
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
93
matlab_version/AU_training/experiments/BP4D/Read_HOG_files.m
Normal file
|
@@ -0,0 +1,93 @@
|
||||||
|
function [hog_data, valid_inds, vid_id] = Read_HOG_files(users, hog_data_dir)
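% Reads binary .hog files. Each frame record (as read below) is laid out as:
%   num_cols, num_rows, num_chan                          (int32)
%   validity flag + num_rows*num_cols*num_chan HOG values (float32)
% The flag ends up in column 1 of the accumulated matrix and is split off
% into valid_inds at the end.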
|
||||||
|
|
||||||
|
hog_data = [];
|
||||||
|
vid_id = {};
|
||||||
|
valid_inds = [];
|
||||||
|
|
||||||
|
feats_filled = 0;
|
||||||
|
|
||||||
|
for i=1:numel(users)
|
||||||
|
|
||||||
|
hog_files = dir([hog_data_dir, '/train/' users{i} '*.hog']);
|
||||||
|
hog_dir = [hog_data_dir, '/train/'];
|
||||||
|
if(isempty(hog_files))
|
||||||
|
hog_files = dir([hog_data_dir, '/devel/' users{i} '*.hog']);
|
||||||
|
hog_dir = [hog_data_dir, '/devel/'];
|
||||||
|
end
|
||||||
|
|
||||||
|
for h=1:numel(hog_files)
|
||||||
|
hog_file = [hog_dir, hog_files(h).name];
|
||||||
|
f = fopen(hog_file, 'r');
|
||||||
|
|
||||||
|
curr_data = [];
|
||||||
|
curr_ind = 0;
|
||||||
|
|
||||||
|
while(~feof(f))
|
||||||
|
|
||||||
|
if(curr_ind == 0)
|
||||||
|
num_cols = fread(f, 1, 'int32');
|
||||||
|
if(isempty(num_cols))
|
||||||
|
break;
|
||||||
|
end
|
||||||
|
|
||||||
|
num_rows = fread(f, 1, 'int32');
|
||||||
|
num_chan = fread(f, 1, 'int32');
|
||||||
|
|
||||||
|
curr_ind = curr_ind + 1;
|
||||||
|
|
||||||
|
% preallocate some space
|
||||||
|
if(curr_ind == 1)
|
||||||
|
curr_data = zeros(1000, 1 + num_rows * num_cols * num_chan);
|
||||||
|
num_feats = 1 + num_rows * num_cols * num_chan;
|
||||||
|
end
|
||||||
|
|
||||||
|
if(curr_ind > size(curr_data,1))
|
||||||
|
curr_data = cat(1, curr_data, zeros(1000, 1 + num_rows * num_cols * num_chan));
|
||||||
|
end
|
||||||
|
feature_vec = fread(f, [1, 1 + num_rows * num_cols * num_chan], 'float32');
|
||||||
|
curr_data(curr_ind, :) = feature_vec;
|
||||||
|
else
|
||||||
|
|
||||||
|
% Reading in batches of 5000
|
||||||
|
|
||||||
|
feature_vec = fread(f, [4 + num_rows * num_cols * num_chan, 5000], 'float32');
|
||||||
|
feature_vec = feature_vec(4:end,:)';
|
||||||
|
|
||||||
|
num_rows_read = size(feature_vec,1);
|
||||||
|
|
||||||
|
curr_data(curr_ind+1:curr_ind+num_rows_read,:) = feature_vec;
|
||||||
|
|
||||||
|
curr_ind = curr_ind + size(feature_vec,1);
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
fclose(f);
|
||||||
|
|
||||||
|
curr_data = curr_data(1:curr_ind,:);
|
||||||
|
vid_id_curr = cell(curr_ind,1);
|
||||||
|
vid_id_curr(:) = users(i);
|
||||||
|
|
||||||
|
vid_id = cat(1, vid_id, vid_id_curr);
|
||||||
|
|
||||||
|
% Assume same number of frames per video
|
||||||
|
if(i==1 && h == 1)
|
||||||
|
hog_data = zeros(curr_ind * numel(users) * 8, num_feats);
|
||||||
|
end
|
||||||
|
|
||||||
|
if(size(hog_data,1) < feats_filled+curr_ind)
|
||||||
|
hog_data = cat(1, hog_data, zeros(size(hog_data,1), num_feats));
|
||||||
|
end
|
||||||
|
|
||||||
|
hog_data(feats_filled+1:feats_filled+curr_ind,:) = curr_data;
|
||||||
|
|
||||||
|
feats_filled = feats_filled + curr_ind;
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
if(~isempty(hog_data))
|
||||||
|
valid_inds = hog_data(1:feats_filled,1);
|
||||||
|
hog_data = hog_data(1:feats_filled,2:end);
|
||||||
|
end
|
||||||
|
end
|
|
@@ -0,0 +1,101 @@
|
||||||
|
function [hog_data, valid_inds, vid_id] = Read_HOG_files_dynamic_pp(users, hog_data_dir)
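% Same binary .hog format as Read_HOG_files, but after all of a person's files
% are read the per-person median of every feature is subtracted (a simple
% per-subject offset correction used for the dynamic models).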
|
||||||
|
|
||||||
|
hog_data = [];
|
||||||
|
vid_id = {};
|
||||||
|
valid_inds = [];
|
||||||
|
|
||||||
|
feats_filled = 0;
|
||||||
|
|
||||||
|
for i=1:numel(users)
|
||||||
|
|
||||||
|
start_person_ind = feats_filled + 1;
|
||||||
|
|
||||||
|
hog_files = dir([hog_data_dir, '/train/' users{i} '*.hog']);
|
||||||
|
hog_dir = [hog_data_dir, '/train/'];
|
||||||
|
if(isempty(hog_files))
|
||||||
|
hog_files = dir([hog_data_dir, '/devel/' users{i} '*.hog']);
|
||||||
|
hog_dir = [hog_data_dir, '/devel/'];
|
||||||
|
end
|
||||||
|
|
||||||
|
for h=1:numel(hog_files)
|
||||||
|
hog_file = [hog_dir, hog_files(h).name];
|
||||||
|
f = fopen(hog_file, 'r');
|
||||||
|
|
||||||
|
curr_data = [];
|
||||||
|
curr_ind = 0;
|
||||||
|
|
||||||
|
while(~feof(f))
|
||||||
|
|
||||||
|
if(curr_ind == 0)
|
||||||
|
num_cols = fread(f, 1, 'int32');
|
||||||
|
if(isempty(num_cols))
|
||||||
|
break;
|
||||||
|
end
|
||||||
|
|
||||||
|
num_rows = fread(f, 1, 'int32');
|
||||||
|
num_chan = fread(f, 1, 'int32');
|
||||||
|
|
||||||
|
curr_ind = curr_ind + 1;
|
||||||
|
|
||||||
|
% preallocate some space
|
||||||
|
if(curr_ind == 1)
|
||||||
|
curr_data = zeros(1000, 1 + num_rows * num_cols * num_chan);
|
||||||
|
num_feats = 1 + num_rows * num_cols * num_chan;
|
||||||
|
end
|
||||||
|
|
||||||
|
if(curr_ind > size(curr_data,1))
|
||||||
|
curr_data = cat(1, curr_data, zeros(1000, 1 + num_rows * num_cols * num_chan));
|
||||||
|
end
|
||||||
|
feature_vec = fread(f, [1, 1 + num_rows * num_cols * num_chan], 'float32');
|
||||||
|
curr_data(curr_ind, :) = feature_vec;
|
||||||
|
else
|
||||||
|
|
||||||
|
% Reading in batches of 5000
|
||||||
|
|
||||||
|
feature_vec = fread(f, [4 + num_rows * num_cols * num_chan, 5000], 'float32');
|
||||||
|
feature_vec = feature_vec(4:end,:)';
|
||||||
|
|
||||||
|
num_rows_read = size(feature_vec,1);
|
||||||
|
|
||||||
|
curr_data(curr_ind+1:curr_ind+num_rows_read,:) = feature_vec;
|
||||||
|
|
||||||
|
curr_ind = curr_ind + size(feature_vec,1);
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
fclose(f);
|
||||||
|
|
||||||
|
curr_data = curr_data(1:curr_ind,:);
|
||||||
|
vid_id_curr = cell(curr_ind,1);
|
||||||
|
vid_id_curr(:) = users(i);
|
||||||
|
|
||||||
|
vid_id = cat(1, vid_id, vid_id_curr);
|
||||||
|
|
||||||
|
% Assume same number of frames per video
|
||||||
|
if(i==1 && h == 1)
|
||||||
|
hog_data = zeros(curr_ind * numel(users) * 8, num_feats);
|
||||||
|
end
|
||||||
|
|
||||||
|
if(size(hog_data,1) < feats_filled+curr_ind)
|
||||||
|
hog_data = cat(1, hog_data, zeros(size(hog_data,1), num_feats));
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
hog_data(feats_filled+1:feats_filled+curr_ind,:) = curr_data;
|
||||||
|
|
||||||
|
feats_filled = feats_filled + curr_ind;
|
||||||
|
end
|
||||||
|
|
||||||
|
person_ids = start_person_ind:feats_filled;
|
||||||
|
% Do the median normalisation per person here
|
||||||
|
hog_data(person_ids,2:end) = bsxfun(@plus, hog_data(person_ids,2:end), -median(hog_data(person_ids,2:end)));
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
if(~isempty(hog_data))
|
||||||
|
valid_inds = hog_data(1:feats_filled,1);
|
||||||
|
hog_data = hog_data(1:feats_filled,2:end);
|
||||||
|
end
|
||||||
|
end
|
|
@@ -0,0 +1,41 @@
|
||||||
|
function [geom_data, valid_ids] = Read_geom_files(users, hog_data_dir)
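% Reads per-frame geometry features from the *.params.txt files (cached as .mat
% after the first read). Column 4 is used as the per-frame validity flag and
% columns 11 onwards (presumably the shape parameters) as the model parameters;
% the landmark locations reconstructed from them through the PDM basis V
% (loaded from pdm_68_aligned_wild.mat) are prepended as extra features.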
|
||||||
|
|
||||||
|
geom_data = [];
|
||||||
|
valid_ids = [];
|
||||||
|
|
||||||
|
load('../../pca_generation/pdm_68_aligned_wild.mat');
|
||||||
|
|
||||||
|
for i=1:numel(users)
|
||||||
|
|
||||||
|
geom_files = dir([hog_data_dir, '/train/', users{i} '*.params.txt']);
|
||||||
|
geom_dir = [hog_data_dir, '/train/'];
|
||||||
|
if(isempty(geom_files))
|
||||||
|
geom_files = dir([hog_data_dir, '/devel/', users{i} '*.params.txt']);
|
||||||
|
geom_dir = [hog_data_dir, '/devel/'];
|
||||||
|
end
|
||||||
|
|
||||||
|
for h=1:numel(geom_files)
|
||||||
|
geom_file = [geom_dir, geom_files(h).name];
|
||||||
|
[~, nm, ~] = fileparts(geom_file);
|
||||||
|
m_file = [geom_dir, '/' nm '.params.mat'];
|
||||||
|
|
||||||
|
if(~exist(m_file, 'file'))
|
||||||
|
res = dlmread(geom_file, ',', 1, 0);
|
||||||
|
save(m_file, 'res');
|
||||||
|
else
|
||||||
|
load(m_file);
|
||||||
|
end
|
||||||
|
|
||||||
|
valid = res(:, 4);
|
||||||
|
res = res(:, 11:end);
|
||||||
|
|
||||||
|
actual_locs = res * V';
|
||||||
|
res = cat(2, actual_locs, res);
|
||||||
|
|
||||||
|
valid_ids = cat(1, valid_ids, valid);
|
||||||
|
|
||||||
|
geom_data = cat(1, geom_data, res);
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
|
@@ -0,0 +1,46 @@
|
||||||
|
function [geom_data, valid_ids] = Read_geom_files_dynamic(users, hog_data_dir)
|
||||||
|
|
||||||
|
geom_data = [];
|
||||||
|
valid_ids = [];
|
||||||
|
|
||||||
|
load('../../pca_generation/pdm_68_aligned_wild.mat');
|
||||||
|
|
||||||
|
for i=1:numel(users)
|
||||||
|
|
||||||
|
geom_files = dir([hog_data_dir, '/train/', users{i} '*.params.txt']);
|
||||||
|
geom_dir = [hog_data_dir, '/train/'];
|
||||||
|
if(isempty(geom_files))
|
||||||
|
geom_files = dir([hog_data_dir, '/devel/', users{i} '*.params.txt']);
|
||||||
|
geom_dir = [hog_data_dir, '/devel/'];
|
||||||
|
end
|
||||||
|
|
||||||
|
geom_data_curr = [];
|
||||||
|
for h=1:numel(geom_files)
|
||||||
|
geom_file = [geom_dir, geom_files(h).name];
|
||||||
|
|
||||||
|
[~, nm, ~] = fileparts(geom_file);
|
||||||
|
m_file = [geom_dir, '/' nm '.params.mat'];
|
||||||
|
|
||||||
|
if(~exist(m_file, 'file'))
|
||||||
|
res = dlmread(geom_file, ',', 1, 0);
|
||||||
|
save(m_file, 'res');
|
||||||
|
else
|
||||||
|
load(m_file);
|
||||||
|
end
|
||||||
|
|
||||||
|
valid = res(:, 4);
|
||||||
|
res = res(:, 11:end);
|
||||||
|
|
||||||
|
actual_locs = res * V';
|
||||||
|
res = cat(2, actual_locs, res);
|
||||||
|
|
||||||
|
valid_ids = cat(1, valid_ids, valid);
|
||||||
|
|
||||||
|
geom_data_curr = cat(1, geom_data_curr, res);
|
||||||
|
end
|
||||||
|
geom_data_curr = bsxfun(@plus, geom_data_curr, -median(geom_data_curr));
|
||||||
|
|
||||||
|
geom_data = cat(1, geom_data, geom_data_curr);
|
||||||
|
|
||||||
|
end
|
||||||
|
end
@ -0,0 +1,64 @@
% Change to your downloaded location
clear
addpath('C:\liblinear\matlab')
addpath('../training_code/');
addpath('../utilities/');
addpath('../../data extraction/');
%% load shared definitions and AU data
shared_defs;

% Set up the hyperparameters to be validated
hyperparams.c = 10.^(-7:0.5:1);
hyperparams.e = 10.^(-3);

hyperparams.validate_params = {'c', 'e'};

% Set the training function
svm_train = @svm_train_linear;

% Set the test function (the first output will be used for validation)
svm_test = @svm_test_linear;

pca_loc = '../../pca_generation/generic_face_rigid.mat';

hog_data_dir_BP4D = hog_data_dir;

aus = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23];
%%
for a=1:numel(aus)

au = aus(a);

rest_aus = setdiff(all_aus, au);

% load the training and testing data for the current fold
[train_samples, train_labels, valid_samples, valid_labels, ~, PC, means, scaling] = Prepare_HOG_AU_data_generic_dynamic(train_recs, devel_recs, au, BP4D_dir, hog_data_dir_BP4D, pca_loc);

train_samples = sparse(train_samples);
valid_samples = sparse(valid_samples);

%% Cross-validate here
[ best_params, ~ ] = validate_grid_search_no_par(svm_train, svm_test, false, train_samples, train_labels, valid_samples, valid_labels, hyperparams);
model = svm_train(train_labels, train_samples, best_params);

[~, predictions_all] = svm_test(valid_labels, valid_samples, model);

name = sprintf('results_BP4D_devel/AU_%d_dynamic.mat', au);

[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_regression_results( predictions_all, valid_labels );

save(name, 'model', 'F1s', 'accuracies', 'predictions_all', 'valid_labels');

% Write out the model
name = sprintf('models/AU_%d_dynamic.dat', au);

pos_lbl = model.Label(1);
neg_lbl = model.Label(2);

w = model.w(1:end-1)';
b = model.w(end);

svs = bsxfun(@times, PC, 1./scaling') * w;

write_lin_dyn_svm(name, means, svs, b, pos_lbl, neg_lbl);
end
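The last few lines of the script fold the feature normalisation and the PCA projection into a single weight vector, so the exported model only needs a dot product with the raw, mean-subtracted features at run time. Assuming the data preparation normalises raw features as (x - means)./scaling before projecting onto PC, which is what the svs computation implies, a minimal sanity-check sketch (x is a hypothetical feature row):

x = rand(1, numel(means));                           % hypothetical raw HOG + geometry row
x_proj = bsxfun(@times, x - means, 1./scaling) * PC; % full pipeline: normalise, then project
dec_full = x_proj * w + b;                           % decision value with PCA-space weights
dec_folded = (x - means) * svs + b;                  % decision value with the folded weights
assert(abs(dec_full - dec_folded) < 1e-8);           % the two should agree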
@ -0,0 +1,66 @@
% Change to your downloaded location
clear
addpath('C:\liblinear\matlab')
addpath('../training_code/');
addpath('../utilities/');
addpath('../../data extraction/');
%% load shared definitions and AU data
shared_defs;

% Set up the hyperparameters to be validated
hyperparams.c = 10.^(-7:0.5:1);
hyperparams.e = 10.^(-3);

hyperparams.validate_params = {'c', 'e'};

% Set the training function
svm_train = @svm_train_linear;

% Set the test function (the first output will be used for validation)
svm_test = @svm_test_linear;

pca_loc = '../../pca_generation/generic_face_rigid.mat';

hog_data_dir_BP4D = hog_data_dir;

aus = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23];
%%
for a=1:numel(aus)

au = aus(a);

rest_aus = setdiff(all_aus, au);

% load the training and testing data for the current fold
[train_samples, train_labels, valid_samples, valid_labels, ~, PC, means, scaling] = Prepare_HOG_AU_data_generic(train_recs, devel_recs, au, BP4D_dir, hog_data_dir_BP4D, pca_loc);

train_samples = sparse(train_samples);
valid_samples = sparse(valid_samples);

%% Cross-validate here
[ best_params, ~ ] = validate_grid_search_no_par(svm_train, svm_test, false, train_samples, train_labels, valid_samples, valid_labels, hyperparams);
model = svm_train(train_labels, train_samples, best_params);

[~, predictions_all] = svm_test(valid_labels, valid_samples, model);

name = sprintf('results_BP4D_devel/AU_%d_static.mat', au);

[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_regression_results( predictions_all, valid_labels );

save(name, 'model', 'F1s', 'accuracies', 'predictions_all', 'valid_labels');

% Write out the model
name = sprintf('models/AU_%d_static.dat', au);

pos_lbl = model.Label(1);
neg_lbl = model.Label(2);

w = model.w(1:end-1)';
b = model.w(end);

svs = bsxfun(@times, PC, 1./scaling') * w;

write_lin_svm(name, means, svs, b, pos_lbl, neg_lbl);
end
@ -0,0 +1,83 @@
%% load shared definitions and AU data
clear

addpath('../../data extraction/');
addpath('../utilities/');
addpath('../training_code/');

shared_defs;

% Set up the hyperparameters to be validated
hyperparams.c = 10.^(-7:1:4);
hyperparams.p = 10.^(-2);

hyperparams.validate_params = {'c', 'p'};

% Set the training function
svr_train = @svr_train_linear_shift_fancy;

% Set the test function (the first output will be used for validation)
svr_test = @svr_test_linear_shift_fancy;

pca_loc = '../../pca_generation/generic_face_rigid.mat';

hog_data_dir_BP4D = hog_data_dir;

aus = [6, 10, 12, 14, 17];
%%
for a=1:numel(aus)

predictions_all = [];
test_labels_all = [];

au = aus(a);

rest_aus = setdiff(all_aus, au);

% load the training and testing data for the current fold
[train_samples, train_labels, ~, valid_samples, valid_labels, vid_ids_devel, ~, PC, means, scaling, success_devel] = Prepare_HOG_AU_data_generic_intensity(train_recs, devel_recs, au, BP4D_dir_int, hog_data_dir_BP4D, pca_loc);

ignore = valid_labels == 9;

valid_samples = valid_samples(~ignore, :);
valid_labels = valid_labels(~ignore);
vid_ids_devel = vid_ids_devel(~ignore);
success_devel = success_devel(~ignore);

train_samples = sparse(train_samples);
valid_samples = sparse(valid_samples);

hyperparams.success = success_devel;
hyperparams.valid_samples = valid_samples;
hyperparams.valid_labels = valid_labels;
hyperparams.vid_ids = vid_ids_devel;

%% Cross-validate here
[ best_params, ~ ] = validate_grid_search_no_par(svr_train, svr_test, false, train_samples, train_labels, valid_samples, valid_labels, hyperparams);
model = svr_train(train_labels, train_samples, best_params);

clear 'train_samples'

%% Now test the model
model.vid_ids = vid_ids_devel;

[~, prediction] = svr_test(valid_labels, valid_samples, model);

name = sprintf('results_BP4D_devel/AU_%d_static_intensity_shift.mat', au);

[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_regression_results( prediction, valid_labels );

save(name, 'model', 'F1s', 'corrs', 'accuracies', 'ccc', 'rms', 'prediction', 'valid_labels');

% Go from raw data to the prediction
w = model.w(1:end-1)';
b = model.w(end);

svs = bsxfun(@times, PC, 1./scaling') * w;

name = sprintf('models/AU_%d_static_intensity_shift.dat', au);

write_lin_svr(name, means, svs, b);

end
@ -0,0 +1,80 @@
%% load shared definitions and AU data
clear

addpath('../../data extraction/');
addpath('../utilities/');
addpath('../training_code/');

shared_defs;

% Set up the hyperparameters to be validated
hyperparams.c = 10.^(-7:1:4);
hyperparams.p = 10.^(-2);

hyperparams.validate_params = {'c', 'p'};

% Set the training function
svr_train = @svr_train_linear;

% Set the test function (the first output will be used for validation)
svr_test = @svr_test_linear;

pca_loc = '../../pca_generation/generic_face_rigid.mat';

hog_data_dir_BP4D = hog_data_dir;

aus = [6, 10, 12, 14, 17];
%%
for a=1:numel(aus)

predictions_all = [];
test_labels_all = [];

au = aus(a);

rest_aus = setdiff(all_aus, au);

% load the training and testing data for the current fold
[train_samples, train_labels, ~, valid_samples, valid_labels, vid_ids_devel, ~, PC, means, scaling, success_devel] = Prepare_HOG_AU_data_generic_intensity(train_recs, devel_recs, au, BP4D_dir_int, hog_data_dir_BP4D, pca_loc);

ignore = valid_labels == 9;

valid_samples = valid_samples(~ignore, :);
valid_labels = valid_labels(~ignore);
vid_ids_devel = vid_ids_devel(~ignore);
success_devel = success_devel(~ignore);

train_samples = sparse(train_samples);
valid_samples = sparse(valid_samples);

hyperparams.success = success_devel;

%% Cross-validate here
[ best_params, ~ ] = validate_grid_search_no_par(svr_train, svr_test, false, train_samples, train_labels, valid_samples, valid_labels, hyperparams);
model = svr_train(train_labels, train_samples, best_params);

clear 'train_samples'

%% Now test the model
model.vid_ids = vid_ids_devel;

[~, prediction] = svr_test(valid_labels, valid_samples, model);

name = sprintf('results_BP4D_devel/AU_%d_static_intensity.mat', au);

[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_regression_results( prediction, valid_labels );

save(name, 'model', 'F1s', 'corrs', 'accuracies', 'ccc', 'rms', 'prediction', 'valid_labels');

% Go from raw data to the prediction
w = model.w(1:end-1)';
b = model.w(end);

svs = bsxfun(@times, PC, 1./scaling') * w;

name = sprintf('models/AU_%d_static_intensity.dat', au);

write_lin_svr(name, means, svs, b);

end
@ -0,0 +1,83 @@
%% load shared definitions and AU data
clear

addpath('../../data extraction/');
addpath('../utilities/');
addpath('../training_code/');

shared_defs;

% Set up the hyperparameters to be validated
hyperparams.c = 10.^(-7:1:4);
hyperparams.p = 10.^(-2);

hyperparams.validate_params = {'c', 'p'};

% Set the training function
svr_train = @svr_train_linear_shift_fancy;

% Set the test function (the first output will be used for validation)
svr_test = @svr_test_linear_shift_fancy;

pca_loc = '../../pca_generation/generic_face_rigid.mat';

hog_data_dir_BP4D = hog_data_dir;

aus = [6, 10, 12, 14, 17];
%%
for a=1:numel(aus)

predictions_all = [];
test_labels_all = [];

au = aus(a);

rest_aus = setdiff(all_aus, au);

% load the training and testing data for the current fold
[train_samples, train_labels, ~, valid_samples, valid_labels, vid_ids_devel, ~, PC, means, scaling, success_devel] = Prepare_HOG_AU_data_generic_intensity(train_recs, devel_recs, au, BP4D_dir_int, hog_data_dir_BP4D, pca_loc);

ignore = valid_labels == 9;

valid_samples = valid_samples(~ignore, :);
valid_labels = valid_labels(~ignore);
vid_ids_devel = vid_ids_devel(~ignore);
success_devel = success_devel(~ignore);

train_samples = sparse(train_samples);
valid_samples = sparse(valid_samples);

hyperparams.success = success_devel;
hyperparams.valid_samples = valid_samples;
hyperparams.valid_labels = valid_labels;
hyperparams.vid_ids = vid_ids_devel;

%% Cross-validate here
[ best_params, ~ ] = validate_grid_search_no_par(svr_train, svr_test, false, train_samples, train_labels, valid_samples, valid_labels, hyperparams);
model = svr_train(train_labels, train_samples, best_params);

clear 'train_samples'

%% Now test the model
model.vid_ids = vid_ids_devel;

[~, prediction] = svr_test(valid_labels, valid_samples, model);

name = sprintf('results_BP4D_devel/AU_%d_static_intensity_shift.mat', au);

[ accuracies, F1s, corrs, ccc, rms, classes ] = evaluate_regression_results( prediction, valid_labels );

save(name, 'model', 'F1s', 'corrs', 'accuracies', 'ccc', 'rms', 'prediction', 'valid_labels');

% Go from raw data to the prediction
w = model.w(1:end-1)';
b = model.w(end);

svs = bsxfun(@times, PC, 1./scaling') * w;

name = sprintf('models/AU_%d_static_intensity_shift.dat', au);

write_lin_svr(name, means, svs, b);

end
@ -0,0 +1,21 @@
function [train_users, dev_users] = get_balanced_fold(BP4D_dir, users, au, prop_test)

% Extracting the labels
[labels, valid_ids, vid_ids, filenames] = extract_BP4D_labels(BP4D_dir, users, au);

% the grouping should be done per person

for f=1:numel(filenames)
filenames{f} = filenames{f}(1:4);
end

counts = zeros(numel(users),1);
for k=1:numel(users)
counts(k) = sum(cat(1, labels{strcmp(filenames, users{k})}));
end

[sorted, inds] = sort(counts);

dev_users = users(inds(1:round(1/prop_test):end));
train_users = setdiff(users, dev_users);
end
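get_balanced_fold splits subjects rather than frames: it sorts the users by how often the requested AU is active for them and then takes every round(1/prop_test)-th user for the development set, so both splits cover the full range of activation frequencies. A hedged usage sketch, with the BP4D label path and subject IDs as hypothetical placeholders:

users = {'F001','F002','F003','F004','M001','M002','M003','M004'};   % hypothetical subject IDs
[train_users, dev_users] = get_balanced_fold('D:/BP4D/AUCoding/', users, 12, 0.25);
% With prop_test = 0.25, roughly a quarter of the subjects land in dev_users,
% picked at regular intervals of the sorted per-subject AU12 activation counts.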