Some more bug fixes to visualization and recording. Creating a metafile for recorded data.
parent fd4d06166a
commit bee6d185cb
6 changed files with 142 additions and 82 deletions
@@ -206,7 +206,7 @@ int main (int argc, char **argv)
visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, detection_success);
visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy));
visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
visualizer.ShowObservation();

// Setting up the recorder output

@@ -105,9 +105,11 @@ namespace Utilities

// Keep track of the file and output root location
std::string record_root = "processed"; // By default we are writing in the processed directory in the working directory
std::string of_filename;
std::string filename;
std::string csv_filename;
std::string aligned_output_directory;
std::ofstream metadata_file;

// The actual output file stream that will be written
RecorderCSV csv_recorder;

@@ -72,7 +72,7 @@ namespace Utilities
void SetObservationPose(const cv::Vec6d& pose, double confidence);

// Gaze related observations
void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d);
void SetObservationGaze(const cv::Point3f& gazeDirection0, const cv::Point3f& gazeDirection1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence);

// Face alignment related observations
void SetObservationFaceAlign(const cv::Mat& aligned_face);

@@ -88,6 +88,9 @@ namespace Utilities
bool vis_track;
bool vis_hog;
bool vis_align;

// Can be adjusted to show less confident frames
double visualisation_boundary = 0.4;

private:

@@ -77,32 +77,80 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
// From the filename, strip out the name without directory and extension
filename = path(in_filename).replace_extension("").filename().string();

// Consuming the input arguments
bool* valid = new bool[arguments.size()];

for (size_t i = 0; i < arguments.size(); ++i)
{
valid[i] = true;
}

string record_root;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-outroot") == 0)
{
record_root = arguments[i + 1];
}
}

// Determine output directory
bool output_found = false;
for (size_t i = 0; i < arguments.size(); ++i)
{
if (arguments[i].compare("-out_dir") == 0)
{
record_root = arguments[i + 1];
record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).string();
}
else if (!output_found && arguments[i].compare("-of") == 0)
{
record_root = (boost::filesystem::path(record_root) / boost::filesystem::path(arguments[i + 1])).remove_filename().string();
filename = path(boost::filesystem::path(arguments[i + 1])).replace_extension("").filename().string();
valid[i] = false;
valid[i + 1] = false;
i++;
output_found = true;
}
}

for (int i = (int)arguments.size() - 1; i >= 0; --i)
{
if (!valid[i])
{
arguments.erase(arguments.begin() + i);
}
}

// Construct the directories required for the output
CreateDirectory(record_root);

// Create the filename for the general output file that contains all of the meta information about the recording
path of_det_name(filename);
of_det_name = path(record_root) / of_det_name.concat("_of_details.txt");

// Write into the OF details file what we are outputting, what the input is, etc.
metadata_file.open(of_det_name.string(), std::ios_base::out);

// Populate the metadata file
metadata_file << "Input:" << in_filename << endl;

// Create the required individual recorders, CSV, HOG, aligned, video
csv_filename = (path(record_root) / path(filename).replace_extension(".csv")).string();
metadata_file << "Output csv:" << csv_filename << endl;

// Construct HOG recorder here
if(params.outputHOG())
{
std::string hog_filename = (path(record_root) / path(filename).replace_extension(".hog")).string();
hog_recorder.Open(hog_filename);
metadata_file << "Output HOG:" << hog_filename << endl;
}

// saving the videos
if (params.outputTrackedVideo())
{
this->video_filename = (path(record_root) / path(filename).replace_extension(".avi")).string();
metadata_file << "Output video:" << this->video_filename << endl;
}

// Prepare image recording

@@ -110,9 +158,10 @@ RecorderOpenFace::RecorderOpenFace(const std::string in_filename, RecorderOpenFa
{
aligned_output_directory = (path(record_root) / path(filename + "_aligned")).string();
CreateDirectory(aligned_output_directory);
metadata_file << "Output aligned directory:" << this->aligned_output_directory << endl;
}

observation_count = 0;

}

@@ -279,6 +328,8 @@ void RecorderOpenFace::Close()
hog_recorder.Close();
csv_recorder.Close();
video_writer.release();
metadata_file.close();

}

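Taken together, the RecorderOpenFace changes above mean each recording now gets a small plain-text metafile named <filename>_of_details.txt in the output directory, listing the input and every output that was enabled. As a rough sketch of its contents (the input name "my_video.avi" and an output root of "processed" are hypothetical here; only the lines for enabled outputs are written):

Input:my_video.avi
Output csv:processed/my_video.csv
Output HOG:processed/my_video.hog
Output video:processed/my_video.avi
Output aligned directory:processed/my_video_aligned
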
@@ -109,23 +109,26 @@ void Visualizer::SetObservationHOG(const cv::Mat_<double>& hog_descriptor, int n
void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, double confidence, bool success, const cv::Mat_<int>& visibilities)
{

// Draw 2D landmarks on the image
int n = landmarks_2D.rows / 2;

// Drawing feature points
for (int i = 0; i < n; ++i)
if(confidence > visualisation_boundary)
{
if (visibilities.empty() || visibilities.at<int>(i))
// Draw 2D landmarks on the image
int n = landmarks_2D.rows / 2;

// Drawing feature points
for (int i = 0; i < n; ++i)
{
cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
if (visibilities.empty() || visibilities.at<int>(i))
{
cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));

// A rough heuristic for drawn point size
int thickness = (int)std::ceil(3.0* ((double)captured_image.cols) / 640.0);
int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);
// A rough heuristic for drawn point size
int thickness = (int)std::ceil(3.0* ((double)captured_image.cols) / 640.0);
int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);

cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 255), thickness, CV_AA, draw_shiftbits);
cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);

}
}
}
}

@@ -133,7 +136,7 @@ void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, d
void Visualizer::SetObservationPose(const cv::Vec6d& pose, double confidence)
{

double visualisation_boundary = 0.4;

// Only draw if the reliability is reasonable, the value is slightly ad-hoc
if (confidence > visualisation_boundary)

|
|||
}
|
||||
|
||||
// Eye gaze infomration drawing, first of eye landmarks then of gaze
|
||||
void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d)
|
||||
void Visualizer::SetObservationGaze(const cv::Point3f& gaze_direction0, const cv::Point3f& gaze_direction1, const cv::Vec2d& gaze_angle, const std::vector<cv::Point2d>& eye_landmarks2d, const std::vector<cv::Point3d>& eye_landmarks3d, double confidence)
|
||||
{
|
||||
|
||||
if (eye_landmarks2d.size() > 0)
|
||||
if(confidence > visualisation_boundary)
|
||||
{
|
||||
// First draw the eye region landmarks
|
||||
for (size_t i = 0; i < eye_landmarks2d.size(); ++i)
|
||||
if (eye_landmarks2d.size() > 0)
|
||||
{
|
||||
cv::Point featurePoint(cvRound(eye_landmarks2d[i].x * (double)draw_multiplier), cvRound(eye_landmarks2d[i].y * (double)draw_multiplier));
|
||||
// First draw the eye region landmarks
|
||||
for (size_t i = 0; i < eye_landmarks2d.size(); ++i)
|
||||
{
|
||||
cv::Point featurePoint(cvRound(eye_landmarks2d[i].x * (double)draw_multiplier), cvRound(eye_landmarks2d[i].y * (double)draw_multiplier));
|
||||
|
||||
// A rough heuristic for drawn point size
|
||||
int thickness = 1;
|
||||
int thickness_2 = 1;
|
||||
// A rough heuristic for drawn point size
|
||||
int thickness = 1;
|
||||
int thickness_2 = 1;
|
||||
|
||||
size_t next_point = i + 1;
|
||||
if (i == 7)
|
||||
next_point = 0;
|
||||
if (i == 19)
|
||||
next_point = 8;
|
||||
if (i == 27)
|
||||
next_point = 20;
|
||||
size_t next_point = i + 1;
|
||||
if (i == 7)
|
||||
next_point = 0;
|
||||
if (i == 19)
|
||||
next_point = 8;
|
||||
if (i == 27)
|
||||
next_point = 20;
|
||||
|
||||
if (i == 7 + 28)
|
||||
next_point = 0 + 28;
|
||||
if (i == 19 + 28)
|
||||
next_point = 8 + 28;
|
||||
if (i == 27 + 28)
|
||||
next_point = 20 + 28;
|
||||
if (i == 7 + 28)
|
||||
next_point = 0 + 28;
|
||||
if (i == 19 + 28)
|
||||
next_point = 8 + 28;
|
||||
if (i == 27 + 28)
|
||||
next_point = 20 + 28;
|
||||
|
||||
cv::Point nextFeaturePoint(cvRound(eye_landmarks2d[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks2d[next_point].y * (double)draw_multiplier));
|
||||
if ((i < 28 && (i < 8 || i > 19)) || (i >= 28 && (i < 8 + 28 || i > 19 + 28)))
|
||||
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
|
||||
else
|
||||
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
|
||||
cv::Point nextFeaturePoint(cvRound(eye_landmarks2d[next_point].x * (double)draw_multiplier), cvRound(eye_landmarks2d[next_point].y * (double)draw_multiplier));
|
||||
if ((i < 28 && (i < 8 || i > 19)) || (i >= 28 && (i < 8 + 28 || i > 19 + 28)))
|
||||
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
|
||||
else
|
||||
cv::line(captured_image, featurePoint, nextFeaturePoint, cv::Scalar(0, 0, 255), thickness_2, CV_AA, draw_shiftbits);
|
||||
|
||||
}
|
||||
|
||||
// Now draw the gaze lines themselves
|
||||
cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
|
||||
|
||||
// Grabbing the pupil location, to draw eye gaze need to know where the pupil is
|
||||
cv::Point3d pupil_left(0, 0, 0);
|
||||
cv::Point3d pupil_right(0, 0, 0);
|
||||
for (size_t i = 0; i < 8; ++i)
|
||||
{
|
||||
pupil_left = pupil_left + eye_landmarks3d[i];
|
||||
pupil_right = pupil_right + eye_landmarks3d[i + eye_landmarks3d.size()/2];
|
||||
}
|
||||
pupil_left = pupil_left / 8;
|
||||
pupil_right = pupil_right / 8;
|
||||
|
||||
std::vector<cv::Point3d> points_left;
|
||||
points_left.push_back(cv::Point3d(pupil_left));
|
||||
points_left.push_back(cv::Point3d(pupil_left + cv::Point3d(gaze_direction0)*50.0));
|
||||
|
||||
std::vector<cv::Point3d> points_right;
|
||||
points_right.push_back(cv::Point3d(pupil_right));
|
||||
points_right.push_back(cv::Point3d(pupil_right + cv::Point3d(gaze_direction1)*50.0));
|
||||
|
||||
cv::Mat_<double> proj_points;
|
||||
cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
|
||||
Project(proj_points, mesh_0, fx, fy, cx, cy);
|
||||
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
|
||||
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
|
||||
|
||||
cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
|
||||
Project(proj_points, mesh_1, fx, fy, cx, cy);
|
||||
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
|
||||
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
|
||||
|
||||
}
|
||||
|
||||
// Now draw the gaze lines themselves
|
||||
cv::Mat cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
|
||||
|
||||
// Grabbing the pupil location, to draw eye gaze need to know where the pupil is
|
||||
cv::Point3d pupil_left(0, 0, 0);
|
||||
cv::Point3d pupil_right(0, 0, 0);
|
||||
for (size_t i = 0; i < 8; ++i)
|
||||
{
|
||||
pupil_left = pupil_left + eye_landmarks3d[i];
|
||||
pupil_right = pupil_right + eye_landmarks3d[i + eye_landmarks3d.size()/2];
|
||||
}
|
||||
pupil_left = pupil_left / 8;
|
||||
pupil_right = pupil_right / 8;
|
||||
|
||||
std::vector<cv::Point3d> points_left;
|
||||
points_left.push_back(cv::Point3d(pupil_left));
|
||||
points_left.push_back(cv::Point3d(pupil_left + cv::Point3d(gaze_direction0)*50.0));
|
||||
|
||||
std::vector<cv::Point3d> points_right;
|
||||
points_right.push_back(cv::Point3d(pupil_right));
|
||||
points_right.push_back(cv::Point3d(pupil_right + cv::Point3d(gaze_direction1)*50.0));
|
||||
|
||||
cv::Mat_<double> proj_points;
|
||||
cv::Mat_<double> mesh_0 = (cv::Mat_<double>(2, 3) << points_left[0].x, points_left[0].y, points_left[0].z, points_left[1].x, points_left[1].y, points_left[1].z);
|
||||
Project(proj_points, mesh_0, fx, fy, cx, cy);
|
||||
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
|
||||
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
|
||||
|
||||
cv::Mat_<double> mesh_1 = (cv::Mat_<double>(2, 3) << points_right[0].x, points_right[0].y, points_right[0].z, points_right[1].x, points_right[1].y, points_right[1].z);
|
||||
Project(proj_points, mesh_1, fx, fy, cx, cy);
|
||||
cv::line(captured_image, cv::Point(cvRound(proj_points.at<double>(0, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(0, 1) * (double)draw_multiplier)),
|
||||
cv::Point(cvRound(proj_points.at<double>(1, 0) * (double)draw_multiplier), cvRound(proj_points.at<double>(1, 1) * (double)draw_multiplier)), cv::Scalar(110, 220, 0), 2, CV_AA, draw_shiftbits);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void Visualizer::ShowObservation()
|
||||
|
|
|
@@ -14,7 +14,7 @@ dbSeqDir = dbSeqDir(3:end);

output_dir = cat(2, output_dir, '/');

command = sprintf('%s -inroot "%s" -out_dir "%s" -fx 505 -fy 505 -cx 320 -cy 240 -pose -vis-track ', executable, rootDir, output_dir);
command = sprintf('%s -inroot "%s" -outroot "%s" -fx 505 -fy 505 -cx 320 -cy 240 -pose -vis-track ', executable, rootDir, output_dir);

if(verbose)
command = cat(2, command, [' -tracked ' outputVideo]);

@@ -26,7 +26,7 @@ end

for i=1:numel(dbSeqDir)
inputFile = [biwiDir dbSeqDir(i).name '/colour.avi'];
command = cat(2, command, sprintf(' -f "%s" ', inputFile));
command = sprintf('%s -f "%s" -of "%s" ', command, inputFile, dbSeqDir(i).name);
end

if(isunix)
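With the two script changes above, every Biwi sequence now gets its own -of name, so the recorder writes each sequence's output under its directory name instead of one shared output file. For illustration only (the executable path, root directories, and sequence names "01"/"02" are hypothetical), the loop builds a command along these lines:

<executable> -inroot "/data/biwi" -outroot "/data/biwi_out/" -fx 505 -fy 505 -cx 320 -cy 240 -pose -vis-track -f "/data/biwi/01/colour.avi" -of "01" -f "/data/biwi/02/colour.avi" -of "02" ...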