Some renaming to prepare for video scrubbing mode.
parent 41fc0bd43c
commit e0f71fb67c

2 changed files with 267 additions and 143 deletions
@@ -227,7 +227,7 @@ namespace OpenFaceOffline
file_no_ext = System.IO.Path.GetFileName(file_no_ext);

// Start the actual processing and recording
- VideoLoop(file_no_ext);
+ FeatureExtractionLoop(file_no_ext);

}
else
@@ -291,7 +291,7 @@ namespace OpenFaceOffline
String file_no_ext = System.IO.Path.GetFileNameWithoutExtension(filename);

// Start the actual processing
- VideoLoop(file_no_ext);
+ FeatureExtractionLoop(file_no_ext);

}
else
@@ -396,11 +396,9 @@ namespace OpenFaceOffline

// Capturing and processing the video frame by frame
- private void VideoLoop(string output_file_name)
+ private void FeatureExtractionLoop(string output_file_name)
{

Thread.CurrentThread.IsBackground = true;

DateTime? startTime = CurrentTime;

var lastFrameTime = CurrentTime;
@@ -456,9 +454,164 @@ namespace OpenFaceOffline
continue;
}

- bool detectionSucceeding = ProcessFrame(clnf_model, clnf_params, frame, grayFrame, fx, fy, cx, cy);
+ detectionSucceeding = ProcessFrame(clnf_model, clnf_params, frame, grayFrame, fx, fy, cx, cy);

double scale = clnf_model.GetRigidParams()[0];
// The face analysis step (for AUs and eye gaze)
face_analyser.AddNextFrame(frame, clnf_model, fx, fy, cx, cy, false, ShowAppearance, false); // TODO change

recorder.RecordFrame(clnf_model, face_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);

List<Tuple<double, double>> landmarks = clnf_model.CalculateLandmarks();

VisualizeFeatures(frame, landmarks, fx, fy, cx, cy, progress);

if (reset)
{
clnf_model.Reset();
face_analyser.Reset();
reset = false;
}

while (thread_running & thread_paused && skip_frames == 0)
{
Thread.Sleep(10);
}

frame_id++;

if (skip_frames > 0)
skip_frames--;

}

latest_img = null;
skip_frames = 0;

// Unpause if it's paused
if (thread_paused)
{
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
{
PauseButton_Click(null, null);
}));
}

recorder.FinishRecording(clnf_model, face_analyser);

}

// Replaying the features frame by frame
private void FeatureVisualizationLoop(string input_feature_file, string input_video_file)
{

DateTime? startTime = CurrentTime;

var lastFrameTime = CurrentTime;

clnf_model.Reset();
face_analyser.Reset();

// TODO these need to be stored so that they could be loaded somewhere
double fx = 500.0 * (capture.width / 640.0);
double fy = 500.0 * (capture.height / 480.0);

fx = (fx + fy) / 2.0;
fy = fx;

double cx = capture.width / 2f;
double cy = capture.height / 2f;

int frame_id = 0;

double fps = capture.GetFPS();
if (fps <= 0) fps = 30;

while (thread_running)
{
//////////////////////////////////////////////
// CAPTURE FRAME AND DETECT LANDMARKS FOLLOWED BY THE REQUIRED IMAGE PROCESSING
//////////////////////////////////////////////
RawImage frame = null;
double progress = -1;

frame = new RawImage(capture.GetNextFrame(false));
progress = capture.GetProgress();

if (frame.Width == 0)
{
// This indicates that we reached the end of the video file
break;
}

// TODO stop button should actually clear the video
lastFrameTime = CurrentTime;
processing_fps.AddFrame();

var grayFrame = new RawImage(capture.GetCurrentFrameGray());

if (grayFrame == null)
{
Console.WriteLine("Gray is empty");
continue;
}

detectionSucceeding = ProcessFrame(clnf_model, clnf_params, frame, grayFrame, fx, fy, cx, cy);

// The face analysis step (for AUs and eye gaze)
face_analyser.AddNextFrame(frame, clnf_model, fx, fy, cx, cy, false, ShowAppearance, false); // TODO change

recorder.RecordFrame(clnf_model, face_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);

List<Tuple<double, double>> landmarks = clnf_model.CalculateLandmarks();

VisualizeFeatures(frame, landmarks, fx, fy, cx, cy, progress);

if (reset)
{
clnf_model.Reset();
face_analyser.Reset();
reset = false;
}

while (thread_running & thread_paused && skip_frames == 0)
{
Thread.Sleep(10);
}

frame_id++;

if (skip_frames > 0)
skip_frames--;

}

latest_img = null;
skip_frames = 0;

// Unpause if it's paused
if (thread_paused)
{
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
{
PauseButton_Click(null, null);
}));
}

recorder.FinishRecording(clnf_model, face_analyser);

}

private void VisualizeFeatures(RawImage frame, List<Tuple<double, double>> landmarks, double fx, double fy, double cx, double cy, double progress)
{
List<Tuple<Point, Point>> lines = null;
List<Tuple<double, double>> eye_landmarks = null;
List<Tuple<Point, Point>> gaze_lines = null;
Tuple<double, double> gaze_angle = new Tuple<double, double>(0, 0);

List<double> pose = new List<double>();
clnf_model.GetPose(pose, fx, fy, cx, cy);
List<double> non_rigid_params = clnf_model.GetNonRigidParams();

double confidence = (-clnf_model.GetConfidence()) / 2.0 + 0.5;

@@ -467,29 +620,19 @@ namespace OpenFaceOffline
else if (confidence > 1)
confidence = 1;

List<double> pose = new List<double>();
clnf_model.GetPose(pose, fx, fy, cx, cy);
List<double> non_rigid_params = clnf_model.GetNonRigidParams();

// The face analysis step (for AUs and eye gaze)
face_analyser.AddNextFrame(frame, clnf_model, fx, fy, cx, cy, false, ShowAppearance, false); // TODO change

List<Tuple<Point, Point>> lines = null;
List<Tuple<double, double>> landmarks = null;
List<Tuple<double, double>> eye_landmarks = null;
List<Tuple<Point, Point>> gaze_lines = null;
Tuple<double, double> gaze_angle = new Tuple<double, double>(0, 0);

if (detectionSucceeding)
{
landmarks = clnf_model.CalculateLandmarks();

eye_landmarks = clnf_model.CalculateEyeLandmarks();
lines = clnf_model.CalculateBox((float)fx, (float)fy, (float)cx, (float)cy);

double scale = clnf_model.GetRigidParams()[0];

gaze_lines = face_analyser.CalculateGazeLines(scale, (float)fx, (float)fy, (float)cx, (float)cy);
gaze_angle = face_analyser.GetGazeAngle();
}

- // Visualisation
+ // Visualisation (as a separate function)
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
{
if (ShowAUs)
@@ -598,40 +741,6 @@ namespace OpenFaceOffline
}
}));

recorder.RecordFrame(clnf_model, face_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);

if (reset)
{
clnf_model.Reset();
face_analyser.Reset();
reset = false;
}

while (thread_running & thread_paused && skip_frames == 0)
{
Thread.Sleep(10);
}

frame_id++;

if (skip_frames > 0)
skip_frames--;

}

latest_img = null;
skip_frames = 0;

// Unpause if it's paused
if (thread_paused)
{
Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
{
PauseButton_Click(null, null);
}));
}

recorder.FinishRecording(clnf_model, face_analyser);

}
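As an aside on the camera intrinsics guess in FeatureVisualizationLoop above (the block under the "TODO these need to be stored" comment), here is a minimal standalone C# sketch of the same arithmetic. The 1280x720 capture size is assumed purely for illustration and is not part of this commit.

// Standalone sketch of the intrinsics guess used above; the capture size is an assumption.
class IntrinsicsGuessSketch
{
    static void Main()
    {
        double width = 1280, height = 720;     // assumed capture size

        double fx = 500.0 * (width / 640.0);   // 500 * 2.0 = 1000
        double fy = 500.0 * (height / 480.0);  // 500 * 1.5 = 750

        fx = (fx + fy) / 2.0;                  // average to a single focal length: 875
        fy = fx;

        double cx = width / 2.0;               // principal point at the image centre: 640
        double cy = height / 2.0;              // 360

        System.Console.WriteLine($"fx={fx} fy={fy} cx={cx} cy={cy}");
    }
}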
@@ -42,6 +42,21 @@ namespace OpenFaceOffline
{
System.IO.Directory.CreateDirectory(root);
}

// Write out the OF file which tells where all the relevant data is
StreamWriter out_of_file = new StreamWriter(root + "/" + filename + ".of");

//out_of_file.WriteLine("Video_file:" + )
out_of_file.WriteLine("CSV file: " + root + "/" + filename + ".csv");
if(record_HOG)
{
out_of_file.WriteLine("HOG file: " + root + "/" + filename + ".hog");
}
if(record_aligned)
{
out_of_file.WriteLine("Aligned dir: " + root + "/" + filename + "/");
}

out_filename = root + "/" + filename + ".csv";
output_features_file = new StreamWriter(out_filename);
output_features_file.Write("frame, timestamp, confidence, success");
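For reference, with a hypothetical record root of "./processed" and base filename "video1" (both names assumed for illustration, not taken from the commit), and with record_HOG and record_aligned both enabled, the .of metadata file written by the code above would contain:

CSV file: ./processed/video1.csv
HOG file: ./processed/video1.hog
Aligned dir: ./processed/video1/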