More bug fixes revealed by demo scripts.
parent 41af428f5c
commit 89332ea943

8 changed files with 32 additions and 42 deletions
@@ -168,10 +168,8 @@ int main (int argc, char **argv)
 visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
 visualizer.SetFps(fps_tracker.GetFPS());
-visualizer.ShowObservation();
-
-// detect key presses
-char character_press = cv::waitKey(1);
+// detect key presses (due to pecularities of OpenCV, you can get it when displaying images)
+char character_press = visualizer.ShowObservation();
 
 // restart the tracker
 if (character_press == 'r')
@@ -295,10 +295,9 @@ int main (int argc, char **argv)
 }
 }
 visualizer.SetFps(fps_tracker.GetFPS());
-visualizer.ShowObservation();
 
-// detect key presses
-char character_press = cv::waitKey(1);
+// show visualization and detect key presses
+char character_press = visualizer.ShowObservation();
 
 // restart the trackers
 if(character_press == 'r')
@@ -312,7 +311,7 @@ int main (int argc, char **argv)
 // quit the application
 else if(character_press=='q')
 {
-return(0);
+return 0;
 }
 
 // Update the frame count
@@ -332,6 +331,8 @@ int main (int argc, char **argv)
 active_models[model] = false;
 }
 
+sequence_number++;
+
 }
 
 return 0;
@@ -190,7 +190,15 @@ int main (int argc, char **argv)
 visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
 visualizer.SetFps(fps_tracker.GetFPS());
-visualizer.ShowObservation();
+
+// detect key presses
+char character_press = visualizer.ShowObservation();
+
+// quit processing the current sequence (useful when in Webcam mode)
+if (character_press == 'q')
+{
+break;
+}
 
 // Setting up the recorder output
 open_face_rec.SetObservationHOG(detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); // The number of channels in HOG is fixed at the moment, as using FHOG
@@ -82,7 +82,8 @@ namespace Utilities
 
 void SetFps(double fps);
 
-void ShowObservation();
+// Return key-press that could have resulted in the open windows
+char ShowObservation();
 
 cv::Mat GetVisImage();
 
@@ -265,7 +265,7 @@ void Visualizer::SetFps(double fps)
 cv::putText(captured_image, fpsSt, cv::Point(10, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255, 0, 0), 1, CV_AA);
 }
 
-void Visualizer::ShowObservation()
+char Visualizer::ShowObservation()
 {
 if (vis_track)
 {
@@ -280,7 +280,7 @@ void Visualizer::ShowObservation()
 {
 cv::imshow("hog", hog_image);
 }
-cv::waitKey(1);
+return cv::waitKey(1);
 }
 
 cv::Mat Visualizer::GetVisImage()
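The pattern the Visualizer hunks adopt is to have the function that displays the frame also return the key code from cv::waitKey, so call sites can branch on the result directly instead of issuing a second waitKey. Below is a minimal, self-contained OpenCV-only sketch of that pattern; it is not part of this commit, and the ShowFrame helper, window name, and webcam source are illustrative assumptions.

#include <opencv2/opencv.hpp>

// Analogue of the new char Visualizer::ShowObservation(): show the frame,
// then report which key (if any) was pressed while the window was active.
static char ShowFrame(const cv::Mat& frame)
{
	cv::imshow("tracking result", frame);
	return (char)cv::waitKey(1);
}

int main()
{
	cv::VideoCapture capture(0); // illustrative webcam input, not from the commit
	cv::Mat frame;
	while (capture.read(frame))
	{
		char character_press = ShowFrame(frame);
		if (character_press == 'r')
		{
			// a caller would restart its tracker here, as in the call sites above
		}
		else if (character_press == 'q')
		{
			break; // quit, mirroring the 'q' handling above
		}
	}
	return 0;
}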
@@ -8,33 +8,17 @@ end
 in_dir = '../../samples/';
 out_dir = './demo_img/';
 
-if(~exist(out_dir, 'file'))
-mkdir(out_dir);
-end
+model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie data (a CLNF model)
 
-% some parameters
-verbose = true;
+% Uncomment the below models if you want to try them
+%model = 'model/main_clnf_wild.txt'; % Trained on in-the-wild data only
 
-% Trained on in the wild and multi-pie data (less accurate CLM model)
-% model = 'model/main_clm_general.txt';
-% Trained on in-the-wild
-%model = 'model/main_clm_wild.txt';
-
-% Trained on in the wild and multi-pie data (more accurate CLNF model)
-model = 'model/main_clnf_general.txt';
-% Trained on in-the-wild
-%model = 'model/main_clnf_wild.txt';
-
-command = executable;
-
-command = cat(2, command, [' -fdir "' in_dir '"']);
-
-if(verbose)
-command = cat(2, command, [' -ofdir "' out_dir '"']);
-command = cat(2, command, [' -oidir "' out_dir '"']);
-end
-
-command = cat(2, command, [' -mloc "', model, '"']);
+%model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
+%model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
+
+% Load images (-fdir), output images and all the features (-out_dir), use a
+% user specified model (-mloc), and visualize everything (-verbose)
+command = sprintf('%s -fdir "%s" -out_dir "%s" -verbose -mloc "%s" ', executable, in_dir, out_dir, model);
 
 % Demonstrates the multi-hypothesis slow landmark detection (more accurate
 % when dealing with non-frontal faces and less accurate face detections)
@@ -24,9 +24,8 @@ model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie da
 %model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
 %model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
 
-% Create a command that will run the tracker on set of videos,
-% and visualize the output (-verbose)
-command = sprintf('%s -mloc "%s" -verbose', executable, model);
+% Create a command that will run the tracker on set of videos and display the output
+command = sprintf('%s -mloc "%s" ', executable, model);
 
 % add all videos to single argument list (so as not to load the model anew
 % for every video)
@@ -19,9 +19,8 @@ model = 'model/main_clnf_general.txt'; % Trained on in the wild and multi-pie da
 %model = 'model/main_clm_general.txt'; % Trained on in the wild and multi-pie data (less accurate SVR/CLM model)
 %model = 'model/main_clm_wild.txt'; % Trained on in-the-wild
 
-% Create a command that will run the tracker on set of videos,
-% and visualize the output (-verbose)
-command = sprintf('%s -mloc "%s" -verbose', executable, model);
+% Create a command that will run the tracker on set of videos and display the output
+command = sprintf('%s -mloc "%s" ', executable, model);
 
 % add all videos to single argument list (so as not to load the model anew
 % for every video)