Visualization fixes.
parent 3f6878c5c3
commit ffe2e66081
14 changed files with 124 additions and 54 deletions
@@ -192,7 +192,7 @@ int main (int argc, char **argv)
 // Displaying the tracking visualizations
 visualizer.SetObservationFaceAlign(sim_warped_img);
 visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
-visualizer.SetObservationLandmarks(face_model.detected_landmarks, 1.0); // Set confidence to high to make sure we always visualize
+visualizer.SetObservationLandmarks(face_model.detected_landmarks, 1.0, face_model.GetVisibilities()); // Set confidence to high to make sure we always visualize
 visualizer.SetObservationPose(pose_estimate, 1.0);
 visualizer.SetObservationGaze(gaze_direction0, gaze_direction1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, image_reader.fx, image_reader.fy, image_reader.cx, image_reader.cy), face_model.detection_certainty);
@@ -164,7 +164,7 @@ int main (int argc, char **argv)
 // Displaying the tracking visualizations
 visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
-visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty);
+visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities());
 visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
 visualizer.SetFps(fps_tracker.GetFPS());
@@ -194,7 +194,7 @@ int main (int argc, char **argv)
 visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy);
 visualizer.SetObservationFaceAlign(sim_warped_img);
 visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols);
-visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty);
+visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities());
 visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty);
 visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty);
 visualizer.SetFps(fps_tracker.GetFPS());
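The three hunks above make the same change in each command-line tool's main(): the per-landmark visibility mask from face_model.GetVisibilities() is now passed to the visualizer together with the detected landmarks. As a point of reference (not OpenFace code), here is a minimal standalone C++ sketch of the data layout these calls rely on: a 2n-by-1 landmark column vector with the x coordinates stacked above the y coordinates, paired with one integer visibility flag per landmark, matching the interop and visualizer hunks further down. The sample values are invented.

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        // Five illustrative landmarks, stored the way this diff stores them:
        // a 2n x 1 column vector with the x coordinates followed by the y coordinates.
        const int n = 5;
        cv::Mat_<double> landmarks_2D(2 * n, 1, 0.0);
        for (int i = 0; i < n; ++i)
        {
            landmarks_2D(i, 0) = 100.0 + 10.0 * i;     // x_i
            landmarks_2D(i + n, 0) = 200.0 + 5.0 * i;  // y_i
        }

        // One flag per landmark (1 = visible, 0 = self-occluded), mirroring the
        // cv::Mat_<int> mask that GetVisibilities() produces later in this commit.
        cv::Mat_<int> visibilities = (cv::Mat_<int>(n, 1) << 1, 1, 0, 1, 0);

        for (int i = 0; i < n; ++i)
        {
            std::cout << "landmark " << i << " at ("
                      << landmarks_2D(i, 0) << ", " << landmarks_2D(i + n, 0) << ") is "
                      << (visibilities(i, 0) ? "visible" : "self-occluded") << std::endl;
        }
        return 0;
    }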
@@ -417,7 +417,7 @@ namespace HeadPoseLive
 List<System.Windows.Point> landmarks = new List<System.Windows.Point>();
 List<Tuple<System.Windows.Point, System.Windows.Point>> gaze_lines = null;
 Tuple<double, double> gaze_angle = new Tuple<double, double>(0, 0);
-double scale = 0;
+double scale = face_model.GetRigidParams()[0];
 if (detectionSucceeding)
 {
@@ -428,9 +428,7 @@ namespace HeadPoseLive
 eye_landmarks = face_model.CalculateVisibleEyeLandmarks();
-scale = face_model.GetRigidParams()[0];
-gaze_lines = gaze_analyser.CalculateGazeLines(scale, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
+gaze_lines = gaze_analyser.CalculateGazeLines(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
 gaze_angle = gaze_analyser.GetGazeAngle();
 lines = face_model.CalculateBox(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
@@ -251,7 +251,7 @@ namespace OpenFaceDemo
 landmarks = landmark_detector.CalculateVisibleLandmarks();
 eye_landmarks = landmark_detector.CalculateVisibleEyeLandmarks();
 lines = landmark_detector.CalculateBox(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
-gaze_lines = gaze_analyser.CalculateGazeLines(scale, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
+gaze_lines = gaze_analyser.CalculateGazeLines(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
 }
 // Visualisation
@@ -64,7 +64,7 @@
 <Label></Label>
 </Border>
 <Border Name="VideoBorder" Grid.Row="1" Grid.Column="0" BorderBrush="Black" BorderThickness="1" Background="LightGray" Margin="5,5,0,0">
-<OpenFaceOffline:OverlayImage x:Name="video" />
+<OpenFaceOffline:OverlayImage x:Name="overlay_image" />
 </Border>
 <GroupBox Name="AppearanceBorder" Grid.Row="1" Grid.Column="1" BorderBrush="Black" BorderThickness="1" MinHeight="100">
@@ -224,7 +224,7 @@ namespace OpenFaceOffline
 gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
 // Only the final face will contain the details
-VisualizeFeatures(frame, visualizer_of, landmark_detector.CalculateAllLandmarks(), detection_succeeding, true, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
+VisualizeFeatures(frame, visualizer_of, landmark_detector.CalculateAllLandmarks(), landmark_detector.GetVisibilities(), detection_succeeding, true, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
 // Record an observation
 RecordObservation(recorder, visualizer_of.GetVisImage(), detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), reader.GetTimestamp());
@@ -237,8 +237,6 @@ namespace OpenFaceOffline
 if (skip_frames > 0)
 skip_frames--;
-latest_img = null;
 frame = new RawImage(reader.GetNextImage());
 gray_frame = new RawImage(reader.GetCurrentFrameGray());
@@ -330,15 +328,13 @@ namespace OpenFaceOffline
 gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
 // Only the final face will contain the details
-VisualizeFeatures(frame, visualizer_of, landmarks, detection_succeeding, i == 0, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
+VisualizeFeatures(frame, visualizer_of, landmarks, landmark_detector.GetVisibilities(), detection_succeeding, i == 0, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);
 // Record an observation
 RecordObservation(recorder, visualizer_of.GetVisImage(), detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0);
 }
-latest_img = null;
 frame = new RawImage(reader.GetNextImage());
 gray_frame = new RawImage(reader.GetCurrentFrameGray());
@@ -398,7 +394,7 @@ namespace OpenFaceOffline
 }
-private void VisualizeFeatures(RawImage frame, Visualizer visualizer, List<Tuple<double, double>> landmarks, bool detection_succeeding,
+private void VisualizeFeatures(RawImage frame, Visualizer visualizer, List<Tuple<double, double>> landmarks, List<bool> visibilities, bool detection_succeeding,
 bool new_image, float fx, float fy, float cx, float cy, double progress)
 {
@@ -418,7 +414,7 @@ namespace OpenFaceOffline
 else if (confidence > 1)
 confidence = 1;
-double scale = 0;
+double scale = landmark_detector.GetRigidParams()[0];
 // Helps with recording and showing the visualizations
 if (new_image)
@@ -426,19 +422,16 @@ namespace OpenFaceOffline
 visualizer.SetImage(frame, fx, fy, cx, cy);
 }
 visualizer.SetObservationHOG(face_analyser.GetLatestHOGFeature(), face_analyser.GetHOGRows(), face_analyser.GetHOGCols());
-visualizer.SetObservationLandmarks(landmarks, confidence); // Set confidence to high to make sure we always visualize
+visualizer.SetObservationLandmarks(landmarks, confidence, visibilities);
 visualizer.SetObservationPose(pose, confidence);
 visualizer.SetObservationGaze(gaze_analyser.GetGazeCamera().Item1, gaze_analyser.GetGazeCamera().Item2, landmark_detector.CalculateAllEyeLandmarks(), landmark_detector.CalculateAllEyeLandmarks3D(fx, fy, cx, cy), confidence);
 if (detection_succeeding)
 {
 eye_landmarks = landmark_detector.CalculateVisibleEyeLandmarks();
 lines = landmark_detector.CalculateBox(fx, fy, cx, cy);
-scale = landmark_detector.GetRigidParams()[0];
-gaze_lines = gaze_analyser.CalculateGazeLines(scale, fx, fy, cx, cy);
+gaze_lines = gaze_analyser.CalculateGazeLines(fx, fy, cx, cy);
 gaze_angle = gaze_analyser.GetGazeAngle();
 }
@@ -490,25 +483,26 @@ namespace OpenFaceOffline
 if (ShowTrackedVideo)
 {
-if (latest_img == null)
+if (new_image)
 {
 latest_img = frame.CreateWriteableBitmap();
 }
 frame.UpdateWriteableBitmap(latest_img);
-video.Source = latest_img;
-video.Confidence = confidence;
-video.FPS = processing_fps.GetFPS();
-video.Progress = progress;
-video.FaceScale = scale;
+overlay_image.Source = latest_img;
+overlay_image.Confidence = confidence;
+overlay_image.FPS = processing_fps.GetFPS();
+overlay_image.Progress = progress;
+overlay_image.FaceScale = scale;
 if (!detection_succeeding)
 {
-video.OverlayLines.Clear();
-video.OverlayPoints.Clear();
-video.OverlayEyePoints.Clear();
-video.GazeLines.Clear();
+overlay_image.OverlayLines.Clear();
+overlay_image.OverlayPoints.Clear();
+overlay_image.OverlayPointsVisibility.Clear();
+overlay_image.OverlayEyePoints.Clear();
+overlay_image.GazeLines.Clear();
 }
 else
 {
@@ -528,18 +522,20 @@ namespace OpenFaceOffline
 if (new_image)
 {
-video.OverlayLines = lines;
-video.OverlayPoints = landmark_points;
-video.OverlayEyePoints = eye_landmark_points;
-video.GazeLines = gaze_lines;
+overlay_image.OverlayLines = lines;
+overlay_image.OverlayPoints = landmark_points;
+overlay_image.OverlayPointsVisibility = visibilities;
+overlay_image.OverlayEyePoints = eye_landmark_points;
+overlay_image.GazeLines = gaze_lines;
 }
 else
 {
 // In case of multiple faces just add them to the existing drawing list
-video.OverlayLines.AddRange(lines.GetRange(0, lines.Count));
-video.OverlayPoints.AddRange(landmark_points.GetRange(0, landmark_points.Count));
-video.OverlayEyePoints.AddRange(eye_landmark_points.GetRange(0, eye_landmark_points.Count));
-video.GazeLines.AddRange(gaze_lines.GetRange(0, gaze_lines.Count));
+overlay_image.OverlayLines.AddRange(lines.GetRange(0, lines.Count));
+overlay_image.OverlayPoints.AddRange(landmark_points.GetRange(0, landmark_points.Count));
+overlay_image.OverlayPointsVisibility.AddRange(visibilities.GetRange(0, visibilities.Count));
+overlay_image.OverlayEyePoints.AddRange(eye_landmark_points.GetRange(0, eye_landmark_points.Count));
+overlay_image.GazeLines.AddRange(gaze_lines.GetRange(0, gaze_lines.Count));
 }
 }
 }
@@ -626,7 +622,7 @@ namespace OpenFaceOffline
 NextFrameButton.IsEnabled = false;
 // Clean up the interface itself
-video.Source = null;
+overlay_image.Source = null;
 auClassGraph.Update(new Dictionary<string, double>());
 auRegGraph.Update(new Dictionary<string, double>());
@@ -75,6 +75,7 @@ namespace OpenFaceOffline
 InitializeComponent();
 OverlayLines = new List<Tuple<Point, Point>>();
 OverlayPoints = new List<Point>();
+OverlayPointsVisibility = new List<bool>();
 OverlayEyePoints = new List<Point>();
 GazeLines = new List<Tuple<Point, Point>>();
@@ -91,6 +92,9 @@ namespace OpenFaceOffline
 if (OverlayPoints == null)
 OverlayPoints = new List<Point>();
+if (OverlayPointsVisibility == null)
+OverlayPointsVisibility = new List<bool>();
 if (OverlayEyePoints == null)
 OverlayEyePoints = new List<Point>();
@@ -122,17 +126,29 @@ namespace OpenFaceOffline
 var p1 = new Point(ActualWidth * line.Item1.X / width, ActualHeight * line.Item1.Y / height);
 var p2 = new Point(ActualWidth * line.Item2.X / width, ActualHeight * line.Item2.Y / height);
-dc.DrawLine(new Pen(new SolidColorBrush(Color.FromArgb(200, (byte)(240), (byte)(30), (byte)100)), 3.0 * scaling_p), p1, p2);
+var dir = p2 - p1;
+p2 = p1 + dir * scaling_p * 2;
+dc.DrawLine(new Pen(new SolidColorBrush(Color.FromArgb(200, (byte)(240), (byte)(30), (byte)100)), 6.0 * scaling_p), p1, p2);
 }
-foreach (var p in OverlayPoints)
+for (int i = 0; i < OverlayPoints.Count; ++i)
 {
+var p = OverlayPoints[i];
 var q = new Point(ActualWidth * p.X / width, ActualHeight * p.Y / height);
-dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(230 * Confidence), 255, 50, 50)), null, q, 2.75 * scaling_p, 2.75 * scaling_p);
-dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(230 * Confidence), 255, 255, 100)), null, q, 1.75 * scaling_p, 1.75 * scaling_p);
+if(OverlayPointsVisibility.Count == 0 || OverlayPointsVisibility[i])
+{
+dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(230 * Confidence), 255, 50, 50)), null, q, 2.75 * scaling_p, 3.0 * scaling_p);
+dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(230 * Confidence), 255, 255, 100)), null, q, 1.75 * scaling_p, 2.0 * scaling_p);
+}
+else
+{
+// Draw fainter if landmark not visible
+dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(125 * Confidence), 255, 50, 50)), null, q, 2.75 * scaling_p, 3.0 * scaling_p);
+dc.DrawEllipse(new SolidColorBrush(Color.FromArgb((byte)(125 * Confidence), 255, 255, 100)), null, q, 1.75 * scaling_p, 2.0 * scaling_p);
+}
 }
 for (int id = 0; id < OverlayEyePoints.Count; id++)
@@ -199,6 +215,7 @@ namespace OpenFaceOffline
 public List<Tuple<Point, Point>> OverlayLines { get; set; }
 public List<Tuple<Point, Point>> GazeLines { get; set; }
 public List<Point> OverlayPoints { get; set; }
+public List<bool> OverlayPointsVisibility { get; set; }
 public List<Point> OverlayEyePoints { get; set; }
 public double Confidence { get; set; }
 public double FPS { get; set; }
@@ -152,18 +152,18 @@ namespace GazeAnalyser_Interop {
 return gaze_angle;
 }
-System::Collections::Generic::List<System::Tuple<System::Windows::Point, System::Windows::Point>^>^ CalculateGazeLines(double scale, float fx, float fy, float cx, float cy)
+System::Collections::Generic::List<System::Tuple<System::Windows::Point, System::Windows::Point>^>^ CalculateGazeLines(float fx, float fy, float cx, float cy)
 {
 cv::Mat_<double> cameraMat = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 0);
 vector<cv::Point3f> points_left;
 points_left.push_back(cv::Point3f(*pupil_left));
-points_left.push_back(cv::Point3f(*pupil_left + *gazeDirection0 * 40.0 * scale));
+points_left.push_back(cv::Point3f(*pupil_left + *gazeDirection0 * 40.0));
 vector<cv::Point3f> points_right;
 points_right.push_back(cv::Point3f(*pupil_right));
-points_right.push_back(cv::Point3f(*pupil_right + *gazeDirection1 * 40.0 * scale));
+points_right.push_back(cv::Point3f(*pupil_right + *gazeDirection1 * 40.0));
 // Perform manual projection of points
 vector<cv::Point2d> imagePoints_left;
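With the scale argument gone, CalculateGazeLines always places the second point of each gaze line 40 units from the pupil along the gaze direction in camera space; the face-size scaling now appears to be applied only at draw time (the OverlayImage hunk above lengthens the projected 2D line by scaling_p * 2). Below is a rough standalone sketch of the pinhole projection that turns such a 3D endpoint into an image point; the intrinsics and eye geometry are invented for illustration and are not taken from the code above.

    #include <opencv2/core.hpp>
    #include <iostream>

    // Pinhole projection: u = fx * X/Z + cx, v = fy * Y/Z + cy (for Z > 0).
    static cv::Point2d Project(const cv::Point3f& p, float fx, float fy, float cx, float cy)
    {
        return cv::Point2d(fx * p.x / p.z + cx, fy * p.y / p.z + cy);
    }

    int main()
    {
        // Invented camera intrinsics and eye geometry.
        const float fx = 500.0f, fy = 500.0f, cx = 320.0f, cy = 240.0f;
        const cv::Point3f pupil(10.0f, -20.0f, 400.0f);   // pupil position in camera space
        const cv::Point3f gaze_dir(0.1f, 0.05f, -0.99f);  // roughly unit-length gaze direction

        // End point 40 units along the gaze, as in the updated CalculateGazeLines.
        const cv::Point3f end_point = pupil + gaze_dir * 40.0f;

        const cv::Point2d p1 = Project(pupil, fx, fy, cx, cy);
        const cv::Point2d p2 = Project(end_point, fx, fy, cx, cy);
        std::cout << "gaze line from " << p1 << " to " << p2 << std::endl;
        return 0;
    }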
@@ -227,6 +227,19 @@ namespace CppInterop {
 }
 }
+// Get the mask of which landmarks are currently visible (not self-occluded)
+List<bool>^ GetVisibilities()
+{
+cv::Mat_<int> vis = clnf->GetVisibilities();
+List<bool>^ visibilities = gcnew List<bool>();
+for (auto vis_it = vis.begin(); vis_it != vis.end(); vis_it++)
+{
+visibilities->Add(*vis_it != 0);
+}
+return visibilities;
+}
 List<System::Tuple<double,double>^>^ CalculateVisibleLandmarks() {
 vector<cv::Point2d> vecLandmarks = ::LandmarkDetector::CalculateVisibleLandmarks(*clnf);
@@ -100,7 +100,7 @@ namespace UtilitiesOF {
 m_visualizer->SetObservationHOG(observation_HOG->Mat, num_cols, num_rows);
 }
-void SetObservationLandmarks(List<System::Tuple<double, double>^>^ landmarks_2D, double confidence)
+void SetObservationLandmarks(List<System::Tuple<double, double>^>^ landmarks_2D, double confidence, List<bool>^ visibilities)
 {
 // Construct an OpenCV matrix from the landmarks
 cv::Mat_<double> landmarks_2D_mat(landmarks_2D->Count * 2, 1, 0.0);
@@ -109,8 +109,27 @@ namespace UtilitiesOF {
 landmarks_2D_mat.at<double>(i, 0) = landmarks_2D[i]->Item1;
 landmarks_2D_mat.at<double>(i + landmarks_2D->Count, 0) = landmarks_2D[i]->Item2;
 }
-// TODO add visibilities
-m_visualizer->SetObservationLandmarks(landmarks_2D_mat, confidence);
+// Construct an OpenCV matrix from the visibilities
+cv::Mat_<int> visibilities_cv(visibilities->Count, 1, 0);
+for (int i = 0; i < visibilities->Count; ++i)
+{
+if (visibilities[i])
+{
+visibilities_cv.at<int>(i, 0) = 1;
+}
+else
+{
+visibilities_cv.at<int>(i, 0) = 0;
+}
+}
+m_visualizer->SetObservationLandmarks(landmarks_2D_mat, confidence, visibilities_cv);
 }
+void SetObservationLandmarks(List<System::Tuple<double, double>^>^ landmarks_2D, double confidence)
+{
+SetObservationLandmarks(landmarks_2D, confidence, gcnew List<bool>());
+}
 void SetImage(OpenCVWrappers::RawImage^ canvas, float fx, float fy, float cx, float cy)
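The new two-argument overload above simply forwards an empty visibility list, and the WPF renderer earlier in the diff applies the matching rule (OverlayPointsVisibility.Count == 0 || OverlayPointsVisibility[i]): an empty mask means every landmark is treated as visible. A tiny sketch of that convention in plain C++, using a hypothetical helper rather than anything from the OpenFace API:

    #include <iostream>
    #include <vector>

    // Hypothetical helper: an empty mask is interpreted as "all landmarks visible".
    static bool IsVisible(const std::vector<bool>& visibilities, std::size_t landmark_index)
    {
        return visibilities.empty() || visibilities[landmark_index];
    }

    int main()
    {
        const std::vector<bool> no_mask;                 // what the two-argument overload passes
        const std::vector<bool> mask = { true, false };  // explicit per-landmark visibility

        std::cout << IsVisible(no_mask, 1) << std::endl; // 1: treated as visible
        std::cout << IsVisible(mask, 1) << std::endl;    // 0: self-occluded
        return 0;
    }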
@@ -159,6 +159,9 @@ public:
 // A utility bounding box function
 cv::Rect_<double> GetBoundingBox() const;
+// Get the currently non-self occluded landmarks
+cv::Mat_<int> GetVisibilities() const;
 // Reset the model (useful if we want to completelly reinitialise, or we want to track another video)
 void Reset();
@@ -1146,6 +1146,17 @@ cv::Mat_<double> CLNF::GetShape(double fx, double fy, double cx, double cy) cons
 }
+cv::Mat_<int> CLNF::GetVisibilities() const
+{
+// Get the view of the largest scale
+int scale = patch_experts.visibilities.size() - 1;
+int view_id = patch_experts.GetViewIdx(params_global, scale);
+cv::Mat_<int> visibilities_to_ret = this->patch_experts.visibilities[scale][view_id].clone();
+return visibilities_to_ret;
+}
 // A utility bounding box function
 cv::Rect_<double> CLNF::GetBoundingBox() const
 {
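CLNF::GetVisibilities() returns the visibility mask of the view selected by the current global parameters at the largest patch-expert scale, with one integer entry per landmark. A small illustration (not OpenFace code) of how such a mask can be combined with the 2n-by-1 landmark vector to keep only the visible 2D points; the input values are invented.

    #include <opencv2/core.hpp>
    #include <iostream>
    #include <vector>

    // Keep only the landmarks whose visibility flag is non-zero.
    static std::vector<cv::Point2d> VisiblePoints(const cv::Mat_<double>& landmarks_2D,
                                                  const cv::Mat_<int>& visibilities)
    {
        const int n = landmarks_2D.rows / 2;
        std::vector<cv::Point2d> points;
        for (int i = 0; i < n; ++i)
        {
            if (visibilities(i, 0) != 0)
                points.emplace_back(landmarks_2D(i, 0), landmarks_2D(i + n, 0));
        }
        return points;
    }

    int main()
    {
        // Three invented landmarks: x coordinates first, then y coordinates.
        cv::Mat_<double> landmarks = (cv::Mat_<double>(6, 1) << 10, 20, 30, 40, 50, 60);
        cv::Mat_<int> visibilities = (cv::Mat_<int>(3, 1) << 1, 0, 1);

        std::cout << VisiblePoints(landmarks, visibilities).size()
                  << " of 3 landmarks are visible" << std::endl;
        return 0;
    }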
@@ -157,6 +157,19 @@ void Visualizer::SetObservationLandmarks(const cv::Mat_<double>& landmarks_2D, d
 cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(255, 0, 0), thickness_2, CV_AA, draw_shiftbits);
 }
+else
+{
+// Draw a fainter point if the landmark is self occluded
+cv::Point featurePoint(cvRound(landmarks_2D.at<double>(i) * (double)draw_multiplier), cvRound(landmarks_2D.at<double>(i + n) * (double)draw_multiplier));
+// A rough heuristic for drawn point size
+int thickness = (int)std::ceil(2.5* ((double)captured_image.cols) / 640.0);
+int thickness_2 = (int)std::ceil(1.0* ((double)captured_image.cols) / 640.0);
+cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(0, 0, 155), thickness, CV_AA, draw_shiftbits);
+cv::circle(captured_image, featurePoint, 1 * draw_multiplier, cv::Scalar(155, 0, 0), thickness_2, CV_AA, draw_shiftbits);
+}
 }
 }
 }
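The hunk above adds an else branch to Visualizer::SetObservationLandmarks that redraws self-occluded landmarks in darker colours (cv::Scalar(0, 0, 155) and cv::Scalar(155, 0, 0)) instead of the full-intensity red and blue used for visible points. Below is a self-contained OpenCV sketch of that bright-versus-faint split, with invented points and visibility flags, and cv::LINE_AA in place of the older CV_AA constant used in the tree.

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <cmath>
    #include <vector>

    int main()
    {
        cv::Mat canvas(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
        const std::vector<cv::Point> points = { {200, 200}, {240, 210}, {280, 220} };
        const std::vector<int> visible = { 1, 0, 1 };

        // Same width-based heuristic for the drawn point size as in the hunk above.
        const int thickness   = (int)std::ceil(2.5 * (double)canvas.cols / 640.0);
        const int thickness_2 = (int)std::ceil(1.0 * (double)canvas.cols / 640.0);

        for (std::size_t i = 0; i < points.size(); ++i)
        {
            // Bright red/blue rings for visible landmarks, darker ones otherwise (BGR).
            const cv::Scalar outer = visible[i] ? cv::Scalar(0, 0, 255) : cv::Scalar(0, 0, 155);
            const cv::Scalar inner = visible[i] ? cv::Scalar(255, 0, 0) : cv::Scalar(155, 0, 0);
            cv::circle(canvas, points[i], 3, outer, thickness, cv::LINE_AA);
            cv::circle(canvas, points[i], 3, inner, thickness_2, cv::LINE_AA);
        }

        cv::imwrite("landmark_visibility_preview.png", canvas);
        return 0;
    }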