///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2017, Carnegie Mellon University and University of Cambridge,
// all rights reserved.
//
// ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
//
// BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT.
// IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
//
// License can be found in OpenFace-license.txt
//
// * Any publications arising from the use of this software, including but
//   not limited to academic journal and conference publications, technical
//   reports and manuals, must cite at least one of the following works:
//
//   OpenFace: an open source facial behavior analysis toolkit
//   Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency
//   in IEEE Winter Conference on Applications of Computer Vision, 2016
//
//   Rendering of Eyes for Eye-Shape Registration and Gaze Estimation
//   Erroll Wood, Tadas Baltrušaitis, Xucong Zhang, Yusuke Sugano, Peter Robinson, and Andreas Bulling
//   in IEEE International Conference on Computer Vision (ICCV), 2015
//
//   Cross-dataset learning and person-specific normalisation for automatic Action Unit detection
//   Tadas Baltrušaitis, Marwa Mahmoud, and Peter Robinson
//   in Facial Expression Recognition and Analysis Challenge,
//   IEEE International Conference on Automatic Face and Gesture Recognition, 2015
//
//   Constrained Local Neural Fields for robust facial landmark detection in the wild.
//   Tadas Baltrušaitis, Peter Robinson, and Louis-Philippe Morency.
//   in IEEE Int. Conference on Computer Vision Workshops, 300 Faces in-the-Wild Challenge, 2013.
//
///////////////////////////////////////////////////////////////////////////////

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;
using System.Windows;
using System.Windows.Threading;
using System.Windows.Media.Imaging;
using System.IO;
using Microsoft.Win32;

// Internal libraries
using OpenCVWrappers;
using CppInterop.LandmarkDetector;
using CameraInterop;
using FaceAnalyser_Interop;
using GazeAnalyser_Interop;
using FaceDetectorInterop;
using MediaReader;

using Microsoft.WindowsAPICodePack.Dialogs;

namespace OpenFaceOffline
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        // Timing for measuring FPS
        #region High-Resolution Timing
        static DateTime startTime;
        static Stopwatch sw = new Stopwatch();

        static MainWindow()
        {
            startTime = DateTime.Now;
            sw.Start();
        }

        public static DateTime CurrentTime
        {
            get { return startTime + sw.Elapsed; }
        }
        #endregion
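        // A note on the timing pattern above: DateTime.Now on its own only
        // ticks every ~10-16 ms on Windows, while Stopwatch uses the
        // high-resolution performance counter. Anchoring one DateTime at
        // startup and adding the Stopwatch's elapsed time gives wall-clock
        // timestamps with sub-millisecond spacing. Usage sketch (illustrative
        // only):
        //
        //   var t0 = MainWindow.CurrentTime;
        //   // ... process a frame ...
        //   double ms = (MainWindow.CurrentTime - t0).TotalMilliseconds;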
        // -----------------------------------------------------------------
        // Members
        // -----------------------------------------------------------------

        Thread processing_thread;

        // Some members for displaying the results
        private Capture capture;
        private WriteableBitmap latest_img;
        private WriteableBitmap latest_aligned_face;
        private WriteableBitmap latest_HOG_descriptor;

        // Managing the running of the analysis system
        private volatile bool thread_running;
        private volatile bool thread_paused = false;
        // Allows for going forward in time step by step
        // Useful for visualising things
        private volatile int skip_frames = 0;

        FpsTracker processing_fps = new FpsTracker();

        volatile bool detectionSucceeding = false;

        // For tracking
        private FaceDetector face_detector;
        FaceModelParameters face_model_params;
        CLNF clnf_model;

        // For face analysis
        FaceAnalyserManaged face_analyser;
        GazeAnalyserManaged gaze_analyser;

        // Recording parameters (default values)
        Recorder recorder;

        public bool RecordAligned { get; set; } = false;        // Aligned face images
        public bool RecordHOG { get; set; } = false;            // HOG features extracted from face images
        public bool Record2DLandmarks { get; set; } = true;     // 2D locations of facial landmarks (in pixels)
        public bool Record3DLandmarks { get; set; } = true;     // 3D locations of facial landmarks (in millimetres)
        public bool RecordModelParameters { get; set; } = true; // Facial shape parameters (rigid and non-rigid geometry)
        public bool RecordPose { get; set; } = true;            // Head pose (position and orientation)
        public bool RecordAUs { get; set; } = true;             // Facial action units
        public bool RecordGaze { get; set; } = true;            // Eye gaze

        // Visualisation options
        public bool ShowTrackedVideo { get; set; } = true; // Showing the actual tracking
        public bool ShowAppearance { get; set; } = true;   // Showing appearance features like HOG
        public bool ShowGeometry { get; set; } = true;     // Showing geometry features: pose, gaze, and non-rigid parameters
        public bool ShowAUs { get; set; } = true;          // Showing facial action units

        int image_output_size = 112;

        // Where the recording is done (by default in a record directory, relative to where the application was executed)
        String record_root = "./record";

        // For AU prediction; if videos are long, dynamic models should be used
        public bool DynamicAUModels { get; set; } = true;

        // Camera calibration parameters
        public double fx = -1, fy = -1, cx = -1, cy = -1;
        bool estimate_camera_parameters = true;

        public MainWindow()
        {
            InitializeComponent();
            this.DataContext = this; // For WPF data binding

            // Set the icon
            Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);
            this.Icon = BitmapFrame.Create(iconUri);

            String root = AppDomain.CurrentDomain.BaseDirectory;

            face_model_params = new FaceModelParameters(root, false);
            clnf_model = new CLNF(face_model_params);
            gaze_analyser = new GazeAnalyserManaged();
        }
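        // The public auto-properties above are exposed to the UI through the
        // "this.DataContext = this" assignment in the constructor, so XAML
        // elements can bind to them directly. A hypothetical menu item (the
        // actual MainWindow.xaml may differ):
        //
        //   <MenuItem Header="Record aligned faces" IsCheckable="True"
        //             IsChecked="{Binding RecordAligned}" />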
        // ----------------------------------------------------------
        // Actual work gets done here

        // The main function call for processing images or video files, TODO rename this as it is not a loop
        private void ProcessingLoop(String[] filenames, int cam_id = -1, int width = -1, int height = -1, bool multi_face = false)
        {
            SetupFeatureExtractionMode();

            thread_running = true;

            // Create the video capture and call the VideoLoop
            if (filenames != null)
            {
                face_model_params.optimiseForVideo();
                if (cam_id == -2)
                {
                    List<String> image_files_all = new List<String>();
                    foreach (string image_name in filenames)
                        image_files_all.Add(image_name);

                    // Loading an image sequence that represents a video
                    capture = new Capture(image_files_all);

                    if (capture.isOpened())
                    {
                        // Prepare recording, if any, based on the directory
                        String file_no_ext = System.IO.Path.GetDirectoryName(filenames[0]);
                        file_no_ext = System.IO.Path.GetFileName(file_no_ext);

                        // Start the actual processing and recording
                        FeatureExtractionLoop(file_no_ext);
                    }
                    else
                    {
                        string messageBoxText = "Failed to open an image";
                        string caption = "Not a valid file";
                        MessageBoxButton button = MessageBoxButton.OK;
                        MessageBoxImage icon = MessageBoxImage.Warning;

                        // Display message box
                        MessageBox.Show(messageBoxText, caption, button, icon);
                    }
                }
                else
                {
                    // Loading a video file (or a number of them)
                    foreach (string filename in filenames)
                    {
                        if (!thread_running)
                        {
                            continue;
                        }

                        capture = new Capture(filename);

                        if (capture.isOpened())
                        {
                            String file_no_ext = System.IO.Path.GetFileNameWithoutExtension(filename);

                            // Start the actual processing
                            FeatureExtractionLoop(file_no_ext);
                        }
                        else
                        {
                            string messageBoxText = "File is not a video or the codec is not supported.";
                            string caption = "Not a valid file";
                            MessageBoxButton button = MessageBoxButton.OK;
                            MessageBoxImage icon = MessageBoxImage.Warning;

                            // Display message box
                            MessageBox.Show(messageBoxText, caption, button, icon);
                        }
                    }
                }
            }

            EndMode();
        }

        // TODO here
        private void ProcessIndividualImages(ImageReader reader) // TODO need an interface for recording settings
        {
            // Make sure the GUI is set up appropriately
            SetupFeatureExtractionMode();

            // Indicate we will start running the thread
            thread_running = true;

            // Setup the parameters optimized for working on individual images rather than sequences
            face_model_params.optimiseForImages();

            // Initialize the face detector if it has not been initialized yet
            if (face_detector == null)
            {
                face_detector = new FaceDetector();
            }

            // Initialize the face analyser
            face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size);

            // Loading an image file (or a number of them)
            while (reader.isOpened())
            {
                if (!thread_running)
                {
                    continue;
                }

                // Start the actual processing, TODO this should change?
                Thread.CurrentThread.IsBackground = true;

                clnf_model.Reset();

                var frame = new RawImage(reader.GetNextImage());
                var grayFrame = new RawImage(reader.GetCurrentFrameGray());

                double progress = reader.GetProgress();

                // Detect faces here and return bounding boxes
                List<Rect> face_detections = new List<Rect>();
                List<double> confidences = new List<double>();
                face_detector.DetectFacesHOG(face_detections, grayFrame, confidences);

                // For visualizing landmarks
                List<Point> landmark_points = new List<Point>();

                for (int i = 0; i < face_detections.Count; ++i)
                {
                    bool success = clnf_model.DetectFaceLandmarksInImage(grayFrame, face_detections[i], face_model_params);

                    var landmarks = clnf_model.CalculateAllLandmarks();

                    // Predict action units
                    var au_preds = face_analyser.PredictStaticAUsAndComputeFeatures(grayFrame, landmarks);

                    // Predict eye gaze
                    gaze_analyser.AddNextFrame(clnf_model, success, fx, fy, cx, cy); // TODO fx should come from the reader

                    // Only the final face will contain the details
                    VisualizeFeatures(frame, landmarks, fx, fy, cx, cy, progress);

                    foreach (var p in landmarks)
                    {
                        landmark_points.Add(new Point(p.Item1, p.Item2));
                    }
                }

                // Visualisation TODO this should be lifted out and grabbed from the visualizer rather than drawing points ourselves
                //if (ShowTrackedVideo)
                //{
                //    Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
                //    {
                //        if (latest_img == null)
                //        {
                //            latest_img = frame.CreateWriteableBitmap();
                //        }
                //        frame.UpdateWriteableBitmap(latest_img);
                //        video.Source = latest_img;
                //        video.Confidence = 1;
                //        video.FPS = processing_fps.GetFPS();
                //        video.Progress = progress;
                //        video.OverlayLines = new List<Tuple<Point, Point>>();
                //        video.OverlayPoints = landmark_points;
                //        // TODO unify with other visualization
                //    }));
                //}

                latest_img = null;

                // TODO how to report errors from the reader here? Exceptions? Logging? A problem for future versions.
            }

            // TODO is this still needed?
            EndMode();
        }
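        // The per-image pipeline in ProcessIndividualImages, in outline (a
        // summary of the calls above, not an alternative API):
        //
        //   DetectFacesHOG                     -> bounding boxes + confidences
        //   DetectFaceLandmarksInImage         -> landmark fit per detection
        //   PredictStaticAUsAndComputeFeatures -> static AU estimates
        //   gaze_analyser.AddNextFrame         -> gaze from the fitted model
        //   VisualizeFeatures                  -> draw (only the last face persists)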
        // Capturing and processing the video frame by frame
        private void FeatureExtractionLoop(string output_file_name)
        {
            DateTime? startTime = CurrentTime;
            var lastFrameTime = CurrentTime;

            clnf_model.Reset();
            face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size);

            // If the camera calibration parameters are not set (indicated by -1), guesstimate them
            if (estimate_camera_parameters || fx == -1 || fy == -1 || cx == -1 || cy == -1)
            {
                fx = 500.0 * (capture.width / 640.0);
                fy = 500.0 * (capture.height / 480.0);

                fx = (fx + fy) / 2.0;
                fy = fx;

                cx = capture.width / 2f;
                cy = capture.height / 2f;
            }

            // Setup the recorder first
            recorder = new Recorder(record_root, output_file_name, capture.width, capture.height,
                Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose,
                RecordAUs, RecordGaze, RecordAligned, RecordHOG,
                clnf_model, face_analyser, fx, fy, cx, cy, DynamicAUModels);

            int frame_id = 0;

            double fps = capture.GetFPS();
            if (fps <= 0) fps = 30;

            while (thread_running)
            {
                //////////////////////////////////////////////
                // CAPTURE FRAME AND DETECT LANDMARKS FOLLOWED BY THE REQUIRED IMAGE PROCESSING
                //////////////////////////////////////////////

                RawImage frame = null;
                double progress = -1;

                frame = new RawImage(capture.GetNextFrame(false));
                progress = capture.GetProgress();

                if (frame.Width == 0)
                {
                    // This indicates that we reached the end of the video file
                    break;
                }

                lastFrameTime = CurrentTime;
                processing_fps.AddFrame();

                var grayFrame = new RawImage(capture.GetCurrentFrameGray());

                if (grayFrame == null)
                {
                    Console.WriteLine("Gray is empty");
                    continue;
                }

                detectionSucceeding = ProcessFrame(clnf_model, face_model_params, frame, grayFrame, fx, fy, cx, cy);

                // The face analysis step (for AUs and eye gaze)
                face_analyser.AddNextFrame(frame, clnf_model.CalculateAllLandmarks(), detectionSucceeding, false, ShowAppearance); // TODO change
                gaze_analyser.AddNextFrame(clnf_model, detectionSucceeding, fx, fy, cx, cy);

                recorder.RecordFrame(clnf_model, face_analyser, gaze_analyser, detectionSucceeding, frame_id + 1, ((double)frame_id) / fps);

                List<Tuple<double, double>> landmarks = clnf_model.CalculateVisibleLandmarks();

                VisualizeFeatures(frame, landmarks, fx, fy, cx, cy, progress);

                while (thread_running && thread_paused && skip_frames == 0)
                {
                    Thread.Sleep(10);
                }

                frame_id++;

                if (skip_frames > 0)
                    skip_frames--;
            }

            latest_img = null;
            skip_frames = 0;

            // Unpause if it's paused
            if (thread_paused)
            {
                Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
                {
                    PauseButton_Click(null, null);
                }));
            }

            recorder.FinishRecording(clnf_model, face_analyser);
        }
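        // A standalone restatement of the intrinsics guess used in
        // FeatureExtractionLoop above: a focal length of roughly 500 px at
        // 640x480, scaled with resolution and averaged across the two axes,
        // with the principal point at the image centre. Illustrative sketch
        // only; not called by the application.
        private static void EstimateIntrinsics(int width, int height,
            out double fx, out double fy, out double cx, out double cy)
        {
            fx = 500.0 * (width / 640.0);
            fy = 500.0 * (height / 480.0);
            fx = fy = (fx + fy) / 2.0; // use a single averaged focal length
            cx = width / 2.0;
            cy = height / 2.0;
        }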
        private void VisualizeFeatures(RawImage frame, List<Tuple<double, double>> landmarks, double fx, double fy, double cx, double cy, double progress)
        {
            List<Tuple<Point, Point>> lines = null;
            List<Tuple<double, double>> eye_landmarks = null;
            List<Tuple<Point, Point>> gaze_lines = null;
            Tuple<double, double> gaze_angle = new Tuple<double, double>(0, 0);

            List<double> pose = new List<double>();
            clnf_model.GetPose(pose, fx, fy, cx, cy);
            List<double> non_rigid_params = clnf_model.GetNonRigidParams();

            double confidence = (-clnf_model.GetConfidence()) / 2.0 + 0.5;

            if (confidence < 0)
                confidence = 0;
            else if (confidence > 1)
                confidence = 1;

            double scale = 0;

            if (detectionSucceeding)
            {
                eye_landmarks = clnf_model.CalculateVisibleEyeLandmarks();
                lines = clnf_model.CalculateBox((float)fx, (float)fy, (float)cx, (float)cy);

                scale = clnf_model.GetRigidParams()[0];

                gaze_lines = gaze_analyser.CalculateGazeLines(scale, (float)fx, (float)fy, (float)cx, (float)cy);
                gaze_angle = gaze_analyser.GetGazeAngle();
            }

            // Visualisation (as a separate function)
            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
            {
                if (ShowAUs)
                {
                    // TODO this should be done through the visualizer?
                    var au_classes = face_analyser.GetCurrentAUsClass();
                    var au_regs = face_analyser.GetCurrentAUsReg();

                    auClassGraph.Update(au_classes);

                    var au_regs_scaled = new Dictionary<String, double>();
                    foreach (var au_reg in au_regs)
                    {
                        // Scale AU intensities from [0, 5] down to [0, 1] and clamp
                        au_regs_scaled[au_reg.Key] = au_reg.Value / 5.0;
                        if (au_regs_scaled[au_reg.Key] < 0)
                            au_regs_scaled[au_reg.Key] = 0;

                        if (au_regs_scaled[au_reg.Key] > 1)
                            au_regs_scaled[au_reg.Key] = 1;
                    }
                    auRegGraph.Update(au_regs_scaled);
                }

                if (ShowGeometry)
                {
                    // Convert radians to degrees, rounding to the nearest degree
                    int yaw = (int)Math.Round(pose[4] * 180 / Math.PI, MidpointRounding.AwayFromZero);
                    int roll = (int)Math.Round(pose[5] * 180 / Math.PI, MidpointRounding.AwayFromZero);
                    int pitch = (int)Math.Round(pose[3] * 180 / Math.PI, MidpointRounding.AwayFromZero);

                    YawLabel.Content = yaw + "°";
                    RollLabel.Content = roll + "°";
                    PitchLabel.Content = pitch + "°";

                    XPoseLabel.Content = (int)pose[0] + " mm";
                    YPoseLabel.Content = (int)pose[1] + " mm";
                    ZPoseLabel.Content = (int)pose[2] + " mm";

                    nonRigidGraph.Update(non_rigid_params);

                    // Update eye gaze
                    String x_angle = String.Format("{0:F0}°", gaze_angle.Item1 * (180.0 / Math.PI));
                    String y_angle = String.Format("{0:F0}°", gaze_angle.Item2 * (180.0 / Math.PI));
                    GazeXLabel.Content = x_angle;
                    GazeYLabel.Content = y_angle;
                }

                if (ShowTrackedVideo)
                {
                    if (latest_img == null)
                    {
                        latest_img = frame.CreateWriteableBitmap();
                    }

                    frame.UpdateWriteableBitmap(latest_img);

                    video.Source = latest_img;
                    video.Confidence = confidence;
                    video.FPS = processing_fps.GetFPS();
                    video.Progress = progress;
                    video.FaceScale = scale;

                    if (!detectionSucceeding)
                    {
                        video.OverlayLines.Clear();
                        video.OverlayPoints.Clear();
                        video.OverlayEyePoints.Clear();
                        video.GazeLines.Clear();
                    }
                    else
                    {
                        video.OverlayLines = lines;

                        List<Point> landmark_points = new List<Point>();
                        foreach (var p in landmarks)
                        {
                            landmark_points.Add(new Point(p.Item1, p.Item2));
                        }

                        List<Point> eye_landmark_points = new List<Point>();
                        foreach (var p in eye_landmarks)
                        {
                            eye_landmark_points.Add(new Point(p.Item1, p.Item2));
                        }

                        video.OverlayPoints = landmark_points;
                        video.OverlayEyePoints = eye_landmark_points;
                        video.GazeLines = gaze_lines;
                    }
                }

                if (ShowAppearance)
                {
                    // TODO how to do this for images? For now this works only for videos; one possibility is doing it only on replay for images, showing just the selected face
                    RawImage aligned_face = face_analyser.GetLatestAlignedFace();
                    RawImage hog_face = face_analyser.GetLatestHOGDescriptorVisualisation();

                    if (latest_aligned_face == null)
                    {
                        latest_aligned_face = aligned_face.CreateWriteableBitmap();
                        latest_HOG_descriptor = hog_face.CreateWriteableBitmap();
                    }

                    aligned_face.UpdateWriteableBitmap(latest_aligned_face);
                    hog_face.UpdateWriteableBitmap(latest_HOG_descriptor);

                    AlignedFace.Source = latest_aligned_face;
                    AlignedHOG.Source = latest_HOG_descriptor;
                }
            }));
        }

        private void StopTracking()
        {
            // First complete the running of the thread
            if (processing_thread != null)
            {
                // Tell the other thread to finish
                thread_running = false;
                processing_thread.Join();
            }
        }

        // ----------------------------------------------------------
        // Interacting with landmark detection and face analysis

        private bool ProcessFrame(CLNF clnf_model, FaceModelParameters clnf_params, RawImage frame, RawImage grayscale_frame, double fx, double fy, double cx, double cy)
        {
            detectionSucceeding = clnf_model.DetectLandmarksInVideo(grayscale_frame, clnf_params);
            return detectionSucceeding;
        }
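        // VisualizeFeatures maps the raw model confidence onto [0, 1] for
        // display via c' = clamp(-c / 2 + 0.5); the assumption (not verified
        // against the CLNF internals) is that lower raw values indicate a
        // better fit. Illustrative sketch only; not called by the application.
        private static double DisplayConfidence(double rawConfidence)
        {
            double c = -rawConfidence / 2.0 + 0.5;
            if (c < 0) c = 0;
            if (c > 1) c = 1;
            return c;
        }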
        // ----------------------------------------------------------
        // Mode handling (image, video)
        // ----------------------------------------------------------

        // Disable GUI components that should not be active during processing
        private void SetupFeatureExtractionMode()
        {
            Dispatcher.Invoke((Action)(() =>
            {
                SettingsMenu.IsEnabled = false;
                RecordingMenu.IsEnabled = false;
                AUSetting.IsEnabled = false;

                PauseButton.IsEnabled = true;
                StopButton.IsEnabled = true;
                NextFiveFramesButton.IsEnabled = false;
                NextFrameButton.IsEnabled = false;
            }));
        }

        // When the processing is done re-enable the components
        private void EndMode()
        {
            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 1, 0), (Action)(() =>
            {
                SettingsMenu.IsEnabled = true;
                RecordingMenu.IsEnabled = true;
                AUSetting.IsEnabled = true;

                PauseButton.IsEnabled = false;
                StopButton.IsEnabled = false;
                NextFiveFramesButton.IsEnabled = false;
                NextFrameButton.IsEnabled = false;

                // Clean up the interface itself
                video.Source = null;

                auClassGraph.Update(new Dictionary<String, double>());
                auRegGraph.Update(new Dictionary<String, double>());

                YawLabel.Content = "0°";
                RollLabel.Content = "0°";
                PitchLabel.Content = "0°";

                XPoseLabel.Content = "0 mm";
                YPoseLabel.Content = "0 mm";
                ZPoseLabel.Content = "0 mm";

                nonRigidGraph.Update(new List<double>());

                GazeXLabel.Content = "0°";
                GazeYLabel.Content = "0°";

                AlignedFace.Source = null;
                AlignedHOG.Source = null;
            }));
        }

        // ----------------------------------------------------------
        // Opening Videos/Images
        // ----------------------------------------------------------

        private void videoFileOpenClick(object sender, RoutedEventArgs e)
        {
            new Thread(() => openVideoFile()).Start();
        }

        private void openVideoFile()
        {
            StopTracking();

            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 2, 0), (Action)(() =>
            {
                var d = new OpenFileDialog();
                d.Multiselect = true;
                d.Filter = "Video files|*.avi;*.wmv;*.mov;*.mpg;*.mpeg;*.mp4";

                if (d.ShowDialog(this) == true)
                {
                    string[] video_files = d.FileNames;

                    processing_thread = new Thread(() => ProcessingLoop(video_files));
                    processing_thread.Start();
                }
            }));
        }

        // Some utilities for opening images/videos and directories
        private ImageReader openMediaDialog(bool images)
        {
            string[] image_files = new string[0];
            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 2, 0), (Action)(() =>
            {
                var d = new OpenFileDialog();
                d.Multiselect = true;
                if (images)
                {
                    d.Filter = "Image files|*.jpg;*.jpeg;*.bmp;*.png;*.gif";
                }
                else
                {
                    d.Filter = "Video files|*.avi;*.wmv;*.mov;*.mpg;*.mpeg;*.mp4";
                }

                if (d.ShowDialog(this) == true)
                {
                    image_files = d.FileNames;
                }
            }));
            List<String> img_files_list = new List<String>(image_files);
            return new ImageReader(img_files_list);
        }

        private void individualImageFilesOpenClick(object sender, RoutedEventArgs e)
        {
            // First clean up existing tracking
            StopTracking();

            ImageReader reader = openMediaDialog(true);
            processing_thread = new Thread(() => ProcessIndividualImages(reader));
            processing_thread.Start();
        }

        private void individualImageDirectoryOpenClick(object sender, RoutedEventArgs e)
        {
            StopTracking();
            // TODO open directory here
            new Thread(() => imageOpen()).Start();
        }

        // TODO old
        private void imageFileOpenClick(object sender, RoutedEventArgs e)
        {
            new Thread(() => imageOpen()).Start();
        }

        // TODO old
        private void imageOpen()
        {
            StopTracking();

            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 2, 0), (Action)(() =>
            {
                var d = new OpenFileDialog();
                d.Multiselect = true;
                d.Filter = "Image files|*.jpg;*.jpeg;*.bmp;*.png;*.gif";

                if (d.ShowDialog(this) == true)
                {
                    string[] image_files = d.FileNames;

                    processing_thread = new Thread(() => ProcessingLoop(image_files, -3));
                    processing_thread.Start();
                }
            }));
        }

        private void imageSequenceFileOpenClick(object sender, RoutedEventArgs e)
        {
            new Thread(() => imageSequenceOpen()).Start();
        }
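        // Note on the cam_id sentinels passed to ProcessingLoop by the
        // handlers above and below: -2 treats the selected files as an image
        // sequence (a "video" of stills), while -3 is passed by the legacy
        // per-image handlers (ProcessingLoop as written has no special case
        // for it, so such files fall through to the video branch). A
        // non-negative id would denote a live camera. This convention is
        // inferred from the call sites in this file.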
        private void imageSequenceOpen()
        {
            StopTracking();

            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 2, 0), (Action)(() =>
            {
                var d = new OpenFileDialog();
                d.Multiselect = true;
                d.Filter = "Image files|*.jpg;*.jpeg;*.bmp;*.png;*.gif";

                if (d.ShowDialog(this) == true)
                {
                    string[] image_files = d.FileNames;

                    processing_thread = new Thread(() => ProcessingLoop(image_files, -2));
                    processing_thread.Start();
                }
            }));
        }

        // --------------------------------------------------------
        // Button handling
        // --------------------------------------------------------

        // Cleanup stuff when closing the window
        private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
        {
            if (processing_thread != null)
            {
                // Stop capture and tracking
                thread_running = false;
                processing_thread.Join();

                capture.Dispose();
            }
            face_analyser.Dispose();
        }

        // Stopping the tracking
        private void StopButton_Click(object sender, RoutedEventArgs e)
        {
            if (processing_thread != null)
            {
                // Stop capture and tracking
                thread_paused = false;
                thread_running = false;
                // Let the processing thread finish
                processing_thread.Join();

                // Clean up the interface
                EndMode();
            }
        }

        private void PauseButton_Click(object sender, RoutedEventArgs e)
        {
            if (processing_thread != null)
            {
                // Toggle pausing of capture and tracking
                thread_paused = !thread_paused;

                NextFrameButton.IsEnabled = thread_paused;
                NextFiveFramesButton.IsEnabled = thread_paused;

                if (thread_paused)
                {
                    PauseButton.Content = "Resume";
                }
                else
                {
                    PauseButton.Content = "Pause";
                }
            }
        }

        private void SkipButton_Click(object sender, RoutedEventArgs e)
        {
            if (sender.Equals(NextFrameButton))
            {
                skip_frames += 1;
            }
            else if (sender.Equals(NextFiveFramesButton))
            {
                skip_frames += 5;
            }
        }
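        // The pause/step protocol used by the two handlers above:
        // FeatureExtractionLoop blocks while (thread_running && thread_paused
        // && skip_frames == 0), so incrementing skip_frames lets exactly that
        // many frames through before the loop blocks again. A minimal model of
        // the consumer side (illustrative only):
        //
        //   while (running && paused && pending == 0) Thread.Sleep(10);
        //   ProcessOneFrame();
        //   if (pending > 0) pending--;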
        private void VisualisationChange(object sender, RoutedEventArgs e)
        {
            // Collapsing or restoring the windows here
            if (!ShowTrackedVideo)
            {
                VideoBorder.Visibility = System.Windows.Visibility.Collapsed;
                MainGrid.ColumnDefinitions[0].Width = new GridLength(0, GridUnitType.Star);
            }
            else
            {
                VideoBorder.Visibility = System.Windows.Visibility.Visible;
                MainGrid.ColumnDefinitions[0].Width = new GridLength(2.1, GridUnitType.Star);
            }

            if (!ShowAppearance)
            {
                AppearanceBorder.Visibility = System.Windows.Visibility.Collapsed;
                MainGrid.ColumnDefinitions[1].Width = new GridLength(0, GridUnitType.Star);
            }
            else
            {
                AppearanceBorder.Visibility = System.Windows.Visibility.Visible;
                MainGrid.ColumnDefinitions[1].Width = new GridLength(0.8, GridUnitType.Star);
            }

            if (!ShowGeometry)
            {
                GeometryBorder.Visibility = System.Windows.Visibility.Collapsed;
                MainGrid.ColumnDefinitions[2].Width = new GridLength(0, GridUnitType.Star);
            }
            else
            {
                GeometryBorder.Visibility = System.Windows.Visibility.Visible;
                MainGrid.ColumnDefinitions[2].Width = new GridLength(1.0, GridUnitType.Star);
            }

            if (!ShowAUs)
            {
                ActionUnitBorder.Visibility = System.Windows.Visibility.Collapsed;
                MainGrid.ColumnDefinitions[3].Width = new GridLength(0, GridUnitType.Star);
            }
            else
            {
                ActionUnitBorder.Visibility = System.Windows.Visibility.Visible;
                MainGrid.ColumnDefinitions[3].Width = new GridLength(1.6, GridUnitType.Star);
            }
        }

        private void setOutputImageSize_Click(object sender, RoutedEventArgs e)
        {
            NumberEntryWindow number_entry_window = new NumberEntryWindow(image_output_size);
            number_entry_window.Icon = this.Icon;

            number_entry_window.WindowStartupLocation = WindowStartupLocation.CenterScreen;

            if (number_entry_window.ShowDialog() == true)
            {
                image_output_size = number_entry_window.OutputInt;
            }
        }

        private void setCameraParameters_Click(object sender, RoutedEventArgs e)
        {
            CameraParametersEntry camera_params_entry_window = new CameraParametersEntry(fx, fy, cx, cy);
            camera_params_entry_window.Icon = this.Icon;

            camera_params_entry_window.WindowStartupLocation = WindowStartupLocation.CenterScreen;

            if (camera_params_entry_window.ShowDialog() == true)
            {
                fx = camera_params_entry_window.Fx;
                fy = camera_params_entry_window.Fy;
                cx = camera_params_entry_window.Cx;
                cy = camera_params_entry_window.Cy;

                // A value of -1 for any parameter means "estimate from the image size"
                if (fx == -1 || fy == -1 || cx == -1 || cy == -1)
                {
                    estimate_camera_parameters = true;
                }
                else
                {
                    estimate_camera_parameters = false;
                }
            }
        }

        private void OutputLocationItem_Click(object sender, RoutedEventArgs e)
        {
            var dlg = new CommonOpenFileDialog();
            dlg.Title = "Select output directory";
            dlg.IsFolderPicker = true;
            dlg.AllowNonFileSystemItems = false;
            dlg.EnsureFileExists = true;
            dlg.EnsurePathExists = true;
            dlg.EnsureReadOnly = false;
            dlg.EnsureValidNames = true;
            dlg.Multiselect = false;
            dlg.ShowPlacesList = true;

            if (dlg.ShowDialog() == CommonFileDialogResult.Ok)
            {
                var folder = dlg.FileName;
                record_root = folder;
            }
        }
    }
}