Working on sequence captures.

Tadas Baltrusaitis 2017-11-08 20:50:46 +00:00
parent 75bb688d2c
commit 839e1bc738
3 changed files with 146 additions and 12 deletions

View File

@@ -332,7 +332,7 @@ int main (int argc, char **argv)
while (!captured_image.empty())
{
// Grab the timestamp first
// Grab the timestamp first (TODO timestamp should be grabbed from sequence)
if (video_input)
{
time_stamp = (double)frame_count * (1.0 / fps_vid_in);
@@ -343,7 +343,7 @@ int main (int argc, char **argv)
time_stamp = (double)frame_count * (1.0 / 30.0);
}
// Reading the images
// Reading the images, TODO grayscale should be grabbed another way
cv::Mat_<uchar> grayscale_image;
if (captured_image.channels() == 3)

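The two TODOs above point at pushing this bookkeeping into the sequence reader: the grayscale frame is already exposed by SequenceCapture::GetGrayFrame() in this commit, while a per-frame timestamp getter is not. A hedged sketch of what such a getter could look like (GetTimestamp is hypothetical; frame_num and fps are members used elsewhere in this commit, and frame_num is assumed to be incremented by GetNextFrame):

// Hypothetical SequenceCapture method -- not part of this commit.
double SequenceCapture::GetTimestamp()
{
    // Webcam / video file: derive the timestamp from the frame index and fps.
    // Image sequences set fps = 0, so fall back to the 30 fps guess the main
    // loop currently hard-codes.
    double effective_fps = (fps > 0) ? fps : 30.0;
    return (double)(frame_num - 1) * (1.0 / effective_fps);
}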
View File

@@ -55,7 +55,9 @@ namespace Utilities
public:
// Default constructor
SequenceCapture();
SequenceCapture() {};
// TODO block copy, move etc.
// Opening based on command line arguments
bool Open(std::vector<std::string> arguments);
@@ -71,6 +73,16 @@ namespace Utilities
// Video file
bool OpenVideoFile(std::string video_file, float fx = -1, float fy = -1, float cx = -1, float cy = -1);
// Getting the next frame
cv::Mat GetNextFrame();
// Getting the most recent grayscale frame (need to call GetNextFrame first)
cv::Mat_<uchar> GetGrayFrame();
double GetProgress();
bool IsOpened();
int frame_width;
int frame_height;
@@ -98,6 +110,8 @@ namespace Utilities
// Length of video allowing to assess progress
int vid_length;
bool img_grabbed;
};
}
#endif
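The "TODO block copy, move etc." note above presumably means disallowing copies, since the class owns a cv::VideoCapture handle and cached frames; a minimal sketch of one way to do it inside the class body (not part of this commit):

// Block copying; a move constructor/assignment could still be added explicitly later.
SequenceCapture(const SequenceCapture& other) = delete;
SequenceCapture& operator=(const SequenceCapture& other) = delete;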

View File

@@ -42,12 +42,6 @@
using namespace Utilities;
// TODO initialize defaults
SequenceCapture::SequenceCapture()
{
}
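The "TODO initialize defaults" note suggests the members should start from sane zero values; a sketch using only members that appear elsewhere in this commit (types are assumed from usage; in-class default member initializers in SequenceCapture.h would be one place for them):

// Sketch of default values -- not part of the commit.
int frame_width = 0;
int frame_height = 0;
double fps = 0;
int frame_num = 0;
int vid_length = 0;
bool is_webcam = false;
bool is_image_seq = false;
bool img_grabbed = false;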
bool SequenceCapture::Open(std::vector<std::string> arguments)
{
@@ -198,7 +192,22 @@ bool SequenceCapture::OpenWebcam(int device, int image_width, int image_height,
this->fps = capture.get(CV_CAP_PROP_FPS);
// TODO estimate the fx, fy etc.
// If optical centers are not defined just use center of image
if (cx == -1)
{
cx = frame_width / 2.0f;
cy = frame_height / 2.0f;
}
// Use a rough guess-timate of focal length
if (fx == -1)
{
fx = 500 * (frame_width / 640.0);
fy = 500 * (frame_height / 480.0);
fx = (fx + fy) / 2.0;
fy = fx;
}
return true;
}
@@ -229,13 +238,27 @@ bool SequenceCapture::OpenVideoFile(std::string video_file, float fx, float fy,
return false;
}
// TODO estimate the fx, fy etc.
// If optical centers are not defined just use center of image
if (cx == -1)
{
cx = frame_width / 2.0f;
cy = frame_height / 2.0f;
}
// Use a rough guess-timate of focal length
if (fx == -1)
{
fx = 500 * (frame_width / 640.0);
fy = 500 * (frame_height / 480.0);
fx = (fx + fy) / 2.0;
fy = fx;
}
return true;
}
void SequenceCapture::OpenImageSequence(std::string directory, float fx, float fy, float cx, float cy)
bool SequenceCapture::OpenImageSequence(std::string directory, float fx, float fy, float cx, float cy)
{
image_files.clear();
@@ -265,6 +288,103 @@ void SequenceCapture::OpenImageSequence(std::string directory, float fx, float f
return false;
}
// Assume all images are same size in an image sequence
cv::Mat tmp = cv::imread(image_files[0], -1);
this->frame_height = tmp.size().height;
this->frame_width = tmp.size().width;
// If optical centers are not defined just use center of image
if (cx == -1)
{
cx = frame_width / 2.0f;
cy = frame_height / 2.0f;
}
// Use a rough guess-timate of focal length
if (fx == -1)
{
fx = 500 * (frame_width / 640.0);
fy = 500 * (frame_height / 480.0);
fx = (fx + fy) / 2.0;
fy = fx;
}
// No fps as we have a sequence
this->fps = 0;
return true;
}
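The optical-centre and focal-length fallback is now duplicated verbatim in OpenWebcam, OpenVideoFile and OpenImageSequence; a sketch of pulling it into one private helper (the helper name is hypothetical, the arithmetic is the duplicated block above):

// Hypothetical private helper; callers keep passing their fx/fy/cx/cy through it.
void SequenceCapture::EstimateCameraParameters(float &fx, float &fy, float &cx, float &cy)
{
    // If optical centers are not defined just use center of image
    if (cx == -1)
    {
        cx = frame_width / 2.0f;
        cy = frame_height / 2.0f;
    }
    // Use a rough guess-timate of focal length
    if (fx == -1)
    {
        fx = 500.0f * (frame_width / 640.0f);
        fy = 500.0f * (frame_height / 480.0f);
        fx = (fx + fy) / 2.0f;
        fy = fx;
    }
}

Each Open* function would then replace the duplicated block with a single EstimateCameraParameters(fx, fy, cx, cy); call.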
cv::Mat SequenceCapture::GetNextFrame()
{
frame_num++;
if (is_webcam && !is_image_seq)
{
bool success = capture.read(latest_frame);
if (!success)
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
}
else if (is_image_seq)
{
if (image_files.empty() || frame_num - 1 >= (int)image_files.size())
{
// Indicate lack of success by returning an empty image
latest_frame = cv::Mat();
}
else
{
latest_frame = cv::imread(image_files[frame_num - 1], -1);
}
}
// Set the grayscale frame and flag it as fresh for GetGrayFrame()
if (!latest_frame.empty())
{
if (latest_frame.channels() == 3)
{
cv::cvtColor(latest_frame, latest_gray_frame, CV_BGR2GRAY);
}
else
{
latest_gray_frame = latest_frame.clone();
}
img_grabbed = true;
}
return latest_frame;
}
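One caveat with the grayscale step above: cv::imread(..., -1) loads images unchanged, so an image sequence can hand back 4-channel (BGRA) or already-grayscale frames. A sketch of a more defensive conversion (the helper name is made up; it assumes 8-bit input):

// Hypothetical channel-aware conversion, assuming 8-bit input.
static void ConvertToGrayscale(const cv::Mat &in, cv::Mat_<uchar> &out)
{
    if (in.channels() == 3)
        cv::cvtColor(in, out, CV_BGR2GRAY);
    else if (in.channels() == 4)
        cv::cvtColor(in, out, CV_BGRA2GRAY);
    else
        out = in.clone();
}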
double SequenceCapture::GetProgress()
{
if (is_webcam)
{
return -1.0;
}
else
{
return (double)frame_num / (double)vid_length;
}
}
bool SequenceCapture::IsOpened()
{
if (is_webcam || !is_image_seq)
return capture.isOpened();
else
return (image_files.size() > 0 && frame_num < image_files.size());
}
cv::Mat_<uchar> SequenceCapture::GetGrayFrame() {
if (img_grabbed)
{
img_grabbed = false;
return latest_gray_frame;
}
else
{
std::cout << "Need to call GetNextFrame() before calling GetGrayFrame()" << std::endl;
return cv::Mat_<uchar>();
}
}
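For reference, the call order that the img_grabbed flag enforces (GetNextFrame before GetGrayFrame) in a minimal caller loop; a sketch only, with arguments built from argv as in the main executable:

Utilities::SequenceCapture sequence_reader;
if (!sequence_reader.Open(arguments))
    return 1;

cv::Mat frame = sequence_reader.GetNextFrame();
while (!frame.empty())
{
    cv::Mat_<uchar> gray = sequence_reader.GetGrayFrame(); // valid only after GetNextFrame()
    double progress = sequence_reader.GetProgress();       // -1 for a webcam, [0, 1] otherwise

    // ... run landmark detection / feature extraction on frame and gray ...

    frame = sequence_reader.GetNextFrame();
}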