Can not retrieve first frame of movie opencv - opencv

I try to get the first frame of a movie. But it seems impossible with VideoCapture (opencv).
To make sure that I can not get the first frame, I made a movie with all the frames numbered. You can check that the saved video has the right numbering.
Whatever I do, I can not get the first frame back.
The code to check this:
reads a movie (6 frames)
writes the 6 frames with an index printed in per frame
reads the new created movie
shows the 2 images next to each other
How can I get the first frame of a movie?
Is this a bug or is something wrong in retrieving the frames in the code?
Please compile the code and run. Without arguments a small movie is generated. Standard output comments on what happens. Or specify a movie name as argument.
See also next question on the comparison of the 2 images ("Images are not the same after VideoWrite and videoRead OpenCv").
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
std::vector<Mat> ReadImages(string VideoName)
{
    // Read all frames of the movie VideoName back from disk and return them.
    // Each frame is also displayed so the user can verify the stamped frame
    // numbers. Returns an empty vector if the movie cannot be opened.
    VideoCapture capture(VideoName);
    cout << VideoName << endl;
    if (!capture.isOpened())
    {
        // Bug fix: the original message said "for write" (copy-paste from the
        // writer) and execution continued with an unopened capture.
        cout << "Could not open the video for reading: " << VideoName << endl;
        return std::vector<Mat>();
    }
    int NumFrames = (int)capture.get(CAP_PROP_FRAME_COUNT);
    std::vector<Mat> FramesRead(NumFrames);
    // READ_METHOD selects one of three equivalent ways of fetching a frame;
    // kept (instead of the original magic `switch (3)`) so the experiment
    // grab/retrieve vs. set+read vs. operator>> can still be reproduced.
    const int READ_METHOD = 3;
    for (int fr = 0; fr < NumFrames; ++fr)
    {
        cout << "Position of pointer in Movie: " << capture.get(CAP_PROP_POS_FRAMES) << endl;
        switch (READ_METHOD)
        {
        case 1:
            capture.grab();
            capture.retrieve(FramesRead[fr]);
            break;
        case 2:
            capture.set(CAP_PROP_POS_FRAMES, fr); // position explicitly first
            capture.read(FramesRead[fr]);
            break;
        default:
            capture >> FramesRead[fr];
            break;
        }
        if (FramesRead[fr].empty())
        {
            // Robustness: some containers report more frames than they can
            // actually deliver; imshow on an empty Mat would throw.
            cout << "Frame " << fr << " could not be decoded; stopping early\n";
            FramesRead.resize(fr);
            break;
        }
        imshow("Reader", FramesRead[fr]);
        cout << "Frame read should indicate a '" << fr << "'" << " and " << fr+1 << " Faces" << endl;
        cout << "if number in Image is not the same as written here: VideoReader does not retrieve first image! \n";
        if (waitKey(-1) == 1) break;
    }
    cout << "finished reading Movie\n";
    return FramesRead;
}
string WriteImages(std::vector<Mat> frames, string VideoName)
{
    // Write FRAMES to disk as a movie, stamping each frame with its index so
    // the frame order can be verified after reading the movie back.
    // Returns the name of the written file, or "-1" on failure.
    if (frames.empty())
    {
        // Robustness: frames[0] below would be out of bounds.
        cout << "No frames to write" << endl;
        return "-1";
    }
    Size S = Size(frames[0].cols, frames[0].rows); // writer needs the input size
    VideoWriter outputVideo;
    // Use an uncompressed codec scheme so no frames are recoded or dropped.
    string Codec = "raw ";  // alternatives: "tiff", "mp4v", "MJPG"
    String VideoName2 = VideoName + "_" + Codec + "_" + "Write.avi";
    cout << VideoName2 << endl;
    // Bug fix: the frames are 3-channel colour images (CV_8UC3, see
    // Face_Color_Movie), so the writer must be opened with isColor=true;
    // the original passed false.
    outputVideo.open(VideoName2,
                     VideoWriter::fourcc(Codec[0], Codec[1], Codec[2], Codec[3]),
                     1 /*fps*/, S, true);
    if (!outputVideo.isOpened())
    {
        cout << "Could not open the output video for write: " << VideoName << endl;
        return "-1";
    }
    for (size_t fr = 0; fr < frames.size(); ++fr)
    {
        // Stamp the frame index before displaying, so what is shown matches
        // exactly what is written (the original showed the unstamped frame).
        putText(frames[fr], to_string((int)fr), Point(200, 100),
                FONT_HERSHEY_SIMPLEX, 1, Scalar(255,255,255));
        imshow("writer", frames[fr]);
        waitKey(1);
        outputVideo << frames[fr]; // no clone needed: the writer copies the data
    }
    return VideoName2;
}
std::vector<Mat> Face_Color_Movie(int NumFrames)
{
// create a movie with colored faces
// Frame fr contains fr+1 faces, so every frame is visually distinct and the
// frame order can be verified after a write/read round trip.
const int w = 450;
std::vector<Mat> Frames(NumFrames);
// Start every frame from the same black 450x450 BGR canvas.
Mat tmp = Mat::zeros(w, w, CV_8UC3);
for(int i=0; i< NumFrames;i++)
tmp.copyTo(Frames[i]);
Scalar white = Scalar(0,0,255);
for(int fr=0;fr < NumFrames; fr++)
{
// Draw fr+1 faces into frame fr.
for( int i = 0; i < fr+1; i++ )
{
// Face i is placed on a 2-column grid: dx alternates column, dy steps per row.
int dx = (i%2)*250 - 30;
int dy = (i/2)*150;
// Face color in BGR: first face red, second blue, all others green.
// (The variable is called `white` but holds the current face color.)
if (i == 0)
white = Scalar(0,0,255);
else if(i == 1)
white = Scalar(255,0,0);
else
white = Scalar(0,255,0);
const Scalar black = Scalar(0);
// Only the first face gets "hair": a fan of short line segments.
if( i == 0 )
{
for( int j = 0; j <= 10; j++ )
{
double angle = (j+5)*CV_PI/21;
line(Frames[fr], Point(cvRound(dx+100+j*10-80*cos(angle)),
cvRound(dy+100-90*sin(angle))),
Point(cvRound(dx+100+j*10-30*cos(angle)),
cvRound(dy+100-30*sin(angle))), white, 1, 8, 0);
}
}
// imshow("Faces", Frames[fr]);waitKey(1);
// Head outline (filled ellipse in the face color).
ellipse( Frames[fr], Point(dx+150, dy+100), Size(100,70), 0, 0, 360, white, -1, 8, 0 );
// imshow("Faces", Frames[fr]);waitKey(1);
// Eyes: dark socket, lighter iris, dark pupil (left then right).
ellipse( Frames[fr], Point(dx+115, dy+70), Size(30,20), 0, 0, 360, black, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+185, dy+70), Size(30,20), 0, 0, 360, black, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+115, dy+70), Size(15,15), 0, 0, 360, white, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+185, dy+70), Size(15,15), 0, 0, 360, white, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+115, dy+70), Size(5,5), 0, 0, 360, black, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+185, dy+70), Size(5,5), 0, 0, 360, black, -1, 8, 0 );
// Nose and mouth.
ellipse( Frames[fr], Point(dx+150, dy+100), Size(10,5), 0, 0, 360, black, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+150, dy+150), Size(40,10), 0, 0, 360, black, -1, 8, 0 );
// Ears on both sides of the head.
ellipse( Frames[fr], Point(dx+27, dy+100), Size(20,35), 0, 0, 360, white, -1, 8, 0 );
ellipse( Frames[fr], Point(dx+273, dy+100), Size(20,35), 0, 0, 360, white, -1, 8, 0 );
}
// Show the finished frame briefly.
imshow("Faces", Frames[fr]);
waitKey(1);
}
return Frames;
}
std::vector<Mat> Load_Movie(int NumFrames,string VideoName)
{
// reads NUMFRAMES from a specified movie from disk
VideoCapture capture(VideoName);
if (!capture.isOpened())
cout << "Could not open the output video for write: " << VideoName << endl;
int ex = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
cout << "Codec: "<< ex << endl;
std::vector<Mat> Frames(NumFrames);
for(int fr = 0; fr < NumFrames; ++fr)
{
capture >> Frames[fr]; // get frame
imshow("test",Frames[fr]);
waitKey(30);
}
return Frames;
}
int main( int argc, char** argv )
{
int NumFrames = 6;
std::vector<Mat> Frames;
// create a movie
if (argc < 2)
// A movie with colored faces is generated
Frames = Face_Color_Movie(NumFrames);
// Frames = Face_Movie(NumFrames);
else{
// use an existing movie
string VideoName = "Megamind_bugy.avi"; // use your own movie here
VideoName = argv[1];
Frames = Load_Movie(NumFrames,VideoName);
}
//--------------------------------
// write the frames to disk
string VideoName2 = WriteImages(Frames, "Video_Faces.avi");
// read the frames from disk (again)
std::vector<Mat> FramesRead = ReadImages( VideoName2);
// show the frames and subtract to see if they are really the same?
for (int fr=0;fr <NumFrames; ++fr)
{
//-------------------------------
// first frame of read movie is missing!!!
// Original image; fr+1 because VideoReader skips the first frame!!!
imshow("Frame before writing to disk",Frames[fr]);
// Image read from disk (first frame missing)
imshow("Frame after reading from Disk",FramesRead[fr]);
cout << "-------------------------------\n";
cout << "Shows Frame before writing to disk and frames after reading from disk\n";
cout << "Image windows may lay on top of each other\n";
moveWindow("Frame after reading from Disk",505,46);
if(waitKey(-1)==1) break;
}
}

Related

segmentation failed (core dumped) working with opencv

I'm running into a problem, trying to perform a template matching using OpenCV on Ubuntu 18.04LTS
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
int match_method =5;
string image_window = "Source Image";
string result_window = "Result window";
Mat img, templ, result;
/// Load image and template
img = imread("./RI2.jpg", IMREAD_GRAYSCALE );
templ = imread("./Pump2.jpg", IMREAD_GRAYSCALE );
/// Create windows
//namedWindow( image_window, WINDOW_AUTOSIZE );
//namedWindow( result_window, WINDOW_AUTOSIZE );
/// Source image to display
Mat img_display;
img.copyTo( img_display );
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 );
/// Do the Matching and Normalize
matchTemplate( img, templ, result, match_method );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
Mat resultgrey(result_rows, result_cols, CV_8UC1);
cout << "resultgrey.size().width: " << resultgrey.size().width << endl;
cout << "resultgrey.size().height: " << resultgrey.size().height << endl;
cout << "result.size().width: " << result.size().width << endl;
cout << "result.size().height: " << result.size().height << endl;
if( match_method == 0 || match_method == 1 )
{
for (int i=0; i<result.size().width; i++)
{
for (int j=0; j<result.size().height; j++)
{
if (result.at<float>(i,j)>=0.1)
{
resultgrey.at<int>(i,j)=0;
}
else
{
resultgrey.at<int>(i,j)=1;
}
}
}
}
else
{
for (int i=0; i<result.size().width; i++)
{
for (int j=0; j<result.size().height; j++)
{
if (result.at<float>(i,j)<=0.98)
{
resultgrey.at<int>(i,j)=0;
//cout << "0" << endl;
}
else
{
resultgrey.at<int>(i,j)=1;
//cout << "1" << endl;
}
}
}
}
cout << "3" << endl;
/// Localizing the objects
vector<Point> matchLoclist;
//cout << resultgrey << endl;
findNonZero(resultgrey, matchLoclist);
cout << "4" << endl;
if (matchLoclist.size() == 0)
{
cout << "no matches found" << endl;
return 0;
}
///Draw Rectangles on Pumps found in the scene
for (int i=0; i<matchLoclist.size(); i++)
{
//cout << "matchLoclist[i].x: "<<matchLoclist[i].x << endl << "matchLoclist[i].y: " << matchLoclist[i].y << endl;
rectangle( img_display, matchLoclist[i], Point( matchLoclist[i].x + templ.cols, matchLoclist[i].y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoclist[i], Point( matchLoclist[i].x + templ.cols, matchLoclist[i].y + templ.rows ), Scalar::all(0), 2, 8, 0 );
}
imshow( image_window, img_display );
imshow( result_window, result );
waitKey(0);
return 0;
}
as an output i get:
xxx#ubuntu:~/Projects/Template_matching$ ./template_matching
resultgrey.size().width: 1216
resultgrey.size().height: 723
result.size().width: 1216
result.size().height: 723
Segmentation fault (core dumped)
This happens during the double for-loop where either a 1 or a 0 gets written into "resultgrey", as I never get the "3" as an output from the cout below.
If I take different input pictures (especially smaller ones) the program tends to run without this error.
I appreciate any help or suggestions!
Alex
You write outside of the allocated buffer because of (1) incorrectly specified data types and (2) swapped arguments to .at, as #rafix07 has noted.
You create 8-bit matrix (8 in CV_8UC1):
Mat resultgrey(result_rows, result_cols, CV_8UC1);
but try to assign 32-bit values to its elements in double-for loop:
resultgrey.at<int>(i,j)=0;
Template method cv::Mat::at calculates address of the (i,j)-th element in memory, based on:
data type, specified in template instantiation,
pointer to data start, stored in the cv::Mat instance,
and data stride (distance in bytes between leftmost pixels of two consecutive lines), also stored in the cv::Mat instance.
Then it returns reference to it. No checks is performed, for speed, therefore it's your responsibility to submit correct arguments.
Size of int is 32 bits on most modern platforms, but can be different.
Generally, it is safer to use types from stdint.h header, that have explicit length and sign in their names: uint8_t, int32_t, etc
Look at reference about Mat::at method
const _Tp& cv::Mat::at ( int i0, int i1 ) const
Parameters
i0 Index along the dimension 0
i1 Index along the dimension 1
the first dimension is the number of rows, the second dimension is the number of columns, so you should change all lines in your code with at
resultgrey.at<int>(i,j) // i means col, j means row
to
resultgrey.at<int>(j,i)

Application error(0xc000007b) when runnning Opencv example in Visual Studio 2015

I am running canny edge example in Visual Studio 2015 and i got this error.
The application was unable to start correctly (0xc000007b).
And then visual studio show to this error.
Unhandled exception at 0x77A2D5B2 (ntdll.dll) in Canny Edge.exe: 0xC000007B: %hs is either not designed to run on Windows or it contains an error. Try installing the program again using the original installation media or contact your system administrator or the software vendor for support. Error status 0x.
I am quite sure this code works, as I ran it before in Visual Studio 2013. Here is my code.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <algorithm>
using namespace cv;
using namespace std;
void help()
{
    // Print a short usage message for the Hough line demo.
    cout << "\nThis program demonstrates line finding with the Hough transform.\n";
    cout << "Usage:\n";
    cout << "./houghlines <image_name>, Default is pic1.jpg\n";
    cout << endl;
}
bool less_by_y(const cv::Point& lhs, const cv::Point& rhs)
{
    // Ordering predicate: ranks points by their y coordinate only
    // (used to find the topmost/bottommost point of a contour).
    return rhs.y > lhs.y;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.jpg";
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Rect roi;
Mat src = imread("test_4_1.png");
if (src.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
findContours(dst, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
//vector<Vec2f> lines;
//HoughLines(dst, lines, 1, CV_PI / 180, 50, 0, 0);
//for (size_t i = 0; i < lines.size(); i++)
//{
// float rho = lines[i][0], theta = lines[i][1];
// Point pt1, pt2;
// double a = cos(theta), b = sin(theta);
// double x0 = a*rho, y0 = b*rho;
// pt1.x = cvRound(x0 + 1000 * (-b));
// pt1.y = cvRound(y0 + 1000 * (a));
// pt2.x = cvRound(x0 - 1000 * (-b));
// pt2.y = cvRound(y0 - 1000 * (a));
// line(cdst, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
// cout << pt1 << " " << pt2 << endl;
//}
vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI / 180, 30, 50, 10);
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 1, CV_AA);
cout << l << endl;
}
cout << endl << lines.size() << endl;
cout << arcLength(contours[0], true) << endl;
cout << dst.size() << endl << endl;
for (int a = 0; a < contours[0].size(); a++){
cout << contours[0][a] << " ";
}
vector<Point> test = contours[0];
auto mmx = std::minmax_element(test.begin(), test.end(), less_by_y);
cout << endl << *mmx.first._Ptr << endl << *mmx.second._Ptr;
vector<Point> test2 = contours[1];
auto mmx_1 = std::minmax_element(test2.begin(), test2.end(), less_by_y);
cout << endl << *mmx_1.first._Ptr << endl << *mmx_1.second._Ptr;
imshow("source", src);
imshow("detected lines", cdst);
/* ROI by creating mask for the parallelogram */
Mat mask = cvCreateMat(dst.size().height, dst.size().width, CV_8UC1);
// Create black image with the same size as the original
for (int i = 0; i < mask.cols; i++)
for (int j = 0; j < mask.rows; j++)
mask.at<uchar>(Point(i, j)) = 0;
cout <<endl<<endl<< *mmx.first._Ptr << *mmx.second._Ptr << *mmx_1.first._Ptr << *mmx_1.second._Ptr << endl;
// Create Polygon from vertices
vector<Point> ROI_Vertices = { *mmx.first._Ptr, *mmx.second._Ptr, *mmx_1.first._Ptr, *mmx_1.second._Ptr};
vector<Point> ROI_Poly;
approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, false);
// Fill polygon white
fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);
cout << ROI_Poly.size() << endl;
// Create new image for result storage
Mat imageDest = cvCreateMat(dst.size().height, dst.size().width, CV_8UC3);
// Cut out ROI and store it in imageDest
src.copyTo(imageDest, mask);
imshow("mask", mask);
imshow("image", imageDest);
waitKey();
return 0;
}
Actually my comment is the answer, with some additions
What OpenCV Libs are you linking to? Are you linking to vs12? Because
you need to upgrade your linker to vs13 for MSVS 2015
OpenCV Doesn't come with Visual Studio 15 pre-builds, so you need to build OpenCV yourself for VS2015
This person seems to have had a similar problem and talks you through how to compile for VS2015

Inacurate tracking when drawing calcOpticalFlow's outputed feature vector

I have been trying to develop a simple feature tracking program. The user outlines an area on the screen with their mouse, and a mask is created for this area and passed to goodFeaturesToTrack. The features found by the function are then drawn on the screen (represented by blue circles).
Next I pass the feature vector returned by the function to calcOpticalFlowPyrLK and draw the resulting vector of points on the screen (represented by green circles). Although the program tracks the direction of flow correctly, for some reason the features output by the calcOpticalFlow function do not line up with the object's location on the screen.
I feel as though it is a small mistake in the logic I have used on my part, but I just can't seem to decompose it, and I would really appreciate some help from you guys.
I have posted my code below, and I would like to greatly apologize for the global variables and messy structure. I am just testing at the moment, and plan to clean up and convert to an OOP format as soon as I get it running.
As well, here is a link to a YouTube video I have uploaded that demonstrates the behavior I am combating.
// ---- Shared state for the feature-tracking demo (author notes in the
// question that the globals are temporary, pending an OOP rewrite). ----
bool drawingBox = false; // true while the left mouse button drags the selection box
bool destroyBox = false; // set on right-button release; main loop exits
bool targetAcquired = false; // a valid selection box exists
bool featuresFound = false; // goodFeaturesToTrack has run for the current box
CvRect box; // current selection rectangle
int boxCounter = 0; // number of boxes drawn so far (for logging)
cv::Point objectLocation; // top-left corner of the normalized box
cv::Mat prevFrame, nextFrame, prevFrame_1C, nextFrame_1C; // colour frames and their grayscale versions
std::vector<cv::Point2f> originalFeatures, newFeatures, baseFeatures; // tracked corner positions
std::vector<uchar> opticalFlowFeatures; // per-feature status flags from calcOpticalFlowPyrLK
std::vector<float> opticalFlowFeaturesError; // per-feature tracking error
cv::TermCriteria opticalFlowTermination = cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3);
cv::Mat mask; // selection mask passed to goodFeaturesToTrack
cv::Mat clearMask; // all-zero mask used to reset `mask` each frame
long currentFrame = 0; // frame counter (used with CV_CAP_PROP_POS_FRAMES in main)
void draw(cv::Mat image, CvRect rectangle)
{
    // Overlay the selection box (while dragging) and the tracked features:
    // blue circles = positions from goodFeaturesToTrack (baseFeatures),
    // green circles = positions from the latest optical-flow step.
    if (drawingBox)
    {
        cv::rectangle(image, cv::Point(box.x, box.y),
                      cv::Point(box.x + box.width, box.y + box.height),
                      cv::Scalar(225, 238, 81), 2);
        // (Removed an unused local CvRect copy the original created here.)
    }
    if (featuresFound)
    {
        for (size_t i = 0; i < originalFeatures.size(); i++)
        {
            cv::circle(image, baseFeatures[i], 4, cv::Scalar(255, 0, 0), 1, 8, 0);
            cv::circle(image, newFeatures[i], 4, cv::Scalar(0, 255, 0), 1, 8, 0);
            cv::line(image, baseFeatures[i], newFeatures[i], cv::Scalar(255, 0, 0), 2, CV_AA);
        }
    }
}
void findFeatures(cv::Mat mask)
{
    // Detect corners inside MASK once per selection; results go into the
    // global feature vectors and featuresFound blocks repeated detection.
    if (featuresFound || !targetAcquired)
        return;
    cv::goodFeaturesToTrack(prevFrame_1C, baseFeatures, 200, 0.1, 0.1, mask);
    originalFeatures = baseFeatures;
    featuresFound = true;
    std::cout << "Number of Corners Detected: " << originalFeatures.size() << std::endl;
    for (size_t idx = 0; idx < originalFeatures.size(); idx++)
    {
        std::cout << "Corner Location " << idx << ": " << originalFeatures[idx].x
                  << "," << originalFeatures[idx].y << std::endl;
    }
}
void trackFeatures()
{
// Run one pyramidal Lucas-Kanade step: track originalFeatures from the
// previous grayscale frame into the next one, then adopt the new positions
// as the starting points for the following call.
cv::calcOpticalFlowPyrLK(prevFrame_1C, nextFrame_1C, originalFeatures, newFeatures, opticalFlowFeatures, opticalFlowFeaturesError, cv::Size(30,30), 5, opticalFlowTermination);
originalFeatures = newFeatures;
}
void mouseCallback(int event, int x, int y, int flags, void *param)
{
    // Mouse handler: drag with the left button to define the tracking box;
    // right-button release tells the main loop to shut down.
    (void)flags;
    (void)param; // bug fix: the original copied *param into a local Mat it never used
    switch (event)
    {
    case CV_EVENT_MOUSEMOVE:
        // While dragging, grow the box towards the current cursor position.
        if (drawingBox)
        {
            box.width = x - box.x;
            box.height = y - box.y;
        }
        break;
    case CV_EVENT_LBUTTONDOWN:
        // Start a new selection at the click position.
        drawingBox = true;
        box = cvRect(x, y, 0, 0);
        targetAcquired = false;
        cv::destroyWindow("Selection");
        break;
    case CV_EVENT_LBUTTONUP:
        // Selection finished: report it, normalize negative width/height
        // (box dragged up or left), and arm the tracker.
        drawingBox = false;
        featuresFound = false;
        boxCounter++;
        std::cout << "Box " << boxCounter << std::endl;
        std::cout << "Box Coordinates: " << box.x << "," << box.y << std::endl;
        std::cout << "Box Height: " << box.height << std::endl;
        std::cout << "Box Width: " << box.width << std::endl << std::endl;
        if (box.width < 0)
        {
            box.x += box.width;
            box.width *= -1;
        }
        if (box.height < 0)
        {
            box.y += box.height;
            box.height *= -1;
        }
        objectLocation.x = box.x;
        objectLocation.y = box.y;
        targetAcquired = true;
        break;
    case CV_EVENT_RBUTTONUP:
        destroyBox = true;
        break;
    }
}
int main ()
{
    // Webcam feature-tracking demo: the user selects a box, corners are found
    // inside it and tracked with optical flow from frame to frame.
    const char *name = "Boundary Box";
    cv::namedWindow(name);
    cv::VideoCapture camera;
    cv::Mat cameraFrame;
    int cameraNumber = 0;
    camera.open(cameraNumber);
    if (!camera.isOpened())
    {
        std::cerr << "ERROR: Could not access the camera or video!" << std::endl;
        return 1; // bug fix: the original only printed and kept using the closed camera
    }
    camera >> cameraFrame;
    cv::Mat mask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);
    cv::Mat clearMask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);
    cv::setMouseCallback(name, mouseCallback, &cameraFrame);
    while (true)
    {
        if (destroyBox)
        {
            cv::destroyAllWindows();
            break;
        }
        camera >> cameraFrame;
        if (cameraFrame.empty())
        {
            std::cerr << "ERROR: Could not grab a camera frame." << std::endl;
            exit(1);
        }
        // Bug fix (cause of the misaligned tracking, per the answer below):
        // CV_CAP_PROP_POS_FRAMES cannot seek a live webcam stream, so the
        // original effectively used the same image for prevFrame and
        // nextFrame. Instead, remember the previous iteration's frame and
        // pair it with the one just grabbed.
        if (prevFrame.empty())
        {
            // First iteration: no previous frame yet, just remember this one.
            prevFrame = cameraFrame.clone();
            continue;
        }
        cv::cvtColor(prevFrame, prevFrame_1C, cv::COLOR_BGR2GRAY);
        nextFrame = cameraFrame;
        cv::cvtColor(nextFrame, nextFrame_1C, cv::COLOR_BGR2GRAY);
        if (targetAcquired)
        {
            // White-out the selection in the mask, detect once, then reset it.
            cv::Mat roi(mask, cv::Rect(box.x, box.y, box.width, box.height));
            roi = cv::Scalar(255, 255, 255);
            findFeatures(mask);
            clearMask.copyTo(mask);
            trackFeatures();
        }
        draw(cameraFrame, box);
        cv::imshow(name, cameraFrame);
        cv::waitKey(20);
        // Deep copy so next iteration's prevFrame is not aliased to cameraFrame.
        prevFrame = cameraFrame.clone();
    }
    cv::destroyWindow(name);
    return 0;
}
In my opinion you can't use camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame) on a webcam, but I 'm not positive about that.
Instead I suggest you to save the previous frame in your prevFrame variable.
As an example I can suggest you this working code, I only change inside the while loop and I add comment before all my adds :
// Suggested replacement main loop: instead of seeking the webcam stream
// (CV_CAP_PROP_POS_FRAMES does not work on a live camera), remember the
// previous frame and pair it with the freshly grabbed one.
while(true)
{
if (destroyBox)
{
cv::destroyAllWindows();
break;
}
camera >> cameraFrame;
if (cameraFrame.empty())
{
std::cerr << "ERROR: Could not grab a camera frame." << std::endl;
exit(1);
}
// new lines
// Bootstrap: on the very first iteration there is no previous frame yet.
if(prevFrame.empty()){
prevFrame = cameraFrame;
continue;
}
// end new lines
//camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame);
//camera >> prevFrame;
cv::cvtColor(prevFrame, prevFrame_1C, cv::COLOR_BGR2GRAY);
//camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame ++);
//camera >> nextFrame;
// new line
// The frame just grabbed is the "next" frame of the optical-flow pair.
nextFrame = cameraFrame;
cv::cvtColor(nextFrame, nextFrame_1C, cv::COLOR_BGR2GRAY);
if (targetAcquired)
{
cv::Mat roi (mask, cv::Rect(box.x, box.y, box.width, box.height));
roi = cv::Scalar(255, 255, 255);
findFeatures(mask);
clearMask.copyTo(mask);
trackFeatures();
}
draw(cameraFrame, box);
cv::imshow(name, cameraFrame);
cv::waitKey(20);
// old = new
// new line
// Deep copy so the stored previous frame is not aliased to cameraFrame.
prevFrame = cameraFrame.clone();
}

Failed Assertion Using HOGDescriptor

Ok, so I've decided that using a histogram of oriented gradients is a better method for image fingerprinting vs. creating a histogram of sobel derivatives. I think I finally have it mostly figured out but when I test my code I get the following:
OpenCV Error: Assertion failed ((winSize.width - blockSize.width) % blockStride.width == 0 && (winSize.height - blockSize.height) % blockStride.height == 0).
As of now I'm just trying to figure out how to compute the HOG correctly and see the results; but not visually, I just want some very basic output to see if the HOG was created. Then I'll figure out how to use it in image comparison.
Here is my sample code:
using namespace cv;
using namespace std;
int main(int argc, const char * argv[])
{
// Initialize string variables.
string thePath, img, hogSaveFile;
thePath = "/Users/Mikie/Documents/Xcode/images/";
img = thePath + "HDimage.jpg";
hogSaveFile = thePath + "HDimage.yml";
// Create mats.
Mat src;
// Load image as grayscale.
src = imread(img, CV_LOAD_IMAGE_GRAYSCALE);
// Verify source loaded.
if(src.empty()){
cout << "No image data. \n ";
return -1;
}else{
cout << "Image loaded. \n" << "Size: " << src.cols << " X " << src.rows << "." << "\n";
}
// Initialize float variables.
float imgWidth, imgHeight, newWidth, newHeight;
imgWidth = src.cols;
imgHeight = src.rows;
newWidth = 320;
newHeight = (imgHeight/imgWidth)*newWidth;
Mat dst = Mat::zeros(newHeight, newWidth, CV_8UC3);
resize(src, dst, Size(newWidth, newHeight), CV_INTER_LINEAR);
// Was resize successful?
if (dst.rows < src.rows && dst.cols < src.cols) {
cout << "Resize successful. \n" << "New size: " << dst.cols << " X " << dst.rows << "." << "\n";
} else {
cout << "Resize failed. \n";
return -1;
}
vector<float>theHOG(Mat dst);{
if (dst.empty()) {
cout << "Image lost. \n";
} else {
cout << "Setting up HOG. \n";
}
imshow("Image", dst);
bool gammaC = true;
int nlevels = HOGDescriptor::DEFAULT_NLEVELS;
Size winS(newWidth, newHeight);
// int block_size = 16;
// int block_stride= 8;
// int cell_size = 8;
int gbins = 9;
vector<float> descriptorsValues;
vector<Point> locations;
HOGDescriptor hog(Size(320, 412), Size(16, 16), Size(8, 8), Size(8, 8), gbins, -1, HOGDescriptor::L2Hys, 0.2, gammaC, nlevels);
hog.compute(dst, descriptorsValues, Size(0,0), Size(0,0), locations);
printf("descriptorsValues.size() = %ld \n", descriptorsValues.size()); //prints 960
for (int i = 0; i <descriptorsValues.size(); i++) {
cout << descriptorsValues[i] << endl;
}
}
cvWaitKey(0);
return 0;
}
As you can see, I messed around with different variables to define the sizes but to no avail so, I commented them out and tried manually setting them. Still nothing. What am I doing wrong? Any help will be greatly appreciated.
Thank you!
You are initializing the HOGDescriptor incorrectly.
The assertion states that each of the first three input parameters must satisfy the constraint:
(winSize - blockSize) % blockStride == 0
in both height and width dimensions.
The problem is that winSize.height does not satisfy this constraint, considering the other parameters you initialize hog with:
(412 - 16) % 8 = 4 //Problem!!
Probably the simplest fix is to increase your window dimensions from cv::Size(320,412) to something divisible by 8, perhaps cv::Size(320,416), but the specific size will depend on your specific requirements. Just pay attention to what the assertion is saying!

OpenCV Fingertip detection

Good day. I'm new to OpenCV and right now, I'm trying to do fingertip detection using colour tracking and background subtraction methods. I got the colour tracking part working but I have no idea on how to subtract the background and leave only the fingertips.
Here is my code.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
IplImage* GetThresholdedImage(IplImage* img, CvScalar& lowerBound, CvScalar& upperBound)
{
    // Build a single-channel binary mask of the pixels whose HSV value lies
    // inside [lowerBound, upperBound]. The caller owns the returned image
    // and must cvReleaseImage it.
    CvSize frameSize = cvGetSize(img);
    IplImage* hsv = cvCreateImage(frameSize, 8, 3);
    IplImage* thresholded = cvCreateImage(frameSize, 8, 1);
    cvCvtColor(img, hsv, CV_BGR2HSV);           // work in HSV space
    cvInRangeS(hsv, lowerBound, upperBound, thresholded);
    cvReleaseImage(&hsv);                       // temporary no longer needed
    return thresholded;
}
int main()
{
    // Colour-based tracker: threshold each webcam frame to the configured
    // HSV range, estimate the blob position from image moments, and draw
    // the trajectory onto the live video.
    int lineThickness = 2;
    CvScalar lowerBound = cvScalar(20, 100, 100);
    CvScalar upperBound = cvScalar(30, 255, 255);
    // These override the range above; kept to preserve the original behaviour.
    lowerBound = cvScalar(0, 58, 89);
    upperBound = cvScalar(25, 173, 229);
    CvCapture* capture = cvCaptureFromCAM(1);
    if (!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    cvNamedWindow("video");
    cvNamedWindow("thresh");
    // Accumulates the drawn trajectory ("scribble") across frames.
    IplImage* imgScribble = NULL;
    while (true)
    {
        IplImage* frame = cvQueryFrame(capture);
        if (!frame)
            break;
        // If this is the first frame, we need to initialize the scribble layer.
        if (imgScribble == NULL)
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        // Tracked colour -> white, the rest -> black.
        IplImage* imgThresh = GetThresholdedImage(frame, lowerBound, upperBound);
        // Bug fix: the original malloc'ed a CvMoments and later freed it with
        // `delete` (undefined behaviour). A stack object needs no allocation.
        CvMoments moments;
        cvMoments(imgThresh, &moments, 1);
        double moment10 = cvGetSpatialMoment(&moments, 1, 0);
        double moment01 = cvGetSpatialMoment(&moments, 0, 1);
        double area = cvGetCentralMoment(&moments, 0, 0);
        // Last and current estimated positions.
        static int posX = 0;
        static int posY = 0;
        int lastX = posX;
        int lastY = posY;
        // Bug fix: guard against division by zero when no pixel matched;
        // keep the previous position in that case.
        if (area > 0)
        {
            posX = (int)(moment10 / area);
            posY = (int)(moment01 / area);
        }
        cout << "position = " << posX << " " << posY << endl;
        // Draw only between two valid positions.
        if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0)
            cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), upperBound, lineThickness);
        // Add the scribbling image and the frame.
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh", imgThresh);
        cvShowImage("video", frame);
        int c = cvWaitKey(10);
        // Bug fix: release the thresholded image before a possible break —
        // the original leaked it when exiting via ESC.
        cvReleaseImage(&imgThresh);
        if (c == 27) // ESC key
            break;
    }
    // Bug fix: release the scribble layer too.
    cvReleaseImage(&imgScribble);
    cvReleaseCapture(&capture);
    return 0;
}
I don't know if I understand you right, but I think you need to add the following:
cvErode(imgThreshed, imgThreshed, NULL, 1);
cvDilate(imgThreshed, imgThreshed, NULL, 1);
in GetThresholdedImage and get less noise ! but after all I think it would be better for you to use the cv::Mat object of opencv ;)
Try BGS library, I used it before and like it. You can get it here: http://code.google.com/p/bgslibrary/

Resources