I am trying to run the following program, based on what is written on this site:
http://www.lirtex.com/robotics/fast-object-tracking-robot-computer-vision/
Initialization parameters:
CvCapture *capture = cvCaptureFromCAM(0);
int width = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH );
int height = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT );
IplImage *frame;
CvSize size = cvSize(640,480);
cvNamedWindow( "Camera", CV_WINDOW_NORMAL );
cvNamedWindow( "HSV", CV_WINDOW_NORMAL );
cvNamedWindow( "EdgeDetection", CV_WINDOW_NORMAL );
cvResizeWindow("Camera",640,480);
cvResizeWindow("HSV",640,480);
cvResizeWindow("EdgeDetection",640,480);
IplImage * hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);
When I call the function cvCvtColor like this:
cvCvtColor(frame, frame , CV_BGR2HSV);
with the second parameter equal to the first, it works. But when I call the function like this:
cvCvtColor(frame, hsv_frame , CV_BGR2HSV);
it fails and throws an exception.
Does anyone have any idea why this happens?
The array frame has the size of the captured image, whereas hsv_frame has been hard-coded to 640x480. The color-space conversion operation (cvCvtColor) requires that the source and destination be the same size, hence the exception.
Solution:
a. Use the same size as the input by setting
CvSize size = cvSize(frame->width, frame->height);
-OR-
b. If you want the final output to be 640x480, resize the input to the required size using cvResize.
One final note: the C++ interface is more intuitive, more stable, and has additional features compared to the C interface. I recommend moving to the C++ interface of OpenCV.
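As a rough sketch of what the same setup could look like with the C++ API (camera index and window names follow the question; on older OpenCV versions the color-conversion constant is CV_BGR2HSV rather than cv::COLOR_BGR2HSV):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);                        // camera index 0, as in the question
    if (!cap.isOpened()) return -1;

    cv::Mat frame, hsv, hsvResized;
    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;

        // cv::cvtColor allocates the destination with the same size as the
        // source, so the size-mismatch exception cannot occur here.
        cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);

        // Optional: resize only if a fixed 640x480 output is really needed.
        cv::resize(hsv, hsvResized, cv::Size(640, 480));

        cv::imshow("Camera", frame);
        cv::imshow("HSV", hsvResized);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}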
I am trying to convert a BGR image into YUV420P, but when I try to view each of the YUV planes separately, this is what I see.
Shouldn't cv::Mat::data and AVFrame::data[0] be packed in the same way? I should be able to do a direct memcpy. Am I missing something?
Any ideas?
Mat frame;
VideoCapture cap;
if(!cap.open(0)){
return 0;
}
// capture a frame
cap >> frame;
if( frame.empty() ) return 0;
cv::Size s = frame.size();
int height = s.height;
int width = s.width;
// Creating two frames for conversion
AVFrame *pFrameYUV =av_frame_alloc();
AVFrame *pFrameBGR =av_frame_alloc();
// Determine required buffer size and allocate buffer for YUV frame
int numBytesYUV=av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width,
height,1);
// Assign image buffers
avpicture_fill((AVPicture *)pFrameBGR, frame.data, AV_PIX_FMT_BGR24,
width, height);
uint8_t* bufferYUV=(uint8_t *)av_malloc(numBytesYUV*sizeof(uint8_t));
avpicture_fill((AVPicture *)pFrameYUV, bufferYUV, AV_PIX_FMT_YUV420P,
width, height);
// Initialise Software scaling context
struct SwsContext *sws_ctx = sws_getContext(width,
height,
AV_PIX_FMT_BGR24,
width,
height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Convert the image from its BGR to YUV
sws_scale(sws_ctx, (uint8_t const * const *)pFrameBGR->data,
pFrameYUV->linesize, 0, height,
pFrameYUV->data, pFrameYUV->linesize);
// Trying to see the different planes of YUV
Mat MY = Mat(height, width, CV_8UC1);
memcpy(MY.data,pFrameYUV->data[0], height*width);
imshow("Test1", MY); // fail
Mat MU = Mat(height/2, width/2, CV_8UC1);
memcpy(MU.data,pFrameYUV->data[1], height*width/4);
imshow("Test2", MU); // fail
Mat MV = Mat(height/2, width/2, CV_8UC1);
memcpy(MV.data,pFrameYUV->data[2], height*width/4);
imshow("Test3", MV); // fail
waitKey(0); // Wait for a keystroke in the window
The third parameter of sws_scale() should not be pFrameYUV->linesize but rather pFrameBGR->linesize, i.e. the source stride (width*3 for packed BGR24).
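Using the variables from the snippet above, the corrected call might look like this (a minimal sketch; it assumes the cv::Mat data is continuous, so that avpicture_fill set pFrameBGR->linesize[0] to width*3):

// Pass the *source* strides (pFrameBGR->linesize) as the third argument;
// the destination strides stay last, as before.
sws_scale(sws_ctx,
          (uint8_t const * const *)pFrameBGR->data, pFrameBGR->linesize,
          0, height,
          pFrameYUV->data, pFrameYUV->linesize);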
I am writing a program that, in brief, does the following:
read YUV 4:2:0 frame from .yuv file
convert frame from YUV to RGB
make some operations on RGB frame
display the RGB frame
repeat the previous steps until there is no frame left in the .yuv file
I have currently omitted the 3rd step and do not perform any operations on the RGB frames yet, because my program does not display the frames in the correct colors.
Here is my code which is using OpenCV 2.1:
What is wrong with my code?
I have also added pictures of the actual colors and the wrong colors of the first frame. The wrongly colored picture is the result of converting from YUV to BGR (CV_YCrCb2BGR); converting from YUV to RGB (CV_YCrCb2RGB) does not help either, unfortunately.
int main()
{
int iFrameWidth = 640;
int iFrameHeight = 480;
.
.
.
FILE *fYUV0 = fopen( "C:\\YUV_Videos\\flamenco2_0.yuv", "rb" );
char *cFileBuffer0 = new char[ iFrameWidth*iFrameHeight*3/2 ];
IplImage *iplY420Frame = cvCreateImageHeader( cvSize(iFrameWidth , iFrameHeight ), IPL_DEPTH_8U, 1 );
IplImage *iplU420Frame = cvCreateImageHeader( cvSize(iFrameWidth/2, iFrameHeight/2), IPL_DEPTH_8U, 1 );
IplImage *iplV420Frame = cvCreateImageHeader( cvSize(iFrameWidth/2, iFrameHeight/2), IPL_DEPTH_8U, 1 );
IplImage *iplY444Frame = cvCreateImage( cvSize(iFrameWidth, iFrameHeight), IPL_DEPTH_8U, 1 );
IplImage *iplU444Frame = cvCreateImage( cvSize(iFrameWidth, iFrameHeight), IPL_DEPTH_8U, 1 );
IplImage *iplV444Frame = cvCreateImage( cvSize(iFrameWidth, iFrameHeight), IPL_DEPTH_8U, 1 );
IplImage *iplYUV444Frame = cvCreateImage( cvSize(iFrameWidth, iFrameHeight), IPL_DEPTH_8U, 3 );
IplImage *iplRGBFrame0 = cvCreateImage( cvSize(iFrameWidth, iFrameHeight), IPL_DEPTH_8U, 3 );
.
.
.
while( fread(cFileBuffer0, 1, iFrameWidth*iFrameHeight*3/2, fYUV0) )
{
cvSetData( iplY420Frame, cFileBuffer0, iFrameWidth );
cvSetData( iplU420Frame, cFileBuffer0 + iFrameWidth*iFrameHeight, iFrameWidth/2 );
cvSetData( iplV420Frame, cFileBuffer0 + iFrameWidth*iFrameHeight*5/4, iFrameWidth/2 );
cvResize( iplY420Frame, iplY444Frame );
cvResize( iplU420Frame, iplU444Frame );
cvResize( iplV420Frame, iplY444Frame );
cvMerge( iplY444Frame, iplU444Frame, iplV444Frame, NULL, iplYUV444Frame );
cvCvtColor( iplYUV444Frame, iplRGBFrame0, CV_YCrCb2BGR );
.
.
.
cvNamedWindow( "View0" );
cvShowImage( "View0", iplRGBFrame0 );
cvWaitKey( 1000/25 );
}//end-of-while
cvDestroyWindow( "View0" );
return 0;
}//end-of-main
Actual colors of the first frame, acquired from a YUV Player:
http://i59.tinypic.com/2n7ee6h.jpg
Wrong colors of the first frame, output of my program:
http://i58.tinypic.com/29lzcqp.jpg
I have found it! Actually, it has been more than two weeks since I found it, but here it is: I was resizing the V component into the Y component.
...
cvResize( iplY420Frame, iplY444Frame );
cvResize( iplU420Frame, iplU444Frame );
cvResize( iplV420Frame, iplY444Frame ); // iplY444Frame must be corrected to iplV444Frame
...
Lessons learned:
Never name variables with only one letter; full words are always best.
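As an aside (not part of the original fix): newer OpenCV versions (2.4 and later) can convert a planar I420 buffer to BGR in a single call, which avoids the manual resize/merge steps entirely. A rough C++ sketch, with the function name and frame dimensions only illustrative:

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>

// Read one I420 frame from an open .yuv file and display it as BGR.
// Returns false when no full frame is left.
bool showNextI420Frame(FILE* fYUV, int w, int h)
{
    std::vector<uchar> buf(w * h * 3 / 2);      // Y plane + U plane + V plane
    if (fread(buf.data(), 1, buf.size(), fYUV) != buf.size())
        return false;

    // Wrap the raw buffer as a (h*3/2) x w single-channel Mat ...
    cv::Mat yuv(h * 3 / 2, w, CV_8UC1, buf.data());
    cv::Mat bgr;
    // ... and let OpenCV do the chroma upsampling and YUV->BGR conversion.
    cv::cvtColor(yuv, bgr, cv::COLOR_YUV2BGR_I420);

    cv::imshow("View0", bgr);
    cv::waitKey(1000 / 25);
    return true;
}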
I know the problem is related to the pointer used. Please help me locate the error.
IplImage* ExtractBlue(IplImage* in)
{
    int width  = in->width;
    int height = in->height;
    IplImage *out = cvCreateImage( cvSize( width, height ), IPL_DEPTH_8U, 3 );

    uchar *datain  = (uchar *)in->imageData;
    uchar *dataout = (uchar *)out->imageData;

    int i, j, k;
    for (i = 0; i < out->height; i++)
        for (j = 0; j < out->width; j++)
            for (k = 0; k < 1; k++)
            {
                dataout[i*out->widthStep + j*out->nChannels + k]       = datain[i*in->widthStep + j*in->nChannels + k];
                dataout[i*out->widthStep + j*out->nChannels + (k + 1)] = 0;
                dataout[i*out->widthStep + j*out->nChannels + (k + 2)] = 0;
            }
    return out;
}
"I know the problem is related to the pointer used" - yes. that's why you should avoid those horrible operations you're doing.
Please do away with all IplImages and use the C++ API!
If you want to retain the blue channel of an image and set everything else to 0, it is easy:
Mat in = imread(...);
Mat out = in.mul(Scalar(1,0,0));
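If the Scalar multiplication above does not compile or behave as expected on your OpenCV version, an equivalent and more explicit variant is to split the channels, zero the green and red planes, and merge them back. A minimal sketch, assuming a standard 8-bit BGR image from imread:

#include <opencv2/opencv.hpp>
#include <vector>

// Keep only the blue channel of an 8-bit BGR image, zeroing green and red.
cv::Mat extractBlue(const cv::Mat& in)
{
    std::vector<cv::Mat> ch;
    cv::split(in, ch);                               // ch[0]=B, ch[1]=G, ch[2]=R
    ch[1] = cv::Mat::zeros(ch[1].size(), ch[1].type());
    ch[2] = cv::Mat::zeros(ch[2].size(), ch[2].type());

    cv::Mat out;
    cv::merge(ch, out);
    return out;
}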
char* filename1="1.bmp";
IplImage* greyLeftImg= cvLoadImage(filename1,0);
char* filename2="2.bmp";
IplImage* greyRightImg= cvLoadImage(filename2,0);
IplImage* greyLeftImg32=cvCreateImage(cvSize(width,height),32,greyLeftImg->nChannels);//IPL_DEPTH_32F
IplImage* greyRightImg32=cvCreateImage(cvSize(width,height),32,greyRightImg->nChannels);
It always fails with "Assertion failed (src.size == dst.size && dst.type() == CV_8UC(src.channels())) in unknown function".
I have searched for many methods, but none of them seems to work.
A simple way to convert any 8-bit or 16-bit unsigned grayscale image in OpenCV to a 32-bit floating-point image is the following:
IplImage* img = cvLoadImage( "E:\\Work_DataBase\\earth.jpg",0);
IplImage* out = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, img->nChannels);
double min,max;
cvMinMaxLoc(img,&min,&max);
// Remember: to visualize a floating-point image with cvShowImage(), its
// values must lie in the range 0 to 1, which is why we scale by 1/max here.
cvCvtScale(img, out, 1.0/max, 0);
I hope this is an easy way to do it.
Here is a simple function to convert any IplImage to 32 bit float.
IplImage* convert_to_float32(IplImage* img)
{
IplImage* img32f = cvCreateImage(cvGetSize(img),IPL_DEPTH_32F,img->nChannels);
for(int i=0; i<img->height; i++)
{
for(int j=0; j<img->width; j++)
{
cvSet2D(img32f,i,j,cvGet2D(img,i,j));
}
}
return img32f;
}
An important consideration: floating-point images in OpenCV can only be visualized correctly when their pixel values lie between 0.0 and 1.0.
To visualize a floating-point image, you therefore have to scale its values into the 0.0 to 1.0 range.
Here is an example for how to do this:
IplImage* img8u = cvLoadImage(filename1,0);
IplImage* img32f = convert_to_float32(img8u);
cvShowImage("float image",img32f); //Image will not be shown correctly
cvWaitKey(0);
cvScale(img32f, img32f, 1.0/255.0);
cvShowImage("float image normalized",img32f); //Image will be shown correctly now
cvWaitKey(0);
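For completeness, the same conversion and normalization is a one-liner with the C++ API. A minimal sketch, assuming an 8-bit grayscale input and a placeholder file name (use 1.0/65535.0 instead for 16-bit images):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img8u = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder file name
    if (img8u.empty()) return -1;

    cv::Mat img32f;
    // convertTo changes the depth and applies a scale factor in one pass,
    // so the result is already in the 0.0 - 1.0 range that imshow expects.
    img8u.convertTo(img32f, CV_32F, 1.0 / 255.0);

    cv::imshow("float image normalized", img32f);
    cv::waitKey(0);
    return 0;
}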
Hey, I tried to compute the difference between the current frame and the previous one (code attached). The code runs, but I get errors and a gray window with no result. These are the errors I got on the command prompt:
Compiler did not align stack variables. Libavcodec has been miscompiled and may be very slow or crash. This is not a bug in libavcodec, but in the compiler. You may try recompiling using gcc >= 4.2. Do not report crashes to FFmpeg developers. OpenCV Error: Assertion failed (src1.size() == dst.size() && src1.type() == dst. type()) in unknown function, file ........\ocv\opencv\src\cxcore\cxarithm.cpp , line 1563.
Does someone have an idea? Please help, thank you!
int main()
{
int key = 0;
CvCapture* capture = cvCaptureFromAVI( "macroblock.mpg" );
IplImage* frame = cvQueryFrame( capture );
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
if ( !capture )
{
fprintf( stderr, "Cannot open AVI!\n" );
return 1;
}
int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
cvNamedWindow( "dest", CV_WINDOW_AUTOSIZE );
while( key != 'x' )
{
frame = cvQueryFrame( capture );
currframe = cvCloneImage( frame );// copy frame to current
frame = cvQueryFrame( capture );// grab frame
cvSub(frame,currframe,destframe);// subtraction between the last frame to cur
if(key==27 )break;
cvShowImage( "dest",destframe);
key = cvWaitKey( 1000 / fps );
}
cvDestroyWindow( "dest" );
cvReleaseCapture( &capture );
return 0;
}
The problem is here
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
What you are doing is reading frames from an MPEG, and those frames have 3 channels. When you then do the subtraction, the source frames are 3-channel while the destination destframe is only 1-channel, so the operand types do not match. This WILL cause problems. Try setting the number of channels to 3 and see if it works:
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
To be sure, check the number of channels of the queried frame and of the cloned frame. Since you are pushing the result into a 1-channel destination image, you would be corrupting data there even if no exception were thrown; here the mismatch is caught and reported:
OpenCV Error: Assertion failed (src1.size() == dst.size() && src1.type() == dst. type())
The assertion failure is a clear indicator of what I have explained.
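A rough C++ sketch of the same frame-differencing loop, using cv::absdiff so that the size and channel count of all operands always match automatically (the file name is taken from the question; error handling is kept minimal):

#include <opencv2/opencv.hpp>
#include <cstdio>

int main()
{
    cv::VideoCapture cap("macroblock.mpg");
    if (!cap.isOpened()) { fprintf(stderr, "Cannot open AVI!\n"); return 1; }

    cv::Mat prev, curr, diff;
    cap >> prev;                              // grab the first frame
    if (prev.empty()) return 1;

    for (;;)
    {
        cap >> curr;                          // grab the next frame
        if (curr.empty()) break;

        // absdiff keeps size and channel count consistent, and unlike cvSub
        // it does not clip negative differences to zero.
        cv::absdiff(curr, prev, diff);
        cv::imshow("dest", diff);
        prev = curr.clone();

        int key = cv::waitKey(40);            // roughly 25 fps
        if (key == 27 || key == 'x') break;
    }
    return 0;
}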