Why does getConstPixels() from Magick::Image return nullptr? - imagemagick

I use the following code to convert Magick::Image to QImage:
QImage
convert( const Magick::Image & img )
{
    QImage qimg( static_cast< int > ( img.columns() ),
                 static_cast< int > ( img.rows() ), QImage::Format_RGB888 );
    const Magick::PixelPacket * pixels;
    Magick::ColorRGB rgb;

    for( int y = 0; y < qimg.height(); ++y )
    {
        pixels = img.getConstPixels( 0, y, static_cast< std::size_t > ( qimg.width() ), 1 );

        for( int x = 0; x < qimg.width(); ++x )
        {
            rgb = ( *( pixels + x ) );
            qimg.setPixel( x, y, QColor( static_cast< int > ( 255 * rgb.red() ),
                                         static_cast< int > ( 255 * rgb.green() ),
                                         static_cast< int > ( 255 * rgb.blue() ) ).rgb() );
        }
    }

    return qimg;
}
This code works, but with one test image at 8000x6000 resolution I get nullptr on the very first row of the image: img.getConstPixels( 0, 0, 8000, 1 ) just returns nullptr. How is that possible? Am I doing something wrong here? Thanks.
ImageMagick version is 6.9.11.60 on Kubuntu 22.04.
I actually open a GIF with Magick::readImages( ... ), then call Magick::coalesceImages(), and then convert each frame of the GIF into a QImage. I downloaded a test GIF that breaks ImageMagick, and this GIF is here.
Reproducible example:
#include <Magick++.h>
#include <QImage>
#include <QDebug>

int main()
{
    Magick::InitializeMagick( nullptr );

    std::vector< Magick::Image > imgs;
    Magick::readImages( &imgs, "sample-gif-file-for-Testing.gif" );

    std::vector< Magick::Image > cimgs;
    Magick::coalesceImages( &cimgs, imgs.begin(), imgs.end() );

    int i = 0;

    for( auto it = cimgs.cbegin(), last = cimgs.cend(); it != last; ++it )
    {
        qDebug() << QString( "frame #%1" ).arg( i );

        QImage qimg( static_cast< int > ( it->columns() ),
                     static_cast< int > ( it->rows() ), QImage::Format_RGB888 );
        const Magick::PixelPacket * pixels;
        Magick::ColorRGB rgb;

        for( int y = 0; y < qimg.height(); ++y )
        {
            pixels = it->getConstPixels( 0, y, static_cast< std::size_t > ( qimg.width() ), 1 );
            qDebug() << QString( "pixels from line %1" ).arg( y ) << pixels;

            for( int x = 0; x < qimg.width(); ++x )
            {
                rgb = ( *( pixels + x ) );
                qimg.setPixel( x, y, QColor( static_cast< int > ( 255 * rgb.red() ),
                                             static_cast< int > ( 255 * rgb.green() ),
                                             static_cast< int > ( 255 * rgb.blue() ) ).rgb() );
            }
        }

        ++i;
    }

    return 0;
}

The problem is that if the GIF has only one frame, then
Magick::coalesceImages( &cimgs, imgs.begin(), imgs.end() );
can break that frame, so call coalesceImages() only when imgs.size() > 1.
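A minimal sketch of the guarded call, reusing the names from the reproducible example above:

std::vector< Magick::Image > imgs;
Magick::readImages( &imgs, "sample-gif-file-for-Testing.gif" );

std::vector< Magick::Image > cimgs;

// Only coalesce multi-frame GIFs; coalescing a single frame can corrupt it.
if( imgs.size() > 1 )
    Magick::coalesceImages( &cimgs, imgs.begin(), imgs.end() );
else
    cimgs = imgs;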

Related

OpenCV draw rectangle from webcam with 2 largest objects

I need to draw rectangles around the 2 largest objects from the webcam. I already managed to draw the contours of the 2 largest objects, but now I am confused about how to draw the 2 largest rectangles.
Can someone show me the code, please?
//find and draw contours
void showconvex( Mat &thresh, Mat &frame )
{
    int largestIndex = 0;
    int largestContour = 0;
    int secondLargestIndex = 0;
    int secondLargestContour = 0;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    //find contours
    findContours(thresh, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

    /// Find the convex hull object for each contour
    vector<vector<Point> > hull(contours.size());
    vector<vector<int> > inthull(contours.size());
    vector<vector<Vec4i> > defects(contours.size());

    for (int i = 0; i < contours.size(); i++)
    {
        convexHull(Mat(contours[i]), hull[i], false);
        convexHull(Mat(contours[i]), inthull[i], false);
        if (inthull[i].size() > 3)
            convexityDefects(contours[i], inthull[i], defects[i]);
    }

    //find the 2 largest contours
    for (int i = 0; i < contours.size(); i++)
    {
        if (contours[i].size() > largestContour)
        {
            secondLargestContour = largestContour;
            secondLargestIndex = largestIndex;
            largestContour = contours[i].size();
            largestIndex = i;
        }
        else if (contours[i].size() > secondLargestContour)
        {
            secondLargestContour = contours[i].size();
            secondLargestIndex = i;
        }
    }

    //show the 2 biggest contours and their hulls
    if (contours.size() > 0)
    {
        //check the contourArea function if an error occurs
        //draw the 2 largest contours using the previously stored indices
        drawContours(frame, contours, largestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
        drawContours(frame, contours, secondLargestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
    }
}
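For reference, once the two indices are stored, the rectangles the question asks about can be drawn directly with boundingRect; a minimal sketch using the same variables as the function above, placed where the contours are drawn:

//sketch: rectangles for the two stored indices, same variables as above
if (contours.size() > 0)
{
    rectangle(frame, boundingRect(contours[largestIndex]), CV_RGB(0,255,0), 2);
    if (contours.size() > 1)
        rectangle(frame, boundingRect(contours[secondLargestIndex]), CV_RGB(0,255,0), 2);
}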
Take a look at the code below, which is based on sorting the contours by their bounding boxes or by their areas.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
struct contour_sorter_dsc // sorts contours by their bounding boxes descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
Rect ra( boundingRect(a) );
Rect rb( boundingRect(b) );
return ( ( rb.width * rb.height ) < ( ra.width * ra.height ) );
}
};
struct contour_sorter_dsc_area // sorts contours by their areas descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
double area_a = contourArea( a );
double area_b = contourArea( b );
return ( area_b < area_a );
}
};
int main( int argc, char** argv )
{
Mat src = imread( argv[1] );
if( src.empty() )
{
return -1;
}
Mat canvas1 = src.clone();
Mat canvas2 = src.clone();
Mat gray;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray > 127; // binarize image
vector<vector<Point> > contours;
findContours( gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE );
sort(contours.begin(), contours.end(), contour_sorter_dsc());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) & contours[0][1] == Point( 1, gray.rows -2 )
& contours[0][2] == Point( gray.cols - 2, gray.rows -2 ) & contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas1, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas1, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by bounding boxes ", canvas1 );
sort(contours.begin(), contours.end(), contour_sorter_dsc_area());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) & contours[0][1] == Point( 1, gray.rows -2 )
& contours[0][2] == Point( gray.cols - 2, gray.rows -2 ) & contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas2, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas2, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by areas ", canvas2 );
waitKey();
return 0;
}
Input image
Result images, according to sort type

Particle Filter Model for Computer Vision Tracking

I see a lot of posts about particle filters for such purposes, but none of them talk about the steps. Most tutorials online are for kinematic models involving R, Theta movements.
I want to use a particle filter to track a simple yellow blob. It is noisy because it's underwater, and it may at times be occluded. How would I implement a model for this, and what might the "move" function of the object be?
You can use optical flow in order to detect the direction of movement.
This is how I do it:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <math.h>

static const double pi = 3.14159265358979323846;

inline static double square(int a)
{
    return a * a;
}

inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL ) return;

    *img = cvCreateImage( size, depth, channels );
    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
        exit(-1);
    }
}

int main(void)
{
    CvCapture *input_video = cvCaptureFromCAM(0);
    if (input_video == NULL)
    {
        fprintf(stderr, "Error: Can't open video.\n");
        return -1;
    }

    cvQueryFrame( input_video );

    CvSize frame_size;
    frame_size.height = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width  = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

    long number_of_frames;
    cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
    number_of_frames = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
    cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    long current_frame = 0;
    while (true)
    {
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL,
                        *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;

        cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

        // first frame of the pair
        frame = cvQueryFrame( input_video );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);

        // second frame of the pair
        frame = cvQueryFrame( input_video );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);

        // pick up to 400 good features to track in the first frame
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );
        CvPoint2D32f frame1_features[400];
        int number_of_features = 400;
        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features,
                              &number_of_features, .01, .01, NULL);

        // pyramidal Lucas-Kanade optical flow between the two frames
        CvPoint2D32f frame2_features[400];
        char optical_flow_found_feature[400];
        float optical_flow_feature_error[400];
        CvSize optical_flow_window = cvSize(3, 3);
        CvTermCriteria optical_flow_termination_criteria
            = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features,
                               frame2_features, number_of_features, optical_flow_window, 5,
                               optical_flow_found_feature, optical_flow_feature_error,
                               optical_flow_termination_criteria, 0 );

        // draw an arrow for each tracked feature
        for (int i = 0; i < number_of_features; i++)
        {
            if ( optical_flow_found_feature[i] == 0 ) continue;

            int line_thickness = 1;
            CvScalar line_color = CV_RGB(255, 0, 0);

            CvPoint p, q;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;

            double angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
            double hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

            // lengthen the arrow by a factor of three
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );

            // draw the two tips of the arrow
            p.x = (int) (q.x + 9 * cos(angle + pi / 4));
            p.y = (int) (q.y + 9 * sin(angle + pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 9 * cos(angle - pi / 4));
            p.y = (int) (q.y + 9 * sin(angle - pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        }

        cvShowImage("Optical Flow", frame1);

        int key_pressed = cvWaitKey(0);
        if (key_pressed == 'b' || key_pressed == 'B') current_frame--;
        else current_frame++;
        if (current_frame < 0) current_frame = 0;
        if (current_frame >= number_of_frames - 1) current_frame = number_of_frames - 2;
    }
}
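To feed this into a particle filter, the flow direction can seed the prediction step. For the "move" function the question asks about, one common choice is a near-constant-velocity model with Gaussian process noise; below is a minimal sketch (the state layout and noise scales are assumptions to tune for the footage, not taken from any particular library). Occlusions are tolerated because the noise lets particles drift until the measurement reappears.

#include <random>

// Hypothetical particle state for blob tracking: position plus velocity.
struct Particle { double x, y, vx, vy, weight; };

// Prediction ("move") step: near-constant velocity plus Gaussian noise.
// The standard deviations are assumptions; tune them for the video.
void move( Particle & p, std::mt19937 & rng )
{
    std::normal_distribution<double> posNoise( 0.0, 2.0 ); // pixels
    std::normal_distribution<double> velNoise( 0.0, 0.5 ); // pixels per frame
    p.x  += p.vx + posNoise( rng );
    p.y  += p.vy + posNoise( rng );
    p.vx += velNoise( rng );
    p.vy += velNoise( rng );
}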

Understanding V&J (Haar.cpp) sliding window

I'm going over the code of haar.cpp to understand the sliding window approach. Here is the code:
for( factor = 1; ; factor *= scaleFactor )
{
    CvSize winSize = { cvRound(winSize0.width*factor),
                       cvRound(winSize0.height*factor) };
    CvSize sz = { cvRound( img->cols/factor ), cvRound( img->rows/factor ) };
    CvSize sz1 = { sz.width - winSize0.width + 1, sz.height - winSize0.height + 1 };
    CvRect equRect = { icv_object_win_border, icv_object_win_border,
                       winSize0.width - icv_object_win_border*2,
                       winSize0.height - icv_object_win_border*2 };
    CvMat img1, sum1, sqsum1, norm1, tilted1, mask1;
    CvMat* _tilted = 0;

    if( sz1.width <= 0 || sz1.height <= 0 )
        break;
    if( winSize.width > maxSize.width || winSize.height > maxSize.height )
        break;
    if( winSize.width < minSize.width || winSize.height < minSize.height )
        continue;

    img1 = cvMat( sz.height, sz.width, CV_8UC1, imgSmall->data.ptr );
    sum1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, sum->data.ptr );
    sqsum1 = cvMat( sz.height+1, sz.width+1, CV_64FC1, sqsum->data.ptr );
    if( tilted )
    {
        tilted1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, tilted->data.ptr );
        _tilted = &tilted1;
    }
    norm1 = cvMat( sz1.height, sz1.width, CV_32FC1, normImg ? normImg->data.ptr : 0 );
    mask1 = cvMat( sz1.height, sz1.width, CV_8UC1, temp->data.ptr );

    cvResize( img, &img1, CV_INTER_LINEAR );
    cvIntegral( &img1, &sum1, &sqsum1, _tilted );

    int ystep = factor > 2 ? 1 : 2;
    const int LOCS_PER_THREAD = 1000;
    int stripCount = ((sz1.width/ystep)*(sz1.height + ystep-1)/ystep + LOCS_PER_THREAD/2)/LOCS_PER_THREAD;
    stripCount = std::min(std::max(stripCount, 1), 100);

#ifdef HAVE_IPP
    if( use_ipp )
    {
        cv::Mat fsum(sum1.rows, sum1.cols, CV_32F, sum1.data.ptr, sum1.step);
        cv::Mat(&sum1).convertTo(fsum, CV_32F, 1, -(1<<24));
    }
    else
#endif
        cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );

    cv::Mat _norm1(&norm1), _mask1(&mask1);
    cv::parallel_for_(cv::Range(0, stripCount),
        cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
            (((sz1.height + stripCount - 1)/stripCount + ystep-1)/ystep)*ystep,
            factor, cv::Mat(&sum1), cv::Mat(&sqsum1), &_norm1, &_mask1,
            cv::Rect(equRect), allCandidates, rejectLevels, levelWeights, outputRejectLevels, &mtx));
}
Now, I want to make sure I got everything right. As I understand it, we loop over the scales, and at each scale we subsample the image and try to find objects at a fixed size (20x20 for faces), going over all the x and y locations.
The pseudocode is:
for scale = 1 : ScaleMax
    for X = 1 : width
        for Y = 1 : height
            Try to detect a face at position (X, Y) and of a fixed size of 20x20.
Is that precise, or did I get something wrong?
Thanks,
Gil.
While your understanding is accurate, it is not precise.
For better precision, you should read the original paper from Viola and Jones, since all the magic is in the step "Try to detect a face at position (X, Y) and of a fixed size of 20x20".
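To give a rough idea of what that step hides, here is a hypothetical sketch of the attentional cascade it runs (the types are illustrative, not OpenCV's actual internals): each stage sums weak Haar-feature classifiers and rejects the window early if the sum falls below the stage threshold, which is why scanning every (X, Y) stays cheap.

#include <vector>

// Illustrative types only; OpenCV's real cascade structures differ.
struct WeakClassifier
{
    // Evaluates one Haar-like feature on the integral image at (x, y);
    // stubbed here because the feature layout is out of scope.
    double evaluate( int x, int y ) const { return 0.0; }
};

struct Stage
{
    std::vector<WeakClassifier> classifiers;
    double threshold;
};

// The "try to detect a face at (X, Y)" step as an attentional cascade:
// every stage must pass; most windows are rejected by the first stages.
bool detectAt( const std::vector<Stage> & stages, int x, int y )
{
    for( const Stage & s : stages )
    {
        double score = 0.0;
        for( const WeakClassifier & c : s.classifiers )
            score += c.evaluate( x, y );
        if( score < s.threshold )
            return false; // early rejection keeps the full scan cheap
    }
    return true; // survived all stages: report a candidate at (x, y)
}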

How to increase haar detector's window size in OpenCV

I am using the code available on this website: http://nashruddin.com/OpenCV_Face_Detection to do face detection.
I would like to increase the size of the detected face region, but I am not sure how to do it. I need some help with it.
The code I am using is this:
//
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

CvHaarClassifierCascade *cascade;
CvMemStorage *storage;

void detectFaces( IplImage *img );

int main( int argc, char** argv )
{
    CvCapture *capture;
    IplImage *frame;
    int key = 0; // initialized so the first loop test is well-defined
    char *filename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";

    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
    storage = cvCreateMemStorage( 0 );
    capture = cvCaptureFromCAM( 0 );

    assert( cascade && storage && capture );

    cvNamedWindow( "video", 1 );

    while( key != 'q' ) {
        frame = cvQueryFrame( capture );

        if( !frame ) {
            fprintf( stderr, "Cannot query frame!\n" );
            break;
        }

        cvFlip( frame, frame, -1 );
        frame->origin = 0;

        detectFaces( frame );

        key = cvWaitKey( 10 );
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );

    return 0;
}

void detectFaces( IplImage *img )
{
    int i;

    CvSeq *faces = cvHaarDetectObjects(
        img,
        cascade,
        storage,
        1.1,
        3,
        0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
        cvSize( 40, 40 ) );

    for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
        CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
        cvRectangle( img,
                     cvPoint( r->x, r->y ),
                     cvPoint( r->x + r->width, r->y + r->height ),
                     CV_RGB( 255, 0, 0 ), 1, 8, 0 );
    }

    cvShowImage( "video", img );
}
This increases the size of the rectangle drawn around the face. If you meant increasing the Haar detector's window size, please update your question.
int padding_width = 30;  // pixels
int padding_height = 30; // pixels

for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
    CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );

    // Yes yes, all of this could be written much more compactly.
    // It was written like this for clarity.

    int topleft_x = r->x - (padding_width / 2);
    int topleft_y = r->y - (padding_height / 2);
    if (topleft_x < 0)
        topleft_x = 0;
    if (topleft_y < 0)
        topleft_y = 0;

    int bottomright_x = r->x + r->width + (padding_width / 2);
    int bottomright_y = r->y + r->height + (padding_height / 2);
    if (bottomright_x >= img->width)
        bottomright_x = img->width - 1;
    if (bottomright_y >= img->height)
        bottomright_y = img->height - 1;

    cvRectangle( img,
                 cvPoint(topleft_x, topleft_y),
                 cvPoint(bottomright_x, bottomright_y),
                 CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}

FaceDetect OpenCV2.1 sample, Access violation writing location 0x00000000

I am trying to run the facedetect OpenCV sample with VS2010.
The debug result is: "Unhandled exception at 0x53fa42bf in facedetect.exe: 0xC0000005: Access violation writing location 0x00000000."
Here is the code:
txt file: http://ebooks-libs.com/backup/facedetect-opencv2.1.txt
cpp file: http://ebooks-libs.com/backup/facedetect.cpp
#include "stdafx.h"
#include <iostream>
#include <cstdio>
#define CV_NO_BACKWARD_COMPATIBILITY
#include "cv.h"
#include "highgui.h"
#ifdef _EiC
#define WIN32
#endif
using namespace std;
using namespace cv;
void detectAndDraw( Mat& img,
CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
double scale);
String cascadeName ="./data/haarcascades/haarcascade_frontalface_alt.xml";
String nestedCascadeName ="./data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
int main( int argc, const char** argv )
{
CvCapture* capture = 0;
Mat frame, frameCopy, image;
const String scaleOpt = "--scale=";
size_t scaleOptLen = scaleOpt.length();
const String cascadeOpt = "--cascade=";
size_t cascadeOptLen = cascadeOpt.length();
const String nestedCascadeOpt = "--nested-cascade";
size_t nestedCascadeOptLen = nestedCascadeOpt.length();
String inputName;
CascadeClassifier cascade, nestedCascade;
double scale = 1;
for( int i = 1; i < argc; i++ )
{
if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
cascadeName.assign( argv[i] + cascadeOptLen );
else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
{
if( argv[i][nestedCascadeOpt.length()] == '=' )
nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
if( !nestedCascade.load( nestedCascadeName ) )
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
}
else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
{
if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
scale = 1;
}
else if( argv[i][0] == '-' )
{
cerr << "WARNING: Unknown option %s" << argv[i] << endl;
}
else
inputName.assign( argv[i] );
}
if( !cascade.load( cascadeName ) )
{
cerr << "ERROR: Could not load classifier cascade" << endl;
cerr << "Usage: facedetect [--cascade=\"<cascade_path>\"]\n"
" [--nested-cascade[=\"nested_cascade_path\"]]\n"
" [--scale[=<image scale>\n"
" [filename|camera_index]\n" ;
return -1;
}
if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
else if( inputName.size() )
{
image = imread( inputName, 1 );
if( image.empty() )
capture = cvCaptureFromAVI( inputName.c_str() );
}
else
image = imread( "lena.jpg", 1 );
cvNamedWindow( "result", 1 );
if( capture )
{
for(;;)
{
IplImage* iplImg = cvQueryFrame( capture );
frame = iplImg;
if( frame.empty() )
break;
if( iplImg->origin == IPL_ORIGIN_TL )
frame.copyTo( frameCopy );
else
flip( frame, frameCopy, 0 );
detectAndDraw( frameCopy, cascade, nestedCascade, scale );
if( waitKey( 10 ) >= 0 )
goto _cleanup_;
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
}
else
{
if( !image.empty() )
{
detectAndDraw( image, cascade, nestedCascade, scale );
waitKey(0);
}
else if( !inputName.empty() )
{
/* assume it is a text file containing the
list of the image filenames to be processed - one per line */
FILE* f = fopen( inputName.c_str(), "rt" );
if( f )
{
char buf[1000+1];
while( fgets( buf, 1000, f ) )
{
int len = (int)strlen(buf), c;
while( len > 0 && isspace(buf[len-1]) )
len--;
buf[len] = '\0';
cout << "file " << buf << endl;
image = imread( buf, 1 );
if( !image.empty() )
{
detectAndDraw( image, cascade, nestedCascade, scale );
c = waitKey(0);
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
}
fclose(f);
}
}
}
cvDestroyWindow("result");
return 0;
}
void detectAndDraw( Mat& img,
CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
double scale)
{
int i = 0;
double t = 0;
vector<Rect> faces;
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)} ;
Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
cvtColor( img, gray, CV_BGR2GRAY );
resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
equalizeHist( smallImg, smallImg );
t = (double)cvGetTickCount();
cascade.detectMultiScale( smallImg, faces,1.1, 2, 0 |CV_HAAR_SCALE_IMAGE,Size(30, 30) );
//|CV_HAAR_FIND_BIGGEST_OBJECT
//|CV_HAAR_DO_ROUGH_SEARCH
t = (double)cvGetTickCount() - t;
printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
{
Mat smallImgROI;
vector<Rect> nestedObjects;
Point center;
Scalar color = colors[i%8];
int radius;
center.x = cvRound((r->x + r->width*0.5)*scale);
center.y = cvRound((r->y + r->height*0.5)*scale);
radius = cvRound((r->width + r->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
if( nestedCascade.empty() )
continue;
smallImgROI = smallImg(*r);
nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
1.1, 2, 0
//|CV_HAAR_FIND_BIGGEST_OBJECT
//|CV_HAAR_DO_ROUGH_SEARCH
//|CV_HAAR_DO_CANNY_PRUNING
|CV_HAAR_SCALE_IMAGE
,
Size(30, 30) );
for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
{
center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
radius = cvRound((nr->width + nr->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
}
cv::imshow( "result", img );
}
I need some help with how to resolve this...
The access violation error probably means you are dereferencing a pointer whose value you haven't set.
If you don't know how to use the debugger (learn!), put printf("ok 1, 2, 3, ...\n"); calls liberally through the code and work out how far it gets, so you can narrow down where the error happens.
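For instance, in the samples above a missing cascade file leaves the pointer returned by cvLoad() NULL, and everything that touches it afterwards crashes. A minimal guard along those lines (a sketch, not a guaranteed diagnosis):

cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
if( cascade == NULL )
{
    fprintf( stderr, "Failed to load cascade file: %s\n", filename );
    return -1;
}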
I got this error too with OpenCV 2.3 on VS2010, even after adjusting the code so that cascadeName uses the full path to haarcascade_frontalface_alt.xml.
I was able to pinpoint the line that crashes the application:
if( !cascade.load( cascadeName ) )
The crash comes from inside OpenCV's code, and I don't know why it happens.
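One way to at least rule out a bad path before the call reaches OpenCV is to check that the file is readable first; a sketch (this separates a missing-file problem from a crash inside the library itself, but it is not a fix for the latter):

#include <fstream>

// Sketch: verify the cascade file is readable before handing it to OpenCV.
std::ifstream test( cascadeName.c_str() );
if( !test.is_open() )
{
    cerr << "Cascade file not found or unreadable: " << cascadeName << endl;
    return -1;
}
test.close();
// If cascade.load( cascadeName ) still crashes after this check, the problem
// is inside the library (e.g. a debug/release runtime mismatch), not the path.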
