#include <opencv2/opencv.hpp>
using namespace cv;

Mat img = imread("/home/akash/Desktop/coding/IP/openCV/chessBoard.jpg", 1);
Mat gray;
int thresh = 200;

void corner_detect(int, void *)
{
    Mat dst = Mat::zeros(gray.size(), CV_32FC1);
    Mat dst_norm, dst_scale;
    cornerHarris(gray, dst, 2, 3, 0.04);
    normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat()); //????
    convertScaleAbs(dst_norm, dst_scale); //????
    namedWindow("dst_norm", CV_WINDOW_AUTOSIZE);
    imshow("dst_norm", dst_norm);
    for(int i = 0; i < dst_norm.rows; i++){
        for(int j = 0; j < dst_norm.cols; j++){
            if(dst_norm.at<float>(i, j) > thresh){
                circle(dst_scale, Point(j, i), 5, Scalar(0), 2);
            }
        }
    }
    imshow("window", dst_scale);
}

int main()
{
    namedWindow("window", CV_WINDOW_AUTOSIZE);
    namedWindow("input", CV_WINDOW_AUTOSIZE);
    cvtColor(img, gray, CV_BGR2GRAY);
    createTrackbar("threshold", "window", &thresh, 255, corner_detect);
    corner_detect(0, 0);
    imshow("input", img);
    waitKey(0);
    return 0;
}
I have taken this code from here; it is basically corner detection and drawing circles around the corners.
I want to ask (where "????" is marked in the code) about the working of normalize and convertScaleAbs. I have read the docs but I am still in doubt. I also printed out dst_norm, but it did not help me.
I understand that normalize is used to change the value range of an array, and that convertScaleAbs converts a CV_32FC1 image to CV_8UC1.
But I am unable to get any insight into what I actually obtained in dst_norm and dst_scale when I displayed them.
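For reference, here is a tiny experiment that I think illustrates those two steps (toy values of my own, not from the program above):
Mat m = (Mat_<float>(1, 3) << -0.002f, 0.0f, 0.004f);        // small Harris-like responses
Mat m_norm, m_scaled;
normalize(m, m_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());  // linearly rescales to floats 0, 85, 255
convertScaleAbs(m_norm, m_scaled);                           // rounds and saturates to CV_8UC1 values 0, 85, 255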
Any help would be appreciated.
(Screenshot for reference.)
I'm using the INRIA person dataset. I iterate over the images and everything is fine, and then I have this function:
vector<Mat> HOG_extract(Mat input_image, bool patch_size, int width, int height)
{
    Mat gray_image;
    cvtColor(input_image, gray_image, CV_BGR2GRAY);

    // block_size, block_stride, cell_size and bin_size are HOG parameters defined elsewhere in my code
    HOGDescriptor hog;
    hog.winSize = Size(width, height);
    hog.blockSize = Size(block_size, block_size);
    hog.blockStride = Size(block_stride, block_stride);
    hog.cellSize = Size(cell_size, cell_size);
    hog.nbins = bin_size;

    vector<float> hog_value;
    vector<Point> locations;
    hog.compute(gray_image, hog_value, Size(0, 0), Size(0, 0), locations);
}
When it gets to hog.compute I receive an exception and a libpng error: IDAT: invalid distance too far back.
How can I solve this? It looks like something happened when using imread and converting to gray.
There seems to be an issue with the dataset. I think it was edited with an error using an older version of libpng.
I managed to fix it with the help of the "png-fix-IDAT-windowsize" tool, which you can find here: http://www.libpng.org/pub/png/apps/pngcheck.html
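As a quick workaround in code while you repair the files, you can also skip any image that fails to decode before calling HOG_extract; a minimal sketch (file_path stands for whatever you iterate over in your loop):
Mat input_image = imread(file_path, 1);
if (input_image.empty())   // decoding failed (corrupt PNG, bad path, ...)
{
    std::cerr << "Skipping unreadable image: " << file_path << std::endl;
    continue;              // assumes this sits inside your image loop
}
// ...otherwise proceed with HOG_extract(input_image, ...) as before
Note that a PNG which decodes only partially may still slip through this check; the png-fix tool above is the real fix.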
I'm reading two images and want to get a third one which is just the combination of the two.
img_object and img_scene don't have the same size.
int main( int argc, char** argv )
{
    Mat combine;   // this is the image I need to fill
    Mat img_object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_scene  = imread( scene_filename , CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_object.data || !img_scene.data )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    namedWindow( "Display window object", 0 );   // Create a window for display.
    namedWindow( "Display window scene", 0 );
    namedWindow( "Display window combine", 0 );

    imshow( "Display window object", img_object );
    imshow( "Display window scene", img_scene );
    imshow( "Display window combine", combine );

    waitKey(0);
    return 0;
}
There is a very simple way of displaying two images side by side: the hconcat function provided by OpenCV (note that both inputs must have the same number of rows and the same type).
Mat image1, image2;
hconcat(image1,image2,image1);//Syntax-> hconcat(source1,source2,destination);
This function can also be used to append a set of columns from one image to another:
Mat image;
Mat columns=image.colRange(20,30);
hconcat(image,columns,image);
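Since the question says img_object and img_scene do not have the same size, hconcat alone will fail (it requires equal row counts). One possible sketch, assuming both are the 8-bit grayscale images loaded in the question, is to pad the shorter one with black rows first:
int H = std::max(img_object.rows, img_scene.rows);
Mat a, b;
// pad each image at the bottom so both end up with H rows (black border)
copyMakeBorder(img_object, a, 0, H - img_object.rows, 0, 0, BORDER_CONSTANT, Scalar(0));
copyMakeBorder(img_scene,  b, 0, H - img_scene.rows,  0, 0, BORDER_CONSTANT, Scalar(0));
Mat combine;
hconcat(a, b, combine);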
// --------------------------------------------------------------
// Function to draw several images to one image.
// Small images draws into cells of size cellSize.
// If image larger than size of cell it will be trimmed.
// If image smaller than cellSize there will be gap between cells.
// --------------------------------------------------------------
char showImages(string title, vector<Mat>& imgs, Size cellSize)
{
    char k = 0;
    namedWindow(title);
    float nImgs = imgs.size();
    int imgsInRow = ceil(sqrt(nImgs));       // You can set this explicitly
    int imgsInCol = ceil(nImgs/imgsInRow);   // You can set this explicitly
    int resultImgW = cellSize.width*imgsInRow;
    int resultImgH = cellSize.height*imgsInCol;
    Mat resultImg = Mat::zeros(resultImgH, resultImgW, CV_8UC3);   // images are assumed to be CV_8UC3
    int ind = 0;
    for(int i = 0; i < imgsInCol; i++)
    {
        for(int j = 0; j < imgsInRow; j++)
        {
            if(ind < (int)imgs.size())
            {
                int cell_row = i*cellSize.height;
                int cell_col = j*cellSize.width;
                // trim the image to the cell size, then copy it into its cell
                Mat tmp = imgs[ind](Range(0, std::min(imgs[ind].rows, cellSize.height)),
                                    Range(0, std::min(imgs[ind].cols, cellSize.width)));
                tmp.copyTo(resultImg(Range(cell_row, cell_row + tmp.rows),
                                     Range(cell_col, cell_col + tmp.cols)));
            }
            ind++;
        }
    }
    imshow(title, resultImg);
    k = waitKey(10);
    return k;
}
If the images are not the same size, combine's width will be equal to the sum of the widths, but the height must be the bigger of the heights of the two images.
Define the combination image like this:
Mat combine(max(img_object.size().height, img_scene.size().height), img_object.size().width + img_scene.size().width, CV_8UC3);
Note that we're just creating a new Mat object with height equal to the maximum height and width equal to the combined width of the pictures (if you need a small margin between the pictures, you need to account for that here). Two caveats: since the question loads both images as single-channel grayscale, the type should be CV_8UC1 rather than CV_8UC3 (copyTo needs the ROI and the source to have the same type), and this constructor does not zero the pixels, so you may want to create combine with Mat::zeros so the strip below the shorter image is not left with garbage.
Then, you can define regions of interest for each side inside combine (using a convenient Mat constructor), and finally copy each image to the corresponding side (here I assume the object goes on the left and the scene goes on the right):
Mat left_roi(combine, Rect(0, 0, img_object.size().width, img_object.size().height));
img_object.copyTo(left_roi);
Mat right_roi(combine, Rect(img_object.size().width, 0, img_scene.size().width, img_scene.size().height));
img_scene.copyTo(right_roi);
Edit: Fixed the typo that TimZaman pointed out.
You can do this with a loop, supposing that your images have the same size:
Mat combine = Mat::zeros(img_object.rows, img_object.cols * 2, img_object.type());
for (int i = 0; i < combine.cols; i++) {
    if (i < img_object.cols) {
        img_object.col(i).copyTo(combine.col(i));
    } else {
        img_scene.col(i - img_object.cols).copyTo(combine.col(i));
    }
}
I didn't test it, but that's the way you can do this.
I have tried to put multiple images side by side, just try this.
Mat combine = Mat::zeros(img_buff[0].rows,
                         img_buff[0].cols * (int)img_index.size(), img_buff[0].type());
int cols = img_buff[0].cols;
for (int i = 0; i < combine.cols; i++) {
    int frame_index = i / cols;    // which source image this column belongs to
    cout << frame_index << endl;   // debug output
    img_buff[frame_index].col(i % cols).copyTo(combine.col(i));
}
imshow("matching plot", combine);
Please pay attention: when you copy rows or columns from one image to another, do this:
A.row(j).copyTo(A.row(i));
Don't do this:
A.row(i) = A.row(j);
I want to ask how to detect humans or pedestrians on blobs (from findContours). I've tried to learn how to detect any object in the frame using findContours(), like this:
#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
int main(int argc, char *argv[])
{
cv::Mat frame;
cv::Mat fg;
cv::Mat blurred;
cv::Mat thresholded;
cv::Mat thresholded2;
cv::Mat result;
cv::Mat bgmodel;
cv::namedWindow("Frame");
cv::namedWindow("Background Model"
//,CV_WINDOW_NORMAL
);
//cv::resizeWindow("Background Model",400,300);
cv::namedWindow("Blob"
//,CV_WINDOW_NORMAL
);
//cv::resizeWindow("Blob",400,300);
cv::VideoCapture cap("campus3.avi");
cv::BackgroundSubtractorMOG2 bgs;
bgs.nmixtures = 3;
bgs.history = 1000;
bgs.varThresholdGen = 15;
bgs.bShadowDetection = true;
bgs.nShadowDetection = 0;
bgs.fTau = 0.5;
std::vector<std::vector<cv::Point>> contours;
for(;;)
{
cap >> frame;
if(frame.empty()) break; // stop when the video ends, instead of crashing on an empty frame
cv::GaussianBlur(frame,blurred,cv::Size(3,3),0,0,cv::BORDER_DEFAULT);
bgs.operator()(blurred,fg);
bgs.getBackgroundImage(bgmodel);
cv::threshold(fg,thresholded,70.0f,255,CV_THRESH_BINARY);
cv::threshold(fg,thresholded2,70.0f,255,CV_THRESH_BINARY);
cv::Mat elementCLOSE(5,5,CV_8U,cv::Scalar(1));
cv::morphologyEx(thresholded,thresholded,cv::MORPH_CLOSE,elementCLOSE);
cv::morphologyEx(thresholded2,thresholded2,cv::MORPH_CLOSE,elementCLOSE);
cv::findContours(thresholded,contours,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE);
cv::cvtColor(thresholded2,result,CV_GRAY2RGB);
int cmin = 50;
int cmax = 1000;
std::vector<std::vector<cv::Point>>::iterator itc=contours.begin();
while (itc!=contours.end()) {
if (itc->size() > cmin && itc->size() < cmax){
std::vector<cv::Point> pts = *itc;
cv::Mat pointsMatrix = cv::Mat(pts);
cv::Scalar color( 0, 255, 0 );
cv::Rect r0= cv::boundingRect(pointsMatrix);
cv::rectangle(frame,r0,color,2);
++itc;
}else{++itc;}
}
cv::imshow("Frame",frame);
cv::imshow("Background Model",bgmodel);
cv::imshow("Blob",result);
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
Now I want to know how to detect humans. Do I need to use HOG, or Haar? If I need to use them, how do I use them? Are there any tutorials for learning how to use them? I'm so curious, and it's so much fun learning OpenCV! So addictive! :))
Anyway, I'll appreciate any help here, thanks. :)
This is a good start, with lots of enthusiasm. There is more than one way to do human detection on images/image sequences. I summarize a few below:
Since you are already extracting blobs that are supposed to be persons or objects, you can compare the features of these blobs with those of blobs resulting from a human in the scene. Many people look at the shape of the head-shoulder region, the height and area of the blob, etc.
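For instance, a very rough version of that idea, dropped into the contour loop from the question, could look like this sketch (the aspect-ratio and area thresholds are purely illustrative and would have to be tuned; they are not values from this answer):
cv::Rect r0 = cv::boundingRect(pointsMatrix);
double aspect = (double)r0.height / (double)r0.width;   // standing people are taller than wide
if (aspect > 1.5 && aspect < 5.0 && r0.area() > 500)    // tune these for your camera and scene
{
    cv::rectangle(frame, r0, cv::Scalar(0, 255, 0), 2); // keep only person-like blobs
}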
You can also look at research papers like this one. The earlier papers are easier to understand and implement than the more recent ones.
Instead of using background subtraction, you can also use an approach like Haar Wavelet based detection. This is widely used for face detection, but opencv contains a model for upper body detection. You can also build your own models, as described here.
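Since you asked about HOG specifically: OpenCV also ships a pre-trained HOG pedestrian detector that you can drop into your frame loop without training anything yourself. A minimal sketch (the winStride, padding and scale values below are common defaults, not something specific to your video):
cv::HOGDescriptor hog;
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
std::vector<cv::Rect> people;
hog.detectMultiScale(frame, people, 0, cv::Size(8, 8), cv::Size(32, 32), 1.05, 2);
for (size_t k = 0; k < people.size(); k++)
    cv::rectangle(frame, people[k], cv::Scalar(0, 0, 255), 2);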
Have fun!
I have code in OpenCV (in C++) which uses "haarcascade_mcs_upperbody.xml" to detect the upper body.
It detects a single upper body. How can I make it detect multiple upper bodies?
I think CV_HAAR_FIND_BIGGEST_OBJECT is causing only the biggest object to be detected, but I don't know how to solve this issue.
The code goes like this:
int main(int argc, const char** argv)
{
CascadeClassifier body_cascade;
body_cascade.load("haarcascade_mcs_upperbody.xml");
VideoCapture captureDevice;
captureDevice.open(0);
Mat captureFrame;
Mat grayscaleFrame;
namedWindow("outputCapture", 1);
//create a loop to capture and find faces
while(true)
{
//capture a new image frame
captureDevice>>captureFrame;
//convert captured image to gray scale and equalize
cvtColor(captureFrame, grayscaleFrame, CV_BGR2GRAY);
equalizeHist(grayscaleFrame, grayscaleFrame);
//create a vector array to store the face found
std::vector<Rect> bodies;
//find faces and store them in the vector array
body_cascade.detectMultiScale(grayscaleFrame, faces, 1.1, 3,
CV_HAAR_FIND_BIGGEST_OBJECT|CV_HAAR_SCALE_IMAGE, Size(30,30));
//draw a rectangle for all found faces in the vector array on the original image
for(int i = 0; i < faces.size(); i++)
{
Point pt1(bodies[i].x + bodies[i].width, bodies[i].y + bodies[i].height);
Point pt2(bodies[i].x, bodies[i].y);
rectangle(captureFrame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
}
//print the output
imshow("outputCapture", captureFrame);
//pause for 33ms
waitKey(33);
}
return 0;
}
It seems there is some inconsistency in your code: detectMultiScale and the drawing loop use a vector named faces, which is never declared, while the vector you actually declare is bodies. I assume they are meant to be the same std::vector<Rect>.
detectMultiScale stores all detected objects in the faces vector. Are you sure it contains only one object?
Try removing the CV_HAAR_FIND_BIGGEST_OBJECT flag, because you want all objects to be detected, and not only the biggest one.
Also, make sure you set the minSize and maxSize parameters correctly (see documentation), since those parameters determine the minimal and maximal detectable object sizes.
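As a rough sketch of what the detection call could look like once the flag is removed and the vector names are made consistent (the minSize and maxSize values here are placeholders you would tune for your camera):
std::vector<Rect> bodies;
body_cascade.detectMultiScale(grayscaleFrame, bodies, 1.1, 3,
                              CV_HAAR_SCALE_IMAGE,   // no CV_HAAR_FIND_BIGGEST_OBJECT
                              Size(60, 60),          // smallest upper body you expect
                              Size(400, 400));       // largest upper body you expect
for (size_t i = 0; i < bodies.size(); i++)
    rectangle(captureFrame, bodies[i], Scalar(0, 255, 0), 1, 8, 0);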
Is there a quick solution to specify the ROI only within the contours of the blob I'm interested in?
My ideas so far:
Using the boundingRect, but it contains too much stuff I don't want to analyse.
Applying goodFeaturesToTrack to the whole image and then looping through the output coordinates to eliminate the ones outside my blob's contour.
Thanks in advance!
EDIT
I found what I need: cv::pointPolygonTest() seems to be the right thing, but I'm not sure how to implement it …
Here's some code:
// ...
IplImage forground_ipl = result;
IplImage *labelImg = cvCreateImage(forground.size(), IPL_DEPTH_LABEL, 1);
CvBlobs blobs;
bool found = cvb::cvLabel(&forground_ipl, labelImg, blobs);
IplImage *imgOut = cvCreateImage(cvGetSize(&forground_ipl), IPL_DEPTH_8U, 3);
if (found) {
    cvb::CvBlob *greaterBlob = blobs[cvb::cvGreaterBlob(blobs)];
    cvb::cvRenderBlob(labelImg, greaterBlob, &forground_ipl, imgOut);
    CvContourPolygon *polygon = cvb::cvConvertChainCodesToPolygon(&greaterBlob->contour);
}
"polygon" contains the contour I need.
goodFeaturesToTrack is implemented this way:
- (std::vector<cv::Point2f>)pointsFromGoodFeaturesToTrack:(cv::Mat &)_image
{
std::vector<cv::Point2f> corners;
cv::goodFeaturesToTrack(_image,corners, 100, 0.01, 10);
return corners;
}
So next I need to loop through the corners and check each point with cv::pointPolygonTest(), right?
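For what I have in mind, the filtering loop would look something like this sketch (just my own idea of it, assuming corners comes from the method above and polygon is the cvblob CvContourPolygon, i.e. a vector of CvPoint, which I convert to cv::Point first):
// convert the cvblob polygon to cv::Point so pointPolygonTest can use it
std::vector<cv::Point> contour;
for (size_t i = 0; i < polygon->size(); i++)
    contour.push_back(cv::Point((*polygon)[i].x, (*polygon)[i].y));

std::vector<cv::Point2f> inside;
for (size_t i = 0; i < corners.size(); i++)
{
    // measureDist = false: returns +1 inside, 0 on the edge, -1 outside
    if (cv::pointPolygonTest(contour, corners[i], false) >= 0)
        inside.push_back(corners[i]);
}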
You can create a mask over your region of interest:
EDIT
How to make a mask:
Mat mask(origImg.size(), CV_8UC1);
mask.setTo(Scalar::all(0));
// here I assume your contour is extracted with findContours,
// and is stored in a vector<vector<Point>>,
// and that you know which contour is the blob;
// if that's not the case, use fillPoly instead of drawContours()
Scalar color(255,255,255); // white; the mask is actually single-channel
drawContours(mask, contours, contourIdx, color, CV_FILLED); // CV_FILLED so the whole blob interior is masked in, not just its outline
// fillPoly(Mat& img, const Point** pts, const int* npts,
//          int ncontours, const Scalar& color)
And now you're ready to use it. BUT look carefully at the result - I have heard about some bugs in OpenCV regarding the mask parameter for feature extractors, and I am not sure whether this one is affected.
// note the mask parameter:
void goodFeaturesToTrack(InputArray image, OutputArray corners, int maxCorners,
double qualityLevel, double minDistance,
InputArray mask=noArray(), int blockSize=3,
bool useHarrisDetector=false, double k=0.04 )
This will also improve the speed of your application - goodFeaturesToTrack eats a huge amount of time, and if you apply it only to a smaller region, the overall gain is significant.
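Putting the pieces together, a minimal usage sketch (assuming origImg is the single-channel image you already feed to goodFeaturesToTrack, and contours / contourIdx come from your own blob code):
cv::Mat mask = cv::Mat::zeros(origImg.size(), CV_8UC1);
cv::drawContours(mask, contours, contourIdx, cv::Scalar(255), CV_FILLED); // blob interior -> 255
std::vector<cv::Point2f> corners;
cv::goodFeaturesToTrack(origImg, corners, 100, 0.01, 10, mask);           // corners only inside the blob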