object detection of various shapes in opencv - opencv

I have an image and want to detect various objects at a time using opencv methods.
I have tried detecting one object using contouring, using the area to filter out the other contours. But I need to detect the other objects too, and they vary in area and length.
Can anyone suggest methods for detecting them?
This is the original image:
this is the code that I have tried for detection:
int main()
{
    // Load the image twice: in colour for drawing results, and as
    // greyscale for thresholding and contour analysis.
    Mat msrc = imread("Task6_Resources/Scratch.jpg", 1);
    Mat src = imread("Task6_Resources/Scratch.jpg", 0);
    if (msrc.empty() || src.empty()) {
        cout << "Could not read Task6_Resources/Scratch.jpg" << endl;
        return 1;
    }

    // Smooth out sensor noise before contrast enhancement.
    Mat imgblur;
    GaussianBlur(src, imgblur, Size(13, 13), 0);

    // CLAHE boosts local contrast so faint features survive thresholding.
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
    clahe->setClipLimit(8);
    cv::Mat gcimg;
    clahe->apply(imgblur, gcimg);

    // Inverted binary threshold: dark features become white blobs.
    Mat thresh;
    threshold(gcimg, thresh, 55, 255, THRESH_BINARY_INV);

    // Remove blobs that are too small or too large to be of interest.
    Mat th_mina = minareafilter(thresh, 195); // drops blobs smaller than 195
    Mat th_maxa = maxareafilter(th_mina, 393); // drops blobs larger than 393

    // Horizontal dilation (3x1 kernel, 7 iterations) joins fragments of
    // the same elongated feature into one connected component.
    Mat imdilate;
    dilate(th_maxa, imdilate, getStructuringElement(MORPH_RECT, Size(3, 1)), Point(-1, -1), 7);

    // contourArea returns double: keep the running maximum as double too,
    // otherwise areas were silently truncated to int before comparison.
    double largest_area = 0.0;
    int largest_contour_index = -1; // -1 means "no contour found yet"
    Rect bounding_rect;
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(imdilate, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
    cout << "Number of contours" << contours.size() << endl;

    // Find the largest contour by area and remember its bounding box.
    for (size_t i = 0; i < contours.size(); i++) {
        double a = contourArea(contours[i], false);
        if (a > largest_area) {
            largest_area = a;
            largest_contour_index = static_cast<int>(i);
            bounding_rect = boundingRect(contours[i]);
        }
    }

    // Report area and perimeter of every contour found.
    for (size_t c = 0; c < contours.size(); c++) {
        printf(" * Contour[%zu] Area OpenCV: %.2f - Length: %.2f \n",
               c, contourArea(contours[c]), arcLength(contours[c], true));
    }

    // Only draw the box if at least one contour was found.
    if (largest_contour_index >= 0)
        rectangle(msrc, bounding_rect, Scalar(0, 255, 0), 2, 8, 0);
    imshow("largest contour", msrc);
    waitKey(0);
    destroyAllWindows();
    return 0;
}
This is the image on which I am applying contouring
After the code I am able to detect the green box using largest area in contouring, but I need to detect those red boxes too. (only the region of red boxes)
The problem is here I cannot apply again area parameter to filter the contours as some other contours have same area as the resultant contour.
The image result required:

Related

find trapezoid's 4 points for wraping (rectangle )

I need to find the trapezoid's 4 corner points. I tried to use "ret", but it only gives me 2 points, and it seems only 1 of them is correct.
find biggest rectangle result
int main(int argc, char** argv)
{
Mat src = imread("IMG_20160708_1338252.jpg");
imshow("source", src);
int largest_area = 0;
int largest_contour_index = 0;
Rect bounding_rect;
Mat thr;
cvtColor(src, thr, COLOR_BGR2GRAY); //Convert to gray
threshold(thr, thr, 125, 255, THRESH_BINARY); //Threshold the gray
vector<vector<Point> > contours; // Vector for storing contours
findContours(thr, contours, RETR_CCOMP, CHAIN_APPROX_SIMPLE); // Find the contours in the image
for (size_t i = 0; i < contours.size(); i++) // iterate through each contour.
{
double area = contourArea(contours[i]); // Find the area of contour
//src면적이랑 area가 같으면 제외
//if (src.size().area != area) {
if (area > largest_area)
{
largest_area = area;
largest_contour_index = i; //Store the index of largest contour
bounding_rect = boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
}
//}
}
printf("top%d,%d\n", bounding_rect.tl().x, bounding_rect.tl().y);
printf("bottom%d,%d\n\n", bounding_rect.br().x, bounding_rect.br().y);
printf("top%d,%d\n", bounding_rect.x, bounding_rect.y);
printf("%d,%d\n", bounding_rect.x, bounding_rect.y+ bounding_rect.height);
printf("bottom%d,%d\n", bounding_rect.x + bounding_rect.width, bounding_rect.y);
printf("%d,%d\n\n", bounding_rect.x+ bounding_rect.width, bounding_rect.y+ bounding_rect.height);
//printf("bottom%d,%d\n", bounding_rect.br().x, bounding_rect.br().y);
//contours[largest_contour_index];//가장큰 사각형
//contours[largest_contour_index][0].x; contours[largest_contour_index][0].y;
printf("1-%d,", contours[largest_contour_index]);
printf("1-%d\n", contours[largest_contour_index][0].y);
printf("2-%d,", contours[largest_contour_index][1].x);
printf("2-%d\n", contours[largest_contour_index][1].y);
printf("3-%d,", contours[largest_contour_index][2].x);
printf("3-%d\n", contours[largest_contour_index][2].y);
printf("4-%d,", contours[largest_contour_index][3].x);
printf("4-%d\n", contours[largest_contour_index][3].y);
//printf("1%d,", contours[0][largest_contour_index].x);
drawContours(src, contours, largest_contour_index, Scalar(0, 255, 0), 2); // Draw the largest contour using previously stored index.
imshow("result", src);
waitKey();
return 0;
}
How can I find biggest rectangle?

How to improve the distance to detect objects?

I’m working on a project that should filter the red objects in an image and calculate the distance to each object with two webcams.
To detect the objects I convert the image from BGR to HSV and use the function inRange to threshold them.
Then I use findContours to get the contours in the image, which should be the contours of the red objects.
As the last step I use boundingRect to get a vector of Rect that contains one Rect per detected object.
The two images below show my problem. The one with the pink rectangle is about 162 cm away from the camera and the other about 175 cm. If the object is farther than 170 cm it is not recognized, although the thresholded image shows the contours of the object.
>170cm
<170cm
Is there a way to improve the distance in which the object is detected?
main.cpp
// Detector and stereo matcher used inside the capture loop.
// NOTE(review): cp, cp2, imgl, imgr, dispfull, disp8, disp, imgToDisplay,
// color and newObjects are declared outside this excerpt -- confirm in the
// full source.
ObjectDetection obj;
StereoVision sv;
// Main loop: grab a frame from each webcam, compute the disparity map,
// isolate the red objects and annotate each with its distance.
for (;;) {
cp.read(imgl);
cp2.read(imgr);
// Disparity of the stereo pair (dispfull = full-resolution map).
sv.calculateDisparity(imgl, imgr, dispfull, disp8, imgToDisplay);
Mat imgrt, imglt;
// Threshold both frames to binary masks of the target colour.
obj.filterColor(imgl, imgr, imglt, imgrt);
// p1 and p2 are the contours found in one image
vector<vector<Point> > p1 = obj.getPointOfObject(imglt);
vector<Rect> allRoisOfObjects = obj.getAllRectangles(imgl, p1);
for(int i = 0; i < allRoisOfObjects.size(); i++){
Rect pos = allRoisOfObjects.at(i);
// Shrink the ROI by 10 px on each side so the distance average is not
// polluted by background pixels at the box border.
pos.width -= 20;
pos.height -= 20;
pos.x += 10;
pos.y += 10;
disp = dispfull(pos);
// Average distance over the ROI (presumably in cm -- confirm against
// StereoVision::calculateAverageDistance).
float distance = sv.calculateAverageDistance(pos.tl(),pos.br(),dispfull);
stringstream ss;
ss << distance;
// Draw the original (unshrunk) box and print the distance at its corner.
rectangle(imgToDisplay, allRoisOfObjects.at(i), color, 2,8, 0);
putText(imgToDisplay, ss.str(), pos.br(), 1, 1, color, 1);
ss.clear();
ss.str("");
newObjects.push_back(pos);
}
}
ObjectDetection.cpp
void ObjectDetection::filterColor(Mat& img1, Mat& img2, Mat& output1,
Mat& output2) {
    // Convert both captured frames from BGR to HSV colour space.
    Mat hsvLeft;
    Mat hsvRight;
    cvtColor(img1, hsvLeft, COLOR_BGR2HSV);
    cvtColor(img2, hsvRight, COLOR_BGR2HSV);

    // Keep only the pixels whose H/S/V values fall inside the configured
    // [iLow*, iHigh*] band; the result is a binary mask per frame.
    const Scalar lower(iLowH, iLowS, iLowV);
    const Scalar upper(iHighH, iHighS, iHighV);
    Mat maskLeft;
    Mat maskRight;
    inRange(hsvLeft, lower, upper, maskLeft);
    inRange(hsvRight, lower, upper, maskRight);

    output1 = maskLeft;
    output2 = maskRight;
}
vector<vector<Point> > ObjectDetection::getPointOfObject(Mat img) {
    // Extract every contour from the binary mask, including nested ones
    // (full hierarchy retrieval, simplified point chains).
    vector<vector<Point> > found;
    vector<Vec4i> topology;
    findContours(img, found, topology, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    return found;
}
vector<Rect> ObjectDetection::getAllRectangles(Mat & img, vector<vector<Point> > contours){
    // One simplified polygon and one bounding box per contour.
    // (The original also allocated center/radius vectors, an RNG and a
    // sentinel Rect that were never used -- removed.)
    vector<vector<Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    for (size_t i = 0; i < contours.size(); i++) {
        // Simplify the contour (3 px tolerance) before fitting the box.
        approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
        boundRect[i] = boundingRect(Mat(contours_poly[i]));
    }
    return boundRect;
}

Bounding a foreground object

I want to bound the foreground object by a rectangle, how shall I go about doing it?
I tried bounding the rectangle by collecting the white pixels , but it bounds the whole screen...
How shall I solve this problem?
//defined later
// Accumulates the points of ALL sufficiently large contours across frames.
vector<Point> points;
RNG rng(12345);
int main(int argc, char* argv[])
{
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG 2");
// MOG2 background subtractor. NOTE(review): frame, fgMaskMOG2,
// frame_check, pMOG2 and keyboard are declared outside this excerpt,
// and "frame" is never read from the capture here -- there is also no
// per-frame loop in this excerpt. Confirm against the full source.
pMOG2 = createBackgroundSubtractorMOG2();
VideoCapture capture(0);
//update the background model
pMOG2->apply(frame, fgMaskMOG2);
frame_check = fgMaskMOG2.clone();
// Morphological opening (erode then dilate) removes speckle noise.
erode(frame_check, frame_check, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(frame_check, frame_check, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
//threshold(frame_check, frame_check, 100, 255, THRESH_BINARY);
//find contours
findContours(frame_check, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
//points
// Collect the points of every contour with more than 2000 points. The
// single rectangle below therefore spans ALL large contours at once,
// which is why it tends to bound the whole screen.
for (size_t i = 0; i < contours.size(); i++) {
for (size_t j = 0; j < contours[i].size(); j++) {
if (contours[i].size() > 2000)
{
cv::Point p = contours[i][j];
points.push_back(p);
}
}
}
if (points.size() > 0){
// reshape(2) reinterprets the flat buffer as a 2-channel Mat so
// boundingRect sees (x, y) pairs.
Rect brect = cv::boundingRect(cv::Mat(points).reshape(2));
rectangle(frame, brect.tl(), brect.br(), CV_RGB(0, 255, 0), 2, CV_AA);
cout << points.size() << endl;
}
imshow("Frame", frame);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey(1000);
}
// NOTE(review): unbalanced closing brace in the original excerpt.
}
Try this, it's from a project I am working on. To isolate objects I use color masks, which produce binary images, but in your case with a proper threshold on the foreground image (after filtering) you can achieve the same result.
// NOTE(review): "binary" (single-channel mask) and "filterSize"
// (structuring-element size) are defined outside this excerpt.
// Morphological opening
erode(binary, binary, getStructuringElement(MORPH_ELLIPSE, filterSize));
dilate(binary, binary, getStructuringElement(MORPH_ELLIPSE, filterSize));
// Morphological closing
dilate(binary, binary, getStructuringElement(MORPH_ELLIPSE, filterSize));
erode(binary, binary, getStructuringElement(MORPH_ELLIPSE, filterSize));
// Find contours
// CV_RETR_EXTERNAL keeps only outer contours -- one per object.
vector<vector<Point>> contours;
findContours(binary, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// One bounding rectangle per detected object.
vector<Rect> rectangles;
for(auto& contour : contours)
{
// Produce a closed polygon with object's contours
vector<Point> polygon;
approxPolyDP(contour, polygon, 3, true);
// Get polygon's bounding rectangles
Rect rect = boundingRect(polygon);
rectangles.push_back(rect);
}
This is a small example on how to draw a bounding box around the foreground objects using the points of the contour. It's with OpenCV 2.9, so you probably need to change the initialization of the BackgroundSubtractorMOG2 if you're using OpenCV 3.0.
#include <opencv2\opencv.hpp>
using namespace cv;
int main(int argc, char *argv[])
{
BackgroundSubtractorMOG2 bg = BackgroundSubtractorMOG2(30, 16.0, false);
VideoCapture cap(0);
Mat3b frame;
Mat1b fmask;
for (;;)
{
cap >> frame;
bg(frame, fmask, -1);
vector<vector<Point>> contours;
findContours(fmask.clone(), contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
for(int i=0; i<contours.size(); ++i)
{
if(contours[i].size() > 200)
{
Rect roi = boundingRect(contours[i]);
drawContours(frame, contours, i, Scalar(0,0,255));
rectangle(frame, roi, Scalar(0,255,0));
}
}
imshow("frame", frame);
imshow("mask", fmask);
if (cv::waitKey(30) >= 0) break;
}
return 0;
}

OpenCV: Retrieving color of the center of a contour

I'm trying to detect the colour of a set of shapes in a black image using OpenCV, for which I use Canny detection. However, the colour output always comes back as black.
std::vector<std::pair<cv::Point, cv::Vec3b> > Asteroids::DetectPoints(const cv::Mat &image)
{
    // Binarise the image so every non-black pixel becomes foreground.
    cv::Mat imageGray;
    cv::cvtColor( image, imageGray, CV_BGR2GRAY );
    cv::threshold(imageGray, imageGray, 1, 255, cv::THRESH_BINARY);
    cv::Mat canny_output;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    int thresh = 10;
    // Detect edges using canny
    cv::Canny( imageGray, canny_output, thresh, thresh*2, 3 );
    // Find contours
    cv::findContours( canny_output, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cv::Point(0, 0) );
    std::vector<std::pair<cv::Point, cv::Vec3b> > points;
    for(unsigned int i = 0; i < contours.size(); i++ )
    {
        cv::Rect rect = cv::boundingRect(contours[i]);
        std::pair<cv::Point, cv::Vec3b> posColor;
        // Centre of the contour's bounding box (x = column, y = row).
        posColor.first = cv::Point( rect.tl().x + (rect.size().width / 2), rect.tl().y + (rect.size().height / 2));
        // Mat::at takes (row, col), i.e. (y, x). The original call used
        // (x, y), which samples the wrong pixel and usually hits the black
        // background -- hence the colour always coming back black.
        posColor.second = image.at<cv::Vec3b>( posColor.first.y, posColor.first.x );
        // Don't add the entry to the list if one with the same colour and
        // position is already present: the contour detection sometimes
        // returns duplicates.
        bool isInList = false;
        for(unsigned int j = 0; j < points.size(); j++)
            if(points[j].first == posColor.first && points[j].second == posColor.second)
                isInList = true;
        if(!isInList)
            points.push_back( posColor );
    }
    return points;
}
I know it has to be an issue with the positions or something along those lines, but I cant figure out what
I might be wrong, but off the top of my head :
Shouldn't this read
posColor.second = image.at<cv::Vec3b>(posColor.first.y, posColor.first.x);
and not the other way around like you did it ?
Matrix notation, not cartesian notation ?

opencv background subtraction

I have an image of the background scene and an image of the same scene with objects in front. Now I want to create a mask of the foreground object with background subtraction. Both images are RGB.
I have already created the following code:
// Absolute per-pixel difference between the current frame and the
// background reference. NOTE(review): orgImage and refImage are defined
// outside this excerpt; both are assumed CV_8UC3 -- confirm.
cv::Mat diff;
diff.create(orgImage.dims, orgImage.size, CV_8UC3);
diff = abs(orgImage-refImage);
// Single-channel output mask, initialised to all black.
cv::Mat mask(diff.rows, diff.cols, CV_8U, cv::Scalar(0,0,0));
//mask = (diff > 10);
// A pixel is marked foreground (255) if ANY of its three channel
// differences exceeds 30.
for (int j=0; j<diff.rows; j++) {
// get the address of row j
//uchar* dataIn= diff.ptr<uchar>(j);
//uchar* dataOut= mask.ptr<uchar>(j);
for (int i=0; i<diff.cols; i++) {
if(diff.at<cv::Vec3b>(j,i)[0] > 30 || diff.at<cv::Vec3b>(j,i)[1] > 30 || diff.at<cv::Vec3b>(j,i)[2] > 30)
mask.at<uchar>(j,i) = 255;
}
}
I don't know if I am doing this correctly — am I?
Have a look at the inRange function from OpenCV. This will allow you to set multiple thresholds at the same time for a 3 channel image.
So, to create the mask you were looking for, do the following:
inRange(diff, Scalar(30, 30, 30), Scalar(255, 255, 255), mask);
This should also be faster than trying to access each pixel yourself.
EDIT: If skin detection is what you are trying to do, I would first do skin detection, and then afterwards do background subtraction to remove the background. Otherwise, your skin detector will have to take into account the intensity shift caused by the subtraction.
Check out my other answer, about good techniques for skin detection.
EDIT :
Is this any faster?
int main(int argc, char* argv[])
{
Mat fg = imread("fg.jpg");
Mat bg = imread("bg.jpg");
cvtColor(fg, fg, CV_RGB2YCrCb);
cvtColor(bg, bg, CV_RGB2YCrCb);
Mat distance = Mat::zeros(fg.size(), CV_32F);
vector<Mat> fgChannels;
split(fg, fgChannels);
vector<Mat> bgChannels;
split(bg, bgChannels);
for(size_t i = 0; i < fgChannels.size(); i++)
{
Mat temp = abs(fgChannels[i] - bgChannels[i]);
temp.convertTo(temp, CV_32F);
distance = distance + temp;
}
Mat mask;
threshold(distance, mask, 35, 255, THRESH_BINARY);
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(mask, mask, MORPH_OPEN, kernel5x5);
imshow("fg", fg);
imshow("bg", bg);
imshow("mask", mask);
waitKey();
return 0;
}
This code produces this mask based on your input imagery:
Finally, here is what I get using my simple thresholding method:
// NOTE(review): fgYcc/bgYcc are the foreground and background frames
// already converted to YCrCb, and bgfgMask is the output -- all defined
// outside this excerpt. Mat subtraction saturates at 0, so pixels where
// the background is brighter than the foreground come out as 0 here.
Mat diff = fgYcc - bgYcc;
vector<Mat> diffChannels;
split(diff, diffChannels);
// only operating on luminance for background subtraction...
// Inverted threshold: luminance difference <= 1 becomes 255 in the mask.
threshold(diffChannels[0], bgfgMask, 1, 255.0, THRESH_BINARY_INV);
// Morphological opening cleans speckle noise out of the mask.
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(bgfgMask, bgfgMask, MORPH_OPEN, kernel5x5);
This produce the following mask:
I think when I'm doing it like this I get the right results: (in the YCrCb colorspace) but accessing each px is slow so I need to find another algorithm
// Per-pixel foreground mask via the sum of absolute channel differences.
// NOTE(review): "image" and "refRoi" (same size, 3-channel) are defined
// outside this excerpt -- confirm they are in YCrCb as the text claims.
cv::Mat mask(image.rows, image.cols, CV_8U, cv::Scalar(0,0,0));
cv::Mat_<cv::Vec3b>::const_iterator itImage= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend= image.end<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator itRef= refRoi.begin<cv::Vec3b>();
cv::Mat_<uchar>::iterator itMask= mask.begin<uchar>();
// Walk all three matrices in lock-step, one pixel at a time.
for ( ; itImage!= itend; ++itImage, ++itRef, ++itMask) {
// Sum of absolute channel differences between frame and reference.
int distance = abs((*itImage)[0]-(*itRef)[0])+
abs((*itImage)[1]-(*itRef)[1])+
abs((*itImage)[2]-(*itRef)[2]);
// Below the threshold the pixel matches the background (0);
// otherwise it is marked foreground (255).
if(distance < 30)
*itMask = 0;
else
*itMask = 255;
}

Resources