All I can find on the internet are OpenCV 2.x Java code examples. I am using OpenCV 3.2 and trying to load an image and turn every black line longer than x pixels white (i.e., remove it). Here is where I am, starting from an OpenCV 2.4 version of a Hough transform example...
Mat img = Imgcodecs.imread("C:/Users/user1/Desktop/topdown-6.jpg");
// Mat img = Imgcodecs.imread(fileName)
// generate gray scale and blur
Mat gray = new Mat();
Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.blur(gray, gray, new Size(3, 3));
// detect the edges
Mat edges = new Mat();
int lowThreshold = 50;
int ratio = 3;
Imgproc.Canny(gray, edges, lowThreshold, lowThreshold * ratio);
Mat lines = new Mat();
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 180, 50, 50, 10);
// in OpenCV 3.x, HoughLinesP returns one line per row (N x 1, 4 channels),
// so iterate over rows rather than the 2.4-style columns
for (int i = 0; i < lines.rows(); i++) {
    double[] val = lines.get(i, 0);
    Imgproc.line(img, new Point(val[0], val[1]), new Point(val[2], val[3]), new Scalar(0, 0, 255), 2);
}
Image edgesImg = toBufferedImage(edges);
Image linesImg = toBufferedImage(lines); // note: 'lines' holds CV_32SC4 line endpoints, not 8-bit image data
Image imgg = toBufferedImage(img);
And I'm getting this error:
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor, file
C:\build\master_winpack-bindings-win32-vc14-static\opencv\modules\imgproc\src\color.cpp, line 9748
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: C:\build\master_winpack-bindings-win32-vc14-static\opencv\modules\imgproc\src\color.cpp:9748: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
]
at org.opencv.imgproc.Imgproc.cvtColor_1(Native Method)
at org.opencv.imgproc.Imgproc.cvtColor(Imgproc.java:1778)
at Main.main(Main.java:174)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:144)
Any help towards my goal would be awesome. Should I just use OpenCV 2.x?
Edit:
public Image toBufferedImage(Mat m) {
    int type = BufferedImage.TYPE_BYTE_GRAY;
    if (m.channels() > 1) {
        type = BufferedImage.TYPE_3BYTE_BGR;
    }
    int bufferSize = m.channels() * m.cols() * m.rows();
    byte[] b = new byte[bufferSize];
    m.get(0, 0, b); // ERROR HAPPENING HERE
    BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
    final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
    System.arraycopy(b, 0, targetPixels, 0, b.length);
    return image;
}
The problem is that you are loading the image as grayscale, so you do not need to convert it later. cvtColor with COLOR_BGR2GRAY expects a 3- or 4-channel input Mat, which is exactly what the failed assertion scn == 3 || scn == 4 is checking.
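A minimal sketch of that guard, assuming you want the code to work regardless of how the file decodes (the path is the one from the question; the empty() check also catches a bad path, which triggers the same assertion):

Mat img = Imgcodecs.imread("C:/Users/user1/Desktop/topdown-6.jpg");
if (img.empty()) {
    throw new IllegalStateException("imread failed - check the path");
}
Mat gray;
if (img.channels() >= 3) {
    gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY); // only convert a real color Mat
} else {
    gray = img.clone(); // already single-channel: no conversion needed
}

For the original goal of erasing long black lines, drawing each segment HoughLinesP returns back onto the image in white (Scalar(255, 255, 255)) with minLineLength set to your x should get you most of the way there; there is no need to fall back to OpenCV 2.x.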
OpenCV version: 3.4.1
void drawOnImage(const cv::Mat& binary, Mat& image) {
    Mat_<uchar>::const_iterator it = binary.begin<uchar>(); // THIS ONE
    Mat_<uchar>::const_iterator itend = binary.end<uchar>();
    for (int i = 0; it != itend; ++it, ++i) {
        if (!*it)
            circle(image, Point(i % image.step, i / image.step),
                   5, Scalar(255, 0, 0));
    }
}
This function draws circles on the detected edges of the image to highlight them.
On the second line of this code (marked THIS ONE), the error below occurs.
Error: Assertion failed (elemSize() == sizeof(_Tp)) in cv::Mat::begin
The binary parameter is a corner map, and image is the original image I used to build it.
I thought it was an element-size problem, so I checked:
the elemSize of the corner map is 3, while sizeof(uchar) is 1,
but I don't know how to resolve this.
Mat getCorners(const Mat &image) {
    Mat result;
    dilate(image, result, cross);
    erode(result, result, diamond);
    Mat result2;
    dilate(image, result2, x);
    erode(result2, result2, square);
    absdiff(result2, result, result);
    applyThreshold(result);
    return result; // result == binary
}
The output of this function is the binary corner map.
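For what it's worth, the assertion means the Mat's element size (3 bytes, i.e. three channels) does not match the iterator's uchar type. One way out, assuming the corner map is 3-channel only because a color image went into getCorners, is to reduce it to a single channel before any per-pixel uchar access. A sketch, written in Java to match the other snippets in this collection (the equivalent C++ call is cv::cvtColor with COLOR_BGR2GRAY):

Mat corners = getCorners(image); // hypothetical Java port of the function above
Mat cornersGray = new Mat();
if (corners.channels() > 1) {
    // collapse to one channel so each element really is a single uchar
    Imgproc.cvtColor(corners, cornersGray, Imgproc.COLOR_BGR2GRAY);
} else {
    cornersGray = corners;
}

Converting the input image to grayscale before calling getCorners would avoid the problem at the source.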
I am trying to remove any contours that aren't in a square-like shape. I check the image before and after to see whether any contours have been removed. I use the circularity formula, where values between 0.7 and 0.8 indicate a square-like shape. I expect some contour lines to be removed, but none are.
Here is what I have done so far.
public static void main(String[] args) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    Mat capturedFrame = Imgcodecs.imread("first.png");
    //Gray
    Mat gray = new Mat();
    Imgproc.cvtColor(capturedFrame, gray, Imgproc.COLOR_BGR2GRAY);
    //Blur
    Mat blur = new Mat();
    Imgproc.blur(gray, blur, new Size(3, 3));
    //Canny image
    Mat canny = new Mat();
    Imgproc.Canny(blur, canny, 20, 40, 3, true);
    Imgcodecs.imwrite("test.png", canny);
    //Dilate image to increase size of lines
    Mat kernel = Imgproc.getStructuringElement(1, new Size(3, 3));
    Mat dilated = new Mat();
    Imgproc.dilate(canny, dilated, kernel);
    List<MatOfPoint> contours = new ArrayList<>();
    //find contours
    Imgproc.findContours(dilated, contours, new Mat(), Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_NONE);
    //convert image
    Imgproc.cvtColor(capturedFrame, capturedFrame, Imgproc.COLOR_BGR2RGB);
    //Draw contours on original image
    for (int n = 0; n < contours.size(); n++) {
        Imgproc.drawContours(capturedFrame, contours, n, new Scalar(255, 0, 0), 1);
    }
    Imgcodecs.imwrite("before.png", capturedFrame);
    //display image with all contours
    Imshow showImg = new Imshow("displayImage");
    showImg.show(capturedFrame);
    //Remove contours that aren't close to a square shape.
    for (int i = 0; i < contours.size(); i++) {
        double area = Imgproc.contourArea(contours.get(i));
        MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
        double perimeter = Imgproc.arcLength(contour2f, true);
        //Found squareness equation on wiki...
        // https://en.wikipedia.org/wiki/Shape_factor_(image_analysis_and_microscopy)
        double squareness = 4 * Math.PI * area / Math.pow(perimeter, 2);
        System.out.println("Squareness: " + squareness);
        if (squareness <= 0.7 && squareness >= 0.8) {
            contours.remove(i);
        }
    }
    for (int i = 0; i < contours.size(); i++) {
        Imgproc.drawContours(capturedFrame, contours, i, new Scalar(0, 255, 0), 1);
    }
    showImg.show(capturedFrame);
    Imgcodecs.imwrite("remove.png", capturedFrame);
}
Here is the original image:
Here is the image before any contours are removed:
Here is the final image, where some contours should have been removed:
squareness <= 0.7 && squareness >= 0.8 looks like an impossible condition. Do you mean squareness <= 0.7 || squareness >= 0.8?
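If the intent is to keep only square-like contours (squareness between 0.7 and 0.8), a sketch of the corrected removal loop would be the following; an explicit java.util.Iterator is used because removing by index while counting forward skips the element after each removal:

Iterator<MatOfPoint> it = contours.iterator();
while (it.hasNext()) {
    MatOfPoint contour = it.next();
    double area = Imgproc.contourArea(contour);
    MatOfPoint2f contour2f = new MatOfPoint2f(contour.toArray());
    double perimeter = Imgproc.arcLength(contour2f, true);
    double squareness = 4 * Math.PI * area / Math.pow(perimeter, 2);
    if (squareness < 0.7 || squareness > 0.8) {
        it.remove(); // not square-like: drop it
    }
}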
I am using OpenCV4Android version 2.4.11 and I am trying to detect rectangles in frames retrieved from the camera. I referred to some questions on this website and they were very helpful, but the issue I am currently facing is this:
when I try to detect an object with a light color in the middle, as shown in the original image below, the detection algorithm does not detect the object as a whole; rather, it detects the dark parts of it, as shown in the image in the section titled "processed" below.
The code posted below shows the steps I followed and the threshold values I used to detect the objects in the frames.
Please let me know why the object is not detected as a whole, and what I can do to detect the whole object, not only parts of it.
Code:
//step 1
this.mMatGray = new Mat();
Imgproc.cvtColor(this.mMatInputFrame, this.mMatGray, Imgproc.COLOR_BGR2GRAY);
//step 2
this.mMatEdges = new Mat();
Imgproc.blur(this.mMatGray, this.mMatEdges, new Size(7, 7));//7,7
//step 3
Imgproc.Canny(this.mMatEdges, this.mMatEdges, 128, 128 * 2, 5, true);//..,..,2,900,7,true
//step 4
dilated = new Mat();
Mat dilateElement = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE, new Size(3, 3));
Imgproc.dilate(mMatEdges, dilated, dilateElement);
ArrayList<MatOfPoint> contours = new ArrayList<>();
hierachy = new Mat();
Imgproc.findContours(dilated, contours, hierachy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint2f approxCurve = new MatOfPoint2f();
if (contours.size() > 0) {
    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
        double approxDistance = Imgproc.arcLength(contour2f, true) * .02;//.02
        Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
        MatOfPoint points = new MatOfPoint(approxCurve.toArray());
        if (points.total() >= 4 && Imgproc.isContourConvex(points) && Math.abs(Imgproc.contourArea(points)) >= 40000 && Math.abs(Imgproc.contourArea(points)) <= 150000) {
            Rect boundingRect = Imgproc.boundingRect(points);
            RotatedRect minAreaRect = Imgproc.minAreaRect(contour2f);
            Point[] rectPoints = new Point[4];
            minAreaRect.points(rectPoints);
            Rect minAreaAsRect = minAreaRect.boundingRect();
            //to draw the minAreaRect
            for (int j = 0; j < 4; j++) {
                Core.line(mMatInputFrame, rectPoints[j], rectPoints[(j + 1) % 4], new Scalar(255, 0, 0));
            }
            Core.putText(mMatInputFrame, "MinAreaRect", new Point(10, 30), 1, 1, new Scalar(255, 0, 0), 2);
            Core.putText(mMatInputFrame, "Width: " + minAreaAsRect.width, new Point(minAreaAsRect.tl().x, minAreaAsRect.tl().y - 100), 1, 1, new Scalar(255, 0, 0), 2);
            Core.putText(mMatInputFrame, "Height: " + minAreaAsRect.height, new Point(minAreaAsRect.tl().x, minAreaAsRect.tl().y - 80), 1, 1, new Scalar(255, 0, 0), 2);
            Core.putText(mMatInputFrame, "Area: " + minAreaAsRect.area(), new Point(minAreaAsRect.tl().x, minAreaAsRect.tl().y - 60), 1, 1, new Scalar(255, 0, 0), 2);
            //drawing the contour
            Imgproc.drawContours(mMatInputFrame, contours, i, new Scalar(0, 0, 0), 2);
            //drawing the boundingRect
            Core.rectangle(mMatInputFrame, boundingRect.tl(), boundingRect.br(), new Scalar(0, 255, 0), 1, 1, 0);
            Core.putText(mMatInputFrame, "BoundingRect", new Point(10, 60), 1, 1, new Scalar(0, 255, 0), 2);
            Core.putText(mMatInputFrame, "Width: " + boundingRect.width, new Point(boundingRect.br().x - 100, boundingRect.tl().y - 100), 1, 1, new Scalar(0, 255, 0), 2);
            Core.putText(mMatInputFrame, "Height: " + boundingRect.height, new Point(boundingRect.br().x - 100, boundingRect.tl().y - 80), 1, 1, new Scalar(0, 255, 0), 2);
            Core.putText(mMatInputFrame, "Area: " + Imgproc.contourArea(points), new Point(boundingRect.br().x - 100, boundingRect.tl().y - 60), 1, 1, new Scalar(0, 255, 0), 2);
        }
    }
}
Original image:
Processed image:
I have implemented this in C++. The APIs are the same, so you can easily port it to Android. I have used OpenCV 2.4.8. Please check the implementation; I hope the code makes clear what is done:
#include <iostream>
#include <string>
#include "opencv/highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"

using namespace std;
using namespace cv;

Mat GetKernel(int erosion_size)
{
    Mat element = getStructuringElement(cv::MORPH_CROSS,
                      cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1),
                      cv::Point(erosion_size, erosion_size));
    return element;
}

int main()
{
    Mat img = imread("C:/Users/dell2/Desktop/j6B3A.png", 0); // loading gray scale image
    Mat imgC = imread("C:/Users/dell2/Desktop/j6B3A.png", 1);
    GaussianBlur(img, img, Size(7, 7), 1.5, 1.5);
    Mat dimg;
    adaptiveThreshold(img, dimg, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 17, 1);
    dilate(dimg, img, GetKernel(2));
    erode(img, dimg, GetKernel(2));
    erode(dimg, img, GetKernel(1));
    dimg = img;
    //*
    vector<vector<Point>> contours; // Vector for storing contour
    vector<Vec4i> hierarchy;
    findContours(dimg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE); // Find the contours in the image
    double largest_area = 0;
    int largest_contour_index = 0;
    Rect bounding_rect;
    for (int i = 0; i < contours.size(); i++) // iterate through each contour.
    {
        double a = contourArea(contours[i], false); // Find the area of contour
        if (a > largest_area) {
            largest_area = a;
            largest_contour_index = i; // Store the index of largest contour
            bounding_rect = boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
        }
    }
    drawContours(imgC, contours, largest_contour_index, Scalar(255, 0, 0), 2, 8, hierarchy, 0, Point());
    rectangle(imgC, bounding_rect, Scalar(0, 255, 0), 2, 8, 0);
    /**/
    //imshow("display",dimg);
    imshow("display2", imgC);
    waitKey(0);
    return 0;
}
Output produced:
You can fine-tune the threshold if necessary.
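Since the answer says porting is straightforward, here is a hedged Java sketch of the same pipeline for the 2.4 Java bindings used in the question (untested; the input path is a placeholder, and the kernel sizes mirror GetKernel(2) and GetKernel(1) above):

Mat img = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
Mat imgC = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_COLOR);
Imgproc.GaussianBlur(img, img, new Size(7, 7), 1.5, 1.5);
Mat dimg = new Mat();
Imgproc.adaptiveThreshold(img, dimg, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 17, 1);
// morphological clean-up, as in the C++ version
Mat k2 = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(5, 5));
Mat k1 = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.dilate(dimg, img, k2);
Imgproc.erode(img, dimg, k2);
Imgproc.erode(dimg, img, k1);
// keep only the largest contour and its bounding box
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(img, contours, new Mat(), Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_NONE);
double largestArea = 0;
int largestIdx = -1;
Rect boundingRect = null;
for (int i = 0; i < contours.size(); i++) {
    double a = Imgproc.contourArea(contours.get(i));
    if (a > largestArea) {
        largestArea = a;
        largestIdx = i;
        boundingRect = Imgproc.boundingRect(contours.get(i));
    }
}
if (largestIdx >= 0) {
    Imgproc.drawContours(imgC, contours, largestIdx, new Scalar(255, 0, 0), 2);
    Core.rectangle(imgC, boundingRect.tl(), boundingRect.br(), new Scalar(0, 255, 0), 2);
}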
I am trying to detect a piece of paper in an image. I have had help from posts like this one, but one difference is that my background color is almost the same as the paper color, and I am getting a wrong result.
[edit]
By wrong result I mean that the paper outline contour is not detected at all; instead, the largest contour covers the whole image.
image1
image2
My code so far (using Emgu CV and C#):
MemStorage storage = new MemStorage();
List<Contour<Point>> candidateList = new List<Contour<Point>>();
List<double> areaList = new List<double>();
Image<Bgr, Byte> inputImage = new Image<Bgr, Byte>(image);
//Rectangle roi = new Rectangle(15, 15, image.Width - 15, image.Height - 15);
//inputImage.ROI = roi;
//inputImage = inputImage.Copy();
double threshHeight = inputImage.Size.Height * 0.50;
double threshWidth = inputImage.Size.Width * 0.50;
//std::vector<std::vector<cv::Point> > squares;
//cv::Mat pyr, timg, gray0(_image.size(), CV_8U), gray;
int thresh = 50, N = 5;
//cv::pyrDown(_image, pyr, cv::Size(_image.cols/2, _image.rows/2));
//cv::pyrUp(pyr, timg, _image.size());
//std::vector<std::vector<cv::Point> > contours;
Image<Gray, Byte>[] gray0 = new Image<Gray, byte>[3];
for (int c = 0; c < 3; c++) {
    try {
        int[] ch = { c, 0 };
        gray0[c] = new Image<Gray, byte>(inputImage.Size);
        CvInvoke.cvMixChannels(new IntPtr[] { inputImage }, 1, new IntPtr[] { gray0[c] }, 1, ch, 1);
        Image<Gray, Byte> gray = new Image<Gray, byte>(inputImage.Size);
        for (int l = 0; l < N; l++) {
            if (l == 0) {
                Image<Gray, Byte> cannyImage = gray0[c].Canny(0, thresh, 5);
                CvInvoke.cvDilate(cannyImage, gray, IntPtr.Zero, 1);
                //CvInvoke.cvShowImage("ch " + c + "-" + l, gray);
            } else {
                CvInvoke.cvThreshold(gray0[c], gray, (l + 1) * 255 / N, 255, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY);
                //CvInvoke.cvShowImage("ch " + c + "-" + l, gray0[c]);
            }
            //CvInvoke.cvShowImage("image", gray);
            for (Contour<Point> contours = gray.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext) {
                Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.02, storage);
                if (currentContour.Count() >= 4) {
                    if (currentContour.Area > 3000) {
                        //if (currentContour.BoundingRectangle.Width >= threshWidth && currentContour.BoundingRectangle.Height > threshHeight){
                        candidateList.Add(currentContour);
                        areaList.Add(currentContour.Area);
                        inputImage.Draw(currentContour, new Bgr(255, 0, 0), 1);
                    }
                }
            }
        }
    } catch (Exception ex) {
        Debug.WriteLine(ex.Message);
    }
}
/* finding the biggest one */
double area = -1.0;
Contour<Point> paper = null;
for (int i = 0; i < candidateList.Count; i++) {
    if (areaList[i] > area) {
        area = areaList[i];
        paper = candidateList[i];
    }
}
if (paper != null) {
    if (paper.BoundingRectangle.Width >= threshWidth && paper.BoundingRectangle.Height > threshHeight) {
        inputImage.Draw(paper, new Bgr(0, 0, 255), 2);
    }
}
return inputImage.ToBitmap();
Please let me know how to process these images.
I did this in MATLAB (sorry, I'm not really proficient at OpenCV), but you should be able to emulate the code. I tried to make it very simple. I noticed that the gradient of the original images really highlights where the paper is, so I used that to make a "rough" outline of the paper. The gradient is a good starting point, and maybe you can build from there. I just downsampled, then upsampled the image (emulating morphological operations to clean the image, since you lose all the small details).
You might get better results if you smooth the image out first (with a Gaussian filter, maybe); I didn't try it, but you can give it a shot. Here is the result:
And here is the code for reference:
im1 = imread('http://www.imageno.com/image.php?id=ai7b91pm9fcs&kk=1089743759');
im2 = imread('http://www.imageno.com/image.php?id=k99c9xpd6phs&kk=3354581295');
%converts to grayscale
gim1 = rgb2gray(im1);
gim2 = rgb2gray(im2);
%gets size of images
[m1, n1] = size(gim1);
[m2, n2] = size(gim2);
%takes gradient of image
[Gx1, Gy1] = gradient(double(gim1));
[Gx2, Gy2] = gradient(double(gim2));
%takes magnitude of gradient in X and Y direction
Gxy1 = sqrt(Gx1.^2 + Gy1.^2);
Gxy2 = sqrt(Gx2.^2 + Gy2.^2);
%downsamples image (to reduce noise)
scale_factor = 100;
small1 = imresize(Gxy1, [m1/scale_factor n1/scale_factor]);
small2 = imresize(Gxy2, [m2/scale_factor n2/scale_factor]);
%upsamples image (to original size)
big1 = imresize(small1, [m1 n1]);
big2 = imresize(small2, [m2 n2]);
%converts to binary mask
bw1 = (big1 >= 1);
bw2 = (big2 >= 1);
%displays images
figure(1);
subplot(2,4,1);imshow(gim1);title('grayscale 1');
subplot(2,4,5);imshow(gim2);title('grayscale 2');
%these gradients are a little deceiving. In matlab when it sees an image
%of type "double" its maps it so 0=black and 1=white. anything >=1 gets
%clipped to 1
subplot(2,4,2);imshow(Gxy1);title('gradient 1');
subplot(2,4,6);imshow(Gxy2);title('gradient 2');
subplot(2,4,3);imshow(big1);title('down->up sampled 1');
subplot(2,4,7);imshow(big2);title('down->up sampled 2');
%this is just some matlab witchcraft so I can multiply the 2D mask with a
%3D image (r g b) in a very efficient manner
subplot(2,4,4);imshow(bsxfun(@times,im1,uint8(bw1)));title('masked image 1');
subplot(2,4,8);imshow(bsxfun(@times,im2,uint8(bw2)));title('masked image 2');
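To emulate the MATLAB idea in OpenCV (Java here, matching the other examples in this collection), a rough sketch might look like the following; the scale factor of 100 and the >= 1 mask threshold come from the answer above, Sobel stands in for MATLAB's gradient, and input is an assumed BGR Mat:

Mat gray = new Mat(), gx = new Mat(), gy = new Mat();
Imgproc.cvtColor(input, gray, Imgproc.COLOR_BGR2GRAY);
// gradient magnitude, like sqrt(Gx.^2 + Gy.^2) in the MATLAB code;
// Sobel scales differently than gradient, so the threshold may need retuning
Imgproc.Sobel(gray, gx, CvType.CV_32F, 1, 0);
Imgproc.Sobel(gray, gy, CvType.CV_32F, 0, 1);
Mat mag = new Mat();
Core.magnitude(gx, gy, mag);
// down- then upsample to wipe out small details (cheap morphological smoothing)
int scale = 100;
Mat small = new Mat(), big = new Mat();
Imgproc.resize(mag, small, new Size(mag.cols() / scale, mag.rows() / scale));
Imgproc.resize(small, big, mag.size());
// binarize into the paper mask
Mat mask = new Mat();
Imgproc.threshold(big, mask, 1.0, 255, Imgproc.THRESH_BINARY);
mask.convertTo(mask, CvType.CV_8U);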
I have an image of the background scene and an image of the same scene with objects in front. Now I want to create a mask of the object in the foreground with background subtraction. Both images are RGB.
I have already created the following code:
cv::Mat diff;
diff.create(orgImage.dims, orgImage.size, CV_8UC3);
diff = abs(orgImage-refImage);
cv::Mat mask(diff.rows, diff.cols, CV_8U, cv::Scalar(0,0,0));
//mask = (diff > 10);
for (int j = 0; j < diff.rows; j++) {
    // get the address of row j
    //uchar* dataIn= diff.ptr<uchar>(j);
    //uchar* dataOut= mask.ptr<uchar>(j);
    for (int i = 0; i < diff.cols; i++) {
        if (diff.at<cv::Vec3b>(j,i)[0] > 30 || diff.at<cv::Vec3b>(j,i)[1] > 30 || diff.at<cv::Vec3b>(j,i)[2] > 30)
            mask.at<uchar>(j,i) = 255;
    }
}
I don't know if I am doing this right.
Have a look at the inRange function from OpenCV. This will allow you to set multiple thresholds at the same time for a 3-channel image.
So, to create the mask you were looking for, do the following:
inRange(diff, Scalar(30, 30, 30), Scalar(255, 255, 255), mask);
This should also be faster than trying to access each pixel yourself.
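As a self-contained sketch (Java again, to match the other examples; diff is the absolute-difference image computed in the question):

// a pixel survives only when every channel of the difference lies in [30, 255]
Mat mask = new Mat();
Core.inRange(diff, new Scalar(30, 30, 30), new Scalar(255, 255, 255), mask);

Note that inRange requires all channels to be in range, whereas the loop above fires when any channel exceeds 30; in practice the masks are usually close, but they are not identical.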
EDIT : If skin detection is what you are trying to do, I would first do skin detection, and then afterwards do background subtraction to remove the background. Otherwise, your skin detector will have to take into account the intensity shift caused by the subtraction.
Check out my other answer, about good techniques for skin detection.
EDIT :
Is this any faster?
int main(int argc, char* argv[])
{
    Mat fg = imread("fg.jpg");
    Mat bg = imread("bg.jpg");
    cvtColor(fg, fg, CV_RGB2YCrCb);
    cvtColor(bg, bg, CV_RGB2YCrCb);
    Mat distance = Mat::zeros(fg.size(), CV_32F);
    vector<Mat> fgChannels;
    split(fg, fgChannels);
    vector<Mat> bgChannels;
    split(bg, bgChannels);
    for (size_t i = 0; i < fgChannels.size(); i++)
    {
        Mat temp = abs(fgChannels[i] - bgChannels[i]);
        temp.convertTo(temp, CV_32F);
        distance = distance + temp;
    }
    Mat mask;
    threshold(distance, mask, 35, 255, THRESH_BINARY);
    Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
    morphologyEx(mask, mask, MORPH_OPEN, kernel5x5);
    imshow("fg", fg);
    imshow("bg", bg);
    imshow("mask", mask);
    waitKey();
    return 0;
}
This code produces this mask based on your input imagery:
Finally, here is what I get using my simple thresholding method:
Mat diff = fgYcc - bgYcc;
vector<Mat> diffChannels;
split(diff, diffChannels);
// only operating on luminance for background subtraction...
threshold(diffChannels[0], bgfgMask, 1, 255.0, THRESH_BINARY_INV);
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(bgfgMask, bgfgMask, MORPH_OPEN, kernel5x5);
This produces the following mask:
I think I get the right results when doing it like this (in the YCrCb color space), but accessing each pixel is slow, so I need to find another algorithm:
cv::Mat mask(image.rows, image.cols, CV_8U, cv::Scalar(0,0,0));
cv::Mat_<cv::Vec3b>::const_iterator itImage = image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend = image.end<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator itRef = refRoi.begin<cv::Vec3b>();
cv::Mat_<uchar>::iterator itMask = mask.begin<uchar>();
for ( ; itImage != itend; ++itImage, ++itRef, ++itMask) {
    int distance = abs((*itImage)[0] - (*itRef)[0]) +
                   abs((*itImage)[1] - (*itRef)[1]) +
                   abs((*itImage)[2] - (*itRef)[2]);
    if (distance < 30)
        *itMask = 0;
    else
        *itMask = 255;
}
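A hedged sketch of the same Manhattan-distance threshold without any per-pixel access (Java, consistent with the earlier examples; image and refRoi are the two 3-channel Mats from the loop above):

// |image - refRoi| summed over channels, then thresholded at 30
Mat diff = new Mat();
Core.absdiff(image, refRoi, diff);
diff.convertTo(diff, CvType.CV_32FC3); // widen so the channel sum cannot saturate
List<Mat> ch = new ArrayList<>();
Core.split(diff, ch);
Mat distance = new Mat();
Core.add(ch.get(0), ch.get(1), distance);
Core.add(distance, ch.get(2), distance);
Mat mask = new Mat();
Imgproc.threshold(distance, mask, 29, 255, Imgproc.THRESH_BINARY); // distance >= 30 -> 255
mask.convertTo(mask, CvType.CV_8U);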