OpenCV - Create "own" canny filter - opencv

I would like to create my own Canny filter (to do edge detection) by creating a kernel and applying it to my image as follows:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
    Mat img = imread("../Pictures/img.jpg");
    Mat img_cpy = img.clone();

    // 5x5 Gaussian smoothing kernel (normalized by 159); this is the
    // blur stage of the Canny algorithm, not an edge detector
    Mat kern = (Mat_<float>(5,5) << 2,  4,  5,  4, 2,
                                    4,  9, 12,  9, 4,
                                    5, 12, 15, 12, 5,
                                    4,  9, 12,  9, 4,
                                    2,  4,  5,  4, 2) / 159;

    Point anchor = Point(-1, -1);
    filter2D(img_cpy, img, -1, kern, anchor, 0, BORDER_DEFAULT); // apply the filter

    imshow("orig", img_cpy);
    imshow("res", img);
    waitKey(0);
    return 0;
}
The thing is that I do not get the result I would like to have (a binary image with white edges on a black background).
What am I doing wrong / what do I need to add to my code sample?
Thank you a lot in advance.
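For what it's worth, the 5x5 kernel above is only the Gaussian smoothing stage of the Canny pipeline, so filter2D blurs the image rather than extracting edges. A rough sketch of a gradient-plus-threshold step with Sobel kernels might look like the following (the threshold value is an arbitrary assumption, and this is not a full Canny, which also needs non-maximum suppression and hysteresis thresholding):

    // Hypothetical continuation: compute the gradient of the smoothed
    // image with Sobel kernels, then threshold the magnitude.
    Mat gray, gx, gy, mag, edges;
    cvtColor(img, gray, COLOR_BGR2GRAY);       // 'img' holds the smoothed image

    Mat kx = (Mat_<float>(3,3) << -1, 0, 1,
                                  -2, 0, 2,
                                  -1, 0, 1);   // horizontal derivative
    Mat ky = (Mat_<float>(3,3) << -1, -2, -1,
                                   0,  0,  0,
                                   1,  2,  1); // vertical derivative

    filter2D(gray, gx, CV_32F, kx);
    filter2D(gray, gy, CV_32F, ky);
    magnitude(gx, gy, mag);                    // per-pixel gradient magnitude

    // Crude stand-in for non-maximum suppression + hysteresis:
    // a fixed threshold (the value 100 is an arbitrary assumption)
    threshold(mag, edges, 100, 255, THRESH_BINARY);
    edges.convertTo(edges, CV_8U);
    imshow("edges", edges);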

Related

Why doesn't OpenCV's findContours method always find a closed outer contour?

I have two images from the same camera position. The difference between them is that one was taken with an orthographic and the other with a perspective projection.
Here are the two images:
When I run the findContours OpenCV method on them, the result is the following:
Why doesn't OpenCV find a closed outer contour curve for the perspective one?
I tried both the CV_RETR_TREE and CV_RETR_EXTERNAL flags in combination with the CV_CHAIN_APPROX_SIMPLE and CV_CHAIN_APPROX_NONE flags.
Here is the documentation and sample code (which I am using) for the findContours method.
Actually I can't reproduce your problem. Try with this code:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
    RNG rng(1234);

    Mat3b img = imread("path_to_image");

    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Invert so the shapes are white on a black background,
    // which is what findContours expects
    Mat1b bw = ~gray;

    vector<vector<Point>> contours;
    findContours(bw, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

    // Draw each contour in a random color
    for (size_t i = 0; i < contours.size(); ++i)
    {
        Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        drawContours(img, contours, (int)i, color, 2);
    }

    imshow("Result", img);
    waitKey();

    return 0;
}
Result:

Detect lines in an object with OpenCV

I have the image below. I want to detect the line that divides this object into two pieces. What is the best way? I've tried the Hough transform, but sometimes the object is not big enough for it to detect. Any ideas?
Thanks!
Normally, the Hough transform is used for line detection.
But if it doesn't work for you, fitting a line is also a good alternative.
Check the OpenCV fitLine function for more details and parameters.
Since you have already tried Hough lines, I will demonstrate line fitting here, using OpenCV-Python:
import cv2

# Load image, convert to grayscale, threshold and find contours
img = cv2.imread('lail.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hier = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]

# Then apply the fitLine() function
# (cv2.cv.CV_DIST_L2 is the OpenCV 2.x constant; on 3.x+ use cv2.DIST_L2)
[vx, vy, x, y] = cv2.fitLine(cnt, cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)

# Now find two extreme points on the line to draw it
lefty = int((-x * vy / vx) + y)
righty = int(((gray.shape[1] - x) * vy / vx) + y)

# Finally, draw the line
cv2.line(img, (gray.shape[1] - 1, righty), (0, lefty), 255, 2)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Below is the result I got :
EDIT :
If you want to find the line to divide the object into two pieces, first find the fitting line, then find the line normal to it.
For that, add the piece of code below after the cv2.fitLine() call:
import numpy as np

# Rotate the fitted direction (vx, vy) by 90 degrees and normalize it
nx, ny = 1, -vx / vy
mag = np.sqrt(1 + ny**2)
vx, vy = nx / mag, ny / mag
And below are the results I got:
Hope it helps!
UPDATE:
Below is the C++ version of the Python code for the first case, as you requested. The code works fine for me; the output is the same as given above:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
    cv::Mat img, gray, thresh;
    vector<vector<Point>> contours;
    Vec4f lines;

    img = cv::imread("line.png");
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, thresh, 127, 255, cv::THRESH_BINARY);
    cv::findContours(thresh, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // 2 == CV_DIST_L2; lines holds (vx, vy, x, y)
    cv::fitLine(Mat(contours[0]), lines, 2, 0, 0.01, 0.01);

    // lefty  = int((-x*vy/vx) + y)
    // righty = int(((gray.shape[1]-x)*vy/vx) + y)
    int lefty = (-lines[2] * lines[1] / lines[0]) + lines[3];
    int righty = ((gray.cols - lines[2]) * lines[1] / lines[0]) + lines[3];

    cv::line(img, Point(gray.cols - 1, righty), Point(0, lefty), Scalar(255, 0, 0), 2);
    cv::imshow("img", img);
    cv::waitKey(0);
    cv::destroyAllWindows();

    return 0;
}

OpenCV displaying 2 images adjacently in the same window

I am trying to display 2 images horizontally adjacent to each other in the same window using OpenCV.
I have tried using the adjustROI function for this. Image 1 is 1088 pixels wide and 2208 pixels high, while Image 2 is 1280 pixels wide and 2208 pixels high. Please suggest what could be wrong in the code below. All I am getting is an image of Image 2's size, with content from Image 2 as well.
Mat img_matches = Mat(2208, 2368, imgorig.type()); // set size as combination of img1 and img2
img_matches.adjustROI(0, 0, 0, -1280);
imgorig.copyTo(img_matches);
img_matches.adjustROI(0, 0, 1088, 1280);
imgorig2.copyTo(img_matches);
EDIT: Here's how I'd do what you want to do:
Mat left(img_matches, Rect(0, 0, 1088, 2208)); // Copy constructor
imgorig.copyTo(left);
Mat right(img_matches, Rect(1088, 0, 1280, 2208)); // Copy constructor
imgorig2.copyTo(right);
The copy constructors create a copy of the Mat header that points to the ROI defined by each of the Rects.
Full code:
#include <cv.h>
#include <highgui.h>
using namespace cv;
int main(int argc, char **argv)
{
    Mat im1 = imread(argv[1]);
    Mat im2 = imread(argv[2]);

    Size sz1 = im1.size();
    Size sz2 = im2.size();

    // Destination image wide enough to hold both images side by side
    Mat im3(sz1.height, sz1.width + sz2.width, CV_8UC3);

    Mat left(im3, Rect(0, 0, sz1.width, sz1.height));
    im1.copyTo(left);
    Mat right(im3, Rect(sz1.width, 0, sz2.width, sz2.height));
    im2.copyTo(right);

    imshow("im3", im3);
    waitKey(0);
    return 0;
}
Compiles with:
g++ foo.cpp -o foo.out `pkg-config --cflags --libs opencv`
EDIT2:
Here's how it looks with adjustROI:
#include <cv.h>
#include <highgui.h>
using namespace cv;
int main(int argc, char **argv)
{
    Mat im1 = imread(argv[1]);
    Mat im2 = imread(argv[2]);

    Size sz1 = im1.size();
    Size sz2 = im2.size();
    Mat im3(sz1.height, sz1.width + sz2.width, CV_8UC3);

    // Move the right boundary to the left.
    im3.adjustROI(0, 0, 0, -sz2.width);
    im1.copyTo(im3);

    // Move the left boundary to the right, the right boundary to the right.
    im3.adjustROI(0, 0, -sz1.width, sz2.width);
    im2.copyTo(im3);

    // Restore the original ROI.
    im3.adjustROI(0, 0, sz1.width, 0);

    imshow("im3", im3);
    waitKey(0);
    return 0;
}
You have to keep track of what the current ROI is, and the syntax for moving the ROI around can be a little unintuitive. The result is the same as with the first block of code.
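To make the adjustROI semantics concrete, here is a small illustration of my own (not from the original answer): the four arguments are (dtop, dbottom, dleft, dright), and positive values grow the ROI outward while negative values shrink it.

    // Hypothetical illustration of adjustROI: positive deltas grow the
    // ROI outward in each direction, negative deltas shrink it.
    Mat big(100, 100, CV_8UC3, Scalar::all(0));
    Mat roi = big(Rect(25, 25, 50, 50)); // 50x50 window inside 'big'
    roi.adjustROI(10, 10, 10, 10);       // grow by 10 on every side
    // roi.size() is now Size(70, 70), still backed by 'big's data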
As the heights (rows of the Mats) of the images are the same, the function hconcat may be used to horizontally concatenate the two images (Mats) and thus display them side by side in the same window. See the OpenCV doc.
It works with both grayscale and color images. The number of color channels in the source matrices must be the same.
Mat im1, im2; // source images im1 and im2
Mat newImage;
hconcat(im1, im2, newImage); // <---- place image side by side
imshow("Display side by side", newImage);
waitKey(0);
For the sake of completeness, vconcat can be similarly used for vertical concatenation.
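A minimal sketch of the vertical case, assuming the same im1 and im2 (which for vconcat must have equal widths):

    // vconcat stacks images top to bottom; widths and types must match
    Mat stacked;
    vconcat(im1, im2, stacked);
    imshow("Display stacked", stacked);
    waitKey(0);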
Here's a solution inspired by @misha's answer.
#include <cv.h>
#include <highgui.h>
using namespace cv;
int main(int argc, char **argv)
{
    Mat im1 = imread(argv[1]);
    Mat im2 = imread(argv[2]);

    Size sz1 = im1.size();
    Size sz2 = im2.size();
    Mat im3(sz1.height, sz1.width + sz2.width, CV_8UC3);

    // Copy each image into the ROI returned by Mat::operator()
    im1.copyTo(im3(Rect(0, 0, sz1.width, sz1.height)));
    im2.copyTo(im3(Rect(sz1.width, 0, sz2.width, sz2.height)));

    imshow("im3", im3);
    waitKey(0);
    return 0;
}
Instead of using the copy constructor, this solution uses Mat::operator()(const Rect& roi). While both solutions are O(1), this solution seems cleaner.

Does SetROI work when applying Morphology in OpenCV?

I'm trying to apply a morphological closing operation only to an n×n neighborhood of a pixel at (i,j). The easiest way seemed to be to create a CvRect with cvRect(j-n, i-n, j+n, i+n), set the image's ROI to that, and then apply the morphology.
However, the result is the same as applying the morphology to the whole image, without setting an ROI. What am I doing wrong here?
I haven't tried doing this with the C interface, but here is how I did it using the C++ interface:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char* argv[])
{
    Mat spots = imread("roi.png", 0);

    // ROI covering the upper-left quadrant of the image
    Rect ulRoi(0, 0, spots.cols >> 1, spots.rows >> 1);
    Mat opening(spots, ulRoi);

    Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
    morphologyEx(opening, opening, MORPH_OPEN, element);

    imshow("opening", opening);
    imshow("spots", spots);
    waitKey();

    return 0;
}
I basically just contrived an image and then got rid of the "noise" halo in the upper-left quadrant only. My "noise" spots were only 5x5, so I made the morphological kernel 7x7 to obliterate them.
Here is the input image:
After a morphological opening, I get the following:
Hopefully that will help you out!
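Since the question was specifically about the C interface, here is an untested sketch of the same idea with cvSetImageROI, my own assumption rather than part of the original answer. Note also that cvRect takes (x, y, width, height), not two corner points, so cvRect(j-n, i-n, j+n, i+n) may itself be the bug:

    // Hedged C-interface sketch (untested): close only an n x n
    // neighborhood around pixel (i, j). i, j, n are placeholder values.
    int i = 100, j = 100, n = 25;
    IplImage* img = cvLoadImage("roi.png", 0);

    cvSetImageROI(img, cvRect(j - n, i - n, 2 * n, 2 * n)); // x, y, width, height
    IplConvKernel* se = cvCreateStructuringElementEx(7, 7, 3, 3, CV_SHAPE_RECT);
    cvMorphologyEx(img, img, NULL, se, CV_MOP_CLOSE, 1);    // temp image not needed for closing
    cvResetImageROI(img);

    cvReleaseStructuringElement(&se);
    cvReleaseImage(&img);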

SIFT implementation with OpenCV 2.2

Does anyone know of a link to an example SIFT implementation with OpenCV 2.2?
Regards,
Below is a minimal example:
#include <opencv/cv.h>
#include <opencv/highgui.h>
int main(int argc, const char* argv[])
{
    const cv::Mat input = cv::imread("input.jpg", 0); // load as grayscale

    cv::SiftFeatureDetector detector;
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);

    // Add results to image and save.
    cv::Mat output;
    cv::drawKeypoints(input, keypoints, output);
    cv::imwrite("sift_result.jpg", output);

    return 0;
}
Tested on OpenCV 2.3
You can obtain the SIFT detector and SIFT-based extractor in several ways. As others have already suggested the more direct methods, I will provide a more "software engineering" approach that may make your code more flexible to changes (i.e., easier to switch to other detectors and extractors).
Firstly, if you are looking to obtain the detector with built-in parameters, the best way is to use OpenCV's factory methods for creating it. Here's how:
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
    Mat image = imread("TestImage.jpg");

    // Create a smart pointer for the SIFT feature detector.
    Ptr<FeatureDetector> featureDetector = FeatureDetector::create("SIFT");
    vector<KeyPoint> keypoints;

    // Detect the keypoints
    featureDetector->detect(image, keypoints); // NOTE: featureDetector is a pointer, hence the '->'.

    // Similarly, we create a smart pointer to the SIFT extractor.
    Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SIFT");

    // Compute the 128-dimensional SIFT descriptor at each keypoint.
    // Each row in "descriptors" corresponds to the SIFT descriptor for one keypoint.
    Mat descriptors;
    featureExtractor->compute(image, keypoints, descriptors);

    // If you would like to draw the detected keypoints just to check:
    Mat outputImage;
    Scalar keypointColor = Scalar(255, 0, 0); // blue keypoints
    drawKeypoints(image, keypoints, outputImage, keypointColor, DrawMatchesFlags::DEFAULT);

    namedWindow("Output");
    imshow("Output", outputImage);

    char c = ' ';
    while ((c = waitKey(0)) != 'q'); // Keep the window open until the user presses 'q' to quit.

    return 0;
}
The factory methods are flexible because you can now change to a different keypoint detector or feature extractor, e.g. SURF, simply by changing the argument passed to the "create" factory methods, like this:
Ptr<FeatureDetector> featureDetector = FeatureDetector::create("SURF");
Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SURF");
For other possible arguments to pass to create other detectors or extractors see:
http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#featuredetector-create
http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=descriptorextractor#descriptorextractor-create
Now, using the factory methods means you gain the convenience of not having to guess suitable parameters to pass to each detector or extractor, which can be convenient for people new to using them. However, if you would like to create your own custom SIFT detector, you can construct a SiftFeatureDetector object with custom parameters, wrap it in a smart pointer, and refer to it through the featureDetector smart pointer variable as above.
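For example, a hedged sketch of that wrapping, assuming the OpenCV 2.4-style SIFT constructor (nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma); the parameter values below are illustrative assumptions:

    // Hypothetical sketch: a SIFT detector with custom parameters, held
    // through the same generic interface as the factory-created one.
    Ptr<FeatureDetector> featureDetector =
        new SiftFeatureDetector(0 /*nfeatures*/, 3 /*nOctaveLayers*/,
                                0.06 /*contrastThreshold*/, 10 /*edgeThreshold*/,
                                1.6 /*sigma*/);
    featureDetector->detect(image, keypoints);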
A simple example using the SIFT nonfree feature detector in OpenCV 2.4:
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace cv;
int main(int argc, char** argv)
{
    if (argc < 2)
        return -1;

    Mat img = imread(argv[1]);

    SIFT sift;
    std::vector<KeyPoint> key_points;
    Mat descriptors;
    sift(img, Mat(), key_points, descriptors);

    Mat output_img;
    drawKeypoints(img, key_points, output_img);

    namedWindow("Image");
    imshow("Image", output_img);
    waitKey(0);
    destroyWindow("Image");

    return 0;
}
OpenCV provides SIFT and SURF (here too) and other feature descriptors out-of-the-box.
Note that the SIFT algorithm is patented, so it may be incompatible with the regular OpenCV use/license.
Another simple example using the SIFT nonfree feature detector in OpenCV 2.4.
Be sure to add the opencv_nonfree240.lib dependency.
#include "cv.h"
#include "highgui.h"
#include <opencv2/nonfree/nonfree.hpp>
int main(int argc, char** argv)
{
    cv::Mat img = cv::imread("image.jpg");

    cv::SIFT sift(10); // number of keypoints

    std::vector<cv::KeyPoint> key_points; // note: std::vector, not cv::vector
    cv::Mat descriptors, mascara;         // 'mascara' (mask) stays empty here
    cv::Mat output_img;

    sift(img, mascara, key_points, descriptors);

    cv::drawKeypoints(img, key_points, output_img);

    cv::namedWindow("Image");
    cv::imshow("Image", output_img);
    cv::waitKey(0);

    return 0;
}
In case someone is wondering how to do it with 2 images:
import numpy as np
import cv2

print('Initiate SIFT detector')
sift = cv2.xfeatures2d.SIFT_create()

# Load the source and target images (the paths are placeholders)
src_img = cv2.imread('source.jpg')
trg_img = cv2.imread('target.jpg')

print('find the keypoints and descriptors with SIFT')
gcp1, des1 = sift.detectAndCompute(src_img, None)
gcp2, des2 = sift.detectAndCompute(trg_img, None)

# create BFMatcher object; SIFT descriptors are floats, so use NORM_L2
# (NORM_HAMMING is meant for binary descriptors such as ORB)
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
matches = bf.match(des1, des2)

# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# draw only the first 100 matches
img3 = cv2.drawMatches(src_img, gcp1, trg_img, gcp2, matches[:100], None)
