I am trying to detect lines in an image using OpenCV (Emgu.Net implementation).
So far, the results are not bad, but they do not appear to be consistent.
For instance, as you can see from the original and the results, some lines are captured well, yet the small vertical ones at the top are not, even though they have the same thickness.
Here is my code:
public static void ProcessImage(Mat src, bool createComposedImage)
{
//Convert the image to grayscale
Mat gray = new Mat();
CvInvoke.CvtColor(src, gray, ColorConversion.Bgr2Gray);
//Use Gaussian filtering to remove noise
CvInvoke.GaussianBlur(gray, gray, new Size(3, 3), 1);
double cannyThreshold = 180.0;
Mat result = new Mat();
List<Mat> resultPortions = new List<Mat>();
if (createComposedImage)
{
resultPortions.Add(src);
}
using (Mat cannyEdges = new Mat())
{
double cannyThresholdLinking = 120.0;
CvInvoke.Canny(gray, cannyEdges, cannyThreshold, cannyThresholdLinking);
LineSegment2D[] lines = CvInvoke.HoughLinesP(
image: cannyEdges,
rho: 1, //Distance resolution in pixel-related units
theta: Math.PI / 180.0, //Angle resolution measured in radians.
10, //Accumulator threshold (minimum number of votes)
30, //Minimum line length; shorter segments are rejected
10); //Maximum allowed gap between points on the same line
if (createComposedImage)
{
Mat lineImage = ProcessLinesInImage(gray, lines);
resultPortions.Add(lineImage);
}
if (createComposedImage)
{
try
{
CvInvoke.VConcat(resultPortions.ToArray(), result);
}
catch
{
}
foreach (Mat resultPortion in resultPortions)
{
resultPortion.Dispose();
}
}
}
return;
}
Here are the images
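One possible reason the short vertical segments disappear (a guess, not a verified diagnosis): HoughLinesP discards any segment shorter than minLineLength, and with minLineLength = 30 plus an accumulator threshold of 10, short edges may never qualify. Below is a minimal C++ sketch of the same pipeline with relaxed parameters; the exact values are illustrative assumptions, not tuned for this image.

#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: same Canny + probabilistic Hough pipeline with parameters relaxed
// so that short segments can survive. Values are assumptions, not tuned.
std::vector<cv::Vec4i> detectLines(const cv::Mat& bgr)
{
    cv::Mat gray, edges;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
    cv::GaussianBlur(gray, gray, cv::Size(3, 3), 1);
    cv::Canny(gray, edges, 180, 120);

    std::vector<cv::Vec4i> lines;
    // rho = 1 px, theta = 1 degree, accumulator threshold = 10,
    // minLineLength = 10 (instead of 30), maxLineGap = 5
    cv::HoughLinesP(edges, lines, 1, CV_PI / 180.0, 10, 10, 5);
    return lines;
}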
Predefined: My A4 sheet will always be white.
I need to detect an A4 sheet in an image. I am able to detect rectangles; the problem is that I get multiple rectangles from my image, so I extract sub-images from the detected rectangle points.
Now I want to match the extracted image's color against white.
I am using the method below to extract the image from the detected contours:
- (cv::Mat) getPaperAreaFromImage: (std::vector<cv::Point>) square, cv::Mat image
{
// declare used vars
int paperWidth = 210; // in mm, because scale factor is taken into account
int paperHeight = 297; // in mm, because scale factor is taken into account
cv::Point2f imageVertices[4];
float distanceP1P2;
float distanceP1P3;
BOOL isLandscape = true;
int scaleFactor;
cv::Mat paperImage;
cv::Mat paperImageCorrected;
cv::Point2f paperVertices[4];
// sort square corners for further operations
square = sortSquarePointsClockwise( square );
// rearrange to get proper order for getPerspectiveTransform()
imageVertices[0] = square[0];
imageVertices[1] = square[1];
imageVertices[2] = square[3];
imageVertices[3] = square[2];
// get distance between corner points for further operations
distanceP1P2 = distanceBetweenPoints( imageVertices[0], imageVertices[1] );
distanceP1P3 = distanceBetweenPoints( imageVertices[0], imageVertices[2] );
// calc paper, paperVertices; take orientation into account
if ( distanceP1P2 > distanceP1P3 ) {
scaleFactor = ceil( lroundf(distanceP1P2/paperHeight) ); // we always want to scale the image down to maintain the best quality possible
paperImage = cv::Mat( paperWidth*scaleFactor, paperHeight*scaleFactor, CV_8UC3 );
paperVertices[0] = cv::Point( 0, 0 );
paperVertices[1] = cv::Point( paperHeight*scaleFactor, 0 );
paperVertices[2] = cv::Point( 0, paperWidth*scaleFactor );
paperVertices[3] = cv::Point( paperHeight*scaleFactor, paperWidth*scaleFactor );
}
else {
isLandscape = false;
scaleFactor = ceil( lroundf(distanceP1P3/paperHeight) ); // we always want to scale the image down to maintain the best quality possible
paperImage = cv::Mat( paperHeight*scaleFactor, paperWidth*scaleFactor, CV_8UC3 );
paperVertices[0] = cv::Point( 0, 0 );
paperVertices[1] = cv::Point( paperWidth*scaleFactor, 0 );
paperVertices[2] = cv::Point( 0, paperHeight*scaleFactor );
paperVertices[3] = cv::Point( paperWidth*scaleFactor, paperHeight*scaleFactor );
}
cv::Mat warpMatrix = getPerspectiveTransform( imageVertices, paperVertices );
cv::warpPerspective(image, paperImage, warpMatrix, paperImage.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT );
if (true) {
cv::Rect rect = boundingRect(cv::Mat(square));
cv::rectangle(image, rect.tl(), rect.br(), cv::Scalar(0,255,0), 5, 8, 0);
UIImage *object = [self UIImageFromCVMat:paperImage];
}
// we want portrait output
if ( isLandscape ) {
cv::transpose(paperImage, paperImageCorrected);
cv::flip(paperImageCorrected, paperImageCorrected, 1);
return paperImageCorrected;
}
return paperImage;
}
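A rough way to test whether an extracted region is the white sheet (a sketch only, not part of the method above; the cutoff of 200 is an assumption that depends on lighting) is to average the warped paper image with cv::mean and compare each channel against a near-white threshold:

#include <opencv2/core.hpp>

// Sketch: decide whether a warped candidate region looks like white paper.
// The per-channel cutoff of 200 is an assumed value and needs tuning.
bool looksLikeWhitePaper(const cv::Mat& paperBgr)
{
    cv::Scalar avg = cv::mean(paperBgr);   // per-channel mean (B, G, R)
    const double minChannel = 200.0;       // assumed near-white cutoff
    return avg[0] > minChannel && avg[1] > minChannel && avg[2] > minChannel;
}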
EDIT: I used the method below to get the color from the image. But my problem now is that after converting my original image to cv::Mat and cropping, there is already a transparent grey overlay on the image, so I always get the same color.
Is there any direct way to get the original color from a cv::Mat image?
- (UIColor *)averageColor: (UIImage *) image {
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char rgba[4];
CGContextRef context = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), image.CGImage);
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
if(rgba[3] > 0) {
CGFloat alpha = ((CGFloat)rgba[3])/255.0;
CGFloat multiplier = alpha/255.0;
return [UIColor colorWithRed:((CGFloat)rgba[0])*multiplier
green:((CGFloat)rgba[1])*multiplier
blue:((CGFloat)rgba[2])*multiplier
alpha:alpha];
}
else {
return [UIColor colorWithRed:((CGFloat)rgba[0])/255.0
green:((CGFloat)rgba[1])/255.0
blue:((CGFloat)rgba[2])/255.0
alpha:((CGFloat)rgba[3])/255.0];
}
}
EDIT 2:
Input image
Output I am getting
I need to detect only the white A4 sheet.
I resolved it using the Google Vision API.
My objective was to measure cracks from an image for building purposes. In my case the user places an A4 sheet as a reference in the image where the crack is; I detect the A4 sheet and calculate the real-world size covered by each pixel. The builder then taps two points on the crack, and I calculate the distance between them.
With Google Vision I used the document text detection API and printed my app name on the A4 sheet, covering it fully either vertically or horizontally. The Google Vision API detects that text and gives me its coordinates.
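The pixel-to-millimetre conversion described above comes down to dividing the known A4 dimension by the detected sheet size in pixels. A small sketch of that arithmetic (the variable names are made up for illustration):

// Sketch: derive a scale from the detected A4 sheet and measure a crack.
// sheetWidthPx is assumed to come from whatever detected the sheet edges.
double crackLengthMm(double sheetWidthPx, double crackPx)
{
    const double a4WidthMm = 210.0;               // A4 short side in mm
    double mmPerPixel = a4WidthMm / sheetWidthPx; // real-world size of one pixel
    return crackPx * mmPerPixel;                  // tapped distance in mm
}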
I have an image with two regions: an in-focus region and a de-focused region. Using OpenCV, I want to detect the near (in-focus) region.
I apply watershed in OpenCV, or the Canny detector, to detect the object, but the detected object spans both the near and the far region.
So I need an idea, or help from anyone, on how to apply OpenCV to detect only the near region of the image.
Here is the code for the image I attached.
private Mat CalculateMapStrength(Mat inputMat){
Imgproc.cvtColor(inputMat,inputMat, Imgproc.COLOR_RGBA2GRAY);
//Compute dx and dy derivatives
Mat dx = new Mat();
Mat dy = new Mat();
Imgproc.Sobel(inputMat, dx, CvType.CV_32F, 1, 0);
Imgproc.Sobel(inputMat, dy, CvType.CV_32F, 0, 1);
Core.convertScaleAbs(dx,dx);
Core.convertScaleAbs(dy,dy);
Mat outputMat = new Mat();
Core.addWeighted(dx,0.5,dy,0.5,0,outputMat);
return outputMat;
}
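One way to turn that gradient-strength map into a mask of the in-focus region (a C++ sketch of the idea, not a drop-in for the Java method above; the window and kernel sizes are assumptions) is to smooth it and apply Otsu's threshold, since sharp regions produce stronger local gradients:

#include <opencv2/imgproc.hpp>

// Sketch: build a rough "in focus" mask from an 8-bit gradient-strength image
// such as the one CalculateMapStrength returns.
cv::Mat focusMask(const cv::Mat& strength8u)
{
    cv::Mat smoothed, mask;
    // Aggregate gradient energy over a neighbourhood so the mask is not speckled.
    cv::blur(strength8u, smoothed, cv::Size(35, 35));   // window size is an assumption
    cv::threshold(smoothed, mask, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    // Close small holes and remove isolated specks.
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(15, 15));
    cv::morphologyEx(mask, mask, cv::MORPH_CLOSE, kernel);
    return mask;
}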
Besides that, I have image segmentation with the watershed algorithm in OpenCV. Can I combine the two results to detect the object? How would I combine them?
public Mat steptowatershed(Mat img)
{
Mat threeChannel = new Mat();
Imgproc.cvtColor(img, threeChannel, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(threeChannel, threeChannel, 100, 255, Imgproc.THRESH_BINARY);
Mat fg = new Mat(img.size(),CvType.CV_8U);
Imgproc.erode(threeChannel,fg,new Mat());
Mat bg = new Mat(img.size(),CvType.CV_8U);
Imgproc.dilate(threeChannel,bg,new Mat());
Imgproc.threshold(bg,bg,1, 128,Imgproc.THRESH_BINARY_INV);
Mat markers = new Mat(img.size(),CvType.CV_8U, new Scalar(0));
Core.add(fg, bg, markers);
Mat result1=new Mat();
WatershedSegmenter segmenter = new WatershedSegmenter();
segmenter.setMarkers(markers);
Imgproc.cvtColor(img, img, Imgproc.COLOR_RGBA2RGB);
result1 = segmenter.process(img);
return result1;
}
public class WatershedSegmenter
{
public Mat markers=new Mat();
public void setMarkers(Mat markerImage)
{
markerImage.convertTo(markers, CvType.CV_32SC1);
}
public Mat process(Mat image)
{
Imgproc.watershed(image,markers);
markers.convertTo(markers,CvType.CV_8U);
return markers;
}
}
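To combine the two results, one simple option (again a C++ sketch, assuming both inputs are single-channel 8-bit masks of the same size) is a bitwise AND between the watershed object mask and the focus mask, keeping only pixels that are both "object" and "in focus":

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch: intersect a watershed segmentation with a focus mask.
// watershedLabels is assumed to be the 8-bit marker image returned by
// process() above (foreground seeded with 255, background with 128);
// focus is a binary mask of the in-focus region.
cv::Mat nearObjectMask(const cv::Mat& watershedLabels, const cv::Mat& focus)
{
    cv::Mat objectMask, result;
    // Keep only the region grown from the 255-valued foreground markers.
    cv::threshold(watershedLabels, objectMask, 128, 255, cv::THRESH_BINARY);
    cv::bitwise_and(objectMask, focus, result);
    return result;
}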
I am having some issues detecting specific "blobs" in a set of images. Not all images are the same, but I suppose the same parameters would be used for detection anyway.
If you zoom in, you will see small, yellow aphids on the leaf. My goal is to single these out and count them. I don't really need to do much to the image, just obtain a count of them.
Right now, I have this:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
namespace AphidCounter
{
class Program
{
static void Main(string[] args)
{
// Read image
Mat im_in = CvInvoke.Imread("myimage1.jpg", Emgu.CV.CvEnum.LoadImageType.Grayscale);
//Mat im_in = CvInvoke.Imread("myimage2.png", Emgu.CV.CvEnum.LoadImageType.Color);
Mat im = im_in;
CvInvoke.Threshold(im_in, im, 40, 255, Emgu.CV.CvEnum.ThresholdType.BinaryInv); // 60, 255, 1
//CvInvoke.NamedWindow("Blob Detector", Emgu.CV.CvEnum.NamedWindowType.AutoSize);
DetectBlobs(im, 0);
CvInvoke.WaitKey(0);
}
static void DetectBlobs(Mat im, int c)
{
int maxT = 50;
int minA = 125; // Minimum area in pixels
int maxA = 550; // Maximum area in pixels
SimpleBlobDetectorParams EMparams = new SimpleBlobDetectorParams();
SimpleBlobDetector detector;
EMparams.MinThreshold = 0;
EMparams.MaxThreshold = 100;
if (minA < 1) minA = 1;
EMparams.FilterByArea = true;
EMparams.MinArea = minA;
EMparams.MaxArea = maxA;
if (maxT < 1) maxT = 1;
EMparams.MinConvexity = (float)maxT / 1000.0F; // 50 / 1000 = 0.05
EMparams.FilterByInertia = true;
EMparams.MinInertiaRatio = 0.01F;
EMparams.FilterByColor = true;
EMparams.blobColor = 0;
VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
detector = new SimpleBlobDetector(EMparams);
detector.DetectRaw(im, keyPoints);
Mat im_with_keypoints = new Mat();
Bgr color = new Bgr(0, 0, 255);
Features2DToolbox.DrawKeypoints(im, keyPoints, im_with_keypoints, color, Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
// Show blobs
CvInvoke.Imwrite("keypoints1.jpg", im_with_keypoints);
CvInvoke.Imshow("Blob Detector " + keyPoints.Size, im_with_keypoints);
System.Console.WriteLine("Number of keypoints: " + keyPoints.Size);
}
}
}
However, this is the result:
Am I not getting the parameters right? Or is there something else that I'm missing?
It is not because of wrong parameters; the image segmentation part itself has its limitations.
Grayscale-based thresholding may not work when the contrast between the blob and the background is very low. A threshold value around 160 is tolerable in this example, but not particularly accurate.
I would suggest going for colour-based thresholding, since there is a decent colour gap.
Here is a C++ implementation of colour-based thresholding. Blobs are filtered using the same SimpleBlobDetector.
I have converted the image from RGB to ‘Lab’ for better segmentation.
As the image provided is very large, it took longer to process, so I cropped a key part of the image and tuned the blob parameters for it. I am providing the cropped image too (755 x 494 px).
Colour based thresholding and blob filtering:
#include "opencv2\imgproc\imgproc.hpp";
#include "opencv2\highgui\highgui.hpp";
#include "opencv2\features2d\features2d.hpp";
using namespace cv;
using namespace std;
int main()
{
char image_path[] = "E:/Coding/media/images/leaf_small.jpg";
Mat img_color, img_lab, img_thresh, img_open, img_close, img_keypoints;
img_color = imread(image_path, IMREAD_ANYCOLOR);
//Convert image to CIE Lab colorspace for better colour based segmentation
cvtColor(img_color, img_lab, CV_BGR2Lab);
//create window before creating trackbar
namedWindow("win_thresh", WINDOW_NORMAL);
namedWindow("win_blob", WINDOW_NORMAL);
//Using trackbars, calculate the range of L,a,b values to separate blobs
int low_L = 150, low_A = 0, low_B = 155,
high_L = 255, high_A = 255, high_B = 255;
//*Use trackbars to calibrate colour thresholding
createTrackbar("low_L", "win_thresh", &low_L, 255);
createTrackbar("low_A", "win_thresh", &low_A, 255);
createTrackbar("low_B", "win_thresh", &low_B, 255);
createTrackbar("high_L", "win_thresh", &high_L, 255);
createTrackbar("high_A", "win_thresh", &high_A, 255);
createTrackbar("high_B", "win_thresh", &high_B, 255);
int minArea = 35, maxArea = 172, minCircularity = 58, minConvexity = 87, minInertiaRatio = 21;
//Use trackbar and set Blob detector parameters
createTrackbar("minArea", "win_blob", &minArea, 200);
createTrackbar("maxArea", "win_blob", &maxArea, 200);
createTrackbar("minCircular", "win_blob", &minCircularity, 99);
createTrackbar("minConvex", "win_blob", &minConvexity, 99);
createTrackbar("minInertia", "win_blob", &minInertiaRatio, 99);
SimpleBlobDetector::Params params;
vector<KeyPoint> keypoints;
while (waitKey(1) != 27) //press 'esc' to quit
{
//inRange thresholds based on the Scalar boundaries provided
inRange(img_lab, Scalar(low_L, low_A, low_B), Scalar(high_L, high_A, high_B), img_thresh);
//Morphological filling
Mat strucElement = getStructuringElement(CV_SHAPE_ELLIPSE, Size(5, 5), Point(2, 2));
morphologyEx(img_thresh, img_close, MORPH_CLOSE, strucElement);
imshow("win_thresh", img_close);
//**SimpleBlobDetector works only on inverted binary images,
//i.e. blobs should be black and the background white.
bitwise_not(img_close, img_close); // inverts matrix
//Code crashes if minArea or any min value is set to zero;
//since the trackbar starts from 0, it is adjusted here by adding 1
params.filterByArea = true;
params.minArea = minArea + 1;
params.maxArea = maxArea + 1;
params.filterByCircularity = true;
params.filterByConvexity = true;
params.filterByInertia = true;
params.minCircularity = (minCircularity + 1) / 100.0;
params.minConvexity = (minConvexity + 1) / 100.0;
params.minInertiaRatio = (minInertiaRatio + 1) / 100.0;
SimpleBlobDetector detector(params);
detector.detect(img_close, keypoints);
drawKeypoints(img_color, keypoints, img_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DEFAULT);
stringstream displayText;
displayText << "Blob_count: " << keypoints.size();
putText(img_keypoints, displayText.str(), Point(0, 50), CV_FONT_HERSHEY_PLAIN, 2, Scalar(0, 0, 255), 2);
imshow("win_blob", img_keypoints);
}
return 0;
}
Output Screenshot
Tune the blob parameters according to the actual HD image.
Since the veins of the leaf are almost the same colour and intensity as the aphids, this method may also fail completely when an aphid sits close to or exactly on top of a vein.
This can serve as an ad-hoc fix, but it is not robust enough.
There has to be a simpler and more robust method to achieve the result, using some filters, a transformation, or edge detection. Please share any better solution if available.
EDIT: Opting for grayscale thresholding, as the previous approach failed
The colour thresholding approach failed for this_image.
Colour-based thresholding has a very narrow bandwidth: if the image falls within the bandwidth, the accuracy is very good; on the other hand, colour shifts completely ruin it.
Since you will be processing hundreds of images, colour thresholding may not be suitable.
I tried plain grayscale thresholding with some morphological erosion and filling and got decent accuracy. Grayscale thresholding also has better immunity to colour shifts.
Additionally, there is an automatic thresholding option using Otsu's thresholding, which selects the threshold value based on the image.
Code snippet:
threshold(img_gray, img_thresh, 0, 255, THRESH_OTSU);
Mat strucElement = getStructuringElement(CV_SHAPE_ELLIPSE, Size(3, 3), Point(1, 1));
morphologyEx(img_thresh, img_open, MORPH_OPEN, strucElement);
Rest of the code remains the same.
Parameter values:
minArea = 75, maxArea = 1000, minCircularity = 50, minConvexity = 20, minInertiaRatio = 15
The white ants are hard to differentiate from aphids as we are not using colour information. So the min_area has to be carefully tuned in order to exclude them.
Processed images can be found here img_1, img_2.
Tweak the morphology methods and blob parameters to obtain an optimal average count.
I want to rotate the following image by 20 degrees about its center.
I can achieve this in OpenCV in two different ways:
1. Perspective Transformation
2. Affine Transformation
public void perspectiveXformation(String imgPath, List<Point> sourceCorners,
List<Point> targetCorners) {
// Load image in gray-scale format
Mat matIncomingImg = Highgui.imread(imgPath, 0);
// Check if size of list, process only if there are four points in list.
if (sourceCorners.size() == 4) {
// Convert vector points into Mat type of object.
Mat sourceCornersMat =
Converters.vector_Point2f_to_Mat(sourceCorners);
Mat targetCornersMat =
Converters.vector_Point2f_to_Mat(targetCorners);
Mat matResultant = new Mat();
// Do the Perspective transformation
Mat matPtransform =
Imgproc.getPerspectiveTransform(sourceCornersMat,
targetCornersMat);
Imgproc.warpPerspective(matIncomingImg, matResultant,
matPtransform,
new Size(targetCorners.get(2).x, targetCorners.get(2).y));
Highgui.imwrite("/tmp/perspectiveXform.png", matResultant);
}
}
public void afflineXformation(String imgPath, Point center) {
Mat selectedMat = Highgui.imread(imgPath, 0);
Mat res = Imgproc.getRotationMatrix2D(center, 20, 1.0);
Mat newMat = new Mat();
Imgproc.warpAffine(selectedMat, newMat, res, selectedMat.size());
Highgui.imwrite("/tmp/afflineXform.png", newMat);
}
Which is the preferred way of rotating an image?
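For what it's worth, a rotation about a fixed point is a special case of an affine map, so getRotationMatrix2D plus warpAffine is enough; a perspective (homography) warp is only needed when the four corners move in a non-affine way. A minimal C++ sketch of the affine route (not a rewrite of the Java code above):

#include <opencv2/imgproc.hpp>

// Sketch: rotate an image by 20 degrees about its centre with an affine warp.
cv::Mat rotate20(const cv::Mat& src)
{
    cv::Point2f centre(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(centre, 20.0, 1.0);  // degrees, scale = 1
    cv::Mat dst;
    cv::warpAffine(src, dst, rot, src.size());
    return dst;
}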
For my project I am using parts of the following code: link.
To track objects of a specific color I implemented this method:
My question is: How can I calculate the distance to the tracked colored objects?
Thank you in advance!
*The application calls the method for the left and right frame. This is not efficient...
**I need to calculate detectedObject.Zcor
DetectedObject Detect(IplImage *frame)
{
//Track object (left frame and right frame)
//Calculate average position
//Show X,Y,Z coordinate and detected color
color_image = frame;
imgThreshold = cvCreateImage(cvSize(color_image->width,color_image->height), IPL_DEPTH_8U, 1);
cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1.4f, CV_AA);
imgdraw = cvCreateImage(cvGetSize(color_image),8,3);
cvSetZero(imgdraw);
cvFlip(color_image, color_image, 1);
cvSmooth(color_image, color_image, CV_GAUSSIAN, 3, 0);
threshold = getThreshold(color_image);
cvErode(threshold, threshold, NULL, 3);
cvDilate(threshold, threshold, NULL, 10);
imgThreshold = cvCloneImage(threshold);
storage = cvCreateMemStorage(0);
contours = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);
cvFindContours(threshold, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, cvPoint(0,0));
final = cvCreateImage(cvGetSize(color_image),8,3);
for(; contours!=0; contours = contours->h_next)
{
CvRect rect = cvBoundingRect(contours, 0);
cvRectangle(color_image,
cvPoint(rect.x, rect.y),
cvPoint(rect.x+rect.width, rect.y+rect.height),
cvScalar(0,0,255,0),
2,8,0);
string s = to_string(rect.x) + "," + to_string(rect.y);
char const* pchar = s.c_str();
cvPutText(frame, pchar, cvPoint(rect.x, rect.y), &font, cvScalar(0,0,255,0));
detectedObject.Xcor = rect.x;
detectedObject.Ycor = rect.y;
}
cvShowImage("Threshold", imgThreshold);
cvAdd(final,imgdraw,final);
detectedObject.Zcor = 0;
return detectedObject;
}
For depth estimation you will need a calibrated stereo pair (known camera matrices for both the left and the right cameras). Then, using the camera matrices and corresponding points/contours in the stereo pair, you can compute depth.
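A sketch of that idea in C++ (not tied to the tracking code above): once the pair is rectified and the focal length f (in pixels) and baseline B are known from calibration, the depth of a matched object follows from its horizontal disparity, Z = f * B / disparity. The function below assumes those calibration values are available.

// Sketch: depth of a tracked object from a rectified, calibrated stereo pair.
// fPixels and baselineMeters come from calibration (assumed known here);
// xLeft/xRight are the object's x-coordinates in the left and right frames.
double depthFromDisparity(double xLeft, double xRight,
                          double fPixels, double baselineMeters)
{
    double disparity = xLeft - xRight;           // in pixels; > 0 for a finite depth
    if (disparity <= 0.0) return -1.0;           // degenerate or mismatched detection
    return fPixels * baselineMeters / disparity; // Z in metres
}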