How can I filter rectangle areas in an OpenCv image by color? - opencv

With the following code i detect all squares in a gray scale image:
// Walk EmguCV's contour linked list; HNext advances to the next contour.
for (;contours != null; contours = contours.HNext)
{
// Polygonal approximation with tolerance = 5% of the contour perimeter.
var approxContour = contours.ApproxPoly(contours.Perimeter * 0.05,
contours.Storage);
// Minimum-area (possibly rotated) bounding rectangle of the polygon.
var rect = approxContour.GetMinAreaRect();
// IsSquare presumably accepts the rect when its sides match within the
// given tolerance (10% of the height) — TODO confirm its definition.
if (IsSquare(rect, rect.size.Height * 0.1f))
boxes.Add(rect);
}
I'm looking for a way to filter the squares by their color. E.g. I want to remove all squares with an average gray value less than 128.
Which OpenCv function do I have to use?

You have to use ROIs and GetAverage():
var rect = approxContour.GetMinAreaRect();
// Restrict processing to this contour's axis-aligned bounding box.
gray.ROI = approxContour.BoundingRectangle;
// Mean intensity of the pixels inside the ROI.
var average = gray.GetAverage();
// Reset the ROI so later operations see the whole image again.
gray.ROI = Rectangle.Empty;
// Skip squares whose average gray value is below 128, as the question
// asks ("remove all squares with an average gray value less than 128").
// The original compared `> 100`, which skipped the *bright* squares
// and used the wrong threshold.
if (average.Intensity < 128)
{
continue;
}

Related

Core image filter with custom metal kernel doesn't work

I've made a custom CIFilter based on a custom kernel, I can't make it work the output image is filled with black and I can't understand why.
Here is the shader:
// MARK: Custom kernels
// Maps each source pixel to the nearest color in a 1-px-high palette strip.
// NOTE(review): palette_image.sample() is given absolute pixel coordinates
// (float2(i, 0)), but Core Image samplers expect *relative* coordinates in
// [0,1] — so every lookup samples the same corner texel, which is why the
// filter output is black. See the corrected version in the answer below.
float4 eight_bit(sampler image, sampler palette_image, float paletteSize) {
// Color of the pixel currently being processed.
float4 color = image.sample(image.coord());
// Start with the first palette entry as the best match so far.
float dist = distance(color, palette_image.sample(float2(0,0)));
float4 returnColor = palette_image.sample(float2(0,0));
// Linear scan over the remaining palette entries.
for (int i = 1; i < floor(paletteSize); ++i) {
float tempDist = distance(color, palette_image.sample(float2(i,0)));
if (tempDist < dist) {
dist = tempDist;
returnColor = palette_image.sample(float2(i,0));
}
}
return returnColor;
}
The first sampler is the image that needs to be processed; the second is an image that contains the colors of a specific palette that must be used in that image.
The palette image is created from an array of RGBA values, passed to a Data buffer and created by using this CIImage initializer init(bitmapData data: Data, bytesPerRow: Int, size: CGSize, format: CIFormat, colorSpace: CGColorSpace?). The image is 1px in height and the number of colors wide. The image is obtained correctly and it looks like this:
Trying to inspect the shader I've found:
If I return color I get the original image, thus means that the sampler image is passed correctly
If I try to return a color from any pixel in palette_image the resulting image from the filter is black
I'm starting to think that the palette_image is somehow not passed correctly. Here how the image is passed through the filter:
override var outputImage: CIImage? {
guard let inputImage = inputImage else
{
return nil
}
// First (index 0) of the filter's predefined palettes.
let palette = EightBitColorFilter.palettes[Int(0)]
// 1-px-high CIImage, one pixel per palette color (built from raw RGBA bytes).
let paletteImage = EightBitColorFilter.image(from: palette)
let extent = inputImage.extent
// Pixellate first so each block carries a single color for the kernel to match.
let pixellateImage = inputImage.applyingFilter("CIPixellate", parameters: [kCIInputScaleKey: inputScale])
// let sampler = CISampler(image: paletteImage)
// Arguments line up positionally with the kernel parameters:
// (sampler image, sampler palette_image, float paletteSize).
let arguments = [pixellateImage, paletteImage, Float(palette.count)] as [Any]
// ROI callback: each output rect only needs the same input rect.
// NOTE(review): this ROI is wrong for the palette sampler, which must be
// readable in full for every output pixel — verify against CIKernel docs.
let final = kernel.apply(extent: extent, roiCallback: {
(index, rect) in
return rect
}, arguments: arguments)
return final
}
Your sampling coordinates are off.
Samplers use relative coordinates in Core Image, i.e. (0,0) corresponds to the upper left corner, (1,1) the lower right corner of the whole input image.
So try something like this:
// Maps each source pixel to the nearest color in a 1-px-high palette strip.
// Core Image samplers use *relative* coordinates: (0,0) is one corner and
// (1,1) the opposite corner of the whole input image.
float4 eight_bit(sampler image, sampler palette_image, float paletteSize) {
    // Color of the pixel currently being processed.
    float4 color = image.sample(image.coord());
    // Initial half-texel offset to land in the middle of the first
    // palette pixel. (The original wrote `palletSize`, an undeclared
    // identifier, so the kernel would not even compile.)
    float2 firstPaletteCoord = float2(1.0 / (2.0 * paletteSize), 0.5);
    float dist = distance(color, palette_image.sample(firstPaletteCoord));
    float4 returnColor = palette_image.sample(firstPaletteCoord);
    for (int i = 1; i < int(floor(paletteSize)); ++i) {
        // Step i texels along the strip. The original added a constant
        // 1/paletteSize offset each iteration (never multiplied by i),
        // so it compared against the second palette entry every time.
        float2 paletteCoord = firstPaletteCoord + float2(float(i) / paletteSize, 0.0);
        float4 paletteColor = palette_image.sample(paletteCoord);
        float tempDist = distance(color, paletteColor);
        if (tempDist < dist) {
            dist = tempDist;
            returnColor = paletteColor;
        }
    }
    return returnColor;
}

Why does circle detection detect more circles than are there?

I am using following functions to detect images. However, it detects thousands of circles instead of 16. How can I make sure it only detects what I see? Changing Radius or Relative intensity do not make any difference.
The images I used is this :
// Snapshot of the currently displayed image.
Bitmap ImageBitmap = (Bitmap)pictureBox1.Image;
// Grayscale (BT.709 weights) followed by a fixed binarization at 0x40.
var filter = new FiltersSequence(new IFilter[]
{
Grayscale.CommonAlgorithms.BT709,
new Threshold(0x40)
});
var binaryImage = filter.Apply(ImageBitmap);
// for (int i = 0; i < 10000; i++)
{
// System.Drawing.Image image = System.Drawing.Image.FromFile(imagePath);
// GrayBMP_File.CreateGrayBitmapFile(image, "c:/path/to/8bpp/image.bmp");
// Bitmap ImageBitmap = Convert.Gra ImageBitmap.Con
// NOTE(review): the constructor argument 50 is the single radius searched
// for, and the 0.9 relative-intensity cut below is the only pruning —
// presumably the cause of the thousands of spurious peaks. Confirm
// against AForge's HoughCircleTransformation documentation.
HoughCircleTransformation circleTransform = new HoughCircleTransformation(50);
// apply Hough circle transform
circleTransform.ProcessImage(binaryImage);
Bitmap houghCirlceImage = circleTransform.ToBitmap();
// get circles using relative intensity
HoughCircle[] circles = circleTransform.GetCirclesByRelativeIntensity(0.9);
int numCircles = circleTransform.CirclesCount;
label1.Text = numCircles.ToString();
pictureBox1.Image = houghCirlceImage;
// Draw a fixed-size 10x10 marker at each detected circle's (X, Y).
System.Drawing.Graphics g = System.Drawing.Graphics.FromImage(ImageBitmap);
foreach (HoughCircle circle in circles)
{
g.DrawEllipse(Pens.Green, circle.X, circle.Y, 10,10);
}
pictureBox1.Image = ImageBitmap;
// ImageBitmap.Dispose();
// binaryImage.Dispose();
}
Try this python solution from here:
import cv2
import numpy as np

# Load the image as grayscale; a BGR copy is kept for colored drawing.
img = cv2.imread('test.jpg', 0)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                           param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
d = 1
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
    # Cast to Python int first: the array is uint16, so x - r - 2 would
    # wrap around to a huge value near the image border instead of going
    # negative.
    x, y, r = int(i[0]), int(i[1]), int(i[2])
    # NumPy indexes as [row, col] = [y, x]; the original sliced rows with
    # i[0] (x) and columns with i[1] (y), cropping the wrong region.
    crop_img = img[max(y - r - 2, 0):y + r + 2, max(x - r - 2, 0):x + r + 2]
    cv2.imshow('cropped circle', crop_img)
    cv2.imwrite('test_%d.png' % d, crop_img)
    cv2.waitKey(0)
    d += 1
cv2.imshow('detected circles', cimg)
print(len(circles[0, :]))
cv2.waitKey(0)
cv2.destroyAllWindows()
OUTPUT:
16

Drawing a confidence ellipse on top of a scatter plot

I'm currently working on an iOS app where I'm using the CorePlot library (Version 2.1) to draw a scatter plot. My scatter plot draws fine, and in the next step I'd like to draw an translucent confidence ellipse on top of the plot. I've written a class computing the main and minor axis and the required rotation angle of my ellipse. My ConfidenceEllipse class implements a getPath() method which returns a CGPath representing the ellipse to draw.
func getPath() -> CGPath
{
// Builds the confidence-ellipse path in data coordinates
// (pre-Swift-3 Core Graphics API).
var ellipse: CGPath
var transform = CGAffineTransformIdentity
// NOTE(review): translate(-mean) / rotate / translate(+mean) is intended
// to rotate the ellipse about its center — confirm the concatenation
// order matches CGAffineTransform's right-to-left application semantics.
transform = CGAffineTransformTranslate (transform, CGFloat(-self.meanX), CGFloat(-self.meanY))
transform = CGAffineTransformRotate (transform, CGFloat(self.angle))
transform = CGAffineTransformTranslate (transform, CGFloat(self.meanX), CGFloat(self.meanY))
// Ellipse rect is centered on the origin: half-axes on each side.
ellipse = CGPathCreateWithEllipseInRect(CGRectMake (CGFloat(-self.mainHalfAxis), CGFloat(-self.minorHalfAxis), CGFloat(2 * self.mainHalfAxis), CGFloat(2 * self.minorHalfAxis)),&transform)
return ellipse
}
After searching the web for a while, it appears that Annotations are the way to go, so I tried this:
let graph = hostView.hostedGraph!
let space = graph.defaultPlotSpace
let ellipse = ConfidenceEllipse(chiSquare: 5.991)
ellipse.analyze(self.samples)
let annotation = CPTPlotSpaceAnnotation (plotSpace: space!, anchorPlotPoint: [0,0])
let overlay = CPTBorderedLayer (frame: graph.frame)
overlay.outerBorderPath = ellipse.getPath()
let fillColor = CPTColor.yellowColor()
overlay.fill = CPTFill (color: fillColor)
annotation.contentLayer = overlay
annotation.contentLayer?.opacity = 0.5
graph.addAnnotation(annotation)
Doing this, will give me the following
Screenshot
As you can see, the overlay takes up the full size of the frame, which seems logical given the fact that I passed the frames dimensions when creating the CPTBorderedLayer object. I also tried leaving the constructor empty, but then the overlay doesn't show at all. So I'm wondering, is there anything I'm missing here ?
You need to scale the ellipse to match the plot. Use the plot area bounds for the frame of the annotation layer and attach the annotation to the plot area. Scale the ellipse in the x- and y-directions to match the transform used by the plot space to fit plots in the plot area.
Edit:
After looking into how bordered layers work, I realized my suggestion above won't work. CPTBorderedLayer sets the outerBorderPath automatically whenever the layer bounds change. Instead of trying to affect the layer border, draw the ellipse into an image and use that as the fill for the bordered layer. You should size the layer so the ellipse just fits inside.
After failing to get the Annotations to work properly, I decided to take a different road. My final solution consists in overlaying my original scatter plot with a second one, which only contains one datapoint, namely the center of my confidence ellipse. Here's the code
func drawConfidenceEllipse () {
let graph = hostView.hostedGraph!
let plotSpace = graph.defaultPlotSpace as! CPTXYPlotSpace
// Points-per-data-unit scale factors of the plot area in x and y.
let scaleX = (graph.bounds.size.width - graph.paddingLeft - graph.paddingRight) / CGFloat(plotSpace.xRange.lengthDouble)
let scaleY = (graph.bounds.size.height - graph.paddingTop - graph.paddingBottom) / CGFloat(plotSpace.yRange.lengthDouble)
// 5.991 is the chi-square quantile for 2 d.o.f. at 95% confidence.
let analysis = ConfidenceEllipse(chiSquare: 5.991)
analysis.analyze(self.samples)
let unscaledPath = analysis.getPath()
let bounds = CGPathGetBoundingBox(unscaledPath)
// Scale the data-space ellipse into view points and shift its bounding
// box to the origin so it can serve as a custom plot symbol.
var scaler = CGAffineTransformIdentity
scaler = CGAffineTransformScale (scaler, scaleX, scaleY)
scaler = CGAffineTransformTranslate (scaler, CGFloat (-bounds.origin.x), CGFloat (-bounds.origin.y))
let scaledPath = CGPathCreateCopyByTransformingPath (unscaledPath, &scaler)
let scaledBounds = CGPathGetPathBoundingBox(scaledPath)
// Render the ellipse as the custom symbol of a one-point overlay plot
// (the single data point — the ellipse center — comes from dataSource).
let symbol = CPTPlotSymbol ()
symbol.symbolType = CPTPlotSymbolType.Custom
symbol.customSymbolPath = scaledPath
symbol.fill = CPTFill (color: CPTColor.yellowColor().colorWithAlphaComponent(0.25))
symbol.size = CGSize (width: scaledBounds.size.width, height: scaledBounds.size.height)
let lineStyle = CPTMutableLineStyle()
lineStyle.lineWidth = 1
lineStyle.lineColor = CPTColor.yellowColor()
symbol.lineStyle = lineStyle
let ellipse = CPTScatterPlot (frame: hostView.frame)
ellipse.title = "Confidence Ellipse"
ellipse.delegate = self
ellipse.dataSource = self
ellipse.plotSymbol = symbol
// No connecting line: only the custom symbol is drawn.
ellipse.dataLineStyle = nil
graph.addPlot(ellipse)
}
Here's a screenshot of the final result:
95% Confidence Ellipse on top of scatter plot
Hope this helps

Eliminating blob inside another blob

I'm currently working on a program for character recognition using C# and AForge.NET and now I'm struggling with the processing of blobs.
This is how I created the blobs:
// AForge blob extraction: keep only blobs at least 30 px tall and 5 px
// wide, ordered left-to-right then top-to-bottom.
BlobCounter bcb = new BlobCounter();
bcb.FilterBlobs = true;
bcb.MinHeight = 30;
bcb.MinWidth = 5;
bcb.ObjectsOrder = ObjectsOrder.XY;
bcb.ProcessImage(image);
I also marked them with rectangles:
// Outline every detected blob's bounding rectangle in red, 1 px wide.
Rectangle[] rects;
rects = bcb.GetObjectsRectangles();
Pen pen = new Pen(Color.Red, 1);
Graphics g = Graphics.FromImage(image);
foreach (Rectangle rect in rects)
{
g.DrawRectangle(pen, rect);
}
After execution my reference image looks like this:
BlobImage
As you can see, almost all characters are recognized. Unfortunately, some character include blobs inside a blob, e.g. "g", "o" or "d".
I would like to eliminate the blobs which are inside another blob.
I tried to adjust the drawing of the rectangles to achieve my objective:
foreach (Rectangle rect in rects)
{
// NOTE(review): this attempt fails for two reasons: the inner loop only
// checks *adjacent* pairs (i, i+1), so a blob contained by a non-adjacent
// rectangle is never caught; and it mutates rects[i] while the outer
// foreach still draws the original `rect` value, so inner blobs get
// drawn regardless. A full pairwise containment check that skips
// contained rectangles before drawing would be needed.
for (int i = 0; i < (rects.Length - 1); i++)
{
if (rects[i].Contains(rects[i + 1]))
rects[i] = Rectangle.Union(rects[i], rects[i + 1]);
}
g.DrawRectangle(pen, rect);
}
...but it wasn't successful at all.
Maybe some of you can help me?
You can try to detect rectangles within rectangles by checking their corner coordinates.
I have one MATLAB code for this which i have written for similar kind of problem:
Here is snippet of the code is:
function varargout = isBoxMerg(ReferenceBox,TestBox,isNewBox)
% isBoxMerg  Test whether two axis-aligned boxes intersect.
%   ReferenceBox, TestBox : [X Y W H] (upper-left corner, width, height)
%   isNewBox              : unused here; kept for interface compatibility.
%   Returns true when the two rectangles overlap.
X = ReferenceBox; Y = TestBox;
X1 = X(1); Y1 = X(2); W1 = X(3); H1 = X(4);
X2 = Y(1); Y2 = Y(2); W2 = Y(3); H2 = Y(4);
% Boxes overlap iff each box's far edge reaches past the other's near
% edge on both axes. (The original repeated the (X1+W1)>=X2 term and had
% stray markdown backticks around `Intersection = false;`, a syntax error.)
if ((X1+W1)>=X2 && (X2+W2)>=X1 && (Y1+H1)>=Y2 && (Y2+H2)>=Y1)
    Intersection = true;
else
    Intersection = false;
end
% The original declared varargout but never assigned it, so requesting an
% output raised an error; return the flag explicitly.
varargout{1} = Intersection;
end
Where X and Y are upper left corner indices of the bounding rectangle; W and H are width and height respectively.
In the above, if the Intersection variable becomes true, it means the boxes intersect. You can use this code for further customization.
Thank You

character image thinning

I'm doing an OCR application. I'm confused about how to deskew an image like this:
Second, I have character images in many font sizes. The problem is: how to thin them to the same stroke width, like this
For your first point: find the angle by which the text is rotated, and rotate your image by that angle. In your sample you can do this by finding the angles of the lines between the large black patches on the edges and the white areas. Look into edge detection and hough transform to help you find the lines, and then help you find their angle. OpenCV has a good implementation of both algorithms.
For your second point: that is the morphological operation binary skeleton in action.
you can use the following code for detecting and correcting skew but i need your help if you get any thinning algorithms...asume the input image is on the picture box....
try
{
//Check if there exists an image on the picture box
if (pictureBox1.Image == null)
{
MessageBox.Show("Please load an image first.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
uploadImageToolStripMenuItem.PerformClick();
return;
}
Bitmap image = new Bitmap(pictureBox1.Image);
// Lock pixels as 8bpp indexed — the format DocumentSkewChecker expects.
BitmapData imageData = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadWrite, PixelFormat.Format8bppIndexed);
//document image skew detection starts here
DocumentSkewChecker skewChecker = new DocumentSkewChecker();
// get documents skew angle
double angle = skewChecker.GetSkewAngle(imageData);
// create rotation filter and rotate image applying the filter
// NOTE(review): rotationFilter is created but never applied — the actual
// rotation below is done with GDI+ Graphics transforms instead.
RotateBilinear rotationFilter = new RotateBilinear(-angle);
rotationFilter.FillColor = Color.White;
image.UnlockBits(imageData);
//if the angle is more 90 or 180, consider it as a normal image or if it is not, perform a skew correction
if (-angle == 90 || -angle == 180)
{
pictureBox1.Image = image;
pictureBox1.SizeMode = PictureBoxSizeMode.Zoom;
return;
}
//Bitmap rotatedImage = rotationFilter.Apply();
//draw a bitmap based on the skew angle...
// Rotate about the image center: translate to center, rotate, translate back.
Bitmap returnBitmap = new Bitmap(image.Width, image.Height);
Graphics g = Graphics.FromImage(returnBitmap);
g.TranslateTransform((float)image.Width / 2, (float)image.Height / 2);
g.RotateTransform(((float)angle));
g.TranslateTransform(-(float)image.Width / 2, -(float)image.Height / 2);
g.DrawImage(image, new Point(0, 0));
pictureBox1.Image = returnBitmap;
pictureBox1.SizeMode = PictureBoxSizeMode.Zoom;
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}

Resources