Finding centroid of circle in OpenCV

I am trying to find the centroid of circular objects, or a circle that can bound the circular objects, in a grayscale image.
So far, I have converted the grayscale image to a binary image using adaptive thresholding.
Grayscale image
Threshold image
Up till now I have tried the Hough transform and findContours, but neither method works.
What would be a good approach to this?

I got a decent result using the Hough transform for circles. This is the pipeline:
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('I7Ykpbs.jpg', 0)
img = cv2.GaussianBlur(img, (5, 5), 2, 2)
img_th = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 9, 3)
circles = cv2.HoughCircles(img_th, cv2.HOUGH_GRADIENT, 2, minDist=30,
                           param1=200, param2=40, minRadius=10, maxRadius=20)
for i in range(circles.shape[1]):
    c = circles[0, i, :]
    # cv2.circle needs integer center and radius
    center = (int(np.round(c[0])), int(np.round(c[1])))
    radius = int(np.round(c[2]))
    # print(center)
    # print(radius)
    if np.linalg.norm(np.array([600., 600.]) - center) < 500.:
        cv2.circle(img, center, 3, (0, 255, 0), -1, 8, 0)
        cv2.circle(img, center, radius, (0, 0, 255), 3, 8, 0)
plt.imshow(img)
plt.show()
It's not perfect, but I think you can start from here and do some fine-tuning of the parameters and preprocessing to optimize the result.

Related

OpenCV - Computing distance between two edges in an image

I am trying to compute the distance (in pixels) between two edges in an image. I have corrected for image perspective using the cv2.warpPerspective method, converted the resulting image to grayscale, and filtered it with a Gaussian blur. I have tried various thresholding methods and found that cv2.ADAPTIVE_THRESH_GAUSSIAN_C works best; the other methods are too noisy or miss the second edge on the left side of the object, as seen in the result of the adaptive Gaussian thresholding.
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the image (already corrected for perspective warp using cv2.warpPerspective)
imgRoadvR10 = cv2.imread('sampleimage.jpg')
# Convert to grayscale
imgRoadvR10_GrayPersp = cv2.cvtColor(imgRoadvR10, cv2.COLOR_BGR2GRAY)
# Gaussian blur
a10lvR10_gblur = cv2.GaussianBlur(imgRoadvR10_GrayPersp, (5, 5), 0)
# Try different thresholding methods
ret, a10lvR10_th1 = cv2.threshold(a10lvR10_gblur, 127, 255, cv2.THRESH_BINARY)
a10lvR10_th2 = cv2.adaptiveThreshold(a10lvR10_gblur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 11, 2)
a10lvR10_th3 = cv2.adaptiveThreshold(a10lvR10_gblur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV, 11, 2)
# Otsu's thresholding
ret2, a10lvR10_th4 = cv2.threshold(a10lvR10_gblur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(ret2)
# Plot results
plt.figure()
titles = ['Original Image', 'Global Thresholding (v = 127)',
          'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding', 'OTSU Thresholding']
images = [a10lvR10_gblur, a10lvR10_th1, a10lvR10_th2, a10lvR10_th3, a10lvR10_th4]
for i in range(5):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
Closer look at result of adaptive gaussian thresholding:
I want to find the width of this rectangular object. The width is measured from the second edge on the left side to the edge on the right side (see image below):
How can I measure the width? I have been reading up on morphological operations and edge detection, but I'm not sure how to proceed. Any suggestions would be appreciated.
This may not be the best idea, and I think a more logical and simpler solution can be found; still, this approach may help you.
import cv2
import numpy as np

# Load image
im = cv2.imread("test3.jpg", 1)
# Convert to gray
mask = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Convert to black and white
mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
# Try to remove noise; you could also just use a median blur or any other method.
# Note: the original (8, 0) / (32, 0) kernel shapes are empty arrays, so square
# kernels are assumed here.
mask = cv2.erode(mask, np.ones((8, 8), "uint8"))
mask = cv2.dilate(mask, np.ones((32, 32), "uint8"))
mask = cv2.medianBlur(mask, 9)
# Save the cleaned image
cv2.imwrite("out1.jpg", mask)
A cleaner version of your output image:
out1:
Next we can get the coordinates of the lines. I got the coordinates of the first line from the left. I think you have to change the code a bit to get the coordinates of the sidebar.
h = len(mask) - 1

def count(row):
    # count pixels from the left edge until the first white pixel
    counter = 0
    for i in range(0, len(row)):
        if row[i] == 255:
            break
        counter += 1
    return counter

def line(im, pt1, pt2, color, thickness):
    im = cv2.line(
        img=im,
        pt1=pt1,
        pt2=pt2,
        color=color,
        thickness=thickness,
        lineType=cv2.LINE_AA,
    )
    return im

def center(x1, y1, x2, y2):
    return (int((x1 + x2) / 2), int((y1 + y2) / 2))

topLeft = count(mask[0])
bottomLeft = count(mask[h])
# shadow and hide the old left line
mask = line(mask, (topLeft, 0), (bottomLeft, h), (0, 0, 0), 80)
topRight = count(mask[0])
bottomRight = count(mask[h])
# shadow and hide the old right line
mask = line(mask, (topRight, 0), (bottomRight, h), (0, 0, 0), 80)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# draw a new clean left line
mask = line(mask, (topLeft, 0), (bottomLeft, h), (128, 0, 255), 25)
# draw a new clean right line
mask = line(mask, (topRight, 0), (bottomRight, h), (128, 0, 255), 25)
a = center(topLeft, 0, bottomLeft, h)
b = center(topRight, 0, bottomRight, h)
mask = line(mask, a, b, (128, 0, 255), 25)
cv2.imwrite("out2.jpg", mask)
out2:
Now you can calculate the distance between "a" and "b".
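For completeness, a minimal sketch of that last step, assuming a and b are the (x, y) midpoint tuples computed above:

import numpy as np

# Euclidean distance in pixels between the two midpoints
width_px = np.linalg.norm(np.array(a) - np.array(b))
print("width: %.1f px" % width_px)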

opencv findContours, contourArea value not consistent

I have a test image (see the first image below) and a very simple piece of code that blurs the image, runs Canny edge detection, and then uses findContours to get the contours.
import cv2

image = cv2.imread("testimage.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
canny = cv2.Canny(blurred, 50, 130)
cv2.imshow("Canny", canny)
(_, conts, _) = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
img = image.copy()
for c in conts:
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.drawContours(img, [c], -1, (0, 255, 0), 1)
    cv2.putText(img, "area:" + str(cv2.contourArea(c)), (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow("Contours", img)
cv2.waitKey(0)
I put the area value of each contour on the image. As you can see, the contours look similar, but there is one contour with an extremely low area value (only 16.0).
What might be the reason for this? And how can I get consistent values across these contours?
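One quick diagnostic, a sketch assuming the conts list from the code above: cv2.contourArea computes a signed area via Green's theorem, so a thin contour traced from Canny edges that doubles back on itself can report a near-zero area despite a long perimeter. Comparing area against arcLength makes such contours easy to spot:

# Compare each contour's area against its perimeter; a large perimeter
# with a tiny area points to a thin, self-overlapping edge contour
for i, c in enumerate(conts):
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    print("contour %d: area=%.1f, perimeter=%.1f" % (i, area, perimeter))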

Image Processing:How to extract cementing matrix between different circles using opencv

A CT scan image in which the circles are connected by a cement matrix. I want to extract the volume of the cement matrix. However, I cannot find a threshold value that separates the circles from the cement matrix when using the watershed algorithm.
I have also tried OpenCV's HoughCircles and findContours to detect the circles, but the result is not good enough. Perhaps I am just not familiar enough with OpenCV. Attached is the image from which I need to extract the cementing matrix between the circles. You should be able to see it clearly with your eyes, yet none of the circle detection algorithms seem to work.
Note that I have already looked at and tried the solutions in Opencv divide contacted circles into single and OpenCV detect partial circle with noise, so this is not a duplicate of those questions.
This is my source image that I need to use.
Original Image:
HoughCircles Image:
The code:
import cv2
import numpy as np

def houghdetect(image, img):
    circles = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT, 1, 50,
                               param1=20, param2=27, minRadius=25, maxRadius=50)
    circles = np.uint16(np.around(circles))
    # note: this skips the first detected circle; use circles[0, :] to draw all
    for i in circles[0, 1:]:
        cv2.circle(img, (i[0], i[1]), i[2], (0, 0, 255), 2)
    cv2.namedWindow('detect_circle', 0)
    cv2.resizeWindow('detect_circle', 699, 575)
    cv2.imshow('detect_circle', img)

img = cv2.imread(r'C:\THU\python\learn\outputtif\5.jpg')
dst = cv2.GaussianBlur(img, (3, 3), 0)
gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 135, 255, cv2.THRESH_TOZERO)
Gthreshold = cv2.GaussianBlur(threshold, (5, 5), 0)
cv2.namedWindow('Gthreshold', 0)
cv2.resizeWindow('Gthreshold', 699, 575)
cv2.imshow("Gthreshold", Gthreshold)
houghdetect(Gthreshold, img)
cv2.waitKey()
cv2.destroyAllWindows()
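Since the question names the watershed algorithm, here is a minimal sketch of the standard marker-based recipe (distance transform plus cv2.watershed) for separating touching circles; the file name and the 0.5 peak fraction are placeholder values, not tuned to this image:

import cv2
import numpy as np

img = cv2.imread('ct_slice.jpg')  # placeholder file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Distance transform peaks give one marker per circle even when circles touch
dist = cv2.distanceTransform(binary, cv2.DIST_L2, 5)
_, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, cv2.THRESH_BINARY)
sure_fg = np.uint8(sure_fg)
sure_bg = cv2.dilate(binary, np.ones((5, 5), np.uint8), iterations=3)
unknown = cv2.subtract(sure_bg, sure_fg)

# Label the markers; watershed floods the regions labeled 0
_, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)

# Cement matrix candidate: foreground pixels not claimed by any circle label
circles_mask = np.uint8(markers > 1) * 255
cement = cv2.subtract(binary, circles_mask)
cv2.imwrite('cement_mask.jpg', cement)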

OpenCV cv2.HoughCircles detection giving bad results on easy picture

I am new to OpenCV and I'm trying to do circle detection using HoughCircles, but it gives me circles where there are none and does not detect the huge, obvious circle that I want it to. I tried changing the parameters but can't get it to work well. What am I doing wrong?
Original Image:
Image After Thresholding:
Canny Filtered with Circles:
import cv2
import numpy as np

path = r"minimap.png"
screen = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
cv2.imshow('Original', screen)
ret, screen = cv2.threshold(screen, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('Thresholded', screen)
P = 50
can = cv2.Canny(screen, P / 2, P)
cv2.imshow('Canny', can)
if 1:
    circles = cv2.HoughCircles(screen, cv2.HOUGH_GRADIENT, dp=1, minDist=50,
                               param1=P, param2=53, minRadius=0, maxRadius=0)
    print(circles)
    circles = np.uint16(np.around(circles))
    can = cv2.cvtColor(can, cv2.COLOR_GRAY2RGB)
    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(can, (i[0], i[1]), i[2], (0, 255, 0), 3)
        # draw the center of the circle
        cv2.circle(can, (i[0], i[1]), 2, (0, 0, 255), 5)
    cv2.imshow('Circles', can)
cv2.waitKey()
You have to play with the cv2.HoughCircles parameters, and I don't think the same values will give good results for all images. For your image:
import cv2

image = cv2.imread('minimap.png')  # the question's image
image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), sigmaX=-1, sigmaY=-1)
rows = gray.shape[0]  # `rows` was undefined in the original snippet
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 2,
                           param1=130, param2=100,
                           minRadius=0, maxRadius=0)

How can I filter out points of an edge-detected circle that are extremely noisy?

I am working on detecting the center and radius of a circular aperture that is illuminated by a laser beam. The algorithm will be fed images from a system that I have no physical control over (i.e. no dimming the source or adjusting the laser position). I need to do this in C++, and have chosen to use OpenCV.
In some regions the edge of the aperture is well defined, but in others it is very noisy. I currently am trying to isolate the "good" points to do a RANSAC fit, but I have taken other steps along the way. Below are two original images for reference:
I first began by trying to do a Hough fit. I performed a median blur to remove the salt-and-pepper noise, then a Gaussian blur, and then fed the image to the HoughCircles function in OpenCV, with sliders controlling the Hough parameters 1 and 2 defined here. The results were disastrous:
I then decided to try to process the image some more before sending it to the HoughCircle. I started with the original image, median blurred, Gaussian blurred, thresholded, dilated, did a Canny edge detection, and then fed the Canny image to the function.
I was eventually able to get a reasonable estimate of my circle, but it was about the 15th circle to show up when manually decreasing the Hough parameters. I manually drew the purple outline, with the green circles representing Hough outputs that were near my manual estimate. The below images are:
Canny output without dilation
Canny output with dilation
Hough output of the dilated Canny image drawn on the original image.
As you can see, the invalid circles vastly outnumber the correct one, and I'm not quite sure how to isolate the good circles given that the Hough transform returns so many invalid circles even with stricter parameters.
I currently have some code that works OK for all of the test images I was given, but it is a convoluted mess with many tunable parameters that seems very fragile. The driving logic behind it came from noticing that the regions of the aperture edge that were well illuminated by the laser stayed relatively constant across several threshold levels (image shown below).
I did edge detection at two threshold levels and stored points that overlapped in both images. There is still some inaccuracy in the result because the aperture edge shifts slightly between the threshold levels. I can post the very long code for this if necessary, but the pseudo-code behind it is as follows (a rough Python sketch of the overlap step appears after the list):
1. Perform a median blur, followed by a Gaussian blur. Kernels are 9x9.
2. Threshold the image until 35% of the image is white. (~intensities > 30)
3. Take the Canny edges of this thresholded image and store (Canny1)
4. Take the original image, perform the same median and Gaussian blurs, but threshold with a 50% larger value, giving a smaller spot (~intensities > 45)
5. Perform the "Closing" morphology operation to further erode the spot and remove any smaller contours.
6. Perform another Canny to get the edges, and store this image (Canny2)
7. Blur both the Canny images with a 7x7 Gaussian blur.
8. Take the regions where the two Canny images overlap and say that these points are likely to be good points.
9. Do a RANSAC circle fit with these points.
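As referenced above, a minimal sketch of the dual-threshold overlap idea (steps 1-8); the file name and threshold values are placeholders, not the asker's actual parameters:

import cv2
import numpy as np

img = cv2.imread('aperture.pgm', cv2.IMREAD_GRAYSCALE)  # placeholder file name
blurred = cv2.GaussianBlur(cv2.medianBlur(img, 9), (9, 9), 0)

# Two binary masks: a low threshold and one ~50% higher
_, low = cv2.threshold(blurred, 30, 255, cv2.THRESH_BINARY)
_, high = cv2.threshold(blurred, 45, 255, cv2.THRESH_BINARY)
high = cv2.morphologyEx(high, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))

# Canny edges of each mask, blurred so nearby edges can overlap
canny1 = cv2.GaussianBlur(cv2.Canny(low, 100, 200), (7, 7), 0)
canny2 = cv2.GaussianBlur(cv2.Canny(high, 100, 200), (7, 7), 0)

# Keep only points where both edge images respond: the likely "good" points
overlap = cv2.bitwise_and(canny1, canny2)
good_points = np.column_stack(np.nonzero(overlap))  # candidates for the RANSAC fit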
I've noticed that there are regions of the edge detected circle that are pretty distinguishable by the human eye as being part of the best circle. Is there a way to isolate these regions for a RANSAC fit?
Code for Hough:
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>

using namespace cv;
using namespace std;

int houghParam1 = 100;
int houghParam2 = 100;
int dp = 10; // divided by 10 later
int x = 616;
int y = 444;
int radius = 398;
int iterations = 0;

int main()
{
    namedWindow("Circled Orig");
    namedWindow("Processed", 1);
    namedWindow("Circles");
    namedWindow("Parameters");
    namedWindow("Canny");
    createTrackbar("Param1", "Parameters", &houghParam1, 200);
    createTrackbar("Param2", "Parameters", &houghParam2, 200);
    createTrackbar("dp", "Parameters", &dp, 20);
    createTrackbar("x", "Parameters", &x, 1200);
    createTrackbar("y", "Parameters", &y, 1200);
    createTrackbar("radius", "Parameters", &radius, 900);
    createTrackbar("dilate #", "Parameters", &iterations, 20);
    std::string directory = "Secret";
    std::string suffix = ".pgm";
    Mat processedImage;
    Mat origImg;
    Mat testImage; // was used below without a declaration in the original
    for (int fileCounter = 2; fileCounter < 3; fileCounter++) //1, 12
    {
        std::string numString = std::to_string(static_cast<long long>(fileCounter));
        std::string imageFile = directory + numString + suffix;
        testImage = imread(imageFile);
        Mat bwImage;
        cvtColor(testImage, bwImage, CV_BGR2GRAY);
        GaussianBlur(bwImage, processedImage, Size(9, 9), 9);
        threshold(processedImage, processedImage, 25, 255, THRESH_BINARY); //THRESH_OTSU
        int numberContours = -1;
        int iterations = 1;
        imshow("Processed", processedImage);
    }
    vector<Vec3f> circles;
    Mat element = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
    float dp2 = dp;
    while (true)
    {
        float dp2 = dp;
        Mat circleImage = processedImage.clone();
        origImg = testImage.clone();
        if (iterations > 0) dilate(circleImage, circleImage, element, Point(-1, -1), iterations);
        Mat cannyImage;
        Canny(circleImage, cannyImage, 100, 20);
        imshow("Canny", cannyImage);
        HoughCircles(circleImage, circles, HOUGH_GRADIENT, dp2 / 10, 5, houghParam1, houghParam2, 300, 5000);
        cvtColor(circleImage, circleImage, CV_GRAY2BGR);
        for (size_t i = 0; i < circles.size(); i++)
        {
            Scalar color = Scalar(0, 0, 255);
            Point center2(cvRound(circles[i][0]), cvRound(circles[i][1]));
            int radius2 = cvRound(circles[i][2]);
            // the original had a misplaced parenthesis: abs((center2.y - y) < 10)
            if (abs(center2.x - x) < 10 && abs(center2.y - y) < 10 && abs(radius - radius2) < 20) color = Scalar(0, 255, 0);
            circle(circleImage, center2, 3, color, -1, 8, 0);
            circle(circleImage, center2, radius2, color, 3, 8, 0);
            circle(origImg, center2, 3, color, -1, 8, 0);
            circle(origImg, center2, radius2, color, 3, 8, 0);
        }
        // Manual circles
        circle(circleImage, Point(x, y), 3, Scalar(128, 0, 128), -1, 8, 0);
        circle(circleImage, Point(x, y), radius, Scalar(128, 0, 128), 3, 8, 0);
        circle(origImg, Point(x, y), 3, Scalar(128, 0, 128), -1, 8, 0);
        circle(origImg, Point(x, y), radius, Scalar(128, 0, 128), 3, 8, 0);
        imshow("Circles", circleImage);
        imshow("Circled Orig", origImg);
        int key = waitKey(50); // renamed from `x`, which shadowed the trackbar variable
    }
    Mat drawnImage;
    cvtColor(processedImage, drawnImage, CV_GRAY2BGR);
    return 1;
}
Thanks @jalconvolvon - this is an interesting problem. Here's my result:
What I find important again and again is dynamic parameter adjustment while prototyping, so I include the function I used to tune the Canny detection. The code also uses this answer for the RANSAC part.
import cv2
import numpy as np
import auxcv as aux  # the answerer's own helper module (unused in this snippet)
from skimage import measure, draw
import matplotlib.pyplot as plt

def empty_function(*arg):
    pass

# Tune Canny edge detection; accept by pressing "c"
def CannyTrackbar(img, win_name):
    trackbar_name = win_name + "Trackbar"
    cv2.namedWindow(win_name)
    cv2.resizeWindow(win_name, 500, 100)
    cv2.createTrackbar("canny_th1", win_name, 0, 255, empty_function)
    cv2.createTrackbar("canny_th2", win_name, 0, 255, empty_function)
    cv2.createTrackbar("blur_size", win_name, 0, 255, empty_function)
    cv2.createTrackbar("blur_amp", win_name, 0, 255, empty_function)
    while True:
        trackbar_pos1 = cv2.getTrackbarPos("canny_th1", win_name)
        trackbar_pos2 = cv2.getTrackbarPos("canny_th2", win_name)
        trackbar_pos3 = cv2.getTrackbarPos("blur_size", win_name)
        trackbar_pos4 = cv2.getTrackbarPos("blur_amp", win_name)
        img_blurred = cv2.GaussianBlur(img.copy(), (trackbar_pos3 * 2 + 1, trackbar_pos3 * 2 + 1), trackbar_pos4)
        canny = cv2.Canny(img_blurred, trackbar_pos1, trackbar_pos2)
        cv2.imshow(win_name, canny)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("c"):
            break
    cv2.destroyAllWindows()
    return canny

img = cv2.imread("sphere.jpg")

# Resize for convenience
img = cv2.resize(img, None, fx=0.2, fy=0.2)

# Closing
kernel = np.ones((11, 11), np.uint8)
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

# Sharpening
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
img = cv2.filter2D(img, -1, kernel)

# Test this if you use a different scale than the 0.2 of the original that I used.
# Remember that the actual kernel size for GaussianBlur is trackbar_pos3 * 2 + 1.
# You want to get as full a circle as possible here.
#canny = CannyTrackbar(img, "canny_trakbar")

# Additional blurring to reduce the offset toward the brighter region
img_blurred = cv2.GaussianBlur(img.copy(), (8 * 2 + 1, 8 * 2 + 1), 1)

# Detect edges. Important: make sure this works well with CannyTrackbar()
canny = cv2.Canny(img_blurred, 160, 78)

coords = np.column_stack(np.nonzero(canny))
model, inliers = measure.ransac(coords, measure.CircleModel,
                                min_samples=3, residual_threshold=1,
                                max_trials=1000)
rr, cc = draw.circle_perimeter(int(model.params[0]),
                               int(model.params[1]),
                               int(model.params[2]),
                               shape=img.shape)
img[rr, cc] = 1
plt.imshow(img, cmap='gray')
plt.scatter(model.params[1], model.params[0], s=50, c='red')
plt.axis('off')
plt.savefig('sphere_center.png', bbox_inches='tight')
plt.show()
Now I'd probably try to calculate where pixels are statistically brighter and where they are dimmer, to adjust the laser position (if I understand correctly what you're trying to do).
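As a small illustration of that brightness idea (my own sketch, not part of the original answer), one could compare the mean intensities of the image halves:

import cv2

gray = cv2.cvtColor(cv2.imread("sphere.jpg"), cv2.COLOR_BGR2GRAY)

# Compare mean intensity of the left/right and top/bottom halves;
# a large imbalance suggests the beam is offset in that direction
h, w = gray.shape
print("left/right means: %.1f / %.1f" % (gray[:, :w // 2].mean(), gray[:, w // 2:].mean()))
print("top/bottom means: %.1f / %.1f" % (gray[:h // 2, :].mean(), gray[h // 2:, :].mean()))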
If the RANSAC is still not enough, I'd try tuning Canny to detect only a clean arc on top of the circle (where it's well outlined) and then try using the following dependencies (I suspect that this should be possible):
