OpenCV cv2.HoughCircles detection giving bad results on easy picture - opencv

I am new to opencv and I'm trying to do circle detection using HoughCircles, but it's giving me circles where there are none, and it's not detecting the huge obvious circle that I want it to. I tried changing the parameters but can't get it to work well. What am I doing wrong?
Original Image:
Image After Thresholding:
Canny Filtered with Circles:
# --- Circle detection with cv2.HoughCircles ---
# NOTE(review): this passes the hard-thresholded image to HoughCircles.
# HoughCircles runs its own internal Canny on the input; feeding it a
# binarized image destroys the gradients it relies on, which is a common
# cause of both missed and spurious circles.
path = r"minimap.png"
screen = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
cv2.imshow('Original', screen)

# Binarize the bright (>200) pixels.
ret, screen = cv2.threshold(screen, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('Thresholded', screen)

# Canny with the conventional 2:1 high/low threshold ratio.
P = 50
can = cv2.Canny(screen, P / 2, P)
cv2.imshow('Canny', can)

circles = cv2.HoughCircles(screen, cv2.HOUGH_GRADIENT, dp=1, minDist=50,
                           param1=P, param2=53, minRadius=0, maxRadius=0)
print(circles)

# BUG FIX: HoughCircles returns None when nothing is found, and the original
# `np.uint16(np.around(circles))` would then raise. Only draw on success.
# (The pointless `if 1:` wrapper was removed.)
if circles is not None:
    circles = np.uint16(np.around(circles))
    can = cv2.cvtColor(can, cv2.COLOR_GRAY2RGB)
    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(can, (i[0], i[1]), i[2], (0, 255, 0), 3)
        # draw the center of the circle
        cv2.circle(can, (i[0], i[1]), 2, (0, 0, 255), 5)
    cv2.imshow('Circles', can)
cv2.waitKey()

You have to play with the cv2.HoughCircles parameters, and I don't think the same values will give good results for all images. For your image:
# Downscale, grayscale, and lightly blur before the Hough transform --
# HoughCircles is noise-sensitive, and sigmaX/sigmaY of -1 lets OpenCV
# derive the Gaussian sigma from the 7x7 kernel size.
image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), sigmaX=-1, sigmaY=-1)

# BUG FIX: `rows` was never defined in the original snippet; derive it from
# the image so minDist (minimum distance between detected centres) is half
# the image height.
rows = gray.shape[0]
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 2,
                           param1=130, param2=100,
                           minRadius=0, maxRadius=0)

Related

Remove Yellow rectangle from image

I am using this code to remove this yellow stamp from an image :
import cv2
import numpy as np

# Remove a yellow stamp: mask yellow pixels, take the largest connected
# region, and paint it white on a copy of the input.

# read image
img = cv2.imread('input.jpg')

# threshold on yellow (BGR order: low blue, high green and red)
# NOTE(review): darker stamps fall outside these bounds; lowering to e.g.
# (0, 120, 120) captures more of the yellow area.
lower = (0, 200, 200)
upper = (100, 255, 255)
thresh = cv2.inRange(img, lower, upper)

# apply dilate morphology to close small gaps in the mask
kernel = np.ones((9, 9), np.uint8)
mask = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)

# get largest contour (handles both OpenCV 3 and OpenCV 4 return shapes)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]

# BUG FIX: max() on an empty sequence raises a bare ValueError (the exact
# failure reported); fail with a clear, actionable message instead.
if not contours:
    raise ValueError("no yellow region detected - try lowering the `lower` threshold")
big_contour = max(contours, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(big_contour)

# draw filled white contour on input
result = img.copy()
cv2.drawContours(result, [big_contour], 0, (255, 255, 255), -1)
cv2.imwrite('yellow_removed.png', result)

# show the images
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
I get the following error:
big_contour = max(contours, key=cv2.contourArea) ValueError: max() arg
is an empty sequence
Obviously, it is not detecting any contours, and the contours array is empty, but I could not figure out why that is or how to fix it.
Help is appreciated!
Check your lower thresholds. It worked for me for both images when I changed the lower threshold to lower = (0, 120, 120).
The thresholds are the issue, because the second image is darker. Lowering these thresholds captures more of the yellow area, but will still leave some holes when drawing the contour.
lower = (0, 130, 130)
You can fix this by drawing the bounding rectangle instead.
cv2.rectangle(result,(x,y),(x+w,y+h),(255,255,255),-1)
Using HSV color space is great for figuring out a particular shade/tone of color. When you have dominant colors to isolate, you can opt for the LAB color space. I have explained as to why this is better in this answer.
Code:
# Remove the stamp via the LAB colour space: the b-channel encodes the
# blue<->yellow axis, so a yellow stamp appears very bright there.
img = cv2.imread('bill.jpg')
# create another copy for the result (painted over at the end)
img2 = img.copy()
# convert to LAB space and store b-channel (the last of the L, a, b channels)
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
b_channel = lab[:,:,-1]
Notice how bright the yellow region is above.
# Perform Otsu threshold: the split point is chosen automatically, isolating
# the bright (yellow) region of the b-channel.
th = cv2.threshold(b_channel, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# Find the contour with largest area (assumed to be the stamp)
contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
c = max(contours, key = cv2.contourArea)
# draw the contour, filled, on a plain black image of same shape as original
mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
mask = cv2.drawContours(mask,[c],0,255, -1)
# dilation to avoid border effects: grow the mask slightly past the stamp edge
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
dilate = cv2.dilate(mask, kernel, iterations=1)
# paint every masked pixel white on the result copy
img2[dilate == 255] = (255, 255, 255)
Another example:
Input:
Result:

OpenCV - Computing distance between two edges in an image

I am trying to compute distance (in # of pixels) between two edges in an image. I have corrected for image perspective using cv2.warpPerspective method and have converted the resulting image into grayscale followed by filtering using gaussian blur. I have tried various thresholding methods and found out that cv2.ADAPTIVE_THRESH_GAUSSIAN works best. Other methods are too noisy or miss the second edge in the left side of the object as seen in result of adaptive gaussian thresholding.
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Compare several thresholding strategies on a perspective-corrected,
# blurred grayscale image, and plot them side by side.
# (Indentation of the plotting loop, lost in the original paste, restored.)

# Load the image (already corrected for perspective via cv2.warpPerspective)
imgRoadvR10 = cv2.imread('sampleimage.jpg')

# convert to grayscale
imgRoadvR10_GrayPersp = cv2.cvtColor(imgRoadvR10, cv2.COLOR_BGR2GRAY)

# gaussian blur to suppress noise before thresholding
a10lvR10_gblur = cv2.GaussianBlur(imgRoadvR10_GrayPersp, (5, 5), 0)

# Global threshold at a fixed value
ret, a10lvR10_th1 = cv2.threshold(a10lvR10_gblur, 127, 255, cv2.THRESH_BINARY)

# Adaptive mean: threshold = mean of the 11x11 neighbourhood minus 2
a10lvR10_th2 = cv2.adaptiveThreshold(a10lvR10_gblur, 255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, 11, 2)

# Adaptive gaussian (inverted output)
a10lvR10_th3 = cv2.adaptiveThreshold(a10lvR10_gblur, 255,
                                     cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV, 11, 2)

# Otsu's thresholding: global threshold chosen automatically (printed below)
ret2, a10lvR10_th4 = cv2.threshold(a10lvR10_gblur, 0, 255,
                                   cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(ret2)

# Plot results in a 2x3 grid
plt.figure()
titles = ['Original Image', 'Global Thresholding (v = 127)',
          'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding',
          'OTSU Thresholding']
images = [a10lvR10_gblur, a10lvR10_th1, a10lvR10_th2, a10lvR10_th3, a10lvR10_th4]
for i in range(5):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([])
    plt.yticks([])
plt.show()
Closer look at result of adaptive gaussian thresholding:
I want to find the width of this rectangular object. The width is measured from the second edge on the left side to the edge on the right side (see image below):
How can I measure the width? I have been reading up on morphological operations and edge detection, but I am not sure how to proceed next. Any suggestions will be appreciated.
This is not the best idea and I think a more logical and simple solution can be obtained. However, this idea may help you.
import cv2
import numpy as np
# load image
im = cv2.imread("test3.jpg", 1)
# Convert to gray
mask = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# convert to black and white
mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
# try to remove noise
# you can just use median blur or any other method
# NOTE(review): the (8, 0) and (32, 0) kernel shapes are zero-width (empty)
# arrays; OpenCV then appears to fall back to its default structuring
# element, so these were presumably meant to be e.g. (8, 8) and (32, 32) --
# confirm against the intended behaviour.
mask = cv2.erode(mask, np.ones((8, 0), "uint8"))
mask = cv2.dilate(mask, np.ones((32, 0), "uint8"))
mask = cv2.medianBlur(mask, 9)
# save cleaned image
cv2.imwrite("out1.jpg", mask)
A cleaner version of your output image:
out1:
Next we can get the coordinates of the lines. I got the coordinates of the first line from the left. I think you have to change the code a bit to get the coordinates of the sidebar.
h = len(mask) - 1
def count(row):
    """Return the number of pixels before the first white (255) pixel in *row*.

    If the row contains no 255 value, the full row length is returned.
    """
    return next((idx for idx, px in enumerate(row) if px == 255), len(row))
def line(im, pt1, pt2, color, thickness):
    """Draw an anti-aliased line segment from *pt1* to *pt2* on *im*.

    Thin wrapper around cv2.line that returns the drawn-on image.
    """
    return cv2.line(
        img=im,
        pt1=pt1,
        pt2=pt2,
        color=color,
        thickness=thickness,
        lineType=cv2.LINE_AA,
    )
def center(x1, y1, x2, y2):
    """Return the integer midpoint of the segment (x1, y1)-(x2, y2)."""
    mid_x = int((x1 + x2) / 2)
    mid_y = int((y1 + y2) / 2)
    return (mid_x, mid_y)
# Locate the left-most white line at the top and bottom rows, erase it with a
# thick black stroke, then rescan to find the next line; finally redraw both
# lines cleanly and connect their midpoints (a-b is the width to measure).
topLeft = count(mask[0])
bottomLeft = count(mask[h])
# to shadow and hide the old left line
mask = line(mask, (topLeft, 0), (bottomLeft, h), (0, 0, 0), 80)
# with the first line erased, the same left-to-right scan finds the next one
topRight = count(mask[0])
bottomRight = count(mask[h])
# to shadow and hide the old right line
mask = line(mask, (topRight, 0), (bottomRight, h), (0, 0, 0), 80)
# switch to BGR so the overlay lines below can be drawn in colour
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# to draw new clean left line
mask = line(mask, (topLeft, 0), (bottomLeft, h), (128, 0, 255), 25)
# to draw new clean right line
mask = line(mask, (topRight, 0), (bottomRight, h), (128, 0, 255), 25)
# midpoints of each detected line; their distance is the width in pixels
a = center(topLeft, 0, bottomLeft, h)
b = center(topRight, 0, bottomRight, h)
mask = line(mask, a, b, (128, 0, 255), 25)
cv2.imwrite("out2.jpg", mask)

I want to detect all the underlined words in a paragraph

Original Image
Click here for the image
For this, I am trying to detect the underlines first. But as the underlines might be tilted, this code:
import time
from google.colab.patches import cv2_imshow
from collections import OrderedDict

# Detect underline-like shapes: edge-detect, take external contours,
# approximate each to a polygon, and box the ones that reduce to a line.

# Let's load a simple image with 3 black squares
image = cv2.imread("line_detected.png")
cv2.waitKey(0)

# Grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find Canny edges
font = cv2.FONT_HERSHEY_COMPLEX
edged = cv2.Canny(gray, 30, 200)
cv2.waitKey(0)

# Finding Contours
# Use a copy of the image e.g. edged.copy()
# since findContours alters the image
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2_imshow(edged)
cv2.waitKey(0)
print("Number of Contours found = " + str(len(contours)))

# Draw all contours
# -1 signifies drawing all contours
# cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
mask = np.ones(image.shape[:2], dtype="uint8") * 255
d = OrderedDict()
coords = []
nuclei = []
l = []
heading = []
images = []
lvalue = 0
line = []
h = []

# Process contours bottom-to-top (findContours returns them top-down)
contours = contours[::-1]
for cnt in contours:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
    # BUG FIX: the original tested `len(approx==2)` -- an elementwise
    # comparison whose len() is the vertex count, so the branch ran for
    # EVERY contour. A line-like contour approximates to exactly 2 vertices.
    if len(approx) == 2:
        x, y, w, h = cv2.boundingRect(cnt)
        # BUG FIX: `img` was undefined; draw on the loaded `image` instead.
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2_imshow(image)
is not able to detect the slanting underlines very properly. Also, I want this code to extend to detecting only the gray underlines. "minor differences" has a single underline as it is slanted/tilted, it reads it as two straight lines. Also, it is reading the images in the left which it should not read(tesseract giving weird outputs).
For the gray shade only I found this mask thing online:
# NOTE(review): these bounds are HSV values for *blue* hues (H in 110-130),
# not gray -- they came from an unrelated example. Tune the range for gray,
# and note this snippet assumes an `hsv` image already exists (e.g. from
# cv2.cvtColor(image, cv2.COLOR_BGR2HSV)) -- confirm before use.
lower_range = np.array([110,50,50])
upper_range = np.array([130,255,255])
mask = cv2.inRange(hsv, lower_range, upper_range)
But Don't know how to incorporate in code... I'm a beginner, any help is much appreciated!

Finding centroid of circle in opencv

I am trying to find centroid of circular objects OR a circle that can bounding around circular objects in a grayscale image.
So far what I have done is turn that grayscale image to binary image using adaptive thresholding.
Grayscale image
Threshold image
Up till now, I have used the Hough transform and findContours. Neither of these methods works.
What should be an approach to this?
I got a decent result using the Hough transform for circles. This is the pipeline:
# Hough-circle pipeline: blur, adaptive-threshold, detect circles, and draw
# the ones whose centre lies within 500 px of (600, 600).
img = cv2.imread('I7Ykpbs.jpg', 0)
img = cv2.GaussianBlur(img, (5, 5), 2, 2)
img_th = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 9, 3)
circles = cv2.HoughCircles(img_th, cv2.HOUGH_GRADIENT, 2, minDist=30,
                           param1=200, param2=40, minRadius=10, maxRadius=20)

# BUG FIX: HoughCircles returns None when nothing is detected (the original
# would crash on .shape), and cv2.circle requires integer centre/radius --
# np.round alone still yields floats, which newer OpenCV versions reject.
if circles is not None:
    for i in range(circles.shape[1]):
        c = circles[0, i, :]
        center = (int(round(c[0])), int(round(c[1])))
        radius = int(round(c[2]))
        # keep only circles reasonably close to the expected image centre
        if np.linalg.norm(np.array([600., 600.]) - center) < 500.:
            cv2.circle(img, center, 3, (0, 255, 0), -1, 8, 0)
            cv2.circle(img, center, radius, (0, 0, 255), 3, 8, 0)
plt.imshow(img)
plt.show()
It's not perfect but I think you can start from here and do some finetuning on parameters and preprocessing to optimize the result.

opencv findContours, contourArea value not consistant

I have a test image (see the 1st image below), and a very simple code to blur and make canny edge detection of this image, then use findcontours to get contours.
# Blur, edge-detect, extract external contours, and label each contour with
# its area at its centroid.
image = cv2.imread("testimage.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (11, 11), 0)
cv2.imshow("Blurred", blurred)
canny = cv2.Canny(blurred, 50, 130)
cv2.imshow("Canny", canny)

# COMPAT FIX: findContours returns 3 values in OpenCV 3 but only 2 in
# OpenCV 4; unpack whichever shape this installation provides.
found = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
conts = found[0] if len(found) == 2 else found[1]

img = image.copy()
for c in conts:
    M = cv2.moments(c)
    # BUG FIX: a degenerate contour has m00 == 0, which made the original
    # divide by zero when computing the centroid; skip such contours.
    if M["m00"] == 0:
        continue
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.drawContours(img, [c], -1, (0, 255, 0), 1)
    cv2.putText(img, "area:" + str(cv2.contourArea(c)), (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow("Contours", img)
cv2.waitKey(0)
I put the value of each contour on the image. As you may see, the contours look similar, but there is one contour with area value extremely low (only 16.0).
What might be the reason for this? And how to get consistent values among these contours?

Resources