OpenCV circle/ellipse detection with blurred edge

How can I locate the pupil (the small circle in the eyeball) in the following picture and calculate its area? I tried Hough circle detection and ellipse fitting on contours with various thresholds, but none of these naive approaches works very well.
Specifically, HoughCircles gets completely lost in the noise, and ellipse fitting with pruning often ends up returning the larger circle.
I'm also unsure how to determine a working threshold without manually adjusting a trackbar. Can somebody give me some guidance on how to do this accurately?
Eyeball sample image

A simple image-processing pipeline should help you achieve your goal.
First, load the image in grayscale. Otsu's threshold method should be robust enough to extract the pupil region of the eye; some extra morphological processing is required to remove noise and fill holes.
Then, using connected component analysis, we can isolate the pupil region for further processing.
With this region, we can get the edge by subtracting the region from a dilated version of itself, as shown below.
Finally, we can run a circle-fitting or an ellipse-fitting algorithm to obtain the corresponding shape.
The circle fit is shown in red, whereas the ellipse fit is shown in green. Both return the same center position, albeit with slightly different shapes.
Here is the code used. I shrink the image to speed up the process, but it works the same at the original size.
import cv2
import numpy as np
img = cv2.imread('eye.jpg', 0)
small_img = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)
r, c = small_img.shape
# Threshold with Otsu's method to extract the dark objects
_, thresh = cv2.threshold(small_img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Morphological close to cluster nearby objects
bin_img = cv2.dilate(thresh, None, iterations=5)
bin_img = cv2.erode(bin_img, None, iterations=5)
# Analyse connected components
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(bin_img)
# Isolate the component at the image centre (the pupil);
# the radius of an equivalent circle follows from the component area
bin_eye = np.zeros((r, c), dtype=np.uint8)
cnt_label = labels[r // 2, c // 2]
bin_eye[labels == cnt_label] = 255
area = stats[cnt_label, cv2.CC_STAT_AREA]
radius = np.sqrt(area / np.pi)
cnt_pt = (centroids[cnt_label][0], centroids[cnt_label][1])
# Get the region's edge by subtracting it from its dilated version
bin_eye_large = cv2.dilate(bin_eye, None, iterations=1)
edge_eye = bin_eye_large - bin_eye
# Extract edge locations as (x, y) points for ellipse fitting
ys, xs = np.where(edge_eye > 0)
ellip_pts = np.column_stack((xs, ys)).astype(np.int32)
# Fit ellipse
ellip = cv2.fitEllipse(ellip_pts)
# Display the final result
edges_color = cv2.cvtColor(small_img, cv2.COLOR_GRAY2BGR)
cv2.circle(edges_color, (int(cnt_pt[0]), int(cnt_pt[1])), int(radius), (0, 0, 255), 1)
cv2.circle(edges_color, (int(cnt_pt[0]), int(cnt_pt[1])), 5, (0, 0, 255), 1)
cv2.ellipse(edges_color, ellip, (0, 255, 0))
cv2.circle(edges_color, (int(ellip[0][0]), int(ellip[0][1])), 5, (0, 255, 0), 1)
cv2.imshow('edges_color', edges_color)
cv2.imshow('bin_img', bin_img)
cv2.imshow('eye_label', bin_eye)
cv2.imshow('eye_edge', edge_eye)
cv2.waitKey(0)
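Since the question also asks for the pupil area: the measurement above was taken on an image shrunk by a factor of 0.25, so as a small follow-up sketch (assuming the same scale factor as above) the area can be converted back to original-resolution pixels.
# Areas scale with the square of the resize factor (0.25 above),
# so divide by scale^2 to express the pupil area in original pixels.
scale = 0.25
area_original = area / (scale * scale)
print('Pupil area: %d px (small image), ~%.0f px (original image)' % (area, area_original))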

Related

Edge detection on photovoltaic modules on infrared images

Context
My goal is to detect PV modules in a dataset of infrared images taken by a drone. I want to improve the edge detection so that my algorithm performs better. The detected and labelled modules are then used to train a neural network.
Dataset
I have several hundred images taken at different times and from different altitudes. Their quality is not perfect; the capture conditions could have been better, e.g.:
altitude - sometimes the edges between modules are barely visible; the images could have been taken from a lower altitude so that the edges are more visible.
capture time - sometimes the background (grass) is very hot; ideally the images would have been taken in the late morning or early afternoon.
However, I have to stick to what I have.
As you can see, sometimes (e.g. image_3) the "middle line" is hardly visible.
Code
The preprocessing below is based on a project I found on GitHub. Standard preprocessing and Canny edge detection are used.
import cv2
import numpy as np
def detect_edges():
    # image_path = "data/stackoverflow/TEMP_DJI_1_R (715).JPG"
    # image_path = "data/stackoverflow/TEMP_DJI_6_R (720).JPG"
    image_path = "data/stackoverflow/TEMP_DJI_5_R (657).JPG"
    # read image
    input_image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    cv2.imshow('input_image', input_image)
    # scale image
    image_scaling = 11.0
    scaled_image_rgb = cv2.resize(src=input_image, dsize=(0, 0), fx=image_scaling, fy=image_scaling)
    cv2.imshow('scaled_image', scaled_image_rgb)
    # blur image (note: only displayed; the steps below use the unblurred image)
    gaussian_blur = 7
    blurred_image = cv2.GaussianBlur(scaled_image_rgb, (gaussian_blur, gaussian_blur), 0)
    cv2.imshow('blurred_image', blurred_image)
    # gray image
    grayed_image = cv2.cvtColor(scaled_image_rgb, cv2.COLOR_BGR2GRAY)
    cv2.imshow('grayed_image', grayed_image)
    # red-channel threshold
    red_threshold = 120
    red_channel = scaled_image_rgb[:, :, 2]
    _, thresholded_image = cv2.threshold(red_channel, red_threshold, 255, cv2.THRESH_BINARY)
    # morphological closing and opening to clean the mask
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    closing = cv2.morphologyEx(thresholded_image, cv2.MORPH_CLOSE, kernel)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
    # keep only contours above a minimum area
    min_area = 250 * 200
    contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(contour) for contour in contours]
    discarded_contours = [area < min_area for area in areas]
    contours = [contours[i] for i in range(len(contours)) if not discarded_contours[i]]
    # build a soft mask from the remaining contours and apply it
    mask = np.zeros_like(grayed_image)
    cv2.drawContours(mask, contours, -1, (255), cv2.FILLED)
    mask = cv2.dilate(mask, kernel, iterations=5)
    mask = cv2.blur(mask, (25, 25))
    mask = mask.astype(np.float64) / 255.
    preprocessed_image = (grayed_image * mask).astype(np.uint8)
    cv2.imshow('preprocessed_image', preprocessed_image)
    # Canny edge detection with hysteresis thresholds
    hysteresis_min_thresh = 25
    hysteresis_max_thresh = 40
    canny_image = cv2.Canny(image=preprocessed_image, threshold1=hysteresis_min_thresh,
                            threshold2=hysteresis_max_thresh, apertureSize=3)
    cv2.imshow('canny_image', canny_image)
    cv2.waitKey()
Results
The results are not bad, but they must be improved before further processing.
What kind of operations would be best to distinguish the panels from the background (grass)?
In the case of the images with a hardly visible "middle" line (image_3), is there any chance of finding that "internal" edge? Or, for these images, should I rather focus on finding the outer edges only and draw an artificial line in the middle to divide the whole panel into two?
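One direction that might help with the grass background, as a minimal sketch rather than a tuned solution: replace the fixed red_threshold of 120 with Otsu's method, which picks a per-image threshold and may cope better with varying grass temperature. The image path below is the same placeholder used in the code above, and the assumption is that panels appear brighter than grass in the infrared image.
import cv2
# Sketch: per-image Otsu threshold on the grayscale image instead of a
# fixed red-channel threshold; assumes panels are brighter than grass.
image = cv2.imread("data/stackoverflow/TEMP_DJI_5_R (657).JPG", cv2.IMREAD_GRAYSCALE)
_, panel_mask = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow('panel_mask', panel_mask)
cv2.waitKey()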

How to crop the given irregularly shaped object along its outline in OpenCV

I have been working on a piece of code where an image is given, as shown.
I have to place this knife onto some other image. The condition is that I have to crop the knife along its outline and not in a rectangular box.
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('2.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
img_blur = cv2.bilateralFilter(img, d=7, sigmaSpace=75, sigmaColor=75)
img_gray = cv2.cvtColor(img_blur, cv2.COLOR_RGB2GRAY)
a = img_gray.max()
_, thresh = cv2.threshold(img_gray, a / 2 + 60, a, cv2.THRESH_BINARY_INV)
plt.imshow(thresh, cmap='gray')
contours, hierarchy = cv2.findContours(image=thresh,
                                       mode=cv2.RETR_TREE,
                                       method=cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
img_copy = img.copy()
final = cv2.drawContours(img_copy, contours, contourIdx=-1,
                         color=(255, 0, 0), thickness=2)
plt.imshow(img_copy)
This is what I have tried but it doesn't seem to work well.
Input
Output
You can do it starting from a bounding box, using the snake algorithm with a balloon force added.
The snake algorithm is defined so that it minimizes three energies: continuity, curvature, and gradient. The first two (together called the internal energy) are minimized when the points on the curve are pulled closer and closer together, i.e. the curve contracts. If the points expand, the energy increases, which the snake algorithm does not allow.
But this initial algorithm, proposed in 1987, has a few problems. One of them is that in flat areas (where the gradient is zero) the algorithm fails to converge and does nothing. Several modifications have been proposed to solve this; the one of interest here is the balloon force, proposed by L. D. Cohen in 1989.
The balloon force guides the contour in non-informative areas of the image, i.e., areas where the gradient of the image is too small to push the contour towards a border. A negative value will shrink the contour, while a positive value will expand it in these areas. Setting it to zero disables the balloon force.
Another improvement is morphological snakes, which use morphological operators (such as dilation or erosion) over a binary array instead of solving PDEs over a floating-point array, the standard approach for active contours. This makes morphological snakes faster and numerically more stable than their traditional counterpart.
Scikit-image's implementation combining these two improvements is morphological_geodesic_active_contour. It has a balloon parameter.
Using your image:
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import morphological_geodesic_active_contour, inverse_gaussian_gradient
from skimage.color import rgb2gray
from skimage.util import img_as_float
from PIL import Image
im = Image.open('knife.jpg')
im = np.array(im)
im = rgb2gray(im)
im = img_as_float(im)
plt.imshow(im, cmap='gray')
Now let us create a function which will help us to store iterations:
def store_evolution_in(lst):
    """Returns a callback function to store the evolution of the level sets
    in the given list.
    """
    def _store(x):
        lst.append(np.copy(x))
    return _store
This method needs the image to be preprocessed to highlight the contours. This can be done using the function inverse_gaussian_gradient, although the user might want to define their own version. The quality of the MorphGAC segmentation depends greatly on this preprocessing step.
gimage = inverse_gaussian_gradient(im)
Below we define our starting point - a bounding box.
init_ls = np.zeros(im.shape, dtype=np.int8)
init_ls[200:-400, 20:-30] = 1
A list to hold the intermediate results for plotting the evolution:
evolution = []
callback = store_evolution_in(evolution)
Now the required magic line for morphological_geodesic_active_contour with balloon contraction:
ls = morphological_geodesic_active_contour(gimage, 200, init_ls,
                                           smoothing=1, balloon=-0.75,
                                           threshold=0.7,
                                           iter_callback=callback)
Now let us plot the results:
fig, axes = plt.subplots(1, 2, figsize=(8, 8))
ax = axes.flatten()
ax[0].imshow(im, cmap="gray")
ax[0].set_axis_off()
ax[0].contour(ls, [0.5], colors='b')
ax[0].set_title("Morphological GAC segmentation", fontsize=12)
ax[1].imshow(ls, cmap="gray")
ax[1].set_axis_off()
contour = ax[1].contour(evolution[0], [0.5], colors='r')
contour.collections[0].set_label("Starting Contour")
contour = ax[1].contour(evolution[25], [0.5], colors='g')
contour.collections[0].set_label("Iteration 25")
contour = ax[1].contour(evolution[-1], [0.5], colors='b')
contour.collections[0].set_label("Last Iteration")
ax[1].legend(loc="upper right")
title = "Morphological GAC Curve evolution"
ax[1].set_title(title, fontsize=12)
plt.show()
With more balloon force, you can isolate just the blade of the knife as well.
ls = morphological_geodesic_active_contour(gimage, 100, init_ls,
                                           smoothing=1, balloon=-1,
                                           threshold=0.7,
                                           iter_callback=callback)
Play with these parameters (smoothing, balloon, threshold) to get your perfect curve.
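A quick way to explore those parameters, as a minimal sketch reusing gimage, init_ls and im from above (the balloon values here are just illustrative):
# Sweep a few balloon values and plot the resulting contours side by side.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, balloon in zip(axes, [-0.5, -0.75, -1.0]):
    ls = morphological_geodesic_active_contour(gimage, 150, init_ls,
                                               smoothing=1, balloon=balloon,
                                               threshold=0.7)
    ax.imshow(im, cmap='gray')
    ax.contour(ls, [0.5], colors='b')
    ax.set_title('balloon = %.2f' % balloon)
    ax.set_axis_off()
plt.show()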

Why can't I draw lines after Canny edge detection?

I am a noob at computer vision, sorry in advance.
I want to detect the edges of a tram lane. Mostly the code works well, but sometimes it cannot draw a line at all, and I don't know why.
The region_of_interest function simply crops a polygonal area out of the current frame.
The display_lines function uses cv2.line to draw the lines whose absolute angle is between 30 and 90 degrees.
Here is the code:
_, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # convert image to gray (one channel)
blur = cv2.GaussianBlur(gray, (1, 1), 0)  # to reduce noise in the grayscale image
canny = cv2.Canny(blur, 150, 200, apertureSize=3)
cropped_image = region_of_interest(canny)  # simply crops the bottom of the image
lines = cv2.HoughLinesP(cropped_image, 1, np.pi / 180, 100, np.array([]),
                        minLineLength=5, maxLineGap=5)
hough_bundler = HoughBundler()
lines_merged = hough_bundler.process_lines(lines, cropped_image)
line_image = display_lines(frame, lines_merged)
combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
cv2.imshow('test', combo_image)
To see it: HoughBundler
Expected: expected img
Canny: canny img of wrong result
Result: wrong result
First of all, I'd start by fixing the cv2.GaussianBlur() line. You've used a 1x1 kernel, which doesn't do anything; you need at least a 3x3 kernel. Look into how convolutions are applied if you want to know why a 1x1 filter has no effect.
Secondly, I would play with the Canny aperture size to suit my needs. Also, after edge detection you can apply a dilation (or a morphological close) with a 3x3 or 5x5 kernel so that you don't get broken lines in the image.
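A minimal sketch of those two fixes together, assuming the same gray variable and Canny thresholds as in the question:
# 5x5 Gaussian kernel (odd size >= 3) instead of the no-op 1x1 kernel,
# then a morphological close on the edge map to join broken segments.
blur = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blur, 150, 200, apertureSize=3)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
canny = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)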

How to get the area of the contours?

I have a picture like this:
And then I transform it into a binary image and use Canny to detect the edges of the picture:
import cv2 as cv
from PIL import Image
image = cv.imread('input.jpg')  # placeholder path for the picture above
gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
edges = cv.Canny(gray, 100, 200)  # threshold values are illustrative; the originals were not shown
edge = Image.fromarray(edges)
And then I get this result:
I want to get the area of region 2, like this:
My solution is to use HoughLines to find the lines in the picture and calculate the area of the triangle they form. However, this approach is not precise, because the enclosed area is not an exact triangle. How can I get the area of region 2?
A simple approach using floodFill and countNonZero could be the following code snippet. My standard quote on contourArea from the help:
The function computes a contour area. Similarly to moments, the area is computed using the Green formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using drawContours or fillPoly, can be different. Also, the function will most certainly give a wrong results for contours with self-intersections.
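To see that discrepancy concretely, here is a tiny sketch with made-up values comparing cv2.contourArea against the pixel count of the drawn contour:
import cv2
import numpy as np
# A filled 10x10 axis-aligned square: 100 pixels when drawn, but
# contourArea applies the Green formula to the polygon through the
# pixel centers, giving 9 * 9 = 81.
canvas = np.zeros((20, 20), np.uint8)
square = np.array([[5, 5], [14, 5], [14, 14], [5, 14]], dtype=np.int32).reshape(-1, 1, 2)
cv2.drawContours(canvas, [square], -1, 255, cv2.FILLED)
print(cv2.contourArea(square))   # 81.0
print(cv2.countNonZero(canvas))  # 100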
Code:
import cv2
import numpy as np
# Input image
img = cv2.imread('images/YMMEE.jpg', cv2.IMREAD_GRAYSCALE)
# Needed due to JPG artifacts
_, temp = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
# Dilate to better detect contours
temp = cv2.dilate(temp, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
# Find largest contour
cnts, _ = cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
largestCnt = []
for cnt in cnts:
    if len(cnt) > len(largestCnt):
        largestCnt = cnt
# Determine center of area of largest contour
M = cv2.moments(largestCnt)
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
# Initialize mask for flood filling
height, width = temp.shape
mask = np.ones((height + 2, width + 2), np.uint8) * 255
mask[1:height, 1:width] = 0
# Generate intermediate image: draw largest contour, then flood fill
temp = np.zeros(temp.shape, np.uint8)
temp = cv2.drawContours(temp, largestCnt, -1, 255, cv2.FILLED)
_, temp, mask, _ = cv2.floodFill(temp, mask, (x, y), 255)
temp = cv2.morphologyEx(temp, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
# Count pixels in desired region
area = cv2.countNonZero(temp)
# Put result on original image
img = cv2.putText(img, str(area), (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 255)
cv2.imshow('Input', img)
cv2.imshow('Temp image', temp)
cv2.waitKey(0)
Temporary image:
Result image:
Caveat: findContours has some problems on the right side, where the line is very close to the bottom image border, possibly resulting in some pixels being omitted.
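A possible workaround for that border issue, as a sketch: pad the binary image with a one-pixel black border before calling findContours, then shift the contours back.
# Pad with a 1-px black border so contours touching the image edge are
# closed properly, then undo the resulting (1, 1) offset on each contour.
padded = cv2.copyMakeBorder(temp, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
cnts, _ = cv2.findContours(padded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = [c - 1 for c in cnts]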
Disclaimer: I'm new to Python in general, and especially to the Python API of OpenCV (C++ for the win). Comments, improvements, and highlighting of Python no-gos are highly welcome!
There is a very simple way to find this area, if you make some assumptions that are met in the example image:
The area to be found is bounded on top by a line.
Any additional lines in the image are above the line of interest.
There are no discontinuities in the line.
In this case, the area of the region of interest is given by the sum, over all columns, of the distances from the bottom of the image to the first set pixel. We can compute this with:
import numpy as np
import matplotlib.pyplot as pp
img = pp.imread('/home/cris/tmp/YMMEE.jpg')
img = np.flip(img, axis=0)
pos = np.argmax(img, axis=0)
area = np.sum(pos)
print('Area = %d\n'%area)
This prints Area = 22040.
np.argmax finds the first set pixel on each column of the image, returning the index. By first using np.flip, we flip this axis so that the first pixel is actually the one on the bottom. The index corresponds to the number of pixels between the bottom of the image and the line (not including the set pixel).
Thus, we're computing the area under the line. If you need to include the line itself in the area, add pos.shape[0] to the area (i.e. the number of columns).
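A tiny synthetic check of this logic (a sketch with made-up values): a horizontal line at row 1 of a 5x4 image leaves three rows below it in each of the four columns, so the area should be 3 * 4 = 12.
import numpy as np
# 5x4 test image with a "line" at row 1; rows 2-4 lie below it.
img = np.zeros((5, 4), dtype=np.uint8)
img[1, :] = 255
pos = np.argmax(np.flip(img, axis=0), axis=0)  # per-column distance from bottom
print(np.sum(pos))  # 12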

Reduce the image to the text contents using scikit-image

Here is the image from which I want to extract the text.
How do I remove the black border and reduce the image to only the "50"?
The approach I took:
I tried to use corner detectors (corner_peaks and corner_harris) and pick the first two coordinates from the left and the last two coordinates from the right.
With those four coordinates I cropped the image, and then reduced it further by 5 pixels on all sides.
This is certainly not an efficient way of doing it. I also looked at a few segmentation approaches but could not get them right. I am using scikit-image to solve this.
Using corners might not work, since corner points can also be present in the characters.
Here is what I tried with Hough lines, as described below:
1) First erode the image to minimize the gap between lines and characters.
2) Use the Hough line detection algorithm to detect and delete the lines.
3) Dilate the image to get clear characters.
4) Now that the characters and lines are separated, delete the lines by finding the connected components.
Here is the code implementation of the same in Python:
import cv2
import numpy as np
img = cv2.imread(r'D:\Image\st1.png', 0)
ret, thresh = cv2.threshold(img, 150, 255, cv2.THRESH_BINARY_INV)
# erode the image to reduce the gap between characters and lines,
# so that the Hough lines are detected correctly
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.erode(thresh, kernel, iterations=1)
# find the Canny edge image
canny = cv2.Canny(erosion, 100, 200)
minLineLength = img.shape[1] / 4
lines = cv2.HoughLinesP(image=canny, rho=0.02, theta=np.pi / 500, threshold=10,
                        lines=np.array([]), minLineLength=minLineLength, maxLineGap=10)
a, b, c = lines.shape
# delete the detected lines
for i in range(a):
    cv2.line(erosion, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), 0, 3, cv2.LINE_AA)
# dilate the image to restore clear characters
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.dilate(erosion, kernel, iterations=1)
# find connected components
connectivity = 4
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(erosion, connectivity, cv2.CV_32S)
sizes = stats[1:, -1]
nb_components = nb_components - 1
min_size = 250  # size threshold separating characters from line leftovers
img2 = np.zeros(output.shape, np.uint8)
for i in range(0, nb_components):
    if sizes[i] >= min_size:
        img2[output == i + 1] = 255  # keep only the large (character) components
img = cv2.bitwise_not(img2)
Output image:
