OpenCV: How can I get smoother contours on curves?

I'm having trouble getting smoother contours on curves.
After some image processing I have this image.
I am trying to get smoother curves with this code:
import cv2
import numpy as np

imgWithBridgesBw = convert_to_bw(imgWithBridges)
# add later this mask upper
ellipsekernel20 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# white canvases to draw the contours on (grayscale and color)
enhanceMask = np.ones((InterholesBw.shape[0], InterholesBw.shape[1]), dtype="uint8") * 255
enhanceMaskColor = np.ones((InterholesBw.shape[0], InterholesBw.shape[1], 3), dtype="uint8") * 255
# invert the layer to get fewer blank pixels
imgWithBridgesBwInv = 255 - imgWithBridgesBw
# dilate then erode (a morphological closing)
imgWithBridgesBwInv_dilate = cv2.dilate(imgWithBridgesBwInv, ellipsekernel20, iterations=1)
imgWithBridgesBwInv_erode = cv2.erode(imgWithBridgesBwInv_dilate, ellipsekernel20, iterations=1)
# note: contours are found on the dilated image, so the closed
# (eroded) result above is never actually used
_, allCnts, hier = cv2.findContours(imgWithBridgesBwInv_dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(enhanceMask, allCnts, -1, 0, 2)
cv2.drawContours(enhanceMaskColor, allCnts, -1, (255, 0, 0), 2)
The result is this:
https://dl.dropboxusercontent.com/u/710615/testEhhancBwColor_.jpg
As you can see in this zoomed-in image, I'm not having any success.
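One approach worth trying (a minimal sketch, not from the original post; the kernel sizes are assumptions to tune): blur the binary mask and re-threshold it before extracting contours, which rounds off the staircase edges that morphology alone leaves behind.
import cv2
import numpy as np

# `mask` stands in for imgWithBridgesBwInv from the question
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)
# morphological closing with a larger elliptical kernel fills small gaps
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# Gaussian blur followed by re-thresholding smooths the jagged boundary
blurred = cv2.GaussianBlur(closed, (11, 11), 0)
_, smooth = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)
# OpenCV 4 signature; OpenCV 3 returns three values as in the question
contours, _ = cv2.findContours(smooth, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)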

Related

Seat belt detection using hough transform

I want to detect whether the seat belt is fastened or not. I have used the steps below:
color segmentation
image blurring
edge detection
morphological transform
probabilistic Hough transform
angular filtering
Right now I am not able to extract the feature from the image returned by the morphological transform. I am attaching the output of the morphological transform. Any help will be appreciated.
import cv2
import numpy as np

img = cv2.imread('belt1.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#linek = np.zeros((11,11),dtype=np.uint8)
#linek[5,...]=1
#x=cv2.morphologyEx(gray, cv2.MORPH_OPEN, linek ,iterations=1)
#gray-=x
#kernel = np.ones((5, 5), np.uint8)
#gray = cv2.dilate(gray, kernel, iterations=1)
#gray = cv2.erode(gray, kernel, iterations=1)
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
low_threshold = 50
high_threshold = 150
kernel = np.ones((5,5),np.uint8)
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 80 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 100 # minimum number of pixels making up a line
max_line_gap = 40 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
Here I am not able to extract the feature:
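For the angular filtering step, here is a minimal sketch (my own, continuing from the `lines` and `line_image` variables above; the 20-70 degree band is an assumption to tune) that keeps only the roughly diagonal segments a fastened belt would produce:
import cv2
import numpy as np

# keep only segments whose angle falls in a diagonal band
kept = []
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        angle = abs(np.degrees(np.arctan2(y2 - y1, x2 - x1)))
        if 20 <= angle <= 70:          # assumed band; tune for your camera
            kept.append((x1, y1, x2, y2))
            cv2.line(line_image, (x1, y1), (x2, y2), (0, 0, 255), 3)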

OpenCV circle/ellipse detection with blurred edge

How can I locate the pupil (the small circle in the eyeball) in the following picture and calculate its area? I tried Hough circle detection and ellipse fitting on contours with various thresholds, but none of these naive approaches works very well.
Specifically, HoughCircles detection gets totally lost in the noise, and ellipse detection with pruning often ends up giving the larger circle.
And I'm unsure how to determine a functional threshold without manual adjustment on a trackbar. Can somebody give me some guidance on how to do this accurately?
Eyeball_sample
A simple image processing pipeline should help you achieve your goal.
First, load the image in grayscale. I think the Otsu threshold method is robust enough to extract the pupil region of the eye. An extra morphological step is required to remove noise and unfilled regions.
Then, using connected component analysis, we can isolate the pupil region for further processing.
With this region, we can get the edge by subtracting a dilated version of the region from the region itself, as shown below.
Finally, we can run a circle fitting or an ellipse fitting algorithm to obtain the corresponding shape.
Circle fitting is shown in red, whereas the ellipse is shown in green. Both return the same center position, albeit with slightly different shapes.
Here is the code used. I shrank the image to speed up the process, but it will work the same at the original size.
import cv2
import numpy as np
img = cv2.imread('eye.jpg',0)
small_img = cv2.resize(img,(0,0),fx = 0.25, fy = 0.25)
r,c = small_img.shape
# Threshold objs
_, thresh = cv2.threshold(small_img,0,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Morphological close process to cluster nearby objects
bin_img = cv2.dilate(thresh, None,iterations = 5)
bin_img = cv2.erode(bin_img, None,iterations = 5)
# Analyse connected components
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(bin_img)
# Find circle center and radius
# Radius calculated by averaging the height and width of bounding box
bin_eye = np.zeros((r,c))
cnt_label = labels[r // 2, c // 2]  # component at the image center (integer indices for Python 3)
bin_eye[labels == cnt_label] = 255
area = stats[cnt_label][4]  # stats column 4 is cv2.CC_STAT_AREA
radius = np.sqrt(area / np.pi)
cnt_pt = ((centroids[cnt_label][0]),(centroids[cnt_label][1]))
# fit ellipse
bin_eye_large = cv2.dilate(bin_eye, None,iterations = 1)
# Get ellipse edge
edge_eye = bin_eye_large - bin_eye
# extract edge locations and swap (row, col) to (x, y) for fitting
ellip_pts = np.where(edge_eye > 0)
ellip_pts = np.transpose(ellip_pts)
temp = np.copy(ellip_pts[:,0])
ellip_pts[:,0] = ellip_pts[:,1]
ellip_pts[:,1] = temp
# fit ellipse (fitEllipse expects 32-bit point coordinates)
ellip = cv2.fitEllipse(ellip_pts.astype(np.int32))
# Display final result
edges_color = cv2.cvtColor(small_img,cv2.COLOR_GRAY2BGR)
cv2.circle(edges_color,(int(cnt_pt[0]),int(cnt_pt[1])),int(radius),(0,0,255),1)
cv2.circle(edges_color,(int(cnt_pt[0]),int(cnt_pt[1])),5,(0,0,255),1)
cv2.ellipse(edges_color,ellip, (0,255,0))
cv2.circle(edges_color,(int(ellip[0][0]),int(ellip[0][1])),5,(0,255,0),1)
cv2.imshow('edges_color',edges_color)
cv2.imshow('bin_img',bin_img)
cv2.imshow('eye_label',bin_eye)
cv2.imshow('eye_edge',edge_eye)
cv2.waitKey(0)

Image Classification using openCV, feature extraction and model building

I am currently working on a project where the problem statement is to detect handwritten text in an image of a particular form. As a pre-processing step I have extracted the texts in the form of bounding boxes, and I have around 1500 images of text extracted from the form image, out of which 50 are handwritten.
The problem is: how do I now use these extracted images to train a classifier model that will classify the images as printed or handwritten text? I have no prior knowledge of deep learning. Any help will be appreciated. I am uploading the image and the extracted images, as well as the code to extract the texts from the images.
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from imutils import contours
import imutils

im_ns = cv.imread('~/Image processing/IMG_20180921_111952.png')
gray = cv.cvtColor(im_ns, cv.COLOR_BGR2GRAY)
blurred_g = cv.GaussianBlur(gray, (11, 11), 0)
ret, th1 = cv.threshold(blurred_g, 127, 255, cv.THRESH_BINARY)
th2 = cv.adaptiveThreshold(blurred_g, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
th3 = cv.adaptiveThreshold(blurred_g, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
## Detecting horizontal lines and removing them
# th3_di is used below but its definition is missing from the post;
# it is presumably a dilated copy of th3
th3_di1 = th3_di.copy()
hor = int(round(th3_di1.shape[1] / 30, 0))
hor_struc = cv.getStructuringElement(cv.MORPH_RECT, (hor, 1))
bw_hor_er = cv.erode(th3_di1, hor_struc, iterations=1)
bw_hor_di = cv.dilate(th3_di1, hor_struc, iterations=1)
for i in range(0, bw_hor_di.shape[0]):
    for j in range(0, bw_hor_di.shape[1]):
        if bw_hor_di[i, j] == 0:
            th3_di1[i, j] = 255
        else:
            th3_di1[i, j] = th3_di1[i, j]
plt.figure(figsize=(20, 25))
plt.imshow(th3_di1, 'gray')
# perform a connected component analysis on the thresholded
# image, then initialize a mask to store only the "large"
# components
labels = measure.label(th3_di1, neighbors=4, background=255)
mask = np.zeros(th3_di1.shape, dtype="uint8")
plt.figure(figsize=(30, 25))
plt.imshow(labels)
# loop over the unique components
for lab in np.unique(labels):
    # if this is the background label, ignore it
    if lab == 0:
        continue
    # otherwise, construct the label mask and count the
    # number of pixels
    labelMask = np.zeros(th3_di.shape, dtype="uint8")
    labelMask[labels == lab] = 255
    numPixels = cv.countNonZero(labelMask)
    # if the number of pixels in the component is sufficiently
    # large, then add it to our mask of "large blobs"
    if numPixels > 8:
        mask = cv.add(mask, labelMask)
plt.figure(figsize=(30, 24))
plt.imshow(mask, 'gray')
# find the contours in the mask, then sort them from left to
# right
cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL,
                       cv.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cnts = contours.sort_contours(cnts)[0]
# loop over the contours to draw rectangles on the th3 image with Gaussian thresholding
for (i, c) in enumerate(cnts):
    # draw the bounding rectangle on the image
    (x, y, w, h) = cv.boundingRect(c)
    #((cX, cY), radius) = cv.minEnclosingCircle(c)
    cv.rectangle(th3, (x, y), (x + w, y + h), (0, 255), 2)
    cv.putText(th3, "", (x + w + 10, y + h), 0, 0.3, (0, 255, 0))
# show the output image
cv.imshow("Image", th3)
cv.waitKey(10000)
cv.destroyAllWindows()
## Extracting the bounding boxes
idx = 0
for (i, c) in enumerate(cnts):
    idx += 1
    x, y, w, h = cv.boundingRect(c)
    roi = im_ns[y:y + h, x:x + w]
    #((cX, cY), radius) = cv.minEnclosingCircle(c)
    #cv.rectangle(im_ns,(x,y),(x+w,y+h),(0,255),2)
    cv.imwrite(str(idx) + '.jpg', roi)
Images:
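One classical baseline that avoids deep learning entirely is to describe each crop with HOG features and train a linear SVM. The sketch below is my own, not from the original post: it assumes the crops are collected into two hypothetical path lists (`printed_paths`, `handwritten_paths`) and uses scikit-image and scikit-learn.
import cv2 as cv
import numpy as np
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split

def extract_features(paths):
    # resize each crop to a fixed size and describe it with HOG
    feats = []
    for p in paths:
        img = cv.imread(p, cv.IMREAD_GRAYSCALE)
        img = cv.resize(img, (128, 32))
        feats.append(hog(img, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(2, 2)))
    return np.array(feats)

# printed_paths / handwritten_paths are hypothetical lists of crop
# filenames; labels: 0 = printed, 1 = handwritten
X = np.vstack([extract_features(printed_paths),
               extract_features(handwritten_paths)])
y = np.array([0] * len(printed_paths) + [1] * len(handwritten_paths))
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
# class_weight='balanced' compensates for the 50-vs-1450 imbalance
clf = LinearSVC(class_weight='balanced').fit(X_train, y_train)
print('accuracy:', clf.score(X_test, y_test))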

Line detection in noisy image (and no detection when it is not present)

I have tried to extract the dark line inside very noisy images, without success. Any tips?
My current steps for the first example:
1) Clahe: with clip_limit = 10 and grid_size = (8,8)
2) Box Filter: with size = (5,5)
3) Inverted Image: 255 - image
4) Threshold: when inverted_image < 64
UPDATE
I have performed some preprocessing steps to improve the quality of the tested images. I adjusted my ROI mask to crop the top and bottom (because they have low intensities) and added an illumination correction to see the line better. The current images are below.
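(One simple way to do such an illumination correction, sketched here with an assumed kernel size, is to divide the image by a heavily blurred copy of itself:)
import cv2

img = cv2.imread('noisy.png', cv2.IMREAD_GRAYSCALE)
# estimate the slowly varying illumination with a large Gaussian blur;
# the 51x51 kernel is an assumption to tune to the image size
background = cv2.GaussianBlur(img, (51, 51), 0)
# dividing by the background flattens the illumination field
corrected = cv2.divide(img, background, scale=255)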
Even though the images are noisy, you are only looking for straight lines towards the top of the image. So why not use some kind of matched filter with morphological operations?
EDIT: I have modified it.
1) Use a median filter along the x and y axes, and normalize the images.
2) Apply a matched filter with all possible orientations of lines.
% im=imread('JwXON.png');
% im=imread('Fiy72.png');
% im=imread('Ya9AN.png');
im = imread('OcgaIt8.png');
imOrig = im;
matchesx = fl(im, 1);
matchesy = fl(im, 0);
matches = matchesx + matchesy;
[x, y] = find(matches);
figure(1);
imagesc(imOrig), axis image
hold on, plot(y, x, 'r.', 'MarkerSize', 5)
colormap gray
%----------
function matches = fl(im, direc)
    if size(im,3) ~= 1
        im = double(rgb2gray(im));
    else
        im = double(im);
    end
    [n, m] = size(im);
    mask = bwmorph(imfill(im>0, 'holes'), 'thin', 10);
    indNaN = find(im==0); im = 255-im; im(indNaN) = 0;
    N = n - numel(find(im(:,ceil(m/2))==0));
    N = ceil(N*0.8); % possible line length
    % Normalize the image with a median filter
    if direc
        background = medfilt2(im, [1,30], 'symmetric');
        thetas = 31:149;
    else
        background = medfilt2(im, [30,1], 'symmetric');
        thetas = [1:30 150:179];
    end
    normIm = im - background;
    normIm(normIm<0) = 0;
    % initialize matched filter result
    matches = im*0;
    % search over different line angles
    for theta = thetas
        normIm2 = imclose(normIm>0, strel('line',5,theta));
        normIm3 = imopen(normIm2>0, strel('line',N,theta));
        matches = matches + normIm3;
    end
    % eliminate false alarms
    matches = imclose(matches, strel('disk',2));
    matches = matches>3 & mask;
    matches = bwareaopen(matches, 100);

How to detect document from a picture in opencv?

I am trying to design an app similar to CamScanner. For that, I have to take an image and then find the document in it. I started off with the code described here - http://opencvpython.blogspot.in/2012/06/sudoku-solver-part-2.html
I found the contours, and the rectangular contour with max area should be the required document. For every contour, I am finding an approximated closed polygon with approxPolyDP. Of all the approximations with 4 points, the one with max area should be the required document. However, this method is not working.
The input image for the process is this
I tried to draw the contour with max area, and this is the result (the contour is inside the letter 'C').
Code:
import cv2

img = cv2.imread('bounce.jpeg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

def biggestRectangle(contours):
    biggest = None
    max_area = 0
    indexReturn = -1
    for index in range(len(contours)):
        i = contours[index]
        area = cv2.contourArea(i)
        if area > 100:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.1 * peri, True)
            if area > max_area: #and len(approx)==4:
                biggest = approx
                max_area = area
                indexReturn = index
    return indexReturn

indexReturn = biggestRectangle(contours)
cv2.imwrite('hola.png', cv2.drawContours(img, contours, indexReturn, (0, 255, 0)))
What is going wrong in this? Is there any other method by which I can capture the document in this picture?
Try this:
Output image:
import cv2
import numpy as np

img = cv2.imread('bounce.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
invGamma = 1.0 / 0.3
table = np.array([((i / 255.0) ** invGamma) * 255
                  for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
gray = cv2.LUT(gray, table)
ret, thresh1 = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)
#thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)
_, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

def biggestRectangle(contours):
    biggest = None
    max_area = 0
    indexReturn = -1
    for index in range(len(contours)):
        i = contours[index]
        area = cv2.contourArea(i)
        if area > 100:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.1 * peri, True)
            if area > max_area: #and len(approx)==4:
                biggest = approx
                max_area = area
                indexReturn = index
    return indexReturn

indexReturn = biggestRectangle(contours)
hull = cv2.convexHull(contours[indexReturn])
cv2.imwrite('hola.png', cv2.drawContours(img, [hull], 0, (0, 255, 0), 3))
#cv2.imwrite('hola.png',thresh1)
I would do it like this:
Do preprocessing like blurring / Canny edge detection.
Extract all lines from the image using the Hough line transform (see the OpenCV docs).
Use the 4 strongest lines.
Try to construct the contour of the document from the four lines, as sketched below.
Right now I do not have OpenCV installed, so I cannot try this approach, but maybe it leads you in the right direction.
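A minimal sketch of the last two steps (my own, with assumed parameters): in OpenCV's implementation, cv2.HoughLines returns lines sorted by decreasing accumulator votes, so the first four entries are the strongest; intersecting them pairwise gives candidate document corners.
import cv2
import numpy as np

def intersection(l1, l2):
    # each line is (rho, theta); solve the 2x2 system for (x, y)
    r1, t1 = l1
    r2, t2 = l2
    A = np.array([[np.cos(t1), np.sin(t1)],
                  [np.cos(t2), np.sin(t2)]])
    b = np.array([r1, r2])
    if abs(np.linalg.det(A)) < 1e-6:   # near-parallel lines don't intersect
        return None
    x, y = np.linalg.solve(A, b)
    return int(x), int(y)

img = cv2.imread('bounce.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)
# threshold 150 is an assumption; this assumes at least 4 lines are found
lines = cv2.HoughLines(edges, 1, np.pi / 180, 150)[:4, 0]

corners = [p for i in range(4) for j in range(i + 1, 4)
           if (p := intersection(lines[i], lines[j])) is not None]
# keep only intersections inside the image: the remaining points
# are the candidate document corners
h, w = gray.shape
corners = [(x, y) for x, y in corners if 0 <= x < w and 0 <= y < h]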
