How to detect the edges of a paper pattern? - opencv

I'm building a new machine and have a problem with edge detection. I have a piece of paper placed on a cylinder, as shown in the image below. How can I detect the edges of this paper?
I tried to build an LED backlight, but the paper is very large, so my machine doesn't have enough space for it.

You can use the code below as a reference. The idea is to use the cv2.inRange function to segment the light green color from the image (not dark green, otherwise the edge of one of the axes would also be detected), and then apply Canny edge detection (cv2.Canny) to the grayscale version of the segmented image.
import cv2
import numpy as np
img = cv2.imread('cylinder.png')
# convert to HSV color space
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Threshold the HSV image to get green colors by defining range of green color in HSV
mask = cv2.inRange(img_hsv, (36,0,0), (55,255,255))
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img, img, mask = mask)
# convert the masked (BGR) result to grayscale
img_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
# canny edge detection
edges = cv2.Canny(img_gray, 100, 200)
cv2.imshow('Edges', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
output:
EDIT: Making some modifications to the above code, such as segmenting the yellow portion along with the light green and applying a Gaussian blur before passing the image to cv2.Canny, gives an even better output.
Code:
# Threshold the HSV image to get both green and yellow colors by defining range of color in HSV
mask_green = cv2.inRange(img_hsv, (36,0,0), (55,255,255))
mask_yellow = cv2.inRange(img_hsv, (21, 39, 64), (38, 255, 255))
mask = cv2.bitwise_or(mask_green, mask_yellow)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img, img, mask = mask)
# convert the masked (BGR) result to grayscale
frame_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
gaussian_blurred = cv2.GaussianBlur(frame_gray,(5, 3), 0)
# canny edge detection
edges = cv2.Canny(gaussian_blurred, 100, 200)
output:
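If the goal is the paper's outline rather than just an edge map, one possible follow-up (a sketch, not part of the original answer, assuming the paper shows up as the large unmasked region inside the green/yellow mask) is to invert the mask and keep the largest contour:
# sketch: assumes the paper is the large dark (unmasked) region in `mask`
inv = cv2.bitwise_not(mask)
cnts = cv2.findContours(inv, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # handle OpenCV 3.x / 4.x return values
if cnts:
    paper = max(cnts, key=cv2.contourArea)  # assumption: the largest blob is the paper
    outline = img.copy()
    cv2.drawContours(outline, [paper], -1, (0, 0, 255), 2)
    cv2.imshow('paper outline', outline)
    cv2.waitKey(0)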

Related

filtering out anything besides white in opencv

I am currently developing a robot that stays within the lines and avoids circles on the ground that are the same color as the lines. I want to print all the (x, y) coordinates to the terminal to make sure I am detecting the correct pixels. I wrote a program that I thought filtered out everything except white and printed the x, y coordinates of the white pixels, but it doesn't seem to be working: I passed in a completely black image and a completely white image and received the same coordinates. Can anyone tell me what I am doing wrong?
import cv2
import numpy as np

#cap = cv2.VideoCapture(1)
frame = cv2.imread("black.png")

while(True):
    #_, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of white color in HSV
    lower_white = np.array([0, 0, 0], dtype=np.uint8)
    upper_white = np.array([0, 0, 255], dtype=np.uint8)

    # Threshold the HSV image to get only white colors
    mask = cv2.inRange(hsv, lower_white, upper_white)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)

    coord = cv2.findNonZero(mask)
    print(coord)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
Black image
White image
Test image
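One thing worth checking in the code above (an observation, not a verified fix): with lower_white = [0, 0, 0] and upper_white = [0, 0, 255], hue and saturation are pinned to 0, so a pure black pixel (V = 0) and a pure white pixel (V = 255) both fall inside the range, which would explain why both test images return the same coordinates. A minimal sketch of a more conventional white range in HSV (the exact thresholds are assumptions and need tuning for your lighting):
import cv2
import numpy as np

frame = cv2.imread("black.png")
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# white-ish pixels: any hue, low saturation, high value (thresholds are assumptions)
lower_white = np.array([0, 0, 200], dtype=np.uint8)
upper_white = np.array([179, 40, 255], dtype=np.uint8)

mask = cv2.inRange(hsv, lower_white, upper_white)
coord = cv2.findNonZero(mask)  # None for an all-black input, an Nx1x2 array for white regions
print(coord)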

How to obtain long edge between black and white thresholds?

Using OpenCV, I converted the green areas to white (255) and everything else to black (0): see CURRENT OUTPUT.
Using Canny, Laplacian, and Sobel edge detection yielded many small edges instead of one long, consistent edge: see the CANNY EDGE example.
How could I achieve one long edge, as seen in the desired output?
Relevant Code:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt

image = cv.imread('grass pic.jpg')
lane_image = np.copy(image)
pic = cv.cvtColor(lane_image, cv.COLOR_BGR2RGB)
lower = np.array([24, 0, 0])
upper = np.array([177, 194, 20])
green_selection = cv.inRange(pic, lower, upper)
canny = cv.Canny(green_selection, 50, 150)
plt.imshow(canny, cmap='gray')
plt.show()
CURRENT OUTPUT:
CANNY EDGE Example
DESIRED OUTPUT:
Converting the image to HSV colour space might help you to detect your desired line.
import cv2
import numpy as np
image = cv2.imread("image.png")
HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
low = np.array([23, 98, 0])
high = np.array([253, 255, 255])
mask = cv2.inRange(HSV, low, high)
cv2.imshow("mask", mask)
cv2.imwrite("mask.png", mask)
result = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow("result", result)
cv2.imwrite("result.png", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Mask Image
Result Image
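If a single long edge is the goal, one option worth trying after building the mask above (a sketch, not part of the original answer) is to keep only the largest contour of the mask and draw its boundary, rather than running Canny on the fragmented result:
# sketch: continues from `mask` above; keeps only the boundary of the largest region
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # handle OpenCV 3.x / 4.x return values
largest = max(cnts, key=cv2.contourArea)
long_edge = np.zeros_like(mask)
cv2.drawContours(long_edge, [largest], -1, 255, 1)  # one-pixel boundary of the largest blob
cv2.imshow("long edge", long_edge)
cv2.waitKey(0)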

Blurred Circle detection

I am new to OpenCV and want to detect the center points of these circles. I tried Hough circles with thresholding, but it doesn't seem to generate good results all the time.
This image is easy to handle using contours and thresholding:
It is harder to do for this one:
Thresholding and Hough circles don't work with this image:
Adding more images for help
Can you suggest any method that will be reliable for all the images?
Since the circle is the only bright thing in the image, we can get the center by looking for the centroid of the white blob. We'll auto-threshold with Otsu's method and use findContours to get the centroid of the mask.
import cv2
import numpy as np

# load image
img = cv2.imread("circ1.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# threshold
gray = cv2.GaussianBlur(gray, (5, 5), 0)
_, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# contour (handle OpenCV 3.x / 4.x return values)
contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]

# center
M = cv2.moments(contours[0])
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
center = (cx, cy)

# draw
img = cv2.circle(img, center, 4, (0, 0, 200), -1)

# show
cv2.imshow("marked", img)
cv2.imshow("mask", mask)
cv2.waitKey(0)
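If the blur or background leaves more than one blob after thresholding, a small variation on the above (a sketch, assuming the circle is still the largest bright blob) is to take the largest contour instead of contours[0]:
# variation: pick the largest blob when thresholding leaves more than one contour
big = max(contours, key=cv2.contourArea)
M = cv2.moments(big)
center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))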

Is there a way to remove every component in the image except the beans using OpenCV Python?

I have been trying to remove every component from the image except the beans. I tried using edge detection and contours, but I couldn't get it right.
If the "beans" are the dark region, then here is one way in Python OpenCV.
- Read the input
- Threshold on the blue color
- Apply morphology close to clean it up a little
- Invert
- Find the largest contour
- Draw a white filled contour on a black background as a mask
- Use the mask to make everything in the input black except the "beans"
- Save the results
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("beans.jpg")
lower =(80,70,30)
upper = (220,220,180)
# create the mask and use it to change the colors
thresh = cv2.inRange(img, lower, upper)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# invert
morph = 255 - morph
# find largest contour
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# draw white filled contour on black background as mask
mask = np.zeros_like(thresh)
cv2.drawContours(mask, [big_contour], 0, 255, -1)
# apply mask to img
result = img.copy()
result[mask==0] = (0,0,0)
# write result to disk
cv2.imwrite("beans_thresh2.png", thresh)
cv2.imwrite("beans_morph2.png", morph)
cv2.imwrite("beans_mask2.png", mask)
cv2.imwrite("beans_result2.png", result)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Mask Image:
Result Image:

OpenCV problem with fully detecting the contours of shapes

I am doing a university project where I try to detect UI elements in screenshots of Android applications using OpenCV. I am not expecting 100 percent accuracy for this detection of UI elements.
My code is below. I convert the image to grayscale, apply a Gaussian blur, and then use adaptive thresholding to convert the image to binary, after which I use the findContours method.
import argparse
import cv2
import numpy as np
import imutils

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to an image", required=True)
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray", gray)
cv2.waitKey(0)

blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 4)
cv2.imshow("thresh", thresh)
cv2.waitKey(0)

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cv2.drawContours(image, cnts, -1, (0, 255, 0), 1)
cv2.imshow("contours", image)
cv2.waitKey(0)

for c in cnts:
    area = cv2.contourArea(c)
    print(area)
    if area > 50:
        M = cv2.moments(c)
        cX = int(M['m10'] / M['m00'])
        cY = int(M['m01'] / M['m00'])
        #cv2.drawContours(image, [c], -1, (0,255,0), 2)  # draw contours on image
        (x, y, w, h) = cv2.boundingRect(c)  # for each contour get a bounding rectangle
        mask = np.zeros(image.shape[:2], dtype="uint8")  # set up a mask matching the image dimensions
        mask[y: y + h, x: x + w] = 255  # set the region of interest to white
        to_display = cv2.bitwise_and(image, image, mask=mask)  # carry out the bitwise AND
        #cv2.putText(image, 'center', (c))
        cv2.imshow("Image", to_display)
        cv2.waitKey(0)
This is the screenshot that I am running my code on.
The leftmost image shows the screenshot after applying the threshold to it.
The middle image shows the result I get after drawing the contours.
The last image shows what I get when examining each individual contour. The contour covers the line but does not encapsulate the rectangle.
I have a few questions.
1) Is it possible to sieve out the contours for the white rectangles? What changes do I have to make to my code to achieve this?
2) I am trying to sieve out the unimportant contours, e.g. the words, and I was wondering if I could use the getArea() function to help with this. The idea is to set a minimum contour size to filter out the smaller contours that correspond to the words.
This is another screenshot in which I have tried to identify the "objects".
I face the same issue here: I can't identify the white rectangles and am only identifying their borders.
I would appreciate any form of help, as I am still new to OpenCV.
Original images before processing:
There is no need to blur. In fact, it makes things harder; simple thresholding works best with hard transitions. The second image is the easier one: there are white items on a grayish background, so selecting only very white values selects the items.
Result:
Code:
import cv2

# load image
img = cv2.imread("app.png")
# convert to gray
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# create a mask that holds only very white values (above 250)
ret, thresh1 = cv2.threshold(img2, 250, 255, cv2.THRESH_BINARY)
# find contours in mask (handle OpenCV 3.x / 4.x return values)
contours = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# select large contours (menu items only)
for cnt in contours:
    print(cv2.contourArea(cnt))
    if cv2.contourArea(cnt) > 5000:
        # draw a rectangle around the item
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
        #cv2.drawContours(img, [cnt], 0, (0,255,0), 3)  # also works, but has issues with letters at the last item
# show image
cv2.imshow("img", img)
#cv2.imshow("mask", thresh1)  # shows the mask
cv2.waitKey(0)
cv2.destroyAllWindows()
The first image is more complex because it is divided by a very thin red line. Selecting colors is easier in HSV color space. Next, the red values are used to create a mask, some noise is removed, and then contours are detected.
Result:
import cv2
import numpy as np

# load image
img = cv2.imread("app2.png")
# convert to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# set lower and upper color limits
lower_val = np.array([0, 0, 0])
upper_val = np.array([20, 50, 255])
# Threshold the HSV image
mask = cv2.inRange(hsv, lower_val, upper_val)
# remove noise
kernel = np.ones((1, 2), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
kernel = np.ones((1, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# find contours in mask (handle OpenCV 3.x / 4.x return values)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# select large contours (menu items only)
for cnt in contours:
    print(cv2.contourArea(cnt))
    if cv2.contourArea(cnt) > 1000:
        # draw a rectangle around the item
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
# show image
cv2.imshow("img", img)
cv2.imshow("mask", mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
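If the aim is to inspect each detected element on its own (as in the per-contour mask from the question), the same bounding rectangles can be used to crop the elements from a clean copy of the screenshot; a small sketch under that assumption:
# sketch: crop each large item from a clean copy of the input for closer inspection
clean = cv2.imread("app2.png")
for cnt in contours:
    if cv2.contourArea(cnt) > 1000:
        x, y, w, h = cv2.boundingRect(cnt)
        roi = clean[y:y + h, x:x + w]  # region of interest for this UI element
        cv2.imshow("item", roi)
        cv2.waitKey(0)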
