I use the function findContours to find the borders of the driveway. However, since the driveway usually stretches across the image, I get polygons like this. What I really want is line instances, so I am wondering whether there is any way to break the polygon into line instances.
You can use approxPolyDP to simplify a contour down into larger lines.
The epsilon value specifies the maximum distance the approximation may deviate from the original contour, so a larger epsilon collapses small segments into longer lines.
import cv2
import numpy as np
import random
# turns a list into a tuple
def tup(arr):
    return (int(arr[0]), int(arr[1]));
# load image
img = cv2.imread("rect.png");
img = img[:,1:]; # there's a weird thing on the left side
# get mask
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
mask = cv2.inRange(gray, 10, 255);
# dilate and erode to clean up lines
kernel = np.ones((3,3), np.uint8);
mask = cv2.dilate(mask, kernel, iterations = 2);
mask = cv2.erode(mask, kernel, iterations = 2);
# get contours (OpenCV 3.x returns 3 values; OpenCV 4.x returns (contours, hierarchy))
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
# get approximate lines
epsilon = 0.005*cv2.arcLength(contours[0],True); # epsilon is the maximum deviation from the original contour
approx = cv2.approxPolyDP(contours[0],epsilon,True);
# draw lines
for a in range(len(approx)):
    color = [random.randint(0,255) for col in range(3)];
    start = approx[a-1][0]; # points have an extra layer of brackets
    end = approx[a][0];
    cv2.line(img, tup(start), tup(end), color, 2);
# show
cv2.imshow("img", img);
cv2.waitKey(0);
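If the script has to run on both OpenCV 3.x and 4.x, a small compatibility shim (my addition, not part of the original answer) can absorb the differing return signatures of findContours:
# OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
res = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = res[0] if len(res) == 2 else res[1]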
I can read text from an image using OCR. However, it works line by line.
I want to now group text based on solid lines surrounding the text.
For example, consider the rectangle banners below. I can read text line by line. Fine! Now I want to group the text by Board A, B, and C and hold it in some data structure, so that I can identify which lines belong to which board. It is given that the images will be diagrams like this, with solid lines around each block of text.
Please guide me on the right approach.
As mentioned in the comments by Yunus, you need to crop sub-images and feed them to an OCR module individually. An additional step could be ordering of the contours.
Approach:
Obtain binary image and invert it
Find contours
Crop sub-images based on the bounding rectangle for each contour
Feed each sub-image to OCR module (I used easyocr for demonstration)
Store text for each board in a dictionary
Code:
# Libraries import
import cv2
from easyocr import Reader
reader = Reader(['en'])
img = cv2.imread('board_text.jpg',1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# inverse binary
th = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
# find contours and sort them from left to right
contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=lambda x: [cv2.boundingRect(x)[0], cv2.boundingRect(x)[1]])
#initialize dictionary
board_dictionary = {}
# iterate each contour and crop bounding box
for i, c in enumerate(contours):
    x,y,w,h = cv2.boundingRect(c)
    crop_img = img[y:y+h, x:x+w]
    # feed cropped image to easyOCR module
    results = reader.readtext(crop_img)
    # result is output per line
    # create a list to append all lines in cropped image to it
    board_text = []
    for (bbox, text, prob) in results:
        board_text.append(text)
    # convert list of words to single string
    board_para = ' '.join(board_text)
    #print(board_para)
    # store string within a dictionary
    board_dictionary[str(i)] = board_para
Dictionary Output:
board_dictionary
{'0': 'Board A Board A contains Some Text, That goes Here Some spaces and then text again', '1': 'Board B Board B has some text too but sparse.', '2': 'Board €C Board C is wide and contains text with white spaces '}
Drawing each contour
img2 = img.copy()
for i, c in enumerate(contours):
    x,y,w,h = cv2.boundingRect(c)
    img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (0,255,0), 3)
Note:
While working on different images, make sure the ordering is correct (a sketch of reading-order sorting is given below).
The choice of OCR module is yours; pytesseract and easyocr are the options I know.
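For reference, a minimal sketch of sorting contours in reading order (top-to-bottom, then left-to-right); the row tolerance row_height is a hypothetical value you would tune per image:
import cv2

def sort_reading_order(contours, row_height=50):
    # bin contours into rows by their top y coordinate, then sort each row by x
    boxes = [cv2.boundingRect(c) for c in contours]
    order = sorted(range(len(contours)),
                   key=lambda i: (boxes[i][1] // row_height, boxes[i][0]))
    return [contours[i] for i in order]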
This can be done by performing the following steps:
Find the shapes.
Compute the shape centers.
Find the text boxes.
Compute the text boxes centers.
Associate the textboxes with shapes based on distance.
The code is as follows:
import cv2
from easyocr import Reader
import math
shape_number = 2
image = cv2.imread("./ueUco.jpg")
deep_copy = image.copy()
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(image_gray, 150, 255, cv2.THRESH_BINARY)
thresh = 255 - thresh
shapes, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image=deep_copy, contours=shapes, contourIdx=-1, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
shape_centers = []
for shape in shapes:
    # assumes rectangular shapes reduced to their 4 corner points by CHAIN_APPROX_SIMPLE
    row = int((shape[0][0][0] + shape[3][0][0])/2)
    column = int((shape[3][0][1] + shape[2][0][1])/2)
    center = (row, column, shape)
    shape_centers.append(center)
# cv2.imshow('Shapes', deep_copy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
languages = ['en']
reader = Reader(languages, gpu = True)
results = reader.readtext(image)
def cleanup_text(text):
    # strip non-ASCII characters so cv2.putText can draw the text
    return "".join([c if ord(c) < 128 else "" for c in text]).strip()
for (bbox, text, prob) in results:
    text = cleanup_text(text)
    (tl, tr, br, bl) = bbox
    tl = (int(tl[0]), int(tl[1]))
    tr = (int(tr[0]), int(tr[1]))
    br = (int(br[0]), int(br[1]))
    bl = (int(bl[0]), int(bl[1]))
    column = int((tl[0] + tr[0])/2)
    row = int((tr[1] + br[1])/2)
    center = (row, column, bbox)
    distances = []
    for iteration, shape_center in enumerate(shape_centers):
        shape_row = shape_center[0]
        shape_column = shape_center[1]
        dist = int(math.dist([column, row], [shape_row, shape_column]))
        distances.append(dist)
    min_value = min(distances)
    min_index = distances.index(min_value)
    if min_index == shape_number:
        cv2.rectangle(image, tl, br, (0, 255, 0), 2)
        cv2.putText(image, text, (tl[0], tl[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite(f"image_{shape_number}.jpg", image)
cv2.destroyAllWindows()
The output looks like this.
Please note that this solution is almost complete. You just have to collect the text contained in each shape and put it in your desired data structure; a sketch of that step follows the note below.
Note: shape_number represents the shape that you want to consider.
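A minimal sketch of that step, reusing results, shape_centers, and cleanup_text from the code above (the dictionary layout is my assumption, not part of the original answer):
import math

shape_texts = {i: [] for i in range(len(shape_centers))}
for (bbox, text, prob) in results:
    (tl, tr, br, bl) = bbox
    column = int((tl[0] + tr[0]) / 2)
    row = int((tr[1] + br[1]) / 2)
    # assign the text box to its nearest shape center
    distances = [math.dist([column, row], [sc[0], sc[1]]) for sc in shape_centers]
    nearest = distances.index(min(distances))
    shape_texts[nearest].append(cleanup_text(text))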
There is another solution that I would like you to work on.
Find all the text boxes.
Compute the centers for text boxes.
Run k-means clustering on the centers.
I would prefer the second solution, but for the time being I implemented the first one. A rough sketch of the clustering step is given below.
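Hedged sketch of the clustering idea: cluster the text-box centers with cv2.kmeans. Here centers_list is hypothetical placeholder data, and K (the number of shapes) is assumed to be known in advance.
import numpy as np
import cv2

centers_list = [(120, 80), (130, 95), (400, 310), (410, 330)]  # placeholder data
K = 2
data = np.float32(centers_list)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, _ = cv2.kmeans(data, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# labels[i] is the cluster (shape) index assigned to the i-th text box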
Original Image
Click here for the image
For this, I am trying to detect the underlines first. But as the underlines might be tilted, this code:
import cv2
import numpy as np
import time
from google.colab.patches import cv2_imshow
from collections import OrderedDict
# load the image
image = cv2.imread("line_detected.png")
cv2.waitKey(0)
# Grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find Canny edges
font = cv2.FONT_HERSHEY_COMPLEX
edged = cv2.Canny(gray, 30, 200)
cv2.waitKey(0)
# Finding Contours
# Use a copy of the image e.g. edged.copy()
# since findContours alters the image
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2_imshow(edged)
cv2.waitKey(0)
print("Number of Contours found = " + str(len(contours)))
# Draw all contours
# -1 signifies drawing all contours
# cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
mask = np.ones(image.shape[:2], dtype="uint8") * 255
d=OrderedDict()
coords=[]
nuclei = []
l=[]
heading=[]
images=[]
lvalue=0
line=[]
h=[]
contours = contours[::-1]
for cnt in contours:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
    if len(approx) == 2:  # was len(approx==2), which is always truthy
        x, y, w, h = cv2.boundingRect(cnt)
        # print(h)
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)
cv2_imshow(image)
is not able to detect the slanting underlines properly. I also want to extend this code to detect only the gray underlines. "minor differences" has a single underline, but because it is slanted/tilted, the code reads it as two straight lines. It also reads the images on the left, which it should not (tesseract gives weird outputs for them).
To isolate the gray shade only, I found this masking snippet online:
lower_range = np.array([110,50,50])
upper_range = np.array([130,255,255])
mask = cv2.inRange(hsv, lower_range, upper_range)
But I don't know how to incorporate it into the code... I'm a beginner; any help is much appreciated!
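For context, a minimal sketch of how an inRange mask is usually wired into a contour pipeline. Note that the ranges above select blue hues in HSV, whereas gray shades have low saturation, so the bounds below are placeholders to tune:
import cv2
import numpy as np

image = cv2.imread("line_detected.png")
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# gray pixels have low saturation; these bounds are placeholders to tune
lower_range = np.array([0, 0, 50])
upper_range = np.array([180, 60, 200])
mask = cv2.inRange(hsv, lower_range, upper_range)
# run the contour pipeline on the mask instead of the Canny edges
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]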
I have a picture like this:
And then I transform it into binary image and use canny to detect edge of the picture:
gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
edges = cv.Canny(gray, 50, 150)  # assumed Canny step; thresholds are illustrative
edge = Image.fromarray(edges)    # PIL.Image
And then I get the result as:
I want to get the area of 2 like this:
My solution is to use HoughLines to find lines in the picture and calculate the area of triangle formed by lines. However, this way is not precise because the closed area is not a standard triangle. How to get the area of region 2?
A simple approach using floodFill and countNonZero is shown in the following code snippet. First, my standard quote on contourArea from the help:
The function computes a contour area. Similarly to moments, the area is computed using the Green formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using drawContours or fillPoly, can be different. Also, the function will most certainly give a wrong results for contours with self-intersections.
Code:
import cv2
import numpy as np
# Input image
img = cv2.imread('images/YMMEE.jpg', cv2.IMREAD_GRAYSCALE)
# Needed due to JPG artifacts
_, temp = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
# Dilate to better detect contours
temp = cv2.dilate(temp, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
# Find largest contour
cnts, _ = cv2.findContours(temp, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_NONE)
largestCnt = []
for cnt in cnts:
if (len(cnt) > len(largestCnt)):
largestCnt = cnt
# Determine center of area of largest contour
M = cv2.moments(largestCnt)
x = int(M["m10"] / M["m00"])
y = int(M["m01"] / M["m00"])
# Initialize mask for flood filling
height, width = temp.shape
mask = np.ones((height + 2, width + 2), np.uint8) * 255
mask[1:height, 1:width] = 0
# Generate intermediate image, draw largest contour, flood filled
temp = np.zeros(temp.shape, np.uint8)
temp = cv2.drawContours(temp, largestCnt, -1, 255, cv2.FILLED)  # draws every boundary point; works since CHAIN_APPROX_NONE keeps all of them
_, temp, mask, _ = cv2.floodFill(temp, mask, (x, y), 255)
temp = cv2.morphologyEx(temp, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
# Count pixels in desired region
area = cv2.countNonZero(temp)
# Put result on original image
img = cv2.putText(img, str(area), (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 255)
cv2.imshow('Input', img)
cv2.imshow('Temp image', temp)
cv2.waitKey(0)
Temporary image:
Result image:
Caveat: findContours has some problems on the right side, where the line is very close to the bottom image border, possibly resulting in some pixels being omitted.
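A possible workaround (my assumption, not verified on the original image) is to pad the binary image with a one-pixel black border before the findContours call, so the line never touches the image boundary:
# pad by one pixel on every side so contours near the border stay intact
temp = cv2.copyMakeBorder(temp, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)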
Disclaimer: I'm new to Python in general, and specially to the Python API of OpenCV (C++ for the win). Comments, improvements, highlighting Python no-gos are highly welcome!
There is a very simple way to find this area, if you make some assumptions that are met in the example image:
The area to be found is bounded on top by a line
Any additional lines in the image are above the line of interest
There are no discontinuities in the line
In this case, the area of the region of interest is given by the sum of the lengths from the bottom of the image to the first set pixel. We can compute this with:
import numpy as np
import matplotlib.pyplot as pp
img = pp.imread('/home/cris/tmp/YMMEE.jpg')
img = np.flip(img, axis=0)
pos = np.argmax(img, axis=0)
area = np.sum(pos)
print('Area = %d\n'%area)
This prints Area = 22040.
np.argmax finds the first set pixel on each column of the image, returning the index. By first using np.flip, we flip this axis so that the first pixel is actually the one on the bottom. The index corresponds to the number of pixels between the bottom of the image and the line (not including the set pixel).
Thus, we're computing the area under the line. If you need to include the line itself in the area, add pos.shape[0] to the area (i.e. the number of columns).
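Under the same assumptions, including the line itself is then a one-liner:
# one extra pixel per column for the line itself
area_inclusive = area + pos.shape[0]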
I would like to create a program that is able to extract lines from a graph.
For example, if a graph like this is inputted, I would just want the red line to be outputted.
Below I have tried to do this using a Hough line transform; however, I do not get very promising results.
import cv2
import numpy as np
graph_img = cv2.imread("/Users/2020shatgiskessell/Desktop/Graph1.png")
gray = cv2.cvtColor(graph_img, cv2.COLOR_BGR2GRAY)
kernel_size = 5
#grayscale image
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
#Canny edge detecion
edges = cv2.Canny(blur_gray, 50, 150)
#Hough Lines Transformation
#distance resoltion of hough grid (pixels)
rho = 1
#angular resolution of hough grid (radians)
theta = np.pi/180
#minimum number of votes
threshold = 15
#play around with these
min_line_length = 25
max_line_gap = 20
#make new image
line_image = np.copy(graph_img)
#returns array of lines
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),2)
lines_edges = cv2.addWeighted(graph_img, 0.8, line_image, 1, 0)
cv2.imshow("denoised image",edges)
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
This produces the output image below, which does not accurately recognize the graph line. How might I go about doing this?
Note: For now, I am not concerned about the graph titles or any other text.
I would also like the code to work for other graph images as well, such as:
etc.
If the graph does not have much noise around it (like your example), I would suggest thresholding your image with Otsu's threshold instead of looking for edges. Then you simply search for the contours, select the biggest one (the graph), and draw it on a blank mask. After that you can perform a bitwise operation on the image with the mask, and you will get a black image with the graph. If you like the white background better, simply change all black pixels to white. The steps are written in the example. Hope it helps a bit. Cheers!
Example:
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Convert black pixels back to white
black = np.where(res==0)
res[black[0], black[1], :] = [255, 255, 255]
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
EDIT:
For noisier pictures you could try the code below. Note that different graphs have different noise, so it may not work on every graph image, since the denoising process would be specific to each case. For different kinds of noise you can use different ways to denoise, for example histogram equalization, eroding, blurring, etc. This code works well for all 3 graphs. The steps are written in the comments. Hope it helps. Cheers!
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Perform opening on the thresholded image (erosion followed by dilation)
kernel = np.ones((2,2),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(opening,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Threshold the image again
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Find all non white pixels
non_zero = cv2.findNonZero(thresh)
# Transform all non-white pixels to white
for i in range(0, len(non_zero)):
    first_x = non_zero[i][0][0]
    first_y = non_zero[i][0][1]
    res[first_y, first_x] = 255
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
I'm a newbie to computer vision, and I'm trying to detect all the test strips in this image:
The result I'm trying to get:
I assume it should be very easy, because all the target objects are in rectangular shape and have a fixed aspect ratio. But I have no idea which algorithm or function should I use.
I've tried edge detection and the 2D feature detection example in OpenCV, but the result is not ideal. How should I detect these similar objects but with small differences?
Update:
The test strips can vary in color, and of course, so can the shade of the result lines. But they all have the same reference lines, as shown in the picture:
I don't know how should I describe these simple features for object detection, as most examples I found online are for complex objects like a building or a face.
The solution is not exact, but it provides a good starting point. You have to play with the parameters, though. It would greatly help if you partitioned the strips using some threshold method and then applied Hough lines individually, as @api55 mentioned.
Here are the results I got.
Code:
import cv2
import numpy as np
# read image
img = cv2.imread('KbxN6.jpg')
# filter it
img = cv2.GaussianBlur(img, (11, 11), 0)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get edges using laplacian
laplacian_val = cv2.Laplacian(gray_img, cv2.CV_32F)
# lap_img = np.zeros_like(laplacian_val, dtype=np.float32)
# cv2.normalize(laplacian_val, lap_img, 1, 255, cv2.NORM_MINMAX)
# cv2.imwrite('laplacian_val.jpg', lap_img)
# apply threshold to edges
ret, laplacian_th = cv2.threshold(laplacian_val, thresh=2, maxval=255, type=cv2.THRESH_BINARY)
# filter out salt and pepper noise
laplacian_med = cv2.medianBlur(laplacian_th, 5)
# cv2.imwrite('laplacian_blur.jpg', laplacian_med)
laplacian_fin = np.array(laplacian_med, dtype=np.uint8)
# get lines in the filtered laplacian using Hough lines
lines = cv2.HoughLines(laplacian_fin,1,np.pi/180,480)
for rho,theta in lines[0]:  # old OpenCV layout; in newer versions iterate over lines[:, 0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    # overlay line on original image
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
# cv2.imwrite('processed.jpg', img)
# cv2.imshow('Window', img)
# cv2.waitKey(0)
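As a rough sketch of the partitioning idea mentioned above (my assumption, not tested on the sample image): threshold the image, take each strip's bounding box, and run HoughLines inside each region separately, reusing gray_img and laplacian_fin from the code above.
# partition the strips first, then run Hough per strip
_, strip_mask = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
strip_cnts = cv2.findContours(strip_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
for c in strip_cnts:
    x, y, w, h = cv2.boundingRect(c)
    roi = laplacian_fin[y:y+h, x:x+w]
    roi_lines = cv2.HoughLines(roi, 1, np.pi/180, 200)  # vote threshold to tune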
This is an alternative solution using the function findContours in combination with Canny edge detection. The code is loosely based on this tutorial.
import cv2
import numpy as np
import imutils
image = cv2.imread('test.jpg')
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(resized,100,200)
cv2.imshow('dsd2', edges)
cv2.waitKey(0)
cnts = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)  # handles OpenCV 2/3/4 return formats
# loop over the contours
for c in cnts:
    # compute the center of the contour
    M = cv2.moments(c)
    if M["m00"] == 0:  # skip degenerate contours
        continue
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    # multiply the contour (x, y)-coordinates by the resize ratio,
    # then draw the contours on the image
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
#show the output image
#cv2.imshow("Image", image)
#cv2.waitKey(0)
cv2.imwrite("erg.jpg",image)
Result:
I guess it can be improved by tuning the following parameters:
image resizing width
CHAIN_APPROX_NONE (findContour Docs)
It may also be useful to filter out small contours, or to merge contours that are close to each other; a rough sketch of the filtering follows.
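A minimal sketch of the filtering idea, applied before the drawing loop; min_area is a hypothetical threshold to tune to the expected strip size:
import cv2

min_area = 500  # hypothetical; tune to the expected strip size
cnts = [c for c in cnts if cv2.contourArea(c) > min_area]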