How to read characters inside a detected object's rectangle? - opencv

I was successful with the TensorFlow Object Detection API, as in the image below, but now I want to read the characters inside the green box. How can I do that?

First you need to crop out the bounding boxes of the plate(s), and then you can use Tesseract to get the text. Pseudocode for your problem might look like this:
import cv2
import pytesseract

original_img = cv2.imread("/path/to/your/img.png")
for plate in detected_plates:
    if plate.confidence > 0.98:
        b_box = plate.bounding_rect  # bounding box in [x, y, w, h]
        img_cropped = original_img[b_box[1]:b_box[1] + b_box[3],
                                   b_box[0]:b_box[0] + b_box[2]]
        print(pytesseract.image_to_string(img_cropped))

# Load the image using OpenCV and
# expand the image dimensions to have shape [1, None, None, 3],
# i.e. a batch of one image, where each item holds the pixel RGB values
image = cv2.imread(PATH_TO_IMAGE)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_expanded = np.expand_dims(image_rgb, axis=0)

# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})

# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=1,
    min_score_thresh=0.60)

# All the results have been drawn on the image. Now display it.
cv2.imshow('Object detector', image)
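
To actually read the characters, the normalized boxes returned by sess.run can be converted to pixel coordinates and the crops handed to Tesseract. Below is a minimal sketch, assuming image, boxes, and scores from the snippet above; the TF Object Detection API returns boxes as [ymin, xmin, ymax, xmax] normalized to [0, 1], and the 0.60 cut-off mirrors min_score_thresh:

import pytesseract

h, w = image.shape[:2]
for box, score in zip(np.squeeze(boxes), np.squeeze(scores)):
    if score < 0.60:
        continue
    # Scale the normalized coordinates up to pixels and crop the detection
    ymin, xmin, ymax, xmax = box
    crop = image[int(ymin * h):int(ymax * h), int(xmin * w):int(xmax * w)]
    print(pytesseract.image_to_string(crop))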

Related

How to save & reuse the mask of a panoptic segmentation (detectron2) using OpenCV drawContours?

In detectron2, prediction produces the output below:
panoptic_seg, segments_info = predictor(im)["panoptic_seg"]
It can be drawn via the draw_panoptic_seg_predictions function, but we want to save the mask details and redraw them using OpenCV. How can we do that?
For instance segmentation we can do it like this:
import cv2
import numpy as np

# "outputs" is the inference output in the format described here -
# https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
# Extract the contour of each predicted mask and save it in a list
contours = []
for pred_mask in outputs['instances'].pred_masks:
    # pred_mask is of type torch.Tensor, and its values are boolean (True, False)
    # Convert it to an 8-bit numpy array, which can then be used to find contours
    mask = pred_mask.to("cpu").numpy().astype('uint8')
    contour, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours.append(contour[0])  # contour is a tuple (OpenCV 4.5.2), so take the first element, which is the array of contour points
and then draw the masks with this code:
import matplotlib.pyplot as plt

# "im" is the original BGR image
image_with_overlaid_predictions = im.copy()
for contour in contours:
    cv2.drawContours(image_with_overlaid_predictions, [contour], -1, (0, 255, 0), 20)

plt.figure()
plt.imshow(image_with_overlaid_predictions)
I want to achieve something similar with the panoptic mask.
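
One way to do this (an untested sketch, assuming the panoptic output format documented by detectron2, where panoptic_seg is an (H, W) tensor of segment ids and each entry of segments_info carries the matching "id") is to binarize each segment id and reuse the same findContours approach as above:

import cv2
import numpy as np

# "panoptic_seg" and "segments_info" come from predictor(im)["panoptic_seg"] as above
panoptic = panoptic_seg.to("cpu").numpy()
contours = []
for seg in segments_info:
    # Build a binary mask for this segment id, then extract its contours
    mask = (panoptic == seg["id"]).astype('uint8')
    cnts, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours.extend(cnts)

# The saved contours can then be redrawn exactly as in the instance-segmentation case
image_with_overlaid_predictions = im.copy()
cv2.drawContours(image_with_overlaid_predictions, contours, -1, (0, 255, 0), 2)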

Pixel-per-mm calculation of an image using the camera calibration matrix and object distance does not match the pixel dimension of the object in MS Paint

I want to calculate the number of pixels per grid square (i.e., pixels per 11 mm) of the checkerboard. I am doing this to validate that the mm/pixel calculation I obtain using the calibration matrix and the formula (below) matches what I see when I open the image in MS Paint.
The checkerboard image (1920x1080 resolution) is 11 x 7 squares, each square 11 x 11 mm, captured at a distance of 500 mm (picture below).
I compute a calibration matrix using this code:
import cv2
import numpy as np
import pathlib
#from utils import *
import glob
from argparse import ArgumentParser

topview_image_path = 'checkerboard_top\*.png'
camera_orientation = 'topview'

if camera_orientation == 'topview':
    image_path = topview_image_path

def calibrate_chessboard(folder):
    # Defining the dimensions of the checkerboard (inner corners)
    CHECKERBOARD = (6, 9)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Vector to store the 3D points for each checkerboard image
    objpoints = []
    # Vector to store the 2D points for each checkerboard image
    imgpoints = []
    # Defining the world coordinates for the 3D points
    objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
    prev_img_shape = None
    # Extracting the paths of the individual images stored in the given directory
    print("image path:", image_path)
    images = glob.glob(image_path)
    #images = glob.glob(f'{folder}/*.png')
    # if len(images) == 0:
    #     images = glob.glob(f'{folder}/*.jpg')
    # print(images)
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        # If the desired number of corners is found in the image, ret = True
        ret, corners = cv2.findChessboardCorners(
            gray, CHECKERBOARD,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        # If the desired number of corners is detected, refine the pixel
        # coordinates and display them on the checkerboard image
        if ret == True:
            objpoints.append(objp)
            # Refining the pixel coordinates for the given 2D points
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(0)
    cv2.destroyAllWindows()
    h, w = img.shape[:2]
    # Performing camera calibration by passing the known 3D points (objpoints)
    # and the corresponding pixel coordinates of the detected corners (imgpoints)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return [ret, mtx, dist, rvecs, tvecs]

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--type', dest='type', type=str, default='topview',
                        help='is the image topview or sideview?')
    parser.add_argument('--folder', dest='folder', type=str, default='cal_images/checkerboard_topview',
                        help='folder containing the calibration images')
    args = parser.parse_args()
    WIDTH = 6
    HEIGHT = 9
    # Calibrate
    ret, mtx, dist, rvecs, tvecs = calibrate_chessboard(args.folder)
    print(mtx)
    print(dist)
    mtx_list = ["calibration matrix:\n", str(mtx),
                "\ndistortion matrix:", str(dist)]
    txt_file = "matrix_" + camera_orientation + ".txt"
    with open(txt_file, mode='wt', encoding='utf-8') as myfile:
        myfile.write('\n'.join(mtx_list))
and get the calibration matrix as:
M = [[2.86276094e+03 0.00000000e+00 8.23315889e+02]
[0.00000000e+00 2.86846709e+03 5.80987675e+02]
[0.00000000e+00 0.00000000e+00 1.00000000e+00]]
This gives me the focal length (f) in pixel units (M[0][0]), i.e. ~2862.
I then calculate the size in pixels (X_sizepx) of one 11 mm checkerboard square (X_sizemm) at distance Z (in my case 500 mm) using the formula:
X_sizepx = (f / Z) * X_sizemm
Substituting the values f = 2862, Z = 500, and X_sizemm = 11, I get ~62.96. So as per the OpenCV calibration and the formula, 11 mm should be ~63 pixels.
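As a quick sanity check, the same substitution in Python:

# Worked example of X_sizepx = (f / Z) * X_sizemm
f = 2862.0       # focal length in pixels, from M[0][0]
Z = 500.0        # object distance in mm
X_sizemm = 11.0  # size of one checkerboard square in mm
print((f / Z) * X_sizemm)  # 62.964, i.e. roughly 63 pixels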
I then open the checkerboard image in MS Paint to check the pixel dimension of a square grid cell, and it says 41 pixels (image below).
This is a big difference if I were to use the camera calibration matrix and the formula. Is there something I am doing wrong?
Note: Z can never be more than 530 mm, if I were to assume that Z might be slightly off.

How do I measure text size to extract only certain letters from an image in OpenCV?

I am looking to extract text from a license plate. For now I have been using pytesseract with OpenCV to zero in on the relevant contours and pull out text. This works decently for non-American plates, but I am curious about applying it to American plates, which come with a lot of little letters surrounding the big plate-id ones. My thought was to use font size to filter out letters under a certain threshold. Is that the best approach?
Below is my code so far:
import cv2
import pytesseract
import imutils

# Read image
image = cv2.imread('plateTest2.jpeg')

# BGR to grayscale conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Noise removal
gray = cv2.bilateralFilter(gray, 11, 17, 17)

# Find edges of the grayscale image
edged = cv2.Canny(gray, 170, 200)

# Find contours based on edges (OpenCV 3.x three-value return)
_, cnts, new = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

# Create a copy of the original image to draw all contours on
img1 = image.copy()
cv2.drawContours(img1, cnts, -1, (0, 255, 0), 3)

# Sort contours by area, keeping only the 30 largest
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
NumberPlateCnt = None  # we currently have no number-plate contour

# Draw the top 30 contours
img2 = image.copy()
cv2.drawContours(img2, cnts, -1, (0, 255, 0), 3)

idx = 'plateTest2.jpg'
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # print("approx = ", approx)
    if len(approx) == 4:  # select the contour with 4 corners
        NumberPlateCnt = approx  # this is our approximate number-plate contour
        # Crop that contour and store it as a new image
        x, y, w, h = cv2.boundingRect(c)  # bounding box of the plate
        new_img = gray[y:y + h, x:x + w]
        cv2.imwrite('/' + 'cropped_' + str(idx), new_img)
        # idx += 1
        break

# Draw the selected contour on the original image
# print(NumberPlateCnt)
cv2.drawContours(image, [NumberPlateCnt], -1, (0, 255, 0), 3)
Cropped_img_loc = '/' + 'cropped_' + str(idx)

# Use Tesseract to convert the cropped image into a string
text = pytesseract.image_to_string(Cropped_img_loc, lang='eng')
text = text.replace('.', '')
text = text.replace(' ', '')
print(text)
Here is a picture of a plate that returns too much text, where things like 'SUNSHINESTATE' show up:
Should I rely on pytesseract to identify font size and filter out smaller characters, or should I filter by contour size beforehand? Appreciate the help.
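
One way to filter by glyph size before OCR is sketched below. This is a hedged sketch, not tested on these plates: it assumes the plate has already been cropped ('cropped_plate.jpg' is a hypothetical path), that the plate id uses the tallest glyphs on the plate, and the 50% height threshold is an assumption to tune.

import cv2
import numpy as np
import pytesseract

plate = cv2.imread('cropped_plate.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical path
_, thresh = cv2.threshold(plate, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# [-2] keeps this working across the OpenCV 3.x and 4.x return signatures
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

# Keep only glyphs whose bounding-box height is close to the tallest one
max_h = max(cv2.boundingRect(c)[3] for c in cnts)
mask = np.zeros_like(thresh)
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    if h > 0.5 * max_h:  # assumed threshold; tune per dataset
        mask[y:y + h, x:x + w] = thresh[y:y + h, x:x + w]

# Tesseract prefers dark text on a light background, so invert before OCR
print(pytesseract.image_to_string(255 - mask, lang='eng'))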

OpenCV - Extracting lines on a graph

I would like to create a program that is able to extract lines from a graph.
For example, if a graph like this one is input, I would want just the red line to be output.
Below I have tried to do this using a Hough line transform; however, I do not get very promising results.
import cv2
import numpy as np

graph_img = cv2.imread("/Users/2020shatgiskessell/Desktop/Graph1.png")
gray = cv2.cvtColor(graph_img, cv2.COLOR_BGR2GRAY)

# Blur the grayscale image
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)

# Canny edge detection
edges = cv2.Canny(blur_gray, 50, 150)

# Hough line transform parameters
rho = 1              # distance resolution of the Hough grid (pixels)
theta = np.pi / 180  # angular resolution of the Hough grid (radians)
threshold = 15       # minimum number of votes
# Play around with these two
min_line_length = 25
max_line_gap = 20

# Make a new image to draw the lines on
line_image = np.copy(graph_img)

# HoughLinesP returns an array of line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2)

lines_edges = cv2.addWeighted(graph_img, 0.8, line_image, 1, 0)
cv2.imshow("denoised image", edges)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
This produces the output image below, which does not accurately recognize the graph line. How might I go about doing this?
Note: For now, I am not concerned about the graph titles or any other text.
I would also like the code to work for other graph images as well, such as:
etc.
If the graph does not have much noise around it (like your example), I would suggest thresholding your image with Otsu's threshold instead of looking for edges. Then you simply find the contours, select the biggest one (the graph), and draw it on a blank mask. After that you can perform a bitwise operation on the image with the mask, and you will get a black image with the graph. If you like the white background better, simply change all black pixels to white. The steps are written in the example. Hope it helps a bit. Cheers!
Example:
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Convert black pixels back to white
black = np.where(res==0)
res[black[0], black[1], :] = [255, 255, 255]
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
EDIT:
For noisier pictures you could try this code. Note that different graphs have different noise, so it may not work on every graph image, since the denoising process is specific to each case. For different kinds of noise you can use different denoising techniques, for example histogram equalization, erosion, blurring, etc. This code works well for all three graphs. The steps are written in the comments. Hope it helps. Cheers!
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Perform opening on the thresholded image (erosion followed by dilation)
kernel = np.ones((2,2),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(opening,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Threshold the image again
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Find all non-zero (white) pixels in the re-thresholded image
non_zero = cv2.findNonZero(thresh)
# Set those pixels to white in the result
for i in range(0, len(non_zero)):
    first_x = non_zero[i][0][0]
    first_y = non_zero[i][0][1]
    res[first_y, first_x] = 255
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
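
Since the question specifically asks for the red line, a different technique is also worth mentioning: a color mask in HSV space isolates the curve directly. A minimal sketch, assuming the line is saturated red (the hue/saturation bounds are assumptions to tune):

import cv2
import numpy as np

img = cv2.imread('graph.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Red wraps around the hue axis, so combine two hue ranges
mask1 = cv2.inRange(hsv, (0, 70, 50), (10, 255, 255))
mask2 = cv2.inRange(hsv, (170, 70, 50), (180, 255, 255))
mask = cv2.bitwise_or(mask1, mask2)

# Keep only the red curve, on a white background
res = np.full_like(img, 255)
res[mask > 0] = img[mask > 0]
cv2.imshow('red line', res)
cv2.waitKey(0)
cv2.destroyAllWindows()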

How to detect test strips with OpenCV?

I'm a newbie to computer vision, and I'm trying to detect all the test strips in this image:
The result I'm trying to get:
I assume it should be fairly easy, because all the target objects are rectangular and have a fixed aspect ratio. But I have no idea which algorithm or function I should use.
I've tried edge detection and the 2D feature detection example in OpenCV, but the results are not ideal. How should I detect these similar objects that differ only slightly?
Update:
The test strips can vary in color, and of course in the shade of the result lines. But they all have the same reference lines, as shown in the picture:
I don't know how I should describe these simple features for object detection, as most examples I found online are for complex objects like a building or a face.
The solution is not exact, but it provides a good starting point. You have to play with the parameters, though. It would greatly help if you partitioned the strips using some threshold method and then applied Hough lines individually, as @api55 mentioned (a sketch of that partitioning follows the code below).
Here are the results I got.
Code:
import cv2
import numpy as np

# read image
img = cv2.imread('KbxN6.jpg')
# filter it
img = cv2.GaussianBlur(img, (11, 11), 0)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# get edges using the Laplacian
laplacian_val = cv2.Laplacian(gray_img, cv2.CV_32F)
# lap_img = np.zeros_like(laplacian_val, dtype=np.float32)
# cv2.normalize(laplacian_val, lap_img, 1, 255, cv2.NORM_MINMAX)
# cv2.imwrite('laplacian_val.jpg', lap_img)

# apply a threshold to the edges
ret, laplacian_th = cv2.threshold(laplacian_val, thresh=2, maxval=255, type=cv2.THRESH_BINARY)
# filter out salt-and-pepper noise
laplacian_med = cv2.medianBlur(laplacian_th, 5)
# cv2.imwrite('laplacian_blur.jpg', laplacian_med)
laplacian_fin = np.array(laplacian_med, dtype=np.uint8)

# get lines in the filtered Laplacian using Hough lines
lines = cv2.HoughLines(laplacian_fin, 1, np.pi / 180, 480)
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    # overlay the line on the original image
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
# cv2.imwrite('processed.jpg', img)
# cv2.imshow('Window', img)
# cv2.waitKey(0)
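
A hedged sketch of the partitioning idea mentioned above (it assumes the strips are darker than the background, so Otsu's threshold separates them, and the minimum-area value is an assumption to tune): find each strip as a connected component, then run the Hough transform inside its bounding box only.

# Partition the strips, then run Hough lines per strip
_, strips = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
n, labels, stats, centroids = cv2.connectedComponentsWithStats(strips)
for i in range(1, n):  # label 0 is the background
    x, y, w, h, area = stats[i]
    if area < 1000:  # assumed minimum strip area; tune per image
        continue
    roi = laplacian_fin[y:y + h, x:x + w]
    # A lower vote threshold is feasible inside a single strip
    strip_lines = cv2.HoughLines(roi, 1, np.pi / 180, 200)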
This is an alternative solution, using the function findContours in combination with Canny edge detection. The code is based loosely on this tutorial.
import cv2
import numpy as np
import imutils

image = cv2.imread('test.jpg')
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])

# convert the resized image to grayscale and detect its edges
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(resized, 100, 200)
cv2.imshow('dsd2', edges)
cv2.waitKey(0)

cnts = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_NONE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

# loop over the contours
for c in cnts:
    # compute the center of the contour
    M = cv2.moments(c)
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    # multiply the contour (x, y)-coordinates by the resize ratio,
    # then draw the contour on the original-size image
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)

# show the output image
#cv2.imshow("Image", image)
#cv2.waitKey(0)
cv2.imwrite("erg.jpg", image)
Result:
I guess it can be improved by tuning the following parameters:
image resizing width
CHAIN_APPROX_NONE (findContour Docs)
It may also be useful to filter out small contours, or to merge contours that are close to each other; a minimal filtering sketch follows.
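For instance (a sketch; the minimum area is an assumption to tune for your image size):

# Drop tiny contours before the drawing loop
min_area = 500
cnts = [c for c in cnts if cv2.contourArea(c) > min_area]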
