Can't detect equal sign on image - OpenCV

I'm working on a computer vision project that recognizes mathematical signs. It detects all of them correctly except the equal sign, which is perceived as two separate minus signs.
image = cv2.imread('./deneme.png')
grey = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(grey.copy(), 0, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
preprocessed_digits = []
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), color=(0, 255, 0), thickness=2)
    digit = thresh[y:y+h, x:x+w]
    digit = make_square(digit)
    preprocessed_digits.append(digit)
plt.imshow(image, cmap="gray")
plt.show()
I have no idea how to fix this problem. How can I detect the equal sign?

Is that the result you're trying to achieve?
Since it seems that your math expressions are written horizontally, as @Micka points out, you have very strong priors on the relationship between the two components of an equal sign, so there is a straightforward (but hacky) way to detect when two minus signs are actually an equal sign:
import cv2
import matplotlib.pyplot as plt
import numpy as np
class Rect:
    def __init__(self, a, b, c, d):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.center = np.array([(a + c) / 2, (b + d) / 2])

def merge_rectangles(r_1, r_2):
    a_m = min(r_1.a, r_2.a)
    b_m = min(r_1.b, r_2.b)
    c_M = max(r_1.c, r_2.c)
    d_M = max(r_1.d, r_2.d)
    return Rect(a_m, b_m, c_M, d_M)

def they_form_an_equal_sign(rect_1,
                            rect_2,
                            tol_w=10,  # in pixels
                            tol_h=10):
    # check if the bounding boxes approximately align horizontally
    b0 = (np.abs(rect_1.a - rect_2.a) < tol_w) and (np.abs(rect_1.c - rect_2.c) < tol_w)
    # check if the bounding boxes have approximately the same height
    h1 = np.abs(rect_1.d - rect_1.b)
    h2 = np.abs(rect_2.d - rect_2.b)
    b1 = np.abs(h1 - h2) < tol_h
    return b0 and b1

image = cv2.imread('/tmp/m.png')
grey = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(grey.copy(), 0, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

candidate_rectangles = []
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    candidate_rectangles.append(Rect(x, y, x + w, y + h))

kept = np.ones(len(candidate_rectangles))
new_rectangles = []
for i in range(len(candidate_rectangles)):
    for j in range(i + 1, len(candidate_rectangles)):
        b = they_form_an_equal_sign(candidate_rectangles[i], candidate_rectangles[j])
        if b:
            new_rect = merge_rectangles(candidate_rectangles[i], candidate_rectangles[j])
            new_rectangles.append(new_rect)
            kept[i] = 0
            kept[j] = 0

for i in range(len(kept)):
    if kept[i]:
        rect = candidate_rectangles[i]
        cv2.rectangle(image, (rect.a, rect.b), (rect.c, rect.d), color=(0, 255, 0), thickness=2)
for rect in new_rectangles:
    cv2.rectangle(image, (rect.a, rect.b), (rect.c, rect.d), color=(0, 255, 0), thickness=2)

plt.imshow(image, cmap="gray")
plt.show()
Basically, this takes the brute-force approach of comparing every pair of bounding boxes detected by your code, and merging them into a larger one if they meet the requirements from your prior: i.e. if they are horizontally aligned (as the top and bottom bars of an equal sign should be) and if their heights are approximately the same.
However, this is obviously not robust: you need to adjust thresholds and the entire thing will fall apart if your expressions aren't horizontal and clearly separated. If you want a more robust/useful system, basic ML approaches for character recognition are a better place to start.

Related

How to calculate diameter at multiple points of an object using image pixels?

I am trying to get diameters at different points of a cylinder over a certain length using computer vision, to replace the use of an optical micrometer.
Image of a cylinder:
How can I calculate the diameter of this object (cylinder) at multiple points (blue lines) along its length, as shown in the image, using OpenCV in Python?
An OpenCV solution. The main idea is to:
Detect edges
Find the contours of the edges
Fill in the contour areas
Go through each column in the image and count the nonzero pixels
Steps 1, 2 and 3 could possibly be replaced by a single thresholding step, depending on your use case (see the sketch at the end of this answer)
import numpy as np
import cv2
src = cv2.imread('/path/to/src.jpg')
mask = np.zeros(src.shape[:2], dtype=np.uint8)  # single-channel mask so countNonZero works per column
h, w, c = src.shape  # shape is (rows, cols, channels)
# edge detection
threshold = 100
gray = cv2.Canny(src, threshold, threshold * 2)
cv2.imshow('', gray)
cv2.waitKey(0)
# find contours
cnts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
threshold_area = 0.5
# fill area within contours with white color
for c in cnts:
    area = cv2.contourArea(c)
    if area > threshold_area:
        cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
cv2.imshow('', mask)
cv2.waitKey(0)
# get non zero values (height) of each column
column_pixels = [cv2.countNonZero(mask[:, i]) for i in range(0, w)]
print(column_pixels)
Src image:
Canny result:
After filling in contours with white color:
countNonZero is applied on this last image for each column
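For reference, here is a rough sketch of the single-threshold simplification mentioned above. It assumes the cylinder is clearly darker (or lighter) than the background so Otsu's method can separate it; the file path is a placeholder:
import cv2

src = cv2.imread('/path/to/src.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# Otsu picks the threshold automatically; THRESH_BINARY_INV assumes a dark object on a light background
_, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# object height (in pixels) at every column, as in the contour-based version
h, w = mask.shape
column_pixels = [cv2.countNonZero(mask[:, i]) for i in range(w)]
print(column_pixels)
Whether this is enough depends on your lighting; with a noisy background, the Canny + contour-filling route above is the safer option.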

Adjusting pytesseract parameters

Note: I am migrating this question from Data Science Stack Exchange, where it received little exposure.
I am trying to implement an OCR solution to identify the numbers read from the picture of a screen.
I am adapting this pyimagesearch tutorial to my problem.
Because I am dealing with a dark background, I first invert the image, before converting it to grayscale and thresholding it:
inverted_cropped_image = cv2.bitwise_not(cropped_image)
gray = get_grayscale(inverted_cropped_image)
thresholded_image = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
Then I call pytesseract's image_to_data function to output a dictionary containing the different text regions and their confidence scores:
from pytesseract import Output
results = pytesseract.image_to_data(thresholded_image, output_type=Output.DICT)
Finally, I iterate over the results and plot them when their confidence exceeds a user-defined threshold (70%). What bothers me is that my script identifies everything in the image except the number that I would like to recognize (1227.938).
My first guess is that the image_to_data parameters are not set properly.
Checking this website, I selected a page segmentation mode (psm) of 11 (sparse text) and tried whitelisting numbers only (tessedit_char_whitelist=0123456789m.):
results = pytesseract.image_to_data(thresholded_image, config='--psm 11 --oem 3 -c tessedit_char_whitelist=0123456789m.', output_type=Output.DICT)
Alas, this is even worse, and the script now identifies nothing at all!
Do you have any suggestion? Am I missing something obvious here?
EDIT #1:
At Ann Zen's request, here's the code used to obtain the first image:
import imutils
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pytesseract
from pytesseract import Output
def get_grayscale(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filename = "IMAGE.JPG"
cropped_image = cv2.imread(filename)
inverted_cropped_image = cv2.bitwise_not(cropped_image)
gray = get_grayscale(inverted_cropped_image)
thresholded_image = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
results = pytesseract.image_to_data(thresholded_image, config='--psm 11 --oem 3 -c tessedit_char_whitelist=0123456789m.', output_type=Output.DICT)
color = (255, 255, 255)
for i in range(0, len(results["text"])):
    x = results["left"][i]
    y = results["top"][i]
    w = results["width"][i]
    h = results["height"][i]
    text = results["text"][i]
    conf = int(results["conf"][i])
    print("Confidence: {}".format(conf))
    if conf > 70:
        print("Confidence: {}".format(conf))
        print("Text: {}".format(text))
        print("")
        text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
        cv2.rectangle(cropped_image, (x, y), (x + w, y + h), color, 2)
        cv2.putText(cropped_image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 3)
cv2.imshow('Image', cropped_image)
cv2.waitKey(0)
EDIT #2:
Rarely have I spent reputation points so well! All three replies posted so far helped me refine my algorithm.
First, I wrote a Tkinter program allowing me to manually crop the image around the number of interest (modifying the one found in this SO post).
Then I used Ann Zen's idea of narrowing down the search area around the fractional part. I am using her nifty process function to prepare my grayscale image for contour extraction: contours, _ = cv2.findContours(process(img_gray), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE). I am using RETR_EXTERNAL to avoid dealing with overlapping bounding rectangles.
I then sorted my contours from left to right. Bounding rectangles exceeding a user-defined threshold are associated with the integral part (white rectangles); otherwise they are associated with the fractional part (black rectangles).
I then extracted the characters using Esraa's approach, i.e. applying a Gaussian blur prior to calling Tesseract. I used a much larger kernel (15x15 instead of 3x3) to achieve this.
I am not out of the woods yet, but hopefully I will get better results by using Ahx's adaptive thresholding.
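For completeness, here is a rough sketch of that pipeline. The MIN_H threshold and the file name are placeholders, and process() is Ann Zen's helper reproduced from her answer below:
import cv2
import numpy as np
import pytesseract

def process(img):  # Ann Zen's helper, reproduced from her answer below
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY)
    img_canny = cv2.Canny(thresh, 100, 100)
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=2)
    return cv2.erode(img_dilate, kernel, iterations=2)

img = cv2.imread("cropped.png")  # image cropped manually with the Tkinter tool
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# contour extraction; RETR_EXTERNAL avoids overlapping bounding rectangles
contours, _ = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
boxes = sorted([cv2.boundingRect(c) for c in contours], key=lambda b: b[0])  # left to right

MIN_H = 40  # placeholder: boxes at least this tall belong to the integral part
integral = [b for b in boxes if b[3] >= MIN_H]
fractional = [b for b in boxes if b[3] < MIN_H]

# large Gaussian blur before OCR, as in Esraa's suggestion
blurred = cv2.GaussianBlur(img_gray, (15, 15), 0)
text = pytesseract.image_to_string(blurred, config='--psm 6 -c tessedit_char_whitelist=0123456789.')
print(text)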
The Concept
As you have probably heard, pytesseract is not good at detecting text of different sizes on the same line as one piece of text. In your case, you want to detect the 1227.938, where the 1227 is much larger than the .938.
One way to go about solving this is to have the program estimate where the .938 is, and enlarge that part of the image. After that, pytesseract will have no problem in returning the text.
The Code
import cv2
import numpy as np
import pytesseract
def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY)
    img_canny = cv2.Canny(thresh, 100, 100)
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=2)
    return cv2.erode(img_dilate, kernel, iterations=2)
img = cv2.imread("image.png")
img_copy = img.copy()
hh = 50
contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    if 20 * hh < cv2.contourArea(cnt) < 30 * hh:
        x, y, w, h = cv2.boundingRect(cnt)
        ww = int(hh / h * w)
        src_seg = img[y: y + h, x: x + w]
        dst_seg = img_copy[y: y + hh, x: x + ww]
        h_seg, w_seg = dst_seg.shape[:2]
        dst_seg[:] = cv2.resize(src_seg, (ww, hh))[:h_seg, :w_seg]
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
results = pytesseract.image_to_data(thresh)
for b in map(str.split, results.splitlines()[1:]):
    if len(b) == 12:
        x, y, w, h = map(int, b[6: 10])
        cv2.putText(img, b[11], (x, y + h + 15), cv2.FONT_HERSHEY_COMPLEX, 0.6, 0)
cv2.imshow("Result", img)
cv2.waitKey(0)
The Output
Here is the input image:
And here is the output image:
As you have said in your post, the only part you need is the decimal 1227.938. If you want to filter out the rest of the detected text, you can try tweaking some parameters. For example, replacing the 180 in _, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY) with 230 will result in the output image:
The Explanation
Import the necessary libraries:
import cv2
import numpy as np
import pytesseract
Define a function, process(), that will take in an image array, and return a binary image array that is the processed version of the image that will allow proper contour detection:
def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY)
    img_canny = cv2.Canny(thresh, 100, 100)
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=2)
    return cv2.erode(img_dilate, kernel, iterations=2)
I'm sure that you don't have to do this, but due to a problem in my environment, I have to add pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' before I can call the pytesseract.image_to_data() method, or it throws an error:
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
Read in the original image, make a copy of it, and define the rough height of the large part of the decimal:
img = cv2.imread("image.png")
img_copy = img.copy()
hh = 50
Detect the contours of the processed version of the image, and add a filter that roughly filters out the contours so that the small text remains:
contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    if 20 * hh < cv2.contourArea(cnt) < 30 * hh:
Define the bounding box of each contour that didn't get filtered out, and use the properties to enlarge those parts of the image to the height defined for the large text (making sure to also scale the width accordingly):
        x, y, w, h = cv2.boundingRect(cnt)
        ww = int(hh / h * w)
        src_seg = img[y: y + h, x: x + w]
        dst_seg = img_copy[y: y + hh, x: x + ww]
        h_seg, w_seg = dst_seg.shape[:2]
        dst_seg[:] = cv2.resize(src_seg, (ww, hh))[:h_seg, :w_seg]
Finally, we can use the pytesseract.image_to_data() method to detect the text. Of course, we'll need to threshold the image again:
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
results = pytesseract.image_to_data(thresh)
for b in map(str.split, results.splitlines()[1:]):
    if len(b) == 12:
        x, y, w, h = map(int, b[6: 10])
        cv2.putText(img, b[11], (x, y + h + 15), cv2.FONT_HERSHEY_COMPLEX, 0.6, 0)
cv2.imshow("Result", img)
cv2.waitKey(0)
I have been working with Tesseract for quite some time, so let me clarify something for you. Tesseract is most helpful for recognizing text in documents, more so than for other computer vision projects. It usually needs a binarized image to give good output, so you will always need some image pre-processing.
However, after several trials in the past with all page segmentation modes, I realized that it fails when the font size differs on the same line without a space in between. Sometimes PSM 6 helps if the difference is small, but in your case you may want an alternative. If you don't care about the decimals, you may try the following solution:
img = cv2.imread(r'E:\Downloads\Iwzrg.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_blur = cv2.GaussianBlur(gray, (3,3),0)
_,thresh = cv2.threshold(img_blur,200,255,cv2.THRESH_BINARY_INV)
# If using a fixed camera
new_img = thresh[0:100, 80:320]
text = pytesseract.image_to_string(new_img, lang='eng', config='--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789')
OUTPUT: 1227
I would like to recommend applying another image processing method.
Because I am dealing with a dark background, I first invert the image, before converting it to grayscale and thresholding it:
You applied global thresholding and couldn't achieve the desired result.
Then you can apply either adaptive thresholding or inRange (a short sketch of the adaptive-thresholding option is given after the code below)
For the given image, if we apply the inRange threshold:
To be able to recognize the image as accurately as possible we can add a border to the top of the image and resize the image (Optional)
In the OCR section, check if the detected region contains a digit
if text.isdigit():
Then display on the image:
The result is nearly the desired value. Now you can try with the other suggested methods to find the exact value.
The problem is that .938 is recognized as 235; resizing with different values might improve the result.
Code:
from cv2 import imread, cvtColor, COLOR_BGR2HSV as HSV, inRange, getStructuringElement, resize
from cv2 import imshow, waitKey, MORPH_RECT, dilate, bitwise_and, rectangle, putText
from cv2 import copyMakeBorder as addBorder, BORDER_CONSTANT as CONSTANT, FONT_HERSHEY_SIMPLEX
from numpy import array
from pytesseract import image_to_data, Output
bgr = imread("Iwzrg.png")
resized = resize(bgr, (800, 600), fx=0.75, fy=0.75)
bordered = addBorder(resized, 200, 0, 0, 0, CONSTANT, value=0)
hsv = cvtColor(bordered, HSV)
mask = inRange(hsv, array([0, 0, 250]), array([179, 255, 255]))
kernel = getStructuringElement(MORPH_RECT, (50, 30))
dilated = dilate(mask, kernel, iterations=1)
thresh = 255 - bitwise_and(dilated, mask)
data = image_to_data(thresh, output_type=Output.DICT)
for i in range(0, len(data["text"])):
    x = data["left"][i]
    y = data["top"][i]
    w = data["width"][i]
    h = data["height"][i]
    text = data["text"][i]
    if text.isdigit():
        print("Text: {}".format(text))
        print("")
        text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
        rectangle(thresh, (x, y), (x + w, y + h), (0, 255, 0), 2)
        putText(thresh, text, (x, y - 10), FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)
imshow("", thresh)
waitKey(0)
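If you would rather try the adaptive-thresholding alternative instead of inRange, it is roughly a two-line swap applied to the bordered image from the code above; the block size (31) and constant (10) are guesses that would need tuning:
import cv2

gray = cv2.cvtColor(bordered, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 31, 10)  # block size and C need tuning
The rest of the pipeline (image_to_data and the isdigit() filter) stays the same.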

Image Processing: Mapping a scanned image on a template image with many identical features

Problem description
We are trying to match a scanned image onto a template image:
Example of a scanned image:
Example of a template image:
The template image contains a collection of hearts varying in size and contour properties (closed, open left and open right). Each heart in the template is a Region of Interest for which we know the location, size, and contour type. Our goal is to match a scanned image onto the template so that we can extract these ROIs in the scanned image. In the scanned image, some of these hearts are crossed, and they will be presented to a classifier that decides if they are crossed or not.
Our approach
Following a tutorial on PyImageSearch, we have attempted to use ORB to find matching keypoints (code included below). This should allow us to compute a perspective transform matrix that maps the scanned image on the template image.
We have tried some preprocessing steps such as thresholding and/or blurring the scanned image. We have also tried to increase the maximum number of features as much as possible.
The problem
The method fails to work for our image set. This can be seen in the following image:
It appears that a lot of keypoints are mapped to the wrong part of the template image, so the transform matrix is not calculated correctly.
Is ORB the right technique to use here, or are there parameters of the algorithm that could be fine-tuned to improve performance? It feels like we are missing out on something simple that should make it work, but we really don't know how to go forward with this approach :).
We are trying out an alternative technique where we cross-correlate the scan with individual heart shapes. This should give an image with peaks at the heart locations. By drawing a bounding box around these peaks we hope to map that bounding box onto the bounding box of the template (I can elaborate on this upon request).
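As a rough illustration of that cross-correlation idea, here is a minimal sketch using cv2.matchTemplate with a single cropped heart as the template (heart_template.png, the 0.6 score threshold and the single template size are placeholders; in practice we would repeat this per heart size):
import cv2
import numpy as np

scan = cv2.imread('scan.jpg', cv2.IMREAD_GRAYSCALE)
heart = cv2.imread('heart_template.png', cv2.IMREAD_GRAYSCALE)  # one cropped heart
th, tw = heart.shape

# normalized cross-correlation: the response map peaks at heart locations
response = cv2.matchTemplate(scan, heart, cv2.TM_CCOEFF_NORMED)
ys, xs = np.where(response >= 0.6)  # placeholder threshold

vis = cv2.cvtColor(scan, cv2.COLOR_GRAY2BGR)
for x, y in zip(xs, ys):
    cv2.rectangle(vis, (int(x), int(y)), (int(x) + tw, int(y) + th), (0, 0, 255), 2)
cv2.imwrite('matches.jpg', vis)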
Any suggestions are greatly appreciated!
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
# Preprocessing parameters
THRESHOLD = True
BLUR = False
# ORB parameters
MAX_FEATURES = 4048
KEEP_PERCENT = .01
SHOW_DEBUG = True
# Convert both the input image and template to grayscale
scan_file = r'scan.jpg'
template_file = r'template.jpg'
scan = cv.imread(scan_file)
template = cv.imread(template_file)
scan_gray = cv.cvtColor(scan, cv.COLOR_BGR2GRAY)
template_gray = cv.cvtColor(template, cv.COLOR_BGR2GRAY)
if THRESHOLD:
    _, scan_gray = cv.threshold(scan_gray, 127, 255, cv.THRESH_BINARY)
    _, template_gray = cv.threshold(template_gray, 127, 255, cv.THRESH_BINARY)
if BLUR:
    scan_gray = cv.blur(scan_gray, (5, 5))
    template_gray = cv.blur(template_gray, (5, 5))
# Use ORB to detect keypoints and extract (binary) local invariant features
orb = cv.ORB_create(MAX_FEATURES)
(kps_template, desc_template) = orb.detectAndCompute(template_gray, None)
(kps_scan, desc_scan) = orb.detectAndCompute(scan_gray, None)
# Match the features
#method = cv.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING
#matcher = cv.DescriptorMatcher_create(method)
#matches = matcher.match(desc_scan, desc_template)
bf = cv.BFMatcher(cv.NORM_HAMMING)
matches = bf.match(desc_scan, desc_template)
# Sort the matches by their distances
matches = sorted(matches, key = lambda x : x.distance)
# Keep only the top matches
keep = int(len(matches) * KEEP_PERCENT)
matches = matches[:keep]
if SHOW_DEBUG:
    matched_visualization = cv.drawMatches(scan, kps_scan, template, kps_template, matches, None)
    plt.imshow(matched_visualization)
Based on the clarifications provided by #it_guy, I have attempted to find all the crossed hearts using just the scanned image. I would have to try the algorithm on more images to check whether this approach will generalize or not.
Binarize the scanned image.
gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_image, 180, 255, cv2.THRESH_BINARY_INV)
Perform dilation to close small gaps in the outline of the hearts and in the curves representing crosses. Note - the structuring element np.ones((1, 2), np.uint8) can be changed by running the algorithm through multiple images and finding the most suitable structuring element.
closing_original = cv2.morphologyEx(original_binary, cv2.MORPH_DILATE, np.ones((1,2), np.uint8))
Find all the contours in the image. The contours include all hearts and the triangle at the bottom. We eliminate other contours, like dots, by placing constraints on the height and width of the contours. Further, we also use contour hierarchies to eliminate inner contours in crossed hearts.
contours_original, hierarchy_original = cv2.findContours(closing_original, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
We iterate through each of the filtered contours.
Contour with normal heart -
Contour with crossed heart -
Let us observe the difference between these two types of hearts. If we look at the white-to-black and black-to-white pixel transitions (from top to bottom) inside the normal heart, we see that for the majority of the image columns the number of such transitions is 4 (top border - 2 transitions, bottom border - 2 transitions).
white-to-black pixel - (255, 255, 0, 0, 0)
black-to-white pixel - (0, 0, 255, 255, 255)
But in the case of the crossed heart, the number of transitions for the majority of the columns must be 6, since the crossing curve / line adds two more transitions inside the heart (black-to-white first, then white-to-black). Hence, among all image columns which have at least 4 such transitions, if more than 40% of the columns have 6 transitions, then the given contour represents a crossed heart. Result -
Code -
import cv2
import numpy as np
def convert_to_binary(rgb_image):
    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray_image, 180, 255, cv2.THRESH_BINARY_INV)
    return gray_image, thresh
original = cv2.imread('original.jpg')
height, width = original.shape[:2]
original_gray, original_binary = convert_to_binary(original) # Get binary image
cv2.imwrite("binary.jpg", original_binary)
closing_original = cv2.morphologyEx(original_binary, cv2.MORPH_DILATE, np.ones((1,2), np.uint8)) # Close small gaps in the binary image
cv2.imwrite("closed.jpg", closing_original)
contours_original, hierarchy_original = cv2.findContours(closing_original, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) # Get all the contours
bounding_rects_original = [cv2.boundingRect(c) for c in contours_original] # Get all contour bounding boxes
orig_boxes = list()
all_contour_image = original.copy()
for i, (x, y, w, h) in enumerate(bounding_rects_original):
    if h > height / 2 or w > width / 2:  # Eliminate extremely large contours
        continue
    if h < w / 2 or w < h / 2:  # Eliminate vertical / horizontal lines
        continue
    if w * h < 200:  # Eliminate small area contours
        continue
    if hierarchy_original[0][i][3] != -1:  # Eliminate contours created by heart crosses
        continue
    orig_boxes.append((x, y, w, h))
    cv2.rectangle(all_contour_image, (x, y), (x + w, y + h), (0, 255, 0), 3)
# cv2.imshow("warped", closing_original)
cv2.imwrite("all_contours.jpg", all_contour_image)
final_image = original.copy()
for x, y, w, h in orig_boxes:
    cropped_image = closing_original[y - 2: y + h + 2, x: x + w]  # Get the heart binary image
    col_pixel_diffs = np.abs(np.diff(cropped_image.T.astype(np.int16)) / 255)  # Obtain all consecutive pixel differences in all the columns
    column_sums = np.sum(col_pixel_diffs, axis=1)  # Sum of each column's transitions: one entry per column, counting black-white and white-black transitions
    percent_crosses = np.sum(column_sums >= 6) / np.sum(column_sums >= 4)  # Percentage of columns with 6 transitions among columns with 4 transitions
    if percent_crosses > 0.4:  # Crossed heart criterion
        cv2.rectangle(final_image, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.imwrite("crossed_heart.jpg", cropped_image)
    else:
        cv2.imwrite("normal_heart.jpg", cropped_image)
cv2.imwrite("all_crossed_hearts.jpg", final_image)
This approach can be tested on more images to find its accuracy.

Issue with recognizing people by their clothes color under non-severe illumination changes

I am interested in human following using a real robot.
I'd like to use the color of clothes as a key feature to identify the target person in front of the robot and follow him/her, but it is a weak feature: even a very simple illumination change breaks it. So I need to either switch to another algorithm or update the (RGB) values online in real time, but I don't have enough experience with image processing.
This is my full code for color detection:
import cv2
import numpy as np
from imutils.video import FPS
# capturing video through webcam
import time
cap = cv2.VideoCapture(0)
width = cap.get(3)   # float
height = cap.get(4)  # float
print(width, height)
time.sleep(2.0)
fps = FPS().start()

while True:
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue

    # blue color
    blue_lower = np.array([99, 115, 150], np.uint8)
    blue_upper = np.array([110, 255, 255], np.uint8)
    blue = cv2.inRange(hsv, blue_lower, blue_upper)
    kernal = np.ones((5, 5), "uint8")
    blue = cv2.dilate(blue, kernal)
    res_blue = cv2.bitwise_and(img, img, mask=blue)

    # Tracking blue
    (_, contours, hierarchy) = cv2.findContours(blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x API
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Blue Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))

    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break

    fps.update()

# stop the timer and display FPS information
fps.stop()
# print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
# print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
These are the outputs:
1 - the person is recognized by his clothes color
2 - the person is lost, even though the illumination change is very mild, not severe
Any ideas or suggestions will be appreciated
It does look like you need a somewhat more advanced color-similarity function to handle complex cases. Delta E will be the right starting point.
A proper threshold, or several colors with associated thresholds, will help to achieve pretty accurate results:
See the list of colours on the right side
Complete example.
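To make the Delta E suggestion concrete, here is a minimal sketch using the simple CIE76 formula (Euclidean distance in Lab space); the reference colour and the threshold of 18 are placeholders you would tune for the tracked person's clothes:
import cv2
import numpy as np

img = cv2.imread('frame.png')

# float conversion so OpenCV returns L in [0, 100] and a/b roughly in [-127, 127]
lab = cv2.cvtColor(img.astype(np.float32) / 255.0, cv2.COLOR_BGR2LAB)

# reference colour of the clothes, converted to Lab (placeholder BGR value)
ref_bgr = np.array([[[200, 80, 30]]], dtype=np.float32) / 255.0
ref_lab = cv2.cvtColor(ref_bgr, cv2.COLOR_BGR2LAB)[0, 0]

# CIE76 Delta E: per-pixel perceptual distance to the reference colour
delta_e = np.linalg.norm(lab - ref_lab, axis=2)

# keep pixels close enough to the reference colour
mask = (delta_e < 18).astype(np.uint8) * 255
cv2.imwrite('mask.png', mask)
Because the comparison happens in Lab space, this is far less sensitive to brightness changes than a fixed RGB/HSV range; for higher perceptual accuracy, CIE76 can be swapped for CIEDE2000 (e.g. skimage.color.deltaE_ciede2000).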

Detecting circles in OpenCV

Though I realize that there is no "one size fits all" setting for OpenCV's HoughCircles, I'm having quite a bit of trouble finding even one reasonable set of parameters.
My input image is the following photo, which contains some pretty obvious big black circles, as well as some noise around it:
I tried playing with the p1 and p2 arguments, to try and get precisely the four black circles detected (and optionally the tape roll at the top -- that's not required but I wouldn't mind if it matched either).
import numpy as np
import cv2
gray = frame = cv2.imread('testframe2.png')
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,(5,5),0)
# gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
p1 = 200
p2 = 55
while True:
    out = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10, param1=p1, param2=p2, minRadius=10, maxRadius=0)
    if circles is not None:
        for (x, y, r) in circles[0]:
            cv2.rectangle(out, (int(x - r), int(y - r)), (int(x + r), int(y + r)), (255, 0, 0))
            cv2.putText(out, "r = %d" % int(r), (int(x + r), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0))
    cv2.putText(out, "p: (%d, %d)" % (p1, p2), (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
    cv2.imshow('debug', out)
    if cv2.waitKey(0) & 0xFF == ord('x'):
        break
    elif cv2.waitKey(0) & 0xFF == ord('q'):
        p1 += 5
    elif cv2.waitKey(0) & 0xFF == ord('a'):
        p1 -= 5
    elif cv2.waitKey(0) & 0xFF == ord('w'):
        p2 += 5
    elif cv2.waitKey(0) & 0xFF == ord('s'):
        p2 -= 5
cv2.destroyAllWindows()
It seems the best I can do is detect the big circle several times but not the small one at all, or get a lot of false positives:
I've Read The F** Manual but it does not help me further: how do I somewhat reliably detect the circles and nothing but the circles in this image?
There was a bit of manual tweaking with the HoughCircles params, but this gives the result you're looking for. I've used the OpenCV Wrapper library which just simplifies some things.
import cv2
import opencv_wrapper as cvw
import numpy as np
frame = cv2.imread("tape.png")
gray = cvw.bgr2gray(frame)
thresh = cvw.threshold_otsu(gray, inverse=True)
opened = cvw.morph_open(thresh, 9)
circles = cv2.HoughCircles(
opened, cv2.HOUGH_GRADIENT, 1, 10, param1=100, param2=17, minRadius=5, maxRadius=-1
)
if circles is not None:
    circles = np.around(circles).astype(int)
    for circle in circles[0]:
        cv2.floodFill(thresh, None, (circle[0], circle[1]), 155)
only_circles = thresh.copy()
only_circles[only_circles != 155] = 0
contours = cvw.find_external_contours(only_circles)
cvw.draw_contours(frame, contours, (255, 0, 255), thickness=2)
cv2.imwrite("tape_result.png", frame)
I used HoughCircles to find just the centers, as suggested in the documentation note.
I then used floodFill to fill the circles. Note that the left-most circle is very close to the edge. If the image was blurred, the flood filling would go into the background.
Disclosure: I'm the author of OpenCV Wrapper. Haven't added Hough Circles and flood filling yet.
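For anyone who prefers to stay with plain OpenCV, roughly the same pipeline without the wrapper would look like this; the Otsu threshold and the 9x9 opening kernel are meant to mirror the wrapper calls above:
import cv2
import numpy as np

frame = cv2.imread("tape.png")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# Otsu threshold, inverted so the dark circles become white blobs
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# morphological opening to remove small specks before the Hough transform
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

circles = cv2.HoughCircles(
    opened, cv2.HOUGH_GRADIENT, 1, 10, param1=100, param2=17, minRadius=5, maxRadius=-1
)

if circles is not None:
    for x, y, r in np.around(circles[0]).astype(int):
        cv2.floodFill(thresh, None, (int(x), int(y)), 155)  # fill each detected circle

only_circles = np.where(thresh == 155, 255, 0).astype(np.uint8)
contours, _ = cv2.findContours(only_circles, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x return signature
cv2.drawContours(frame, contours, -1, (255, 0, 255), 2)
cv2.imwrite("tape_result.png", frame)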
