How to detect test strips with OpenCV?

I'm a newbie to computer vision, and I'm trying to detect all the test strips in this image:
The result I'm trying to get:
I assume it should be fairly easy, because all the target objects are rectangular and have a fixed aspect ratio. But I have no idea which algorithm or function I should use.
I've tried edge detection and the 2D feature detection example in OpenCV, but the results are not ideal. How can I detect these similar objects that differ only in small details?
Update:
The test strips can vary in color, and of course in the shade of the result lines. But they all have the same reference lines, as shown in the picture:
I don't know how I should describe these simple features for object detection, as most examples I found online are for complex objects like a building or a face.
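One hedged starting point for rectangular targets with a fixed aspect ratio is to binarize the image and filter contours by the aspect ratio of their minimum-area rectangle. This is a sketch only; the filename 'strips.jpg', the Otsu binarization, the 8.0 aspect cutoff, and the 500 area cutoff are all assumptions to tune:
import cv2
import numpy as np

img = cv2.imread('strips.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# binarize with Otsu so the strips separate from the background
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# OpenCV 4.x returns (contours, hierarchy); 3.x returns three values
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    rect = cv2.minAreaRect(c)  # ((cx, cy), (w, h), angle)
    (w, h) = rect[1]
    if min(w, h) == 0:
        continue
    aspect = max(w, h) / min(w, h)
    # keep only elongated, strip-like rectangles
    if aspect > 8.0 and cv2.contourArea(c) > 500:
        box = cv2.boxPoints(rect).astype(np.int32)
        cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
cv2.imwrite('strips_detected.jpg', img)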

The solution is not exact, but it provides a good starting point. You will have to play with the parameters, though. It would help greatly if you partitioned the strips using some threshold method and then applied Hough lines to each strip individually, as @api55 mentioned.
Here are the results I got.
Code:
import cv2
import numpy as np
# read image
img = cv2.imread('KbxN6.jpg')
# filter it
img = cv2.GaussianBlur(img, (11, 11), 0)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get edges using laplacian
laplacian_val = cv2.Laplacian(gray_img, cv2.CV_32F)
# lap_img = np.zeros_like(laplacian_val, dtype=np.float32)
# cv2.normalize(laplacian_val, lap_img, 1, 255, cv2.NORM_MINMAX)
# cv2.imwrite('laplacian_val.jpg', lap_img)
# apply threshold to edges
ret, laplacian_th = cv2.threshold(laplacian_val, thresh=2, maxval=255, type=cv2.THRESH_BINARY)
# filter out salt and pepper noise
laplacian_med = cv2.medianBlur(laplacian_th, 5)
# cv2.imwrite('laplacian_blur.jpg', laplacian_med)
laplacian_fin = np.array(laplacian_med, dtype=np.uint8)
# get lines in the filtered laplacian using Hough lines
lines = cv2.HoughLines(laplacian_fin,1,np.pi/180,480)
# iterate over the detected lines; in OpenCV 3+ HoughLines returns an array
# of shape (N, 1, 2), so indexing lines[0] would only visit the first line
for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    # overlay line on original image
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
# cv2.imwrite('processed.jpg', img)
# cv2.imshow('Window', img)
# cv2.waitKey(0)
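To illustrate the partitioning suggestion above, here is a hedged sketch: separate the strips into connected components first, then run the line search on each strip in isolation, where far fewer distracting edges compete for votes. The Otsu threshold, the area cutoff of 1000, and the Hough parameters are guesses to tune:
import cv2
import numpy as np

img = cv2.imread('KbxN6.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# label each strip as its own connected component
n, labels, stats, _ = cv2.connectedComponentsWithStats(binary)
for i in range(1, n):  # label 0 is the background
    x, y, w, h, area = stats[i]
    if area < 1000:
        continue
    # per-strip edge detection and Hough transform
    edges = cv2.Canny(binary[y:y + h, x:x + w], 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=w // 2, maxLineGap=5)
    if lines is not None:
        for x1, y1, x2, y2 in lines[:, 0]:
            # shift line endpoints back into full-image coordinates
            cv2.line(img, (x + x1, y + y1), (x + x2, y + y2), (0, 0, 255), 2)
cv2.imwrite('strips_partitioned.jpg', img)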

This is an alternative solution, using the function findContours in combination with Canny edge detection. The code is loosely based on this tutorial.
import cv2
import numpy as np
import imutils
image = cv2.imread('test.jpg')
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(resized,100,200)
cv2.imshow('dsd2', edges)
cv2.waitKey(0)
cnts = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_NONE)
# imutils.grab_contours unwraps the return value across OpenCV 2/3/4
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
    # compute the center of the contour (skip degenerate contours with zero area)
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)
    # multiply the contour (x, y)-coordinates by the resize ratio,
    # then draw the contours on the original image
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
#show the output image
#cv2.imshow("Image", image)
#cv2.waitKey(0)
cv2.imwrite("erg.jpg",image)
Result:
I guess it can be improved by tuning the following parameters:
the image resizing width
the contour approximation mode, e.g. CHAIN_APPROX_NONE vs. CHAIN_APPROX_SIMPLE (see the findContours docs)
It may also be useful to filter out small contours, or to merge contours that are close to each other; a hedged sketch of that follows.
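As a sketch only: small contours can be dropped by area, and nearby boxes merged with a naive overlap test on slightly inflated bounding rectangles. The 10-pixel padding and the 200 area cutoff are assumptions:
def merge_close_rects(rects, pad=10):
    # Merge bounding rects whose padded versions overlap (naive O(n^2) passes).
    rects = list(rects)
    merged = True
    while merged:
        merged = False
        out = []
        while rects:
            x, y, w, h = rects.pop()
            i = 0
            while i < len(rects):
                x2, y2, w2, h2 = rects[i]
                # overlap test on the padded rectangles
                if (x - pad < x2 + w2 + pad and x2 - pad < x + w + pad and
                        y - pad < y2 + h2 + pad and y2 - pad < y + h + pad):
                    # replace both rects with a single enclosing rectangle
                    nx, ny = min(x, x2), min(y, y2)
                    w = max(x + w, x2 + w2) - nx
                    h = max(y + h, y2 + h2) - ny
                    x, y = nx, ny
                    rects.pop(i)
                    merged = True
                else:
                    i += 1
            out.append((x, y, w, h))
        rects = out
    return rects

# usage with the contours from above; 200 is an assumed minimum area:
# rects = [cv2.boundingRect(c) for c in cnts if cv2.contourArea(c) > 200]
# rects = merge_close_rects(rects)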


Camera calibration with OpenCV-python for autonomous car doesn't work well

Problems
Doesn't work well with my image
When I use my code with my own image, it doesn't work well. I only edited 'wc' and 'hc' from the OpenCV docs example.
import glob
import cv2 as cv
import numpy as np
wc = 7
hc = 4
# termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((wc * hc, 3), np.float32)
objp[:, :2] = np.mgrid[0:hc, 0:wc].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('1.jpg')
for fname in images:
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv.findChessboardCorners(gray, (hc, wc), None)
    # If found, add object points, image points (after refining them)
    print(ret, wc, hc)
    if True:
        objpoints.append(objp)
        corners2 = cv.cornerSubPix(gray, corners, (20, 20), (-1, -1),
                                   criteria)  # image, corners, winSize, zeroZone, criteria
        imgpoints.append(corners2)
        # Draw and display the corners
        cv.drawChessboardCorners(img, (hc, wc), corners2, ret)
        cv.imwrite('ChessboardCorners.png', img)
        cv.waitKey(0)
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
img = cv.imread('1.jpg')
print(img.shape[:2])
h, w = img.shape[:2]
newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
# undistort
dst = cv.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y + h, x:x + w]
cv.imwrite('calibresult.png', dst)
cv.waitKey(0)
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error: {}".format(mean_error / len(objpoints)))
print("\n\n", fname, "clear")
cv.destroyAllWindows()
exit(0)
original image - not well
ChessboardCorners - (I'm not sure whether this one is wrong)
calibresult image - not well
Works well with other images
But when I use my code with the image from the example in the OpenCV docs, it works well.
The code is identical to the one above, except for these lines:
wc = 6
hc = 7
images = glob.glob('img.png')
img = cv.imread('img.png')
I removed the images because "Your question appears to be spam."
Please see the images in the OpenCV docs.
Please give me a solution to this problem.
Do I need to modify the parameters, or what should I do?
Is my chessboard wrong?
Below is what I have tried.
First, I tried to find the correct values of 'wc' and 'hc'.
I used this code to search:
import glob
import cv2 as cv
import numpy as np
for i in range(3, 50):
    for j in range(i + 1, 50):  # I used this order because I found last time that the order of the variables does not matter
        wc = i
        hc = j
        # termination criteria
        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((wc * hc, 3), np.float32)
        objp[:, :2] = np.mgrid[0:hc, 0:wc].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        images = glob.glob('1.jpg')
        for fname in images:
            img = cv.imread(fname)
            gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv.findChessboardCorners(gray, (hc, wc), None)
            # If found, add object points, image points (after refining them)
            print(ret, wc, hc)
And the result here:
False 3 4
False 3 5
False 3 6
···
False 4 5
False 4 6
**True 4 7**
False 4 8
False 4 9
···
False 47 48
False 47 49
False 48 49
Process finished with exit code 0
I also found that the image from the example in the OpenCV docs has different 'wc' and 'hc' values: (4, 4).
And result here:
ChessboardCorners by (4, 4)
calibresult by (4, 4)
So, I'm expecting that the 'wc' and 'hc' of my image (4, 7) might be too small.
Should I increase the max and do a brute-force search again?
I can't tell for sure, but it looks like you are only using a single input image for calibration. If that's true, try increasing to at least 5 images (more would be better) and see if that helps. The images should be at different angles and distances.
I notice that your lens has significant distortion. A few suggestions for getting that to work well:
Use the rational model for distortion - the basic kappa 1, kappa 2 model won't do well.
Your data set will need to include image points from all parts of the image, including near the edges and corners of the image. This can be difficult/impossible to achieve using the normal chessboard pattern (because the entire pattern must be visible in the image) - I suggest using the ChAruco calibration pattern/functions. This uses a modified chessboard pattern that includes Aruco markers embedded in the white squares, which allows for partial patterns to be used.
Note that the wc and hc parameters you are searching for are used to describe the chessboard pattern width and height. This should be known to you ahead of time and you shouldn't need to search for it.
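For the rational-model suggestion, a minimal sketch, assuming the objpoints/imgpoints/gray variables from the question's calibration loop, is to pass the CALIB_RATIONAL_MODEL flag so calibrateCamera fits the extra radial coefficients:
import cv2 as cv

# objpoints, imgpoints, gray come from the calibration loop in the question
flags = cv.CALIB_RATIONAL_MODEL  # fit k1..k6 instead of just k1, k2
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None, flags=flags)
# dist now holds 8 coefficients: k1, k2, p1, p2, k3, k4, k5, k6
print("RMS reprojection error:", ret)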

HoughLinesP and thresholding

I am using OpenCV HoughLinesP to detect lines in a parking lot. Here is the source image:
When I did a hough transform-p to detect the lines, I got final image like this.
It did detect empty spaces. Any ideas how these noisy lines on top of the cars can be removed? Or any direction on alternative algorithms or approaches highly appreciated.
import cv2
import numpy as np

img = cv2.imread('Parking-Lot.jpg')
threshold = 100
minLineLength = 60
rho = 2
maxLineGap = 20
theta = np.pi / 180
edges = cv2.Canny(img, 100, 200)
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), minLineLength=minLineLength, maxLineGap=maxLineGap)
for i in range(len(lines)):
    for line in lines[i]:
        cv2.line(img, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 2)
cv2.imwrite("lines.jpg", img)
You can remove most of the noise by thresholding your image before you apply the edge detection. That way you will remove (most of) the cars and keep your white space lines you are interested in:
import cv2
import numpy as np
img = cv2.imread('Parking-Lot.jpg')
threshold=100
minLineLength = 60
rho=2
maxLineGap=20
theta = np.pi/180
# here you convert the image to grayscale and then threshold to binary
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
# continue with the thresholded image instead
edges = cv2.Canny(thresh, 100, 200)
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), minLineLength=minLineLength, maxLineGap=maxLineGap)
for i in range(len(lines)):
    for line in lines[i]:
        cv2.line(img, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 2)
cv2.imwrite("lines.jpg", img)
This will yield you a much cleaner result:
Feel free to experiment with the threshold parameters; you will need to find a threshold that excludes most of the cars while keeping all the lines that you want to detect.

Extract face rectangle from ID card

I'm researching the subject of extracting information from ID cards and have found a suitable algorithm to locate the face on the front. OpenCV has Haar cascades for that, but I'm unsure what can be used to extract the full rectangle the person is in, instead of just the face (as is done in https://github.com/deepc94/photo-id-ocr). The few ideas that I'm yet to test are:
Find the second-largest rectangle inside the card that contains the face rect
"Explode" the face rectangle until it hits the boundary
Play around with filters to see what can be seen
What can be recommended to try here as well? Any thoughts, ideas or even existing examples are fine.
Normal approach:
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread("a.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray,128,255,cv2.THRESH_BINARY)
cv2.imshow("thresh",thresh)
thresh = cv2.bitwise_not(thresh)
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(7, 7))
dilate = cv2.dilate(thresh, element, iterations=6)  # pass iterations by keyword; the third positional argument is dst
cv2.imshow("dilate", dilate)
erode = cv2.erode(dilate, element, iterations=6)
cv2.imshow("erode", erode)
morph_img = thresh.copy()
cv2.morphologyEx(src=erode, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img)
cv2.imshow("morph_img",morph_img)
# OpenCV 3.x returns (image, contours, hierarchy); in 4.x use: contours, _ = cv2.findContours(...)
_, contours, _ = cv2.findContours(morph_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
sorted_areas = np.sort(areas)
cnt=contours[areas.index(sorted_areas[-3])] #the third biggest contour is the face
r = cv2.boundingRect(cnt)
cv2.rectangle(image,(r[0],r[1]),(r[0]+r[2],r[1]+r[3]),(0,0,255),2)
cv2.imshow("img",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
I found that the first two biggest contours are the boundary, and the third biggest contour is the face. Result:
There is also another way to investigate the image, using the sum of pixel values along each axis:
x_hist = np.sum(morph_img,axis=0).tolist()
plt.plot(x_hist)
plt.ylabel('sum of pixel values by X-axis')
plt.show()
y_hist = np.sum(morph_img,axis=1).tolist()
plt.plot(y_hist)
plt.ylabel('sum of pixel values by Y-axis')
plt.show()
Based on those pixel sums over the two axes, you can crop the region you want by setting thresholds on them.
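A hedged sketch of that cropping idea (the 0.5 fraction of the peak is an assumption to tune): keep the span of rows and columns whose projection exceeds a fraction of its maximum.
import numpy as np

def crop_by_projection(morph_img, frac=0.5):
    # column/row sums of the binary morphology image
    x_hist = morph_img.sum(axis=0)
    y_hist = morph_img.sum(axis=1)
    if x_hist.max() == 0 or y_hist.max() == 0:
        return morph_img  # nothing to crop
    # indices where the projection is "strong"
    xs = np.where(x_hist > frac * x_hist.max())[0]
    ys = np.where(y_hist > frac * y_hist.max())[0]
    # bounding span of the strong region
    return morph_img[ys.min():ys.max() + 1, xs.min():xs.max() + 1]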
Haar cascades approach (the simplest)
# Using cascade Classifiers
import numpy as np
import cv2
# We point OpenCV's CascadeClassifier function to where our
# classifier (XML file format) is stored
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Load our image then convert it to grayscale
image = cv2.imread('./your/image/path.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('Original image', image)
# Our classifier returns the ROI of the detected face as a tuple
# storing the top-left and the bottom-right coordinates
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
# When no faces are detected, face_classifier returns an empty tuple
if len(faces) == 0:
    print("No faces found")
# We iterate through our faces array and draw a rectangle
# over each face in faces
for (x, y, w, h) in faces:
    x = x - 25  # Padding trick to take the whole face, not just the Haar cascade region
    y = y - 40  # Same here...
    cv2.rectangle(image, (x, y), (x + w + 50, y + h + 70), (27, 200, 10), 2)
cv2.imshow('Face Detection', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Link to the haarcascade_frontalface_default file
Update to @Sanix darker's code:
# Using cascade Classifiers
import numpy as np
import cv2
img = cv2.imread('link_to_your_image')
face_classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
scale_percent = 60 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
image = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# face classifier
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
# When no faces are detected, face_classifier returns an empty tuple
if len(faces) == 0:
    print("No faces found")
# We iterate through our faces array and draw a rectangle
# over each face in faces
for (x, y, w, h) in faces:
    x = x - 25  # Padding trick to take the whole face, not just the Haar cascade region
    y = y - 40  # Same here...
    cv2.rectangle(image, (x, y), (x + w + 50, y + h + 70), (27, 200, 10), 2)
cv2.imshow('Face Detection', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# if you want to crop the face, use the code below
for (x, y, width, height) in faces:
    roi = image[y:y + height, x:x + width]
    cv2.imwrite("face.png", roi)

OpenCV - Extracting lines on a graph

I would like to create a program that is able to extract lines from a graph.
For example, if a graph like this is inputted, I would just want the red line to be outputted.
Below I have tried to do this using a Hough line transform; however, I do not get very promising results.
import cv2
import numpy as np
graph_img = cv2.imread("/Users/2020shatgiskessell/Desktop/Graph1.png")
gray = cv2.cvtColor(graph_img, cv2.COLOR_BGR2GRAY)
kernel_size = 5
#grayscale image
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
#Canny edge detecion
edges = cv2.Canny(blur_gray, 50, 150)
#Hough Lines Transformation
#distance resoltion of hough grid (pixels)
rho = 1
#angular resolution of hough grid (radians)
theta = np.pi/180
#minimum number of votes
threshold = 15
#play around with these
min_line_length = 25
max_line_gap = 20
#make new image
line_image = np.copy(graph_img)
#returns array of lines
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 2)
lines_edges = cv2.addWeighted(graph_img, 0.8, line_image, 1, 0)
cv2.imshow("denoised image",edges)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
This produces the output image below, which does not accurately recognize the graph line. How might I go about doing this?
Note: For now, I am not concerned about the graph titles or any other text.
I would also like the code to work for other graph images as well, such as:
etc.
If the graph does not have much noise around it (like your example), I would suggest thresholding your image with Otsu's threshold instead of looking for edges. Then you simply search for contours, select the biggest one (the graph), and draw it on a blank mask. After that you can perform a bitwise operation on the image with the mask, and you will get a black image with the graph. If you like the white background better, simply change all black pixels to white. The steps are written in the example. Hope it helps a bit. Cheers!
Example:
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Convert black pixels back to white
black = np.where(res==0)
res[black[0], black[1], :] = [255, 255, 255]
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
EDIT:
For noisier pictures you could try the code below. Note that different graphs have different noise, so it may not work on every graph image, since the denoising process would be specific to each case. For different noise you can use different ways to remove it, for example histogram equalization, eroding, blurring, etc. This code works well for all 3 graphs. The steps are written in the comments. Hope it helps. Cheers!
import numpy as np
import cv2
# Read the image and create a blank mask
img = cv2.imread('graph.png')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Perform opening on the thresholded image (erosion followed by dilation)
kernel = np.ones((2,2),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(opening,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Threshold the image again
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Find all non-zero pixels in the thresholded image
non_zero = cv2.findNonZero(thresh)
# Set those pixels in the result to white
for i in range(0, len(non_zero)):
    first_x = non_zero[i][0][0]
    first_y = non_zero[i][0][1]
    res[first_y, first_x] = 255
# Display the image
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Segmenting products on the shelf

I am trying to detect the edges of products on a shelf using histogram projections, but I am stuck at 2 points.
The challenges that I'm facing are:
How to get the longest non-shelf segment from the image, i.e. detect the width of the widest product on the shelf among the available ones.
How to achieve morphological reconstruction using custom markers. To eliminate all small horizontal segments, I am generating 2 markers, which can be seen in 'markers.png' (attached). With them, I am calculating the minimum of the reconstruction outputs from both markers.
Need assistance on this.
Thanks a lot.
Below is my Python code for the same.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import math
# Read the input image
img = cv.imread('C:\\Users\\672059\\Desktop\\p2.png')
# Converting from BGR to RGB. Default is BGR.
# img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
# Resize the image to 150,150
img_resize = cv.resize(img, (150, 150))
# Get the dimensions of the image
img_h, img_w, img_c = img_resize.shape
# Split the resized image into its channels (note: OpenCV loads images as BGR)
blue = img_resize[:, :, 0]
green = img_resize[:, :, 1]
red = img_resize[:, :, 2]
# Defining a vse for erosion
vse = np.ones((img_h, img_w), dtype=np.uint8)
# Morphological Erosion for red channel
red_erode = cv.erode(red, vse, iterations=1)
grad_red = cv.subtract(red, red_erode)
# Morphological Erosion for green channel
green_erode = cv.erode(green, vse, iterations=1)
grad_green = cv.subtract(green, green_erode)
# Morphological Erosion for blue channel
blue_erode = cv.erode(blue, vse, iterations=1)
grad_blue = cv.subtract(blue, blue_erode)
# Stacking the individual channels into one processed image
grad = [grad_red, grad_green, grad_blue]
retrieved_img = np.stack(grad, axis=-1)
retrieved_img = retrieved_img.astype(np.uint8)
retrieved_img_gray = cv.cvtColor(retrieved_img, cv.COLOR_RGB2GRAY)
plt.title('Figure 1')
plt.imshow(cv.bitwise_not(retrieved_img_gray), cmap=plt.get_cmap('gray'))
plt.show()
# Hough Transform of the image to get the longest non shelf boundary from the image!
edges = cv.Canny(retrieved_img_gray, 127, 255)
minLineLength = img_w
maxLineGap = 10
lines = cv.HoughLinesP(edges, 1, np.pi/180, 127, minLineLength=minLineLength, maxLineGap=maxLineGap)
temp = img.copy()
l = []
for x in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[x]:
        cv.line(temp, (x1, y1), (x2, y2), (0, 255, 0), 2)
        d = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        l.append(d)
# Defining a hse for erosion
hse = np.ones((1, 7), dtype=np.uint8)
opening = cv.morphologyEx(retrieved_img_gray, cv.MORPH_OPEN, hse)
plt.title('Figure 2')
plt.subplot(1, 2, 1), plt.imshow(img)
plt.subplot(1, 2, 2), plt.imshow(cv.bitwise_not(opening), 'gray')
plt.show()
# Dilation with disk shaped structuring element
horizontal_size = 7
horizontalstructure = cv.getStructuringElement(cv.MORPH_ELLIPSE, (horizontal_size, 1))
dilation = cv.dilate(opening, horizontalstructure)
plt.title('Figure 3')
plt.imshow(cv.bitwise_not(dilation), 'gray')
plt.show()
# Doing canny edge on dilated image
edge = cv.Canny(dilation, 127, 255)
plt.title('Figure 4')
plt.imshow(edges, cmap='gray')
plt.show()
h_projection = edge.sum(axis=1)
print(h_projection)
plt.title('Projection')
plt.plot(h_projection)
plt.show()
listing = []
for i in range(1, len(h_projection) - 1):
    if h_projection[i - 1] == 0 and h_projection[i] == 0:
        listing.append(dilation[i])
        listing.append(dilation[i - 1])
        a = np.array([np.array(b) for b in l])
        h = len(l)
        _, contours, _ = cv.findContours(a, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        x, y, w, h = cv.boundingRect(contours[0])
        y = y + i - h
        cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        l.clear()
plt.imshow(img)
plt.show()
# Generating a mask
black_bg = np.ones([img_h, img_w], dtype=np.uint8)
# Clone the black bgd image
left = black_bg.copy()
right = black_bg.copy()
# Taking 10% of the image width
ten = int(0.1 * img_w)
left[:, 0:ten+1] = 0
right[:, img_w-ten:img_w+1] = 0
plt.title('Figure 4')
plt.subplot(121), plt.imshow(left, 'gray')
plt.subplot(122), plt.imshow(right, 'gray')
plt.show()
# Marker = left and right. Mask = dilation
mask = dilation
marker_left = left
marker_right = right
markers.png link: https://i.stack.imgur.com/45WJ6.png
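On the morphological reconstruction point, here is a minimal sketch of the standard greyscale-reconstruction-by-iterative-dilation algorithm (not from the post; `marker` would be one of your two markers and `mask` your dilation image, and the 3x3 kernel is an assumption):
import cv2
import numpy as np

def morphological_reconstruction(marker, mask, kernel=None):
    # Reconstruct `mask` from `marker`: dilate the marker, clip it to the
    # mask, and repeat until it stops changing.
    if kernel is None:
        kernel = np.ones((3, 3), np.uint8)
    marker = np.minimum(marker, mask)
    while True:
        expanded = cv2.dilate(marker, kernel)
        expanded = np.minimum(expanded, mask)  # never grow past the mask
        if (expanded == marker).all():
            return expanded
        marker = expanded

# usage sketch: reconstruct from both markers and take the minimum
# rec_left = morphological_reconstruction(marker_left * mask, mask)
# rec_right = morphological_reconstruction(marker_right * mask, mask)
# rec = np.minimum(rec_left, rec_right)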
********************************************************************************
Based on your input image, I would:
take a picture of an empty fridge
then compare the current image with the empty one
play with morphological operations
get connected components > size N
If you can't take an empty-fridge image:
segment the shelves (threshold the white parts)
undo the rotation of the image by using the image moments of the shelves
for each shelf:
threshold on saturation
do a vertical projection
count the maxima (a sketch of these three steps follows below)
Thresholded:
Erode-dilate:
Connected components (width > 10 * height, and > minsize):
And you have the shelves.
Now take the average Y of each shelf and cut the original image into pieces:
Dither to 8 colors:
and threshold:
Connected components (h > 1.5 * w, minsize... this is hard here, I played with it :)
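A hedged sketch of the per-shelf saturation threshold, vertical projection, and maxima count described above; the saturation cutoff of 40, the half-peak rule, and the minimum gap of 5 columns are assumptions to tune:
import cv2
import numpy as np

def count_products(shelf_bgr, sat_thresh=40, min_gap=5):
    # threshold on saturation: colorful packaging vs. a white shelf
    hsv = cv2.cvtColor(shelf_bgr, cv2.COLOR_BGR2HSV)
    _, sat_bin = cv2.threshold(hsv[:, :, 1], sat_thresh, 255, cv2.THRESH_BINARY)
    # vertical projection: count saturated pixels per column
    proj = (sat_bin > 0).sum(axis=0)
    # count maxima as runs of columns above half the peak,
    # treating gaps shorter than min_gap as part of the same run
    above = proj > 0.5 * proj.max()
    runs, in_run, gap = 0, False, 0
    for v in above:
        if v:
            if not in_run:
                runs += 1
                in_run = True
            gap = 0
        else:
            gap += 1
            if gap >= min_gap:
                in_run = False
    return runs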
