How to find the distance between Hough lines in OpenCV?

I am new to opencv-python. I have found the lines in an image with cv2.HoughLinesP. The lines it draws are discontinuous, and I get several lines per edge. I need to draw only one line per edge and then find the distance between the lines that are found.
The output image is shown below.
"""
Created on Fri Nov 8 11:41:16 2019
#author: romanth.chowan
"""
import cv2
import numpy as np

def getSlopeOfLine(line):
    xDis = line[0][2] - line[0][0]
    if xDis == 0:
        return None  # vertical line: slope is undefined
    return (line[0][3] - line[0][1]) / xDis

if __name__ == '__main__':
    inputFileName_ = r"C:\Users\romanth.chowan\Desktop\opencv\stent spec\2prox.jpeg"
    img = cv2.imread(inputFileName_)
    img1 = cv2.GaussianBlur(img, (5, 5), 0)
    gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    edges = cv2.Laplacian(gray, cv2.CV_8UC1)  # Laplacian edge detection
    # minLineLength and maxLineGap must be passed as keywords here;
    # positionally they would fill the optional output argument instead
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 300,
                            minLineLength=10, maxLineGap=10)
    print(len(lines))

    parallelLines = []
    for a in lines:
        for b in lines:
            if a is not b:
                slopeA = getSlopeOfLine(a)
                slopeB = getSlopeOfLine(b)
                if slopeA is not None and slopeB is not None:
                    if abs(slopeA - slopeB) <= 10:
                        parallelLines.append({'lineA': a, 'lineB': b})

    for pairs in parallelLines:
        lineA = pairs['lineA']
        lineB = pairs['lineB']
        leftx, boty, rightx, topy = lineA[0]
        cv2.line(img, (leftx, boty), (rightx, topy), (0, 0, 255), 2)
        left_x, bot_y, right_x, top_y = lineB[0]
        cv2.line(img, (left_x, bot_y), (right_x, top_y), (0, 0, 255), 2)
    cv2.imwrite('linesImg.jpg', img)
Output image after drawing lines:

It's mostly a geometric task, not specific to OpenCV.
For each line you have two points, (x1, y1) and (x2, y2), which are already used in your getSlopeOfLine(line) method.
You can denote each line in the form:
ax + by + c = 0
To do that, use the line's two known points:
(y1 - y2)*x + (x2 - x1)*y + (x1*y2 - x2*y1) = 0
Note that parallel lines have the same a and b but a different c.
Then measure the distance between any two of them (the distance between non-parallel lines is zero, since they have a crossing point):
d = abs(c2 - c1) / sqrt(a*a + b*b)
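For example, the parallel lines x + y - 1 = 0 and x + y - 5 = 0 give d = |-5 - (-1)| / sqrt(1*1 + 1*1) = 4 / sqrt(2) ≈ 2.83.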
In Euclidean geometry a line may be denoted in several ways, and one may suit a specific task better than another.
Currently you evaluate the line's slope; from the formula above we can get:
y = (-a / b)x - c / b
which is the same as (b has another meaning now):
y = kx + b
Or, using the line's two points:
y = (y1 - y2) / (x1 - x2) * x + (x1*y2 - x2*y1) / (x1 - x2)
where k is the line's slope (tan(alpha)) and b is the shift.
Now you just match parallel lines (ones with a close k). You can take the line's shift into account to merge several parallel lines into one, as in the sketch below.
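A minimal sketch of that matching for HoughLinesP output, using the two-point form above (the helper names and the 0.05 tolerance are mine and should be tuned for your image):

import numpy as np

def line_coefficients(line):
    # ax + by + c = 0 from the two endpoints of a HoughLinesP segment,
    # normalized so that a*a + b*b == 1 with a canonical sign
    x1, y1, x2, y2 = line[0]
    a, b, c = y1 - y2, x2 - x1, x1 * y2 - x2 * y1
    norm = np.hypot(a, b)
    a, b, c = a / norm, b / norm, c / norm
    if a < 0 or (a == 0 and b < 0):  # same (a, b) for both segment directions
        a, b, c = -a, -b, -c
    return a, b, c

def parallel_distance(lineA, lineB, tol=0.05):
    # distance between two nearly parallel segments, 0 if they cross
    a1, b1, c1 = line_coefficients(lineA)
    a2, b2, c2 = line_coefficients(lineB)
    if abs(a1 - a2) > tol or abs(b1 - b2) > tol:
        return 0.0  # not parallel: the lines have a crossing point
    return abs(c2 - c1)  # d = |c2 - c1| / sqrt(a*a + b*b), denominator is 1 here

Pairs whose distance comes out below a few pixels belong to the same physical edge and can be merged (for example by averaging their endpoints); the remaining distances are the gaps between edges you are after.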

Related

How to accurately detect and localize car fuse?

Currently I'm working on a project where I need to measure the width of a car fuse wire. In order to achieve that I need to detect and localize the fuse in the image (fuse_image).
My plan is to find the bounding rectangle of the region with the fuse and then search for wire contours in a fixed position of that region (fuse_contours).
I have already tried ORB and BRISK feature-based template matching, but the results were not acceptable. Can anyone suggest possible methods to solve this task?
We can start by applying the Canny operator to see the features of the image. The result is:
The aim is to calculate the width, therefore we only need the left and right outer edges of the fuse. We don't need the inner lines. To remove the inner features we can smooth the image.
How do we accurately calculate the width? Which part of the features can we take as a reference? What if we consider the base? The base features are:
How do we find the base feature coordinates?
Blue point is the one with the highest y coordinate value
Red point is the one with the highest x coordinate value
For all detected line coordinates, we need to find the highest y coordinate value with its corresponding x coordinate value, and the highest x coordinate value with its corresponding y value.
For detecting line coordinates we can use the fast line detector. The result will be:
We can calculate the Euclidean distance, which will be: 146.49 pixels
The idea is based on finding the base and then calculating the Euclidean distance.
Update
The orientation of the fuse can be random.
First, we need to get the fuse part of the image.
Second, we need to get the canny features (or any other filtering method)
At this point we need to find the left (blue dot) and right (red dot) parts of the fuse:
If we connect them:
We will have an approximate length of the fuse.
So how do we find the left and right parts of the fuse?
Finding left part:
1. From the current x1, x2 tuples
2. If min(x1, x2) < x_min
3. x_min = min(x1, x2)
Finding right part:
1. From the current x1, x2 tuples
2. If max(x1, x2) > x_max
3. x_max = max(x1, x2)
This is my idea for approaching the problem. You can modify it for better results.
Code:
# Load libraries
import cv2
import numpy as np
# Load the image
img = cv2.imread("E8XlZ.jpg")
# Get the image dimension
(h, w) = img.shape[:2]
# Convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Get the binary-mask
msk = cv2.inRange(hsv, np.array([0, 24, 161]), np.array([77, 255, 217]))
# Display the mask
cv2.imshow("msk", msk)
cv2.waitKey(0)
# Smooth the image
gauss = cv2.GaussianBlur(msk, (21, 21), 0)
# Canny features
cny = cv2.Canny(gauss, 50, 200)
# Display canny features
cv2.imshow("cny", cny)
cv2.waitKey(0)
# Initialize the fast line detector (cv2.ximgproc requires opencv-contrib-python)
lns = cv2.ximgproc.createFastLineDetector().detect(cny)
# Initialize temporary variables
x_min, x_max, y_min, y_max = w, 0, 0, 0
# Detect the lines
for line in lns:
    # Get current coordinates
    x1 = int(line[0][0])
    y1 = int(line[0][1])
    x2 = int(line[0][2])
    y2 = int(line[0][3])
    # Get maximum coordinates
    if max(x1, x2) > x_max:
        x_max = max(x1, x2)
        y_max = y1 if x_max == x1 else y2
    if min(x1, x2) < x_min:
        x_min = min(x1, x2)
        y_min = y1 if x_min == x1 else y2
# Draw the points
cv2.circle(img, (x_min, int((y_min + y_max)/2)), 3, (255, 0, 0), 5)
cv2.circle(img, (x_max, int((y_min + y_max)/2)), 3, (0, 0, 255), 5)
# Write coordinates to the console
print("Coordinates: ({}, {})->({}, {})".format(x_min, int((y_min + y_max)/2), x_max, int((y_min + y_max)/2)))
# Draw the minimum and maximum coordinates
cv2.line(img, (x_min, int((y_min + y_max)/2)), (x_max, int((y_min + y_max)/2)), (0, 255, 0), 5)
# Calculate the euclidean distance
pt1 = np.array((x_min, int((y_min + y_max)/2)))
pt2 = np.array((x_max, int((y_min + y_max)/2)))
dist = np.linalg.norm(pt1 - pt2)
print("Result: %.2f pixel" % dist)
# Display the result
cv2.imshow("img", img)
cv2.waitKey(0)

Why is cv.matchShapes NOT invariant to translation, as it claims to be?

I have two contours to match (think of them as any arbitrary 2D closed curves). OpenCV claims that its matchShapes function is invariant under translation, rotation and scale. But that doesn't seem to be the case: when I add a shift of (2, 10) to one of the curves, the function returns a different result, let alone if I did something wackier. Why is that?
Reproducible example:
import numpy as np
import cv2 as cv

t = np.arange(0, np.pi, 0.001)
x, y = np.cos(t), np.sin(t)
xy = np.stack([x, y], -1)
print(cv.matchShapes(xy, xy, 1, 0))
print(cv.matchShapes(xy, xy + (2, 10), 1, 0))
The objects you send to cv.matchShapes() need to be contour objects, which are different from a plain 2D NumPy array. The following code converts your curves to a plot,
renders the plot to an image, and finds the contours of the two curves.
Finally cv.matchShapes() is run.
The output: 0 for the self-match and 6.637412841570267e-12 for the match with the translated curve, a pretty accurate match under translation.
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
t = np.arange(0, np.pi, 0.001)
x, y = np.cos(t), np.sin(t)
ax.plot(x, y)
x_new = x + 2
y_new = y + 10
ax.plot(x_new, y_new, 'b')
[s.set_visible(False) for s in ax.spines.values()]
[t.set_visible(False) for t in ax.get_xticklines()]
[t.set_visible(False) for t in ax.get_yticklines()]
ax.axis('off')
plt.savefig('xy.jpg')
xy_img = cv.imread('xy.jpg', cv.IMREAD_COLOR)
xy_cpy = cv.cvtColor(xy_img, cv.COLOR_BGR2GRAY)
(threshold, bw) = cv.threshold(xy_cpy, 127, 255, cv.THRESH_BINARY)
contours, hier = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
contours = contours[0:-1]  # remove the box surrounding the whole image
print(cv.matchShapes(contours[0], contours[0], method=cv.CONTOURS_MATCH_I1, parameter=0))
print(cv.matchShapes(contours[0], contours[1], method=cv.CONTOURS_MATCH_I1, parameter=0))
cv.namedWindow("xy")
cv.drawContours(xy_img, contours, -1, (0, 255, 0), 3)
cv.imshow("xy", xy_img)
cv.waitKey()

How to find the logPolar magnitude scale parameter without manual experiments

I want to understand the logPolar transformation. I have created a picture with a red circle and then applied the OpenCV logPolar transformation.
Here is the code:
import cv2
import numpy as np

r = 300
SOME_EXTRA_SPACE = 5
combinedImg = np.zeros((2 * r + SOME_EXTRA_SPACE, 2 * r + SOME_EXTRA_SPACE, 3), np.uint8)
cv2.circle(combinedImg, (r, r), r, (0, 0, 255), thickness=3)
cv2.circle(combinedImg, (r, r), 3, (110, 50, 50), thickness=10)  # circle center
polar = cv2.logPolar(combinedImg, (r, r), r, cv2.WARP_FILL_OUTLIERS)
cv2.imshow("original image", combinedImg)
cv2.imshow("polar", polar)
cv2.waitKey()
cv2.destroyAllWindows()
So I am expecting the circle to become a red line in the end. Instead I got just the transformed circle center:
And if I change M, the magnitude scale parameter of logPolar, to 50 instead of the radius, it works as expected. Why is M not equal to the radius, and how can I find it without manual experiments? Isn't knowing the radius of the circle enough?
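For what it's worth, a sketch of the relationship, assuming the mapping documented for cv2.logPolar (it is equivalent to cv2.warpPolar with WARP_POLAR_LOG, whose log scale is dsize.width / ln(maxRadius)): a source point at radius rad maps to destination column M * ln(rad). With M = 300 the circle would land at column 300 * ln(300) ≈ 1711, far outside the 605-pixel-wide image above, which is why only the center survives; M = 50 puts it at ≈ 285, inside the image.

import cv2
import numpy as np

# reusing r = 300, SOME_EXTRA_SPACE = 5 and combinedImg from the snippet above:
# the circle of radius r stays visible only while M * ln(r) <= destination width
w = 2 * r + SOME_EXTRA_SPACE   # 605 for r = 300
M = w / np.log(r)              # ~106 here; M = 50 also fits, M = 300 does not
polar = cv2.logPolar(combinedImg, (r, r), M, cv2.WARP_FILL_OUTLIERS)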

OpenCV: detect flawed rectangle

Currently I'm working on a project where I try to find the corners of a rectangle's surface in a photo using OpenCV (Python, Java or C++).
I've selected the desired surface by filtering on color; then I got a mask and passed it to cv2.findContours:
cnts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
if len(approx) == 4:
    cv2.drawContours(mask, [approx], -1, (255, 0, 0), 2)
This gives me an inaccurate result:
Using cv2.HoughLines I've managed to get 4 straight lines that accurately describe the surface. Their intersections are exactly what I need:
edged = cv2.Canny(mask, 10, 200)
hLines = cv2.HoughLines(edged, 2, np.pi / 180, 200)
lines = []
for rho, theta in hLines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(mask, (x1, y1), (x2, y2), (255, 0, 0), 2)
    lines.append([[x1, y1], [x2, y2]])
The question is: is it possible to somehow tweak findContours?
Another solution would be to find coordinates of intersections. Any clues for this approach are welcome :)
Can anybody give me a hint how to solve this problem?
Finding the intersections is not as trivial a problem as it seems, but before the intersection points can be found, the following problems should be considered:
The most important thing is to choose the right parameters for the HoughLines function, since it can return anywhere from zero to an infinite number of lines (we need exactly 4).
Since we do not know in what order these lines come, they need to be compared with each other pairwise.
Because of the perspective, parallel lines are no longer parallel, so each line will have a point of intersection with the others. A simple solution would be to filter out the coordinates located outside the photo. But it may happen that an undesirable intersection falls within the photo.
The coordinates should be sorted. Depending on the task, this can be done in different ways.
cv2.HoughLines returns an array with the values of rho and theta for each line.
Now the problem becomes a system of equations for each pair of lines:
def intersections(edged):
    # Height and width of a photo with a contour obtained by Canny
    h, w = edged.shape
    hl = cv2.HoughLines(edged, 2, np.pi / 180, 190)[0]
    # Number of lines. If n != 4, the parameters should be tuned
    n = hl.shape[0]
    # Matrix with the values of cos(theta) and sin(theta) for each line
    T = np.zeros((n, 2), dtype=np.float32)
    # Vector with the values of rho
    R = np.zeros((n), dtype=np.float32)
    T[:, 0] = np.cos(hl[:, 1])
    T[:, 1] = np.sin(hl[:, 1])
    R = hl[:, 0]
    # Number of pairwise combinations of all lines
    c = n * (n - 1) // 2
    # Matrix with the obtained intersections (x, y)
    XY = np.zeros((c, 2))
    # Finding intersections between all pairs of lines
    k = 0
    for i in range(n):
        for j in range(i + 1, n):
            XY[k, :] = np.linalg.inv(T[[i, j], :]).dot(R[[i, j]])
            k += 1
    # Filter out the coordinates outside the photo
    XY = XY[(XY[:, 0] > 0) & (XY[:, 0] <= w) & (XY[:, 1] > 0) & (XY[:, 1] <= h)]
    # XY = order_points(XY) # obtained points should be sorted
    return XY
Here is the result:
It is possible to:
select the longest contour
break it into segments and group them by gradient
fit lines to the largest four groups
find the intersection points
But then, the Hough transform does nearly the same thing. Is there any particular reason for not using it?
Intersection points of lines are very easy to calculate; a high-school coordinate geometry lesson provides the algorithm, as in the sketch below.
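For completeness, a small sketch of that high-school calculation for two lines each given by a pair of points (the helper name is mine; it builds ax + by = c for each line and solves the 2x2 system with Cramer's rule):

def intersect(p1, p2, p3, p4):
    # Intersection of the line through p1, p2 and the line through p3, p4.
    # Each line a*x + b*y = c is built from its two points; parallel lines
    # (determinant near zero) have no single intersection point.
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = p1, p2, p3, p4
    a1, b1 = y2 - y1, x1 - x2
    c1 = a1 * x1 + b1 * y1
    a2, b2 = y4 - y3, x3 - x4
    c2 = a2 * x3 + b2 * y3
    det = a1 * b2 - a2 * b1
    if abs(det) < 1e-9:
        return None
    return ((c1 * b2 - c2 * b1) / det, (a1 * c2 - a2 * c1) / det)

For example, intersect((0, 0), (2, 2), (0, 2), (2, 0)) returns (1.0, 1.0).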

Choosing Lines From Hough Lines

I'm using Hough lines to do corner detection on this image. I plan to find the intersections of the lines as the corners.
This is the image.
Unfortunately, Hough returns lots of lines for each line I expect.
How do I tune the Hough lines so there are only four lines, each corresponding to an actual line in the image?
OpenCV's Hough transform really could use some better non-maximum suppression. Without it, you get this phenomenon of duplicate lines. Unfortunately I know of no easy way to tune that, besides reimplementing your own Hough transform. (Which is a valid option: the Hough transform is fairly simple.)
Fortunately it is easy to fix in post-processing:
For the non-probabilistic Hough transform, OpenCV will return the lines in order of their confidence, with the strongest line first. So simply take the first four lines that differ strongly in either rho or theta.
So, add the first line found by HoughLines into a new list: strong_lines
for each line found by HoughLines:
    test whether its rho and theta are close to any strong_line (e.g. rho is within 50 pixels and theta is within 10° of the other line)
    if not, put it into the list of strong_lines
    if you have found 4 strong_lines, break
I implemented the approach described by HugoRune and thought I would share my code as an example of how I implemented this. I used a tolerance of 5 degrees and 10 pixels.
strong_lines = np.zeros([4, 1, 2])

# note: minLineLength and maxLineGap belong to cv2.HoughLinesP, not cv2.HoughLines,
# so they are not passed here
lines = cv2.HoughLines(edged, 1, np.pi / 180, 10)

n2 = 0
for n1 in range(0, len(lines)):
    for rho, theta in lines[n1]:
        if n1 == 0:
            strong_lines[n2] = lines[n1]
            n2 = n2 + 1
        else:
            if rho < 0:
                rho *= -1
                theta -= np.pi
            closeness_rho = np.isclose(rho, strong_lines[0:n2, 0, 0], atol=10)
            closeness_theta = np.isclose(theta, strong_lines[0:n2, 0, 1], atol=np.pi / 36)
            closeness = np.all([closeness_rho, closeness_theta], axis=0)
            if not any(closeness) and n2 < 4:
                strong_lines[n2] = lines[n1]
                n2 = n2 + 1
EDIT: The code was updated to reflect the comment regarding a negative rho value
Collect the intersections of all lines:
for (int i = 0; i < lines.size(); i++)
{
    for (int j = i + 1; j < lines.size(); j++)
    {
        cv::Point2f pt = computeIntersectionOfTwoLine(lines[i], lines[j]);
        if (pt.x >= 0 && pt.y >= 0 && pt.x < image.cols && pt.y < image.rows)
        {
            corners.push_back(pt);
        }
    }
}
You can google the algorithm to find the intersection of two lines.
Once you collect all the intersection points, you can easily determine the min and max, which will give you the top-left and bottom-right points. From these two points you can easily get the rectangle.
Refer to these two links: Sorting 2d point array to find out four corners and http://opencv-code.com/tutorials/automatic-perspective-correction-for-quadrilateral-objects/
Here is a complete solution written in Python 2.7.x using OpenCV 2.4.
It is based on ideas from this thread.
Method: detect all lines. Assume that the Hough function returns the highest-ranked lines first. Filter the lines to keep those that are separated by some minimum distance and/or angle.
Image of all Hough lines:
https://i.ibb.co/t3JFncJ/all-lines.jpg
Filtered lines:
https://i.ibb.co/yQLNxXT/filtered-lines.jpg
Code:
http://codepad.org/J57oVIzs
"""
Detect the best 4 lines for a rounded rectangle.
"""
import numpy as np
import cv2
input_image = cv2.imread("image.jpg")
def drawLines(img, lines):
"""
Draw lines on an image
"""
for line in lines:
for rho,theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img, (x1,y1), (x2,y2), (0,0,255), 1)
input_image_grey = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
edged = input_image_grey
rho = 1 # 1 pixel
theta = 1.0*0.017 # 1 degree
threshold = 100
lines = cv2.HoughLines(edged, rho, theta, threshold)
# Fix negative angles
num_lines = lines.shape[1]
for i in range(0, num_lines):
line = lines[0,i,:]
rho = line[0]
theta = line[1]
if rho < 0:
rho *= -1.0
theta -= np.pi
line[0] = rho
line[1] = theta
# Draw all Hough lines in red
img_with_all_lines = np.copy(input_image)
drawLines(img_with_all_lines, lines)
cv2.imshow("Hough lines", img_with_all_lines)
cv2.waitKey()
cv2.imwrite("all_lines.jpg", img_with_all_lines)
# Find 4 lines with unique rho & theta:
num_lines_to_find = 4
filtered_lines = np.zeros([1, num_lines_to_find, 2])
if lines.shape[1] < num_lines_to_find:
print("ERROR: Not enough lines detected!")
# Save the first line
filtered_lines[0,0,:] = lines[0,0,:]
print("Line 1: rho = %.1f theta = %.3f" % (filtered_lines[0,0,0], filtered_lines[0,0,1]))
idx = 1 # Index to store the next unique line
# Initialize all rows the same
for i in range(1,num_lines_to_find):
filtered_lines[0,i,:] = filtered_lines[0,0,:]
# Filter the lines
num_lines = lines.shape[1]
for i in range(0, num_lines):
line = lines[0,i,:]
rho = line[0]
theta = line[1]
# For this line, check which of the existing 4 it is similar to.
closeness_rho = np.isclose(rho, filtered_lines[0,:,0], atol = 10.0) # 10 pixels
closeness_theta = np.isclose(theta, filtered_lines[0,:,1], atol = np.pi/36.0) # 10 degrees
similar_rho = np.any(closeness_rho)
similar_theta = np.any(closeness_theta)
similar = (similar_rho and similar_theta)
if not similar:
print("Found a unique line: %d rho = %.1f theta = %.3f" % (i, rho, theta))
filtered_lines[0,idx,:] = lines[0,i,:]
idx += 1
if idx >= num_lines_to_find:
print("Found %d unique lines!" % (num_lines_to_find))
break
# Draw filtered lines
img_with_filtered_lines = np.copy(input_image)
drawLines(img_with_filtered_lines, filtered_lines)
cv2.imshow("Filtered lines", img_with_filtered_lines)
cv2.waitKey()
cv2.imwrite("filtered_lines.jpg", img_with_filtered_lines)
The above approach (proposed by @HugoRune and implemented by @Onamission21) is correct but has a little bug: cv2.HoughLines may return a negative rho with theta up to pi. Notice for example that the line (r0, 0) is very close to the line (-r0, pi - epsilon), but they would not be found by the above closeness test.
I simply treated negative rhos by applying rho *= -1, theta -= pi before the closeness calculations.
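To see why this is safe, write the line as x*cos(theta) + y*sin(theta) = rho: since cos(theta - pi) = -cos(theta) and sin(theta - pi) = -sin(theta), the pair (-rho, theta - pi) satisfies exactly the same equation as (rho, theta), so the normalization maps both representations of a line onto a single one before comparing.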
