Is there a way to extract a single line from the Hough Transform in OpenCV? - opencv

I am using the Hough Transform to detect straight lines in an image. The transform is applied after Canny edge detection and I am able to get the lines; however, I need to display only the left-most line. Here is the relevant section of code:
cv::Mat Final, Canned;
std::vector<cv::Vec2f> lines;   // holds the (rho, theta) pairs
HoughLines(Canned, lines, 1, CV_PI / 180, 150, 0, 0);
for (size_t i = 0; i < lines.size(); i++)
{
    float rho = lines[i][0], theta = lines[i][1];
    cv::Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a * rho, y0 = b * rho;
    pt1.x = cvRound(x0 + 1000 * (-b));
    pt1.y = cvRound(y0 + 1000 * (a));
    pt2.x = cvRound(x0 - 1000 * (-b));
    pt2.y = cvRound(y0 - 1000 * (a));
    line(Final, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
}
imshow("detected lines", Final);
I am attaching the image generated after applying the Hough Transform.
I need to display only the left-most line.
Here are the elements of the lines vector:
[-386, 3.12414]
[-332, 3.08923]
[-381, 3.12414]
[-337, 3.10669]
[386, 0]
[-323, 3.05433]
[-339, 3.10669]
[-335, 3.08923]
[-330, 3.07178]
[383, 0]
[-317, 3.08923]

You can iterate through the lines and find the left-most line by averaging the x-coordinates of the endpoints of each line.
# Iterate through lines and find the x-position
xPositions = []
for line in lines:
    cdst, pt1, pt2 = draw_line(line, cdst, (0,0,255))
    xPositions.append((pt1[0]+pt2[0])/2)
Here is the output image:
Here is the source image:
Here is the complete code:
import math
import cv2 as cv
import numpy as np

src = cv.imread('/home/stephen/Desktop/lines.png', cv.IMREAD_GRAYSCALE)
dst = cv.Canny(src, 50, 200, None, 3)

# Copy edges to the images that will display the results in BGR
cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
cdstP = np.copy(cdst)

# Find lines
lines = cv.HoughLines(dst, 1, np.pi / 180, 100, None, 0, 0)

# Function that draws a line given in (rho, theta) form
def draw_line(line, img, color):
    rho = line[0][0]
    theta = line[0][1]
    a = math.cos(theta)
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
    pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
    cv.line(img, pt1, pt2, color, 3, cv.LINE_AA)   # draw on the image passed in
    return img, pt1, pt2

# Iterate through lines and find the x-position
xPositions = []
for line in lines:
    cdst, pt1, pt2 = draw_line(line, cdst, (0,0,255))
    xPositions.append((pt1[0]+pt2[0])/2)

# Find the left most line
leftMost = xPositions.index(min(xPositions))

# Draw only the left most line
cdst, pt1, pt2 = draw_line(lines[leftMost], cdst, (0,255,0))

cv.imshow('lines', cdst)
cv.waitKey()
cv.destroyAllWindows()
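If you would rather pick the left-most line without drawing every candidate first, you can also compare the lines directly in (rho, theta) space: for x*cos(theta) + y*sin(theta) = rho, the x-coordinate at a given row y is x = (rho - y*sin(theta)) / cos(theta). Here is a minimal sketch of that idea, assuming the same lines array and src image from the code above; the helper name leftmost_line is mine:

import numpy as np

def leftmost_line(lines, y_mid):
    """Pick the line whose x-coordinate at row y_mid is smallest.
    Assumes `lines` is the array returned by cv.HoughLines (shape N x 1 x 2)."""
    best, best_x = None, np.inf
    for line in lines:
        rho, theta = line[0]
        c, s = np.cos(theta), np.sin(theta)
        if abs(c) < 1e-6:                      # skip (near-)horizontal lines
            continue
        x_at_mid = (rho - y_mid * s) / c       # from x*cos(theta) + y*sin(theta) = rho
        if x_at_mid < best_x:
            best, best_x = line, x_at_mid
    return best

# hypothetical usage with the code above:
# left = leftmost_line(lines, src.shape[0] / 2)
# draw_line(left, cdst, (0, 255, 0))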

Related

I want to change this code from tracking cars to tracking a circle within a circle.

This is the code, but it didn't work with the sign:
import cv2
import numpy as np
from object_detection import ObjectDetection
import math

# Initialize Object Detection
od = ObjectDetection()

cap = cv2.VideoCapture("Sign black.mp4")

# Initialize count
count = 0
center_points_prev_frame = []

tracking_objects = {}
track_id = 0

while True:
    ret, frame = cap.read()
    count += 1
    if not ret:
        break

    # Point current frame
    center_points_cur_frame = []

    # Detect objects on frame
    (class_ids, scores, boxes) = od.detect(frame)
    for box in boxes:
        (x, y, w, h) = box
        cx = int((x + x + w) / 2)
        cy = int((y + y + h) / 2)
        center_points_cur_frame.append((cx, cy))
        #print("FRAME N* ", count, " ", x, y, w, h)
        #cv2.circle(frame, (cx,cy), 5, (0, 0, 255), -1)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Only at the beginning we compare previous and current frame
    if count <= 2:
        for pt in center_points_cur_frame:
            for pt2 in center_points_prev_frame:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                if distance < 20:
                    tracking_objects[track_id] = pt
                    track_id += 1
    else:
        tracking_objects_copy = tracking_objects.copy()
        for object_id, pt2 in tracking_objects_copy.items():
            object_exists = False
            for pt in center_points_cur_frame:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                # Update object position
                if distance < 20:
                    tracking_objects[object_id] = pt
                    object_exists = True
                    continue
            # Remove IDs lost
            if not object_exists:
                tracking_objects.pop(object_id)

    for object_id, pt in tracking_objects.items():
        cv2.circle(frame, pt, 5, (0, 0, 255), -1)
        cv2.putText(frame, str(object_id), (pt[0], pt[1] - 7), 0, 1, (0, 0, 255), 2)

    print("Tracking objects")
    print(tracking_objects)
    # cv2.circle(frame, pt, 5, (0, 0, 255), -1)

    print("CUR FRAME LEFT PTS")
    print(center_points_cur_frame)
    #print("PREV FRAME")
    #print(center_points_prev_frame)

    cv2.imshow("Frame", frame)

    # Make a copy of the points
    center_points_prev_frame = center_points_cur_frame.copy()

    Key = cv2.waitKey(1)
    if Key == 27:
        break

cap.release()
cv2.destroyAllWindows()
And this is the ObjectDetection class:
import cv2
import numpy as np


class ObjectDetection:
    def __init__(self, weights_path="dnn_model/yolov4.weights", cfg_path="dnn_model/yolov4.cfg"):
        print("Loading Object Detection")
        print("Running opencv dnn with YOLOv4")
        self.nmsThreshold = 0.4
        self.confThreshold = 0.5
        self.image_size = 608

        # Load Network
        net = cv2.dnn.readNet(weights_path, cfg_path)

        # Enable GPU CUDA
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
        self.model = cv2.dnn_DetectionModel(net)

        self.classes = []
        self.load_class_names()
        self.colors = np.random.uniform(0, 255, size=(80, 3))

        self.model.setInputParams(size=(self.image_size, self.image_size), scale=1/255)

    def load_class_names(self, classes_path="dnn_model/classes.txt"):
        with open(classes_path, "r") as file_object:
            for class_name in file_object.readlines():
                class_name = class_name.strip()
                self.classes.append(class_name)

        self.colors = np.random.uniform(0, 255, size=(80, 3))
        return self.classes

    def detect(self, frame):
        return self.model.detect(frame, nmsThreshold=self.nmsThreshold, confThreshold=self.confThreshold)
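Since the goal is to track a circle rather than cars, one option is to drop the YOLO detector and feed circle centres from cv2.HoughCircles into the same distance-based tracking loop above. This is only a rough sketch; the function name and all HoughCircles parameters below are assumptions and would need tuning for "Sign black.mp4":

import cv2
import numpy as np

def detect_circles(frame, dp=1.2, min_dist=30):
    """Return a list of (cx, cy) circle centres found in the frame.
    All HoughCircles parameters here are guesses and need tuning."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)              # reduce noise before HoughCircles
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp, min_dist,
                               param1=100, param2=50, minRadius=10, maxRadius=0)
    centers = []
    if circles is not None:
        for cx, cy, r in np.round(circles[0]).astype(int):
            centers.append((int(cx), int(cy)))
    return centers

# In the tracking loop, these centres could replace the YOLO-based list:
# center_points_cur_frame = detect_circles(frame)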

Real-time OCR video streaming is lagging when running in real time and implementing text recognition

I'm done installing packages and debugging it. This code is from https://github.com/LaggyHammer/real-time-OCR. When I run it, my camera or the video stream lags or is delayed on my end.
Here's the code I executed through the CLI:
# coding: utf-8
# =====================================================================
# Filename: video_text_detection.py
#
# py Ver: python 3.6 or later
#
# Description: Recognizes regions of text in a given video or through the webcam feed
#
# Usage: python real_time_ocr.py --east frozen_east_text_detection.pb
#        or
#        python real_time_ocr.py --east frozen_east_text_detection.pb --video test.avi
#
# Note: Requires opencv 3.4.2 or later
#       For more in-script documentation, look at video_text_detection_modular.py
#
# Author: Ankit Saxena (ankch24#gmail.com)
# =====================================================================

from imutils.video import VideoStream
from imutils.video import FPS
from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import imutils
import time
import cv2
import pytesseract

# setting up tesseract path
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'


def box_extractor(scores, geometry, min_confidence):
    num_rows, num_cols = scores.shape[2:4]
    rectangles = []
    confidences = []

    for y in range(num_rows):
        scores_data = scores[0, 0, y]
        x_data0 = geometry[0, 0, y]
        x_data1 = geometry[0, 1, y]
        x_data2 = geometry[0, 2, y]
        x_data3 = geometry[0, 3, y]
        angles_data = geometry[0, 4, y]

        for x in range(num_cols):
            if scores_data[x] < min_confidence:
                continue

            offset_x, offset_y = x * 4.0, y * 4.0

            angle = angles_data[x]
            cos = np.cos(angle)
            sin = np.sin(angle)

            box_h = x_data0[x] + x_data2[x]
            box_w = x_data1[x] + x_data3[x]

            end_x = int(offset_x + (cos * x_data1[x]) + (sin * x_data2[x]))
            end_y = int(offset_y + (cos * x_data2[x]) - (sin * x_data1[x]))
            start_x = int(end_x - box_w)
            start_y = int(end_y - box_h)

            rectangles.append((start_x, start_y, end_x, end_y))
            confidences.append(scores_data[x])

    return rectangles, confidences


def get_arguments():
    ap = argparse.ArgumentParser()
    ap.add_argument('-v', '--video', type=str,
                    help='path to optional video file')
    ap.add_argument('-east', '--east', type=str, required=True,
                    help='path to EAST text detection model')
    ap.add_argument('-c', '--min_confidence', type=float, default=0.5,
                    help='minimum confidence to process a region')
    ap.add_argument('-w', '--width', type=int, default=320,
                    help='resized image width (multiple of 32)')
    ap.add_argument('-e', '--height', type=int, default=320,
                    help='resized image height (multiple of 32)')
    ap.add_argument('-p', '--padding', type=float, default=0.0,
                    help='padding on each ROI border')
    arguments = vars(ap.parse_args())

    return arguments


if __name__ == '__main__':

    args = get_arguments()

    w, h = None, None
    new_w, new_h = args['width'], args['height']
    ratio_w, ratio_h = None, None

    layer_names = ['feature_fusion/Conv_7/Sigmoid', 'feature_fusion/concat_3']

    print("[INFO] loading EAST text detector...")
    net = cv2.dnn.readNet(args["east"])

    if not args.get('video', False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(0)
    else:
        vs = cv2.VideoCapture(args['video'])

    fps = FPS().start()

    while True:
        frame = vs.read()
        frame = frame[1] if args.get('video', False) else frame

        if frame is None:
            break

        frame = imutils.resize(frame, width=500)
        orig = frame.copy()
        orig_h, orig_w = orig.shape[:2]

        if w is None or h is None:
            h, w = frame.shape[:2]
            ratio_w = w / float(new_w)
            ratio_h = h / float(new_h)

        frame = cv2.resize(frame, (new_w, new_h))

        blob = cv2.dnn.blobFromImage(frame, 1.0, (new_w, new_h), (123.68, 116.78, 103.94),
                                     swapRB=True, crop=False)
        net.setInput(blob)
        scores, geometry = net.forward(layer_names)

        rectangles, confidences = box_extractor(scores, geometry, min_confidence=args['min_confidence'])
        boxes = non_max_suppression(np.array(rectangles), probs=confidences)

        for (start_x, start_y, end_x, end_y) in boxes:
            start_x = int(start_x * ratio_w)
            start_y = int(start_y * ratio_h)
            end_x = int(end_x * ratio_w)
            end_y = int(end_y * ratio_h)

            dx = int((end_x - start_x) * args['padding'])
            dy = int((end_y - start_y) * args['padding'])

            start_x = max(0, start_x - dx)
            start_y = max(0, start_y - dy)
            end_x = min(orig_w, end_x + (dx * 2))
            end_y = min(orig_h, end_y + (dy * 2))

            # ROI to be recognized
            roi = orig[start_y:end_y, start_x:end_x]

            # recognizing text
            config = '-l eng --oem 1 --psm 7'
            text = pytesseract.image_to_string(roi, config=config)

            cv2.rectangle(orig, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)
            cv2.putText(orig, text, (start_x, start_y - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)

        fps.update()

        cv2.imshow("Detection", orig)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    fps.stop()
    print(f"[INFO] elapsed time {round(fps.elapsed(), 2)}")
    print(f"[INFO] approx. FPS : {round(fps.fps(), 2)}")

    if not args.get('video', False):
        vs.stop()
    else:
        vs.release()

    cv2.destroyAllWindows()
Is there an easy way to make the video streaming smoother with this code?
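One common way to reduce the lag (not something the original script does) is to run the expensive EAST + Tesseract step only on every Nth frame and reuse the last results in between. The sketch below only shows that frame-skipping pattern; PROCESS_EVERY and the process() placeholder are hypothetical, and the real detection/OCR code from the loop above would go inside process():

import cv2

PROCESS_EVERY = 5          # hypothetical: run detection/OCR on every 5th frame only

def process(frame):
    # placeholder for the EAST detection + Tesseract OCR step from the main loop
    return []

cap = cv2.VideoCapture(0)
frame_idx, last_results = 0, []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame_idx += 1
    if frame_idx % PROCESS_EVERY == 0:     # only pay the OCR cost occasionally
        last_results = process(frame)
    # reuse last_results here to draw boxes/text on every displayed frame
    cv2.imshow("Detection", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()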

Detecting multiple lines in OpenCV HoughLines

I'm using OpenCV 4.4 and running the following code to detect the lines of a grid. When I display the image it always detects only one line, as shown in the screenshot. How can I detect all vertical lines in the grid?
grid = cv2.imread('images/grid.jpeg')
grayscale = cv2.cvtColor(grid, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(grayscale, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi/180, 100)

for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(grid, (x1, y1), (x2, y2), (255, 0, 0), 2)

cv2.imshow("Lines", grid)
cv2.waitKey(0)
cv2.destroyAllWindows()
Original Image:
You can use the FastLineDetector algorithm.
Find the edges of your image, as @Micka suggested:
img = cv2.imread("lines.png")
img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_cny = cv2.Canny(img_gry, 50, 200)
Result:
To detect the vertical edges, the difference between the x-coordinates should be close to 0, since only the y-coordinates are changing:
if abs(x1 - x2) < 3:
    cv2.line(img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 0, 255), thickness=3)
Result:
Code:
import cv2

img = cv2.imread("lines.png")
img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_cny = cv2.Canny(img_gry, 50, 200)

lns = cv2.ximgproc.createFastLineDetector().detect(img_cny)

for ln in lns:
    x1 = int(ln[0][0])
    y1 = int(ln[0][1])
    x2 = int(ln[0][2])
    y2 = int(ln[0][3])

    if abs(x1 - x2) < 3:
        cv2.line(img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 0, 255), thickness=3)

cv2.imshow("lns", img)
cv2.waitKey(0)

How to read an analogue gauge using OpenCV

I'm interested in trying to read an analog gauge using a Raspberry Pi and OpenCV. I've only really messed with face detection in OpenCV, so I don't even know where to begin. Any ideas, starting points?
You can detect circles with the HoughCircles method and lines with the HoughLinesP method of the OpenCV library in Python. After detecting these, you can work out the value of the gauge from the line's position via trigonometry.
You can see the sample code in Python below. It basically does the following:
Read the image with the imread method.
Turn it into grayscale with cvtColor.
Find the circles' center x, y coordinates and radius with HoughCircles; this method has some parameters that can be tweaked.
Detect the lines with the HoughLinesP method; again, the parameters should be tweaked.
Calculate the value, considering the max value, the min value on the gauge and the angle interval of the gauge (see the short mapping sketch below).
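That last step is just a linear mapping from the needle angle onto the gauge's value range; the full code below does exactly this at the end of get_current_value, and in isolation it is:

def angle_to_value(angle, min_angle, max_angle, min_value, max_value):
    # map [min_angle, max_angle] linearly onto [min_value, max_value]
    return (angle - min_angle) * (max_value - min_value) / (max_angle - min_angle) + min_value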
Reference: https://github.com/intel-iot-devkit/python-cv-samples/tree/master/examples/analog-gauge-reader
Hope this helps.
CODE:
import os
import cv2
import numpy


def getScriptDir():
    currentFile = __file__  # May be 'my_script', or './my_script' or
    realPath = os.path.realpath(currentFile)  # /home/user/test/my_script.py
    dirPath = os.path.dirname(realPath)
    return dirPath


def getUserRealGaugeDetails():
    min_angle = input('Min angle: ')   # the lowest possible angle
    max_angle = input('Max angle: ')   # highest possible angle
    min_value = input('Min value: ')   # usually zero
    max_value = input('Max value: ')   # maximum reading of the gauge
    units = input('Enter units: ')
    return min_angle, max_angle, min_value, max_value, units


def setStaticUserRealGaugeDetails():
    min_angle = 5     # input('Min angle (lowest possible angle of dial) - in degrees: ') #the lowest possible angle
    max_angle = 355   # input('Max angle (highest possible angle) - in degrees: ') #highest possible angle
    min_value = -20   # input('Min value: ') #usually zero
    max_value = 120   # input('Max value: ') #maximum reading of the gauge
    units = 'b'       # input('Enter units: ')
    return min_angle, max_angle, min_value, max_value, units


def getImage():
    dirPath = getScriptDir()
    dirPath += "/images/1.jpg"
    return cv2.imread(dirPath)
def distance2Points(x1, y1, x2, y2):
    # print np.sqrt((x2-x1)^2+(y2-y1)^2)
    return numpy.sqrt((x2 - x1)**2 + (y2 - y1)**2)


def averageCircle(circles, b):
    avg_x = 0
    avg_y = 0
    avg_r = 0
    for i in range(b):
        # optional - average for multiple circles (can happen when a gauge is at a slight angle)
        avg_x = avg_x + circles[0][i][0]
        avg_y = avg_y + circles[0][i][1]
        avg_r = avg_r + circles[0][i][2]
    avg_x = int(avg_x/(b))
    avg_y = int(avg_y/(b))
    avg_r = int(avg_r/(b))
    return avg_x, avg_y, avg_r


# return the center and radius of the circle
def getCircleAndCustomize(image):
    height, width = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to gray
    # gray = cv2.GaussianBlur(gray, (5, 5), 0)
    # gray = cv2.medianBlur(gray, 5)
    # cv2.imwrite('C:/Users/okarademirci/Desktop/analog-gauge-reader/images/gauge-gray-2.jpg', gray)

    # detect circles
    # restricting the search from 35-48% of the possible radii gives fairly good results across different samples. Remember that
    # these are pixel values which correspond to the possible radii search range.
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, numpy.array([]), 100, 50, int(height*0.35), int(height*0.48))
    # coordinates and radius
    a, b, c = circles.shape
    x, y, r = averageCircle(circles, b)
    return x, y, r
def get_current_value(img, min_angle, max_angle, min_value, max_value, x, y, r):
    gray2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Set threshold and maxValue
    thresh = 175
    maxValue = 255

    # for testing purposes, found cv2.THRESH_BINARY_INV to perform the best
    # th, dst1 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY);
    # th, dst2 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY_INV);
    # th, dst3 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TRUNC);
    # th, dst4 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TOZERO);
    # th, dst5 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TOZERO_INV);
    # cv2.imwrite('gauge-%s-dst1.%s' % (gauge_number, file_type), dst1)
    # cv2.imwrite('gauge-%s-dst2.%s' % (gauge_number, file_type), dst2)
    # cv2.imwrite('gauge-%s-dst3.%s' % (gauge_number, file_type), dst3)
    # cv2.imwrite('gauge-%s-dst4.%s' % (gauge_number, file_type), dst4)
    # cv2.imwrite('gauge-%s-dst5.%s' % (gauge_number, file_type), dst5)

    # apply thresholding which helps for finding lines
    th, dst2 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY_INV)

    # found Hough Lines generally performs better without Canny / blurring, though there were a couple exceptions where it would only work with Canny / blurring
    # dst2 = cv2.medianBlur(dst2, 5)
    # dst2 = cv2.Canny(dst2, 50, 150)
    # dst2 = cv2.GaussianBlur(dst2, (5, 5), 0)

    # for testing, show image after thresholding
    dirPath = getScriptDir() + '/images/afterTreshold.jpg'
    cv2.imwrite(dirPath, dst2)

    # find lines
    minLineLength = 10
    maxLineGap = 0
    # rho is set to 3 to detect more lines, easier to get more then filter them out later
    lines = cv2.HoughLinesP(image=dst2, rho=3, theta=numpy.pi / 180, threshold=100, minLineLength=minLineLength, maxLineGap=0)

    # for testing purposes, show all found lines
    # for i in range(0, len(lines)):
    #     for x1, y1, x2, y2 in lines[i]:
    #         cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    #         cv2.imwrite('gauge-%s-lines-test.%s' %(gauge_number, file_type), img)

    # remove all lines outside a given radius
    final_line_list = []
    # print "radius: %s" %r

    diff1LowerBound = 0.15  # diff1LowerBound and diff1UpperBound determine how close the line should be from the center
    diff1UpperBound = 0.25
    diff2LowerBound = 0.5   # diff2LowerBound and diff2UpperBound determine how close the other point of the line should be to the outside of the gauge
    diff2UpperBound = 1.0

    for i in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            diff1 = distance2Points(x, y, x1, y1)  # x, y is center of circle
            diff2 = distance2Points(x, y, x2, y2)  # x, y is center of circle
            # set diff1 to be the smaller (closest to the center) of the two), makes the math easier
            if (diff1 > diff2):
                temp = diff1
                diff1 = diff2
                diff2 = temp
            # check if line is within an acceptable range
            if (((diff1 < diff1UpperBound*r) and (diff1 > diff1LowerBound*r) and (diff2 < diff2UpperBound*r)) and (diff2 > diff2LowerBound*r)):
                line_length = distance2Points(x1, y1, x2, y2)
                # add to final list
                final_line_list.append([x1, y1, x2, y2])

    # testing only, show all lines after filtering
    # for i in range(0,len(final_line_list)):
    #     x1 = final_line_list[i][0]
    #     y1 = final_line_list[i][1]
    #     x2 = final_line_list[i][2]
    #     y2 = final_line_list[i][3]
    #     cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # assumes the first line is the best one
    x1 = final_line_list[0][0]
    y1 = final_line_list[0][1]
    x2 = final_line_list[0][2]
    y2 = final_line_list[0][3]
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # for testing purposes, show the line overlayed on the original image
    # cv2.imwrite('gauge-1-test.jpg', img)
    # cv2.imwrite('C:/Users/okarademirci/Desktop/analog-gauge-reader/images/gauge-%s-lines-2.%s' % (gauge_number, file_type), img)

    # find the farthest point from the center to be what is used to determine the angle
    dist_pt_0 = distance2Points(x, y, x1, y1)
    dist_pt_1 = distance2Points(x, y, x2, y2)
    if (dist_pt_0 > dist_pt_1):
        x_angle = x1 - x
        y_angle = y - y1
    else:
        x_angle = x2 - x
        y_angle = y - y2

    # take the arc tan of y/x to find the angle
    res = numpy.arctan(numpy.divide(float(y_angle), float(x_angle)))
    # np.rad2deg(res) #converts to degrees
    # print x_angle
    # print y_angle
    # print res
    # print np.rad2deg(res)

    # these were determined by trial and error
    res = numpy.rad2deg(res)
    if x_angle > 0 and y_angle > 0:  # in quadrant I
        final_angle = 270 - res
    if x_angle < 0 and y_angle > 0:  # in quadrant II
        final_angle = 90 - res
    if x_angle < 0 and y_angle < 0:  # in quadrant III
        final_angle = 90 - res
    if x_angle > 0 and y_angle < 0:  # in quadrant IV
        final_angle = 270 - res
    # print final_angle

    old_min = float(min_angle)
    old_max = float(max_angle)
    new_min = float(min_value)
    new_max = float(max_value)
    old_value = final_angle

    old_range = (old_max - old_min)
    new_range = (new_max - new_min)
    new_value = (((old_value - old_min) * new_range) / old_range) + new_min

    return new_value
def main():
    # 1) get the image from the directory
    image = getImage()
    min_angle, max_angle, min_value, max_value, units = setStaticUserRealGaugeDetails()
    # 2) convert the image to gray
    # 3) find the circle in the image with customization
    x, y, r = getCircleAndCustomize(image)
    # 4) find the line in the circle
    # 5) find the value in the range of the gauge
    newValue = get_current_value(image, min_angle, max_angle, min_value, max_value, x, y, r)
    print(newValue)


if __name__ == '__main__':
    main()

Hough Line: Detect ticks on the image

I am trying to detect the ticks on the following image using the Hough line transform:
I am using the following simple OpenCV code:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite('original.jpg', img)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi/180, 200)

for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
I am getting the following output:
I wanted to detect the ticks, but instead it detected the lines. How can I solve it? Any help or suggestion will be appreciated.
I'm not sure what you mean by "ticks"; I guess the green and red lines?
Using C++ API and HoughLinesP:
function call:
cv::HoughLinesP(edges, lines, 1, CV_PI/720.0, 30, 10 /* min-length */, 1 /* max gap */);
canny:
cv::Mat edges;
cv::Canny(gray, edges, 50, 150, 3);
I get this result for canny:
The edges look like this:
and that's why the result looks like this:
But using edges from a simple threshold instead:
edges = gray > 50;
edge image:
result:
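For completeness, here is a rough Python equivalent of the threshold + HoughLinesP variant (the file name is a placeholder and the parameter values are carried over from the C++ call above, so they may still need tuning):

import cv2
import numpy as np

img = cv2.imread('ticks.png')                    # hypothetical filename
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = np.uint8(gray > 50) * 255                # threshold instead of Canny
lines = cv2.HoughLinesP(edges, 1, np.pi / 720.0, 30,
                        minLineLength=10, maxLineGap=1)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)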
