I don't know why this vector field draws blue arrows on the dots the field is built around. I assume it's because of the function generator, but I don't understand it well enough to see why it would generate them or what they mean in the vector field. The documentation on ArrowVectorField did not address this issue.
The picture shows the small blue arrows on the center dot and on the other three attractor states.
# function generator
# https://github.com/3b1b/videos/blob/436842137ee6b89cbb2aa10fa2d4c2e12361dac8/_2018/div_curl.py#L100
def get_force_field_func(*point_strength_pairs, **kwargs):
    radius = kwargs.get("radius", 0.5)

    def func(point):
        result = np.array(ORIGIN)
        for center, strength in point_strength_pairs:
            to_center = center - point
            norm = np.linalg.norm(to_center)
            if norm == 0:
                continue
            elif norm < radius:
                to_center /= radius**3
            elif norm >= radius:
                to_center /= norm**3
            to_center *= -strength
            result += to_center
        return result
    return func
class Test(Scene):
    def construct(self):
        progenitor = Dot()
        self.add(progenitor)
        attractor1 = Dot().move_to(RIGHT * 2 + UP * 3)
        attractor2 = Dot().move_to(UP * 2 + LEFT * 4)
        attractor3 = Dot().move_to(DOWN * 2 + RIGHT * 4)
        constrained_func = get_force_field_func(
            (progenitor.get_center(), 1),
            (attractor1.get_center(), -0.5),
            (attractor2.get_center(), -2),
            (attractor3.get_center(), -1)
        )
        constrained_field = ArrowVectorField(constrained_func)
        self.add(constrained_field)
Look at the return value of your generated function constrained_func at these points, for example at the origin:
>>> constrained_func(np.array([0, 0, 0]))
array([-0.02338674,  0.05436261,  0.        ])
This is a small vector pointing to the top left -- which is exactly what the small blue arrow at the origin is.
If you'd like to get rid of that, you could replace continue with return np.array([0., 0., 0.]) -- but that might not be in line with the concept modeled by the generator function.
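For illustration, here is a sketch of func with that change applied. Note that returning a zero vector as soon as the point coincides with any center is a design choice, not the original semantics: the contributions of the other attractors at that exact point are discarded.

    def func(point):
        result = np.array(ORIGIN)
        for center, strength in point_strength_pairs:
            to_center = center - point
            norm = np.linalg.norm(to_center)
            if norm == 0:
                # force the field to vanish exactly on a dot, so no arrow is drawn there
                return np.array([0., 0., 0.])
            elif norm < radius:
                to_center /= radius**3
            elif norm >= radius:
                to_center /= norm**3
            to_center *= -strength
            result += to_center
        return result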
Is there a way to just extract the area of interest (the bright square part in the red circle in the original image)? That means I need to get the coordinates of the edges and then mask the image outside those boundaries. I don't know how to do that. Could anyone help? Thanks!
import numpy as np
import matplotlib.pyplot as plt

# define horizontal and vertical Sobel kernels
Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
Gy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
# define kernel convolution function
# with image X and filter F
def convolve(X, F):
    # height and width of the image
    X_height = X.shape[0]
    X_width = X.shape[1]
    # height and width of the filter
    F_height = F.shape[0]
    F_width = F.shape[1]
    H = (F_height - 1) // 2
    W = (F_width - 1) // 2
    # output numpy matrix with height and width
    out = np.zeros((X_height, X_width))
    # iterate over all the pixels of image X
    for i in np.arange(H, X_height - H):
        for j in np.arange(W, X_width - W):
            total = 0
            # iterate over the filter
            for k in np.arange(-H, H + 1):
                for l in np.arange(-W, W + 1):
                    # get the corresponding value from image and filter
                    a = X[i + k, j + l]
                    w = F[H + k, W + l]
                    total += (w * a)
            out[i, j] = total
    # return convolution
    return out
# normalizing the vectors
sob_x = convolve(image, Gx) / 8.0
sob_y = convolve(image, Gy) / 8.0
# calculate the gradient magnitude of the vectors
sob_out = np.sqrt(np.power(sob_x, 2) + np.power(sob_y, 2))
# map values from 0 to 255
sob_out = (sob_out / np.max(sob_out)) * 255
plt.imshow(sob_out, cmap='gray', interpolation='bicubic')
plt.show()
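To actually extract the area of interest, one rough approach is to threshold the gradient magnitude and take the bounding box of the strong edge pixels. This is a sketch, not tuned to your image: the threshold of 60 is a guess, and it assumes the bright square is the only strong-edge region in the frame.

# keep only strong edges; the threshold value needs tuning per image
edges = sob_out > 60

# bounding box of the edge pixels
ys, xs = np.where(edges)
y0, y1 = ys.min(), ys.max()
x0, x1 = xs.min(), xs.max()

# crop the area of interest from the original image
roi = image[y0:y1 + 1, x0:x1 + 1]

# or mask everything outside the bounding box instead of cropping
masked = np.zeros_like(image)
masked[y0:y1 + 1, x0:x1 + 1] = image[y0:y1 + 1, x0:x1 + 1]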
I have collected images of S9 phones, added labels with labelImg, and trained for a few hours in Google Colab. The loss was minimal, so I thought it was enough. I only selected the rectangles where the phone is displayed and nothing else. What I don't understand is that it draws a lot of rectangles on the phone. I only want 1 or 2 rectangles drawn on the phone itself. Did I do something wrong?
def detect_img(self, img):
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    self.net.setInput(blob)
    output = self.net.forward(self.output)
    height, width, channel = img.shape
    boxes = []
    trusts = []
    class_ids = []
    for out in output:
        for detect in out:
            total_scores = detect[5:]
            class_id = np.argmax(total_scores)
            trust_factor = total_scores[class_id]
            if trust_factor > 0.5:
                x_center = int(detect[0] * width)
                y_center = int(detect[1] * height)
                w = int(detect[2] * width)
                h = int(detect[3] * height)
                # top-left corner of the box
                x = int(x_center - w / 2)
                y = int(y_center - h / 2)
                boxes.append([x, y, w, h])
                trusts.append(float(trust_factor))
                class_ids.append(class_id)
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
When I set the trust_factor threshold to 0.8, a lot of the rectangles are gone, but there are still rectangles outside the phone, even though I only selected the phone itself in labelImg and not the background.
You can use non-maximum suppression (NMS), which removes overlapping rectangles that have lower scores. Here is code for NMS:
def NMS(boxes, overlapThresh=0.4):
    # Return an empty list, if no boxes given
    if len(boxes) == 0:
        return []
    boxes = np.asarray(boxes)
    x1 = boxes[:, 0]  # x coordinate of the top-left corner
    y1 = boxes[:, 1]  # y coordinate of the top-left corner
    x2 = boxes[:, 2]  # x coordinate of the bottom-right corner
    y2 = boxes[:, 3]  # y coordinate of the bottom-right corner
    # Compute the area of the bounding boxes. We add 1 because the pixel
    # at the start as well as at the end counts.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # The indices of all boxes at start. We will remove redundant indices one by one.
    indices = np.arange(len(x1))
    for i, box in enumerate(boxes):
        # Create temporary indices
        temp_indices = indices[indices != i]
        # Find out the coordinates of the intersection box
        xx1 = np.maximum(box[0], boxes[temp_indices, 0])
        yy1 = np.maximum(box[1], boxes[temp_indices, 1])
        xx2 = np.minimum(box[2], boxes[temp_indices, 2])
        yy2 = np.minimum(box[3], boxes[temp_indices, 3])
        # Find out the width and the height of the intersection box
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # compute the ratio of overlap
        overlap = (w * h) / areas[temp_indices]
        # if the current bounding box overlaps any other box by more than the
        # threshold, remove its index
        if np.any(overlap > overlapThresh):
            indices = indices[indices != i]
    # return only the boxes at the remaining indices
    return boxes[indices].astype(int)
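Note that this NMS expects boxes in (x1, y1, x2, y2) corner format, while detect_img collects them as [x, y, w, h]. A minimal usage sketch converting between the two (variable names taken from the code above):

corner_boxes = [[x, y, x + w, y + h] for (x, y, w, h) in boxes]
for (x1, y1, x2, y2) in NMS(corner_boxes, overlapThresh=0.4):
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

Alternatively, OpenCV's built-in cv2.dnn.NMSBoxes takes the [x, y, w, h] boxes and the confidence list directly and returns the indices of the boxes to keep.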
I'm interested in trying to read an analog gauge using a Raspberry Pi and OpenCV. I've only really messed with face detection in OpenCV, so I don't even know where to begin. Any ideas, starting points?
You can detect circles with the HoughCircles method and lines with the HoughLinesP method of the OpenCV lib in Python. After detecting these, you can find out the value of the gauge from the line's position via trigonometry.
You can see the sample code in Python below. It basically does these:
Read the image with the imread method.
Turn it to grayscale with cvtColor.
Find the circle's center x, y coordinates and radius with HoughCircles; this method has some parameters that can be tweaked.
Detect the lines with the HoughLinesP method; again, parameters should be tweaked.
Calculate the value, considering the max value, min value, and angle interval of the gauge.
Reference: https://github.com/intel-iot-devkit/python-cv-samples/tree/master/examples/analog-gauge-reader
Hope this helps.
CODE:
import os
import cv2
import numpy

def getScriptDir():
    currentFile = __file__  # may be 'my_script', './my_script' or an absolute path
    realPath = os.path.realpath(currentFile)  # e.g. /home/user/test/my_script.py
    dirPath = os.path.dirname(realPath)
    return dirPath
def getUserRealGaugeDetails():
    min_angle = input('Min angle: ')  # the lowest possible angle
    max_angle = input('Max angle: ')  # highest possible angle
    min_value = input('Min value: ')  # usually zero
    max_value = input('Max value: ')  # maximum reading of the gauge
    units = input('Enter units: ')
    return min_angle, max_angle, min_value, max_value, units
def setStaticUserRealGaugeDetails():
    min_angle = 5    # lowest possible angle of the dial, in degrees
    max_angle = 355  # highest possible angle, in degrees
    min_value = -20  # usually zero
    max_value = 120  # maximum reading of the gauge
    units = 'b'
    return min_angle, max_angle, min_value, max_value, units
def getImage():
    dirPath = getScriptDir()
    dirPath += "/images/1.jpg"
    return cv2.imread(dirPath)

def distance2Points(x1, y1, x2, y2):
    return numpy.sqrt((x2 - x1)**2 + (y2 - y1)**2)
def averageCircle(circles, b):
    avg_x = 0
    avg_y = 0
    avg_r = 0
    for i in range(b):
        # optional - average for multiple circles (can happen when a gauge is at a slight angle)
        avg_x = avg_x + circles[0][i][0]
        avg_y = avg_y + circles[0][i][1]
        avg_r = avg_r + circles[0][i][2]
    avg_x = int(avg_x / b)
    avg_y = int(avg_y / b)
    avg_r = int(avg_r / b)
    return avg_x, avg_y, avg_r
# return the center and radius of the circle
def getCircleAndCustomize(image):
    height, width = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to gray
    # gray = cv2.GaussianBlur(gray, (5, 5), 0)
    # gray = cv2.medianBlur(gray, 5)
    # detect circles
    # restricting the search to 35-48% of the possible radii gives fairly good results
    # across different samples; remember that these are pixel values which correspond
    # to the possible radii search range.
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, numpy.array([]),
                               100, 50, int(height * 0.35), int(height * 0.48))
    # coordinates and radius
    a, b, c = circles.shape
    x, y, r = averageCircle(circles, b)
    return x, y, r
def get_current_value(img, min_angle, max_angle, min_value, max_value, x, y, r):
    gray2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # set threshold and maxValue
    thresh = 175
    maxValue = 255
    # for testing purposes, cv2.THRESH_BINARY_INV was found to perform best:
    # th, dst1 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY)
    # th, dst2 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY_INV)
    # th, dst3 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TRUNC)
    # th, dst4 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TOZERO)
    # th, dst5 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_TOZERO_INV)
    # apply thresholding, which helps with finding lines
    th, dst2 = cv2.threshold(gray2, thresh, maxValue, cv2.THRESH_BINARY_INV)
    # Hough lines generally perform better without Canny / blurring, though there
    # were a couple of exceptions where it only worked with Canny / blurring:
    # dst2 = cv2.medianBlur(dst2, 5)
    # dst2 = cv2.Canny(dst2, 50, 150)
    # dst2 = cv2.GaussianBlur(dst2, (5, 5), 0)
    # for testing, save the image after thresholding
    dirPath = getScriptDir() + '/images/afterTreshold.jpg'
    cv2.imwrite(dirPath, dst2)
    # find lines
    minLineLength = 10
    maxLineGap = 0
    # rho is set to 3 to detect more lines: easier to get more, then filter them out later
    lines = cv2.HoughLinesP(image=dst2, rho=3, theta=numpy.pi / 180, threshold=100,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    # for testing purposes, show all found lines
    # for line in lines:
    #     for x1, y1, x2, y2 in line:
    #         cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # remove all lines outside a given radius
    final_line_list = []
    diff1LowerBound = 0.15  # diff1 bounds: how close one end of the line should be to the center
    diff1UpperBound = 0.25
    diff2LowerBound = 0.5   # diff2 bounds: how close the other end should be to the outside of the gauge
    diff2UpperBound = 1.0
    for i in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            diff1 = distance2Points(x, y, x1, y1)  # x, y is the center of the circle
            diff2 = distance2Points(x, y, x2, y2)
            # set diff1 to be the smaller of the two (closest to the center); makes the math easier
            if diff1 > diff2:
                diff1, diff2 = diff2, diff1
            # check if the line is within an acceptable range
            if ((diff1 < diff1UpperBound * r) and (diff1 > diff1LowerBound * r) and
                    (diff2 < diff2UpperBound * r) and (diff2 > diff2LowerBound * r)):
                line_length = distance2Points(x1, y1, x2, y2)
                # add to final list
                final_line_list.append([x1, y1, x2, y2])
    # testing only: show all lines after filtering
    # for x1, y1, x2, y2 in final_line_list:
    #     cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # assumes the first line is the best one
    x1 = final_line_list[0][0]
    y1 = final_line_list[0][1]
    x2 = final_line_list[0][2]
    y2 = final_line_list[0][3]
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # for testing purposes, show the line overlaid on the original image
    # cv2.imwrite('gauge-1-test.jpg', img)
    # use the point farthest from the center to determine the angle
    dist_pt_0 = distance2Points(x, y, x1, y1)
    dist_pt_1 = distance2Points(x, y, x2, y2)
    if dist_pt_0 > dist_pt_1:
        x_angle = x1 - x
        y_angle = y - y1
    else:
        x_angle = x2 - x
        y_angle = y - y2
    # take the arctan of y/x to find the angle
    res = numpy.arctan(numpy.divide(float(y_angle), float(x_angle)))
    res = numpy.rad2deg(res)  # convert to degrees
    # these quadrant corrections were determined by trial and error
    if x_angle > 0 and y_angle > 0:  # in quadrant I
        final_angle = 270 - res
    if x_angle < 0 and y_angle > 0:  # in quadrant II
        final_angle = 90 - res
    if x_angle < 0 and y_angle < 0:  # in quadrant III
        final_angle = 90 - res
    if x_angle > 0 and y_angle < 0:  # in quadrant IV
        final_angle = 270 - res
    # linearly map the gauge's angle range onto its value range
    old_min = float(min_angle)
    old_max = float(max_angle)
    new_min = float(min_value)
    new_max = float(max_value)
    old_value = final_angle
    old_range = (old_max - old_min)
    new_range = (new_max - new_min)
    new_value = (((old_value - old_min) * new_range) / old_range) + new_min
    return new_value
def main():
    # 1) get the image from the directory
    image = getImage()
    min_angle, max_angle, min_value, max_value, units = setStaticUserRealGaugeDetails()
    # 2) convert the image to gray
    # 3) find the circle in the image with customization
    x, y, r = getCircleAndCustomize(image)
    # 4) find the line in the circle
    # 5) find the value in the range of the gauge
    newValue = get_current_value(image, min_angle, max_angle, min_value, max_value, x, y, r)
    print(newValue)

if __name__ == '__main__':
    main()
How do I get the new coordinates of the points a and b in this example after a transformation M (40-degree counter-clockwise rotation)?
import cv2

cap = cv2.VideoCapture("http://i.imgur.com/7G91d2im.jpg")
a, b = (100, 100), (200, 200)

if cap.isOpened():
    ret, im = cap.read()
    rows, cols = im.shape[:2]

    im_keypoints = im.copy()
    for point in [a, b]:
        cv2.circle(im_keypoints, point, 6, (0, 0, 255), -1)
    cv2.imwrite("im_keypoints.jpg", im_keypoints)

    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 40, 1)
    im_rotated = cv2.warpAffine(im, M, (cols, rows))
    cv2.imwrite("im_rotated.jpg", im_rotated)
M is a 2 by 3 rotation matrix, so all you need to do is apply M to your points.
import numpy as np

im_rotated_keypoints = im_rotated.copy()
for point in [a, b]:
    # convert to homogeneous coordinates in np array format first so that you can pre-multiply M
    rotated_point = M.dot(np.array(point + (1,)))
    cv2.circle(im_rotated_keypoints, (int(rotated_point[0]), int(rotated_point[1])), 6, (0, 0, 255), -1)
And you should be able to see the points drawn at their new locations on the rotated image.
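As an aside, if you have many points, cv2.transform applies the same 2 by 3 affine matrix to a whole array of points at once, which avoids the per-point loop; it expects the points in an (N, 1, 2) array:

import numpy as np

pts = np.array([a, b], dtype=np.float32).reshape(-1, 1, 2)  # shape (N, 1, 2)
rotated_pts = cv2.transform(pts, M)  # applies M to every point
for (px, py) in rotated_pts.reshape(-1, 2):
    cv2.circle(im_rotated_keypoints, (int(px), int(py)), 6, (0, 0, 255), -1)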