I have the following graph:
tf.reset_default_graph()
input_tensor = tf.placeholder(tf.int32, shape = [None, memory_size, max_sent_length], name = 'Input')
q_tensor = tf.placeholder(tf.int32, shape = [None,max_sent_length], name = 'Question')
a_tensor = tf.placeholder(tf.float32, shape = [None,V+1], name = 'Answer')
# Embedding matrices
A_prior = tf.get_variable(name = 'A', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
A = tf.concat(0,[tf.zeros(shape = tf.pack([1,tf.shape(A_prior)[1]])),tf.slice(A_prior,[1,0],[-1,-1])])
B = tf.get_variable(name = 'B', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
C = tf.get_variable(name = 'C', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
W = tf.get_variable(name = 'W', shape = [V+1,d], initializer= tf.random_normal_initializer(stddev = 0.1))
embeddings = tf.reduce_sum(tf.nn.embedding_lookup(A,input_tensor),2)
u = tf.reshape(tf.reduce_sum(tf.nn.embedding_lookup(B,q_tensor),1),[-1,1,d])
test = tf.transpose(embeddings, perm = [0,2,1])
test_batch_mul = tf.squeeze(tf.batch_matmul(u,test))
cond = tf.not_equal(test_batch_mul,0.0)
tt = tf.fill(tf.shape(test_batch_mul),-1000.0)
softmax_in = tf.select(cond, test_batch_mul, tt)
p_values = tf.nn.softmax(softmax_in)
c_values = tf.reduce_sum(tf.nn.embedding_lookup(C,input_tensor),2)
o = tf.squeeze(tf.batch_matmul(tf.expand_dims(p_values,1),c_values))
a_pred = tf.nn.softmax(tf.matmul(tf.squeeze(u)+o,tf.transpose(W)))
loss = tf.nn.softmax_cross_entropy_with_logits(a_pred, a_tensor, name = 'loss')
cost = tf.reduce_mean(loss)
global_step = tf.Variable(0,name = 'global_step', trainable= False)
#optimizer = tf.train.MomentumOptimizer(0.01,0.9)
vars_list = tf.trainable_variables()
grads = tf.gradients(cost, vars_list)
and then run the following session:
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
input_feed = {input_tensor : phrases, q_tensor : questions, a_tensor : answers}
grad_results = sess.run(grads, feed_dict = input_feed)
If I look at, e.g., the gradient of the cost with respect to the embedding matrix A, I get a gradient of shape (batch_size * memory_size * max_sent_length, d), so the first dimension of the gradient corresponds to the product of the dimensions of the input tensor. I don't understand why TensorFlow returns a matrix of that shape for the gradient of A; shouldn't the gradient have the same shape as A?
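For reference, a hedged way to inspect what tf.gradients actually returned (this assumes, as is usual when a variable is only read through tf.nn.embedding_lookup / tf.gather, that the gradient comes back as a tf.IndexedSlices whose .values holds one d-dimensional row per looked-up index rather than a dense (V+1, d) matrix; the sketch reuses grads, vars_list, sess and input_feed from the code above):
for var, grad in zip(vars_list, grads):
    print(var.name, type(grad))
    if isinstance(grad, tf.IndexedSlices):
        # .values has one row per lookup; .indices says which row of the variable each one belongs to
        dense_grad = tf.unsorted_segment_sum(grad.values, grad.indices, tf.shape(var)[0])
        print(sess.run(dense_grad, feed_dict = input_feed).shape)  # matches the variable's shape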
I am using OpenCV for face detection. Sometimes OpenCV has trouble detecting faces. The function is:
def detectFace(img_path):
    img = functions.detectFace(img_path)[0] #detectFace returns (1, 224, 224, 3)
    return img[:, :, ::-1] #bgr to rgb
I want the above output from the MTCNN algorithm.
detectFace function code:
def detectFace(img, target_size=(224, 224), grayscale = False, enforce_detection = True):
    img_path = ""
    img = "/media/khawar/HDD_Khawar/Projects/" + img
    print(img)
    #-----------------------
    exact_image = False
    if type(img).__module__ == np.__name__:
        exact_image = True

    base64_img = False
    if len(img) > 11 and img[0:11] == "data:image/":
        base64_img = True
    #-----------------------
    opencv_path = get_opencv_path()
    face_detector_path = opencv_path+"haarcascade_frontalface_default.xml"
    eye_detector_path = opencv_path+"haarcascade_eye.xml"

    if os.path.isfile(face_detector_path) != True:
        raise ValueError("Confirm that opencv is installed on your environment! Expected path ",face_detector_path," violated.")
    #--------------------------------
    face_detector = cv2.CascadeClassifier(face_detector_path)
    eye_detector = cv2.CascadeClassifier(eye_detector_path)

    if base64_img == True:
        img = loadBase64Img(img)
    elif exact_image != True: #image path passed as input
        if os.path.isfile(img) != True:
            raise ValueError("Confirm that ",img," exists")
        img = cv2.imread(img)

    img_raw = img.copy()
    #--------------------------------
    faces = []
    try:
        faces = face_detector.detectMultiScale(img, 1.3, 5)
    except:
        pass
    #print("found faces in ",image_path," is ",len(faces))

    if len(faces) > 0:
        print(faces[0])
        x,y,w,h = faces[0]
        detected_face = img[int(y):int(y+h), int(x):int(x+w)]
        detected_face_gray = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
        #---------------------------
        #face alignment
        eyes = eye_detector.detectMultiScale(detected_face_gray)

        if len(eyes) >= 2:
            #find the largest 2 eye
            base_eyes = eyes[:, 2]
            items = []
            for i in range(0, len(base_eyes)):
                item = (base_eyes[i], i)
                items.append(item)
            df = pd.DataFrame(items, columns = ["length", "idx"]).sort_values(by=['length'], ascending=False)
            eyes = eyes[df.idx.values[0:2]]
            #-----------------------
            #decide left and right eye
            eye_1 = eyes[0]; eye_2 = eyes[1]
            if eye_1[0] < eye_2[0]:
                left_eye = eye_1
                right_eye = eye_2
            else:
                left_eye = eye_2
                right_eye = eye_1
            #-----------------------
            #find center of eyes
            left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
            left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]
            right_eye_center = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
            right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]
            #-----------------------
            #find rotation direction
            if left_eye_y > right_eye_y:
                point_3rd = (right_eye_x, left_eye_y)
                direction = -1 #rotate same direction to clock
            else:
                point_3rd = (left_eye_x, right_eye_y)
                direction = 1 #rotate inverse direction of clock
            #-----------------------
            #find length of triangle edges
            a = distance(left_eye_center, point_3rd)
            b = distance(right_eye_center, point_3rd)
            c = distance(right_eye_center, left_eye_center)
            #-----------------------
            #apply cosine rule
            if b != 0 and c != 0: #this multiplication causes division by zero in cos_a calculation
                cos_a = (b*b + c*c - a*a)/(2*b*c)
                angle = np.arccos(cos_a) #angle in radian
                angle = (angle * 180) / math.pi #radian to degree
                #-----------------------
                #rotate base image
                if direction == -1:
                    angle = 90 - angle
                img = Image.fromarray(img_raw)
                img = np.array(img.rotate(direction * angle))
                #you recover the base image and face detection disappeared. apply again.
                faces = face_detector.detectMultiScale(img, 1.3, 5)
                if len(faces) > 0:
                    x,y,w,h = faces[0]
                    detected_face = img[int(y):int(y+h), int(x):int(x+w)]
            #-----------------------
        #face alignment block end
        #---------------------------
        #face alignment block needs colorful images. that's why, converting to gray scale logic moved to here.
        if grayscale == True:
            detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)

        detected_face = cv2.resize(detected_face, target_size)
        img_pixels = image.img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis = 0)
        #normalize input in [0, 1]
        img_pixels /= 255
        return img_pixels
    else:
        if (exact_image == True) or (enforce_detection != True):
            if grayscale == True:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, target_size)
            img_pixels = image.img_to_array(img)
            img_pixels = np.expand_dims(img_pixels, axis = 0)
            img_pixels /= 255
            return img_pixels
        else:
            print(img)
            raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.")
Try this out:
import mtcnn
import matplotlib.pyplot as plt

# load image from file
filename = "glediston-bastos-ZtmmR9D_2tA-unsplash.jpg"
pixels = plt.imread(filename)
print("Shape of image/array:",pixels.shape)
imgplot = plt.imshow(pixels)
plt.show()

# draw an image with detected objects
def draw_facebox(filename, result_list):
    # load the image
    data = plt.imread(filename)
    # plot the image
    plt.imshow(data)
    # get the context for drawing boxes
    ax = plt.gca()
    # plot each box
    for result in result_list:
        # get coordinates
        x, y, width, height = result['box']
        # create the shape
        rect = plt.Rectangle((x, y), width, height, fill=False, color='green')
        # draw the box
        ax.add_patch(rect)
    # show the plot
    plt.show()

# filename = 'test1.jpg' # filename is defined above, otherwise uncomment
# load image from file
# pixels = plt.imread(filename) # defined above, otherwise uncomment
# detector is defined above, otherwise uncomment
#detector = mtcnn.MTCNN()
# detect faces in the image
faces = detector.detect_faces(pixels)
# display faces on the original image
draw_facebox(filename, faces)
# draw the dots (this loop belongs inside draw_facebox's "for result in result_list:" loop,
# where result and ax are defined)
for key, value in result['keypoints'].items():
    # create and draw dot
    dot = plt.Circle(value, radius=20, color='orange')
    ax.add_patch(dot)
You are using the detectFace function within deepface? It currently wraps opencv, ssd, dlib and mtcnn to detect and align faces.
def detectFace(img_path):
    backends = ['opencv', 'ssd', 'dlib', 'mtcnn']
    img = functions.detectFace(img_path, detector_backend = backends[3])[0] #detectFace returns (1, 224, 224, 3)
    return img[:, :, ::-1] #bgr to rgb
The result of the detectFace function is now detected and aligned with mtcnn.
Besides, you can run face recognition with mtcnn backend as well.
from deepface import DeepFace
obj = DeepFace.verify("img1.jpg", "img2.jpg", detector_backend = 'mtcnn')
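A quick way to eyeball the aligned output (a minimal sketch; it assumes matplotlib is installed and reuses the detectFace wrapper defined above, which returns a (224, 224, 3) RGB array with values in [0, 1]):
import matplotlib.pyplot as plt
aligned = detectFace("img1.jpg")
plt.imshow(aligned)
plt.axis('off')
plt.show()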
I've been trying to code a 3D scanner that uses structured light (one camera and one projector). I'm using Taubin and Moreno's software (their site is down right now; use the Wayback Machine to check it out) to obtain the projector intrinsics and extrinsics as a start, and the linear least squares triangulation method described here.
However, regardless of the objects scanned, the point clouds obtained are warped in a convex manner (see images below). This is most likely not an intrinsics/extrinsics/distortion parameter issue, as the same calibration parameters give a proper point cloud when using the software linked above. I'm also inclined to say that my decoding process is not faulty, as the row and column correspondences appear to be correct (see below). Using a previously decoded dataset also gives the same issue.
def linearLS_triangulation(u_c, u_p, P_c, P_p, A, B):
    """
    Performs linear least squares triangulation via an overdetermined linear
    system
    Reference:
    http://users.cecs.anu.edu.au/~hartley/Papers/triangulation/triangulation.pdf
    """
    A[0][0] = u_c[0]*P_c[2][0] - P_c[0][0]
    A[0][1] = u_c[0]*P_c[2][1] - P_c[0][1]
    A[0][2] = u_c[0]*P_c[2][2] - P_c[0][2]
    A[1][0] = u_c[1]*P_c[2][0] - P_c[1][0]
    A[1][1] = u_c[1]*P_c[2][1] - P_c[1][1]
    A[1][2] = u_c[1]*P_c[2][2] - P_c[1][2]
    A[2][0] = u_p[0]*P_p[2][0] - P_p[0][0]
    A[2][1] = u_p[0]*P_p[2][1] - P_p[0][1]
    A[2][2] = u_p[0]*P_p[2][2] - P_p[0][2]
    A[3][0] = u_p[1]*P_p[2][0] - P_p[1][0]
    A[3][1] = u_p[1]*P_p[2][1] - P_p[1][1]
    A[3][2] = u_p[1]*P_p[2][2] - P_p[1][2]
    B[0][0] = -(u_c[0] * P_c[2][3] - P_c[0][3])
    B[1][0] = -(u_c[1] * P_c[2][3] - P_c[1][3])
    B[2][0] = -(u_p[0] * P_p[2][3] - P_p[0][3])
    B[3][0] = -(u_p[1] * P_p[2][3] - P_p[1][3])
    # Use of the normal equation, np.linalg.lstsq also works!
    w = np.linalg.inv(A.T.dot(A)).dot(A.T).dot(B)
    return w[:, 0]
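# (Aside, as the comment above notes: np.linalg.lstsq is an equivalent and numerically
#  safer alternative to forming the normal equations explicitly, e.g.
#      w, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
#      return w[:, 0]
#  Just a suggested variant, shown here for reference.)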
def get_cam_points(decoded, K_c):
    """
    Get list of camera pixels that have a correspondence to projector pixels
    Returned in global coordinates, where world centre is centre of projection
    of the camera
    """
    [height, width] = np.nonzero(decoded[0])
    points_cam = np.zeros([3, height.shape[0]], dtype = np.float)
    K_c_inv = np.linalg.inv(K_c)
    for i in range(height.shape[0]):
        points_cam[:, i] = [width[i], height[i], 1]
    points_cam = np.dot(K_c_inv, points_cam)
    return points_cam
def get_proj_pixels(width_p, height_p, K_p, dist_p, R_p, T_p):
    """
    Passes the resolution of the projector along with the intrinsics and
    extrinsics, computing the mapping from projector pixels to the optical
    rays returned in [x, y, z] for each pixel in the 'image'
    This assumes that the camera is the origin, with rotation and translation
    matrixes of the projector respect to that.
    """
    column_p = np.arange(width_p, dtype = np.float)
    row_p = np.arange(height_p, dtype = np.float)
    C, R = np.meshgrid(column_p, row_p)
    uv_p = np.zeros([np.ravel(C).shape[0], 1, 2], dtype = np.float)
    uv_p[:, 0, :] = np.c_[np.ravel(C),np.ravel(R)]
    uv_p = cv2.undistortPoints(uv_p, K_p, dist_p)
    uv_p = uv_p[:,0,:]
    uv_p = np.c_[uv_p, np.ones([np.ravel(C).shape[0]])]
    uv_p = uv_p.transpose()
    uv_grid = np.zeros([3, height_p, width_p], dtype = np.float)
    uv_grid[0] = np.reshape(uv_p[0, :], [height_p, width_p])
    uv_grid[1] = np.reshape(uv_p[1, :], [height_p, width_p])
    uv_grid[2] = np.reshape(uv_p[2, :], [height_p, width_p])
    return uv_grid
def triangulate_all(decoded, P_c, P_p, dist_p, K_c, K_p, width_p, height_p):
    [height, width] = np.nonzero(decoded[0])
    points = np.zeros([3, height.shape[0]], dtype = np.float)
    points_cam = get_cam_points(decoded, K_c)
    points_proj = np.zeros([3, height.shape[0]], dtype = np.float)
    uv_grid = get_proj_pixels(width_p, height_p, K_p, dist_p,
                              P_p[:, :3], P_p[:, 3].reshape(-1, 1))
    # Get list of projector pixels corresponding to non-zero camera pixels
    for i in range(height.shape[0]):
        inter = decoded[:, height[i], width[i]]
        points_proj[:, i] = uv_grid[:, inter[1], inter[0]]
    A = np.zeros((4, 3), dtype = np.float)
    B = np.zeros((4, 1), dtype = np.float)
    for i in range(height.shape[0]):
        points[:, i] = linearLS_triangulation(points_cam[:, i],
                                              points_proj[:, i],
                                              P_c, P_p, A, B)
    return points
print('Loading calibration parameters...')
calib_params = cv2.FileStorage('calibration.yml', cv2.FILE_STORAGE_READ)
dist_c = calib_params.getNode('cam_kc').mat()
dist_p = calib_params.getNode('proj_kc').mat()
K_c = calib_params.getNode('cam_K').mat()
K_p = calib_params.getNode('proj_K').mat()
R_p = calib_params.getNode('R').mat()
R_p = R_p.transpose() # Rotation matrix of projector with respect to camera origin
R_c = np.array([[1,0,0],[0,1,0],[0,0,1]])
T_p = calib_params.getNode('T').mat()
T_c = np.array([0,0,0])
width_p = 1920
height_p = 1080
P_c = np.c_[R_c, T_c]
P_p = np.c_[R_p, T_p]
print('Loading color image...')
color = cv2.imread(scandir + 'Image01.jpg')
color = color/255
print('Loading decoded matrix...')
# A 2 x imgheight x imgwidth (in pixels) matrix, with the first channel being the column (x-direction)
# estimates and the second channel being the row (y-direction) pixel estimates of the projector.
# E.g. a pixel at point [300, 400] (Origin at top left of image!) would correspond to the projector
# pixels of [16, 4] (Origin at top left). A zero would indicate the lack of correspondence for that
# specific pixel
decoded = np.load('Decoded Matrix.npy')
points = triangulate_all(decoded, P_c, P_p, dist_p, K_c, K_p,
width_p, height_p)
Point cloud and original image
Another point cloud, and the decoded row and column estimates
Help would be greatly appreciated! I'm at a loss as to what to do.
Edit:
Got rid of the line normalizing the ray vectors
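One thing I'm also looking into (not confirmed as the cause): dist_c (cam_kc) is loaded from calibration.yml but never applied, while the projector pixels are undistorted inside get_proj_pixels; uncorrected lens distortion on the camera side could plausibly bow the point cloud. A rough sketch of back-projecting the camera pixels through cv2.undistortPoints instead, assuming the usual OpenCV distortion model and the same decoded layout as above:
def get_cam_points_undistorted(decoded, K_c, dist_c):
    # like get_cam_points, but removes lens distortion; undistortPoints already returns
    # normalized camera coordinates, so no multiplication by inv(K_c) is needed
    height, width = np.nonzero(decoded[0])
    pix = np.stack([width, height], axis=1).astype(np.float64).reshape(-1, 1, 2)
    norm = cv2.undistortPoints(pix, K_c, dist_c)[:, 0, :]
    return np.concatenate([norm, np.ones((norm.shape[0], 1))], axis=1).T  # 3 x N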
I'm trying to find the difference between two images:
db_image:
query_image:
abs_diff:
query_image is a little larger than db_image. I'm using OpenCV SIFT to extract features and FlannBasedMatcher to find the homography, and finally absdiff to extract the difference area. It works! But some images are not warped perfectly by warpPerspective and, I think, contain some anti-aliased noise pixels. How can I solve this?
Sorry for my bad English!
Here is my source code:
grayscale_db = cv2.cvtColor(db_img, cv2.COLOR_BGR2GRAY)
gray_scale_query = cv2.cvtColor(query_img, cv2.COLOR_BGR2GRAY)
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(grayscale_db, None)
kp2, des2 = sift.detectAndCompute(gray_scale_query, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.9 * n.distance]
result_area = None
h, w = grayscale_db.shape
if len(good) > threshold:
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    if return_area:
        return cv2.minAreaRect(dst_pts)
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
    result_area = cv2.warpPerspective(query_img, M, (w, h), borderMode=cv2.BORDER_CONSTANT,
                                      borderValue=(255, 255, 255), flags=cv2.INTER_LINEAR)
diff = cv2.absdiff(db_img, result_area)
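One common way to suppress thin anti-aliasing residue along edges after absdiff (a hedged sketch, not necessarily the best choice for these particular images) is to threshold the difference and apply a small morphological opening:
diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
_, diff_mask = cv2.threshold(diff_gray, 30, 255, cv2.THRESH_BINARY)  # 30 is a guess; tune it per dataset
kernel = np.ones((3, 3), np.uint8)
diff_mask = cv2.morphologyEx(diff_mask, cv2.MORPH_OPEN, kernel)
diff_clean = cv2.bitwise_and(diff, diff, mask=diff_mask)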
I have used the RANSAC algorithm to find the homography and the warpPerspective operation to apply it to an image. Here is the code:
MIN_MATCH_COUNT = 10
img1 = cv2.imread('bus1.jpg',0)
img2 = cv2.imread('bus2.jpg',0)
sift = cv2.SIFT()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
    h,w = img1.shape
    result = cv2.warpPerspective(img2,M,(w,h))
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()
The output is not showing the whole image. What is wrong? How do I warp the image correctly?
You are computing the homography from img1 to img2 but you are applying it to img2 and not img1.
Change result = cv2.warpPerspective(img2, M, (w,h)) to result = cv2.warpPerspective(img1, M, (2 * w, h)) (the 2 * w is so that there is a bigger part of the warped image included in result)
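Putting that together (a minimal sketch; the suggested 2 * w canvas is a heuristic, and using cv2.perspectiveTransform on img1's corners is one way to size the output less arbitrarily, assuming the warped corners stay at non-negative coordinates):
h, w = img1.shape
corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
warped_corners = cv2.perspectiveTransform(corners, M)
out_w = int(np.ceil(warped_corners[:, 0, 0].max()))
out_h = int(np.ceil(warped_corners[:, 0, 1].max()))
# warp img1 (the image the homography maps FROM) into img2's frame
result = cv2.warpPerspective(img1, M, (max(out_w, w), max(out_h, h)))
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()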
I am trying to make a shape recognition classifier in which, if you give it an individual picture of an object (from a scene), it will be able to classify (after machine learning) the shape of the object (cylinder, cube, sphere, etc.).
Original scene:
Individual objects it will classify:
I attempted to do this using cv2.approxPolyDP, with an attempt to classify a cylinder. However, either my implementation isn't good or this wasn't a good choice of algorithm in the first place: the objects in the shape of cylinders were assigned an approxPolyDP vertex count of 3 or 4.
Perhaps I could threshold on that value and, in general, assume an object with a count of 3 or 4 is a cylinder, but I feel that's not the most reliable method for 3D shape classification. I feel there is a better way to implement this than just hardcoding values; with this method it can easily confuse a cylinder with a cube.
Is there any way I can improve my 3D shape recognition program?
Code:
import cv2
import numpy as np
from pyimagesearch import imutils
from PIL import Image
from time import time
def invert_img(img):
    img = (255-img)
    return img

def threshold(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    imgray = cv2.medianBlur(imgray,9)
    imgray = cv2.Canny(imgray,75,200)
    return imgray

def view_all_contours(im, size_min, size_max):
    main = np.array([[]])
    cnt_target = im.copy()
    for c in cnts:
        epsilon = 0.1*cv2.arcLength(c,True)
        approx = cv2.approxPolyDP(c,epsilon,True)
        area = cv2.contourArea(c)
        print 'area: ', area
        test = im.copy()
        # To weed out contours that are too small or big
        if area > size_min and area < size_max:
            print c[0,0]
            print 'approx: ', len(approx)
            max_pos = c.max(axis=0)
            max_x = max_pos[0,0]
            max_y = max_pos[0,1]
            min_pos = c.min(axis=0)
            min_x = min_pos[0,0]
            min_y = min_pos[0,1]
            # Load each contour onto image
            cv2.drawContours(cnt_target, c, -1,(0,0,255),2)
            print 'Found object'
            frame_f = test[min_y:max_y , min_x:max_x]
            main = np.append(main, approx[None,:][None,:])
            thresh = frame_f.copy()
            thresh = threshold(thresh)
            contours_small, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            cnts_small = sorted(contours_small, key = cv2.contourArea, reverse = True)
            cv2.drawContours(frame_f, cnts_small, -1,(0,0,255),2)
            cv2.imshow('Thresh', thresh)
            cv2.imshow('Show Ya', frame_f)
            cv2.waitKey(0)
    # Uncomment in order to show all rectangles in image
    print '---------------------------------------------'
    #cv2.drawContours(cnt_target, cnts, -1,(0,255,0),2)
    print main.shape
    print main
    return cnt_target
time_1 = time()
roi = cv2.imread('images/beach_trash_3.jpg')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
target = cv2.imread('images/beach_trash_3.jpg')
target = imutils.resize(target, height = 400)
hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
img_height = target.shape[0]
img_width = target.shape[1]
# calculating object histogram
roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
# normalize histogram and apply backprojection
cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
# Now convolute with circular disc
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
# threshold and binary AND
ret,thresh = cv2.threshold(dst,50,255,0)
thresh_one = thresh.copy()
thresh = cv2.merge((thresh,thresh,thresh))
res = cv2.bitwise_and(target,thresh)
# Implementing morphological erosion & dilation
kernel = np.ones((9,9),np.uint8) # (6,6) to get more contours (9,9) to reduce noise
thresh_one = cv2.erode(thresh_one, kernel, iterations = 3)
thresh_one = cv2.dilate(thresh_one, kernel, iterations=2)
# Invert the image
thresh_one = invert_img(thresh_one)
# To show prev img
#res = np.vstack((target,thresh,res))
#cv2.imwrite('res.jpg',res)
#cv2.waitKey(0)
#cv2.imshow('Before contours', thresh_one)
cnt_target = target.copy()
cnt_full = target.copy()
# Code to draw the contours
contours, hierarchy = cv2.findContours(thresh_one.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)
print time() - time_1
size_min = 200
size_max = 5000
cnt_target = view_all_contours(target, size_min, size_max)
cv2.drawContours(cnt_full, cnts, -1,(0,0,255),2)
res = imutils.resize(thresh_one, height = 700)
cv2.imshow('Original image', target)
cv2.imshow('Preprocessed', thresh_one)
cv2.imshow('All contours', cnt_full)
cv2.imshow('Filtered contours', cnt_target)
cv2.waitKey(0)
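One hedged idea (a sketch of an alternative, not a drop-in fix for the code above): instead of thresholding only on the approxPolyDP vertex count, a few cheap per-contour descriptors can be stacked into a feature vector and handed to an ordinary classifier trained on labelled examples, which avoids hand-picking a single cutoff:
def contour_features(c):
    # simple shape descriptors that are less brittle than the raw vertex count alone
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.1 * perimeter, True)
    x, y, w, h = cv2.boundingRect(c)
    circularity = 4 * np.pi * area / (perimeter * perimeter) if perimeter > 0 else 0
    extent = area / float(w * h) if w * h > 0 else 0
    aspect = w / float(h) if h > 0 else 0
    hu = cv2.HuMoments(cv2.moments(c)).flatten()
    return [len(approx), circularity, extent, aspect] + hu.tolist()

# features = [contour_features(c) for c in cnts]  # then train e.g. an SVM or k-NN on labelled shapes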