How to avoid the minAreaRect box being drawn for the whole canvas of the window? - opencv

So, I am trying to measure a line (check the attached code and result). The problem is that while creating the box points to measure the line, the code also creates box points for the whole canvas of the window, no matter what the image or thresholding value is. I also tried to limit it with the contourArea(c) check, but couldn't (I don't know why). Kindly improve the code or explain where I can fix this problem, in layman's terms, as I am a newbie.
Thank you!
import cv2
import numpy as np
import imutils
import imutils.perspective as persp
import scipy.spatial.distance as dist

print("PRESSED MODE 1 CONTOUR")
# cv2.destroyAllWindows()

img = cv2.imread('opencv_frame_3.png')
# img2 = cv2.imread('shapes.jpg')
# rgb_img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rgb_img = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)  # gray is single-channel, so GRAY2RGB (BGR2RGB would error here)

# thresh, thresh_img = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
# thresh_img = cv2.erode(thresh_img, None, iterations=5)
# thresh_img = cv2.dilate(thresh_img, None, iterations=1)
# rgb_img_thresh = cv2.cvtColor(thresh_img, cv2.COLOR_BGR2RGB)
thresh_img = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 5)
# thresh_img = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 175, 15)
# thr = cv2.cvtColor(thr, cv2.COLOR_BGR2RGB)

conts = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
conts = imutils.grab_contours(conts)
# print(len(conts))

cont_img = np.zeros(img.shape, dtype=np.uint8)  # uint8 so cv2.imshow displays the drawn colours correctly
cont_img = cv2.drawContours(cont_img, conts, -1, (0, 255, 0), 1)
cont_img2 = np.copy(img)
cont_img2 = cv2.drawContours(cont_img2, conts, -1, (0, 255, 0), 1)
# cv2.imshow('black', cont_img)

def midPoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) / 2, (ptA[1] + ptB[1]) / 2)

for c in conts:
    box = cv2.minAreaRect(c)
    # print(box)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype='int')
    area = cv2.contourArea(c)
    # print(area)
    if cv2.contourArea(c) < 50000:
        continue
    area = cv2.contourArea(c)
    print(area / 96)
    print("________")

    cv2.drawContours(cont_img, [c], -1, (0, 0, 255), 2)
    cv2.drawContours(cont_img, [box], -1, (255, 0, 0), 2)
    cv2.drawContours(cont_img2, [c], -1, (0, 0, 255), 2)
    cv2.drawContours(cont_img2, [box], -1, (255, 0, 0), 2)

    for (x, y) in box:
        cv2.circle(cont_img, (x, y), 2, (255, 255, 255), 20)
        cv2.circle(cont_img2, (x, y), 2, (255, 255, 255), 20)

    (tl, tr, br, bl) = box
    (trX, tlX) = midPoint(tr, tl)
    (brX, blX) = midPoint(br, bl)
    cv2.circle(cont_img, (int(trX), int(tlX)), 1, (0, 255, 255), 20)
    cv2.circle(cont_img, (int(brX), int(blX)), 1, (0, 255, 255), 20)
    cv2.line(cont_img, (int(trX), int(tlX)), (int(brX), int(blX)), (255, 0, 255), 2)
    cv2.circle(cont_img2, (int(trX), int(tlX)), 1, (0, 255, 255), 20)
    cv2.circle(cont_img2, (int(brX), int(blX)), 1, (0, 255, 255), 20)
    cv2.line(cont_img2, (int(trX), int(tlX)), (int(brX), int(blX)), (255, 0, 255), 2)
    dA = dist.euclidean((int(trX), int(tlX)), (int(brX), int(blX)))
    cv2.putText(cont_img, "{:.2f} px".format(dA / 300), (int(trX + 20), int(blX - 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
    cv2.putText(cont_img2, "{:.2f} px".format(dA / 300), (int(trX + 20), int(blX - 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

    (tlX, blX) = midPoint(tl, bl)
    (trX, brX) = midPoint(tr, br)
    cv2.circle(cont_img, (int(tlX), int(blX)), 1, (0, 255, 0), 20)
    cv2.circle(cont_img, (int(trX), int(brX)), 1, (0, 255, 0), 20)
    cv2.line(cont_img, (int(tlX), int(blX)), (int(trX), int(brX)), (255, 0, 255), 2)
    cv2.circle(cont_img2, (int(tlX), int(blX)), 1, (0, 255, 0), 20)
    cv2.circle(cont_img2, (int(trX), int(brX)), 1, (0, 255, 0), 20)
    cv2.line(cont_img2, (int(tlX), int(blX)), (int(trX), int(brX)), (255, 0, 255), 2)
    dB = dist.euclidean((int(tlX), int(blX)), (int(trX), int(brX)))
    cv2.putText(cont_img, "{:.2f} in".format(dB / 300), (int(trX - 120), int(blX - 320)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
    cv2.putText(cont_img2, "{:.2f} in".format(dB / 300), (int(trX - 120), int(blX - 320)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

    print("Line A", "{:.2f} in".format(dA / 96))
    print("Line B", "{:.2f} in".format(dB / 96))
    print("**************")

# cont_img = cv2.resize(cont_img, (0, 0), fx=0.5, fy=0.5)
cv2.imshow('area', cont_img)
# cont_img2 = cv2.resize(cont_img2, (0, 0), fx=0.5, fy=0.5)
# cv2.imshow('ORG', cont_img2)
# thresh_img = cv2.resize(thresh_img, (0, 0), fx=0.5, fy=0.5)
cv2.imshow("thresh", thresh_img)
# imgStack = stackImages(0.4, ([img, rgb_img],
#                              [rgb_img_thresh, cont_img]))
#
# cv2.imshow('imgthresh', imgStack)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
Check the output image: the whole window size is getting measured.
I tried to limit it either through the contourArea(c) measurement or through the contour c itself, but it doesn't work for me.
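For reference, a minimal sketch of one possible way to skip the near-full-frame contour before measuring; this is an illustration rather than the original attempt, and it assumes img and conts are defined exactly as in the code above:

import cv2

# Sketch: drop any contour whose bounding box spans (almost) the entire frame.
img_h, img_w = img.shape[:2]
img_area = img_h * img_w

filtered = []
for c in conts:
    x, y, w, h = cv2.boundingRect(c)
    # The "whole canvas" box hugs the image border, so its bounding box
    # is nearly as large as the image itself; 0.95 is an arbitrary cut-off.
    if w * h > 0.95 * img_area:
        continue
    filtered.append(c)

# `filtered` could then be used in place of `conts` in the measurement loop.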

Related

opencv error: (-209:Sizes of input arguments do not match) The operation is neither 'array op array'

I am working on a mini project that uses OpenCV. The code is here, but it gives an error. How can I modify it?
import cv2

cap = cv2.VideoCapture('./opencv/images/vtest.avi')
_, frame1 = cap.read()
_, frame2 = cap.read()

while cap.isOpened():
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, th = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(th, None, iterations=3)
    contours, heriacy = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        # print(x, y, w, h)
        if cv2.contourArea(contour) > 1000:
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(frame1, "stauts: Movement", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            # cv2.putText(img, text, org, fontFace, fontScale, color)
    cv2.imshow("result_video", frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(40) == 27:
        break

cv2.destroyAllWindows()
cap.release()
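This error usually means one of the inputs to cv2.absdiff is empty; in a loop like this that typically happens once cap.read() stops returning frames at the end of the video, so frame2 becomes None. A minimal sketch of a guarded read loop, assuming that is the cause and using the same video path:

import cv2

cap = cv2.VideoCapture('./opencv/images/vtest.avi')
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened() and ret and frame2 is not None:
    diff = cv2.absdiff(frame1, frame2)   # both frames are guaranteed valid here
    cv2.imshow("diff", diff)
    frame1 = frame2
    ret, frame2 = cap.read()             # ret becomes False once the video ends
    if cv2.waitKey(40) == 27:
        break

cv2.destroyAllWindows()
cap.release()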

Opencv, combination of chroma key correction and contour detection gives unexpected result

I am doing contour detection on a chroma-key-corrected image. Everything works fine when I filter out just the blues, but when I try to get a better chroma correction by also filtering the reds, suddenly my contours cannot be detected anymore. Does anyone have any suggestions?
WITH BLUE FILTER:
img = cv2.imread('yellowcropped.jpg', 1)
lower_blue = np.array([0, 0, 15]) ##[R value, G value, B value]
upper_blue = np.array([255, 255, 60])
mask = cv2.inRange(image_copy, lower_blue, upper_blue)
WITH BLUE AND RED FILTER:
lower_blue = np.array([180, 0, 15]) ##[R value, G value, B value]
upper_blue = np.array([255, 255, 60])
(Notice the top-left image gets much crisper, but NO CONTOURS are detected anymore.)
BELOW MY CONTOUR FINDING CODE:
imgContour = image_original.copy()
imgBlur = cv2.GaussianBlur(img, (7, 7), 1)
imgGray = imgBlur
imgCanny = cv2.Canny(imgGray, threshold1, threshold2)
kernel = np.ones((5, 5))
imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
getContours(imgDil, imgContour)

def getContours(img, imgContour):
    """ DRAWS AND FINDS CONTOURS, THEN RETURNS a list of lists incl x0, y0, w, h"""
    contour_list = []
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # print('contours:', contours)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        areaMin = cv2.getTrackbarPos("Area", "Parameters")
        if area > areaMin and area < 5000:
            cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 7)
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
            # print(len(approx))
            x, y, w, h = cv2.boundingRect(approx)
            print('contour bounding:', x, y, w, h)
            center_x = int(x + w / 2)
            center_y = int(y + h / 2)
            cv2.circle(imgContour, (center_x, center_y), 5, (0, 0, 255), 5)
            cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 5)
            cv2.putText(imgContour, "Points: " + str(len(approx)), (x + w + 20, y + 20), cv2.FONT_HERSHEY_COMPLEX, .7,
                        (0, 255, 0), 2)
            cv2.putText(imgContour, "Area: " + str(int(area)), (x + w + 20, y + 45), cv2.FONT_HERSHEY_COMPLEX, 0.7,
                        (0, 255, 0), 2)
            if area < 3500:
                cv2.putText(imgContour, "THIS IS A SMALL PART", (x + w + 20, y + 70), cv2.FONT_HERSHEY_COMPLEX, 0.7,
                            (0, 255, 0), 2)
            contour_list.append([x, y, w, h])
    return contour_list
So I still do not entirely know what went wrong here, but I found a solution for anyone in the future looking to first chroma key correct (remove the background) and then do contour detection:
I dropped the Gaussian filter, dilate and Canny, and instead just inverted the image's colours (contour detection only detects white parts on a black background) using:
mask = cv2.bitwise_not(mask)
I then changed the contour detection from cv2.RETR_EXTERNAL to cv2.RETR_LIST.
Somehow that fixed it; the result is now really good.
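For reference, a minimal sketch of the pipeline this answer describes; the file name and BGR bounds are taken from the question and should be treated as placeholders:

import cv2
import numpy as np

img = cv2.imread('yellowcropped.jpg', 1)   # placeholder file name from the question
lower = np.array([180, 0, 15])             # the blue-and-red filter from the question
upper = np.array([255, 255, 60])
mask = cv2.inRange(img, lower, upper)

# findContours looks for white regions on a black background,
# so invert the mask before searching, as the answer describes.
mask = cv2.bitwise_not(mask)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

out = img.copy()
cv2.drawContours(out, contours, -1, (255, 0, 0), 2)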

How to get the min contour of the color with HSV?

I'm working on image processing. I need to grab the max and min area contours under for pic, contour in enumerate(contours): after selecting with the area filter if (area > 2000):.
I can grab the max and min of the contours outside the for loop; the problem is that I need the min contour that is still greater than 2000 in this code.
My full code:
import cv2
import numpy as np
from imutils.video import FPS
import time

cap = cv2.VideoCapture(0)
width = cap.get(3)   # float
height = cap.get(4)  # float
print width, height
time.sleep(2.0)
fps = FPS().start()

while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue

    blue_lower = np.array([86, 0, 90], np.uint8)
    blue_upper = np.array([163, 64, 145], np.uint8)
    blue = cv2.inRange(hsv, blue_lower, blue_upper)
    kernal = np.ones((9, 9), "uint8")
    blue = cv2.dilate(blue, kernal)
    res_blue = cv2.bitwise_and(img, img, mask=blue)

    (_, contours, hierarchy) = cv2.findContours(blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 2000):
            print area
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Blue Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))

    if len(contours) > 0:
        c = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 5)
        cv2.putText(img, "Blue Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0))

    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
    fps.update()
Any ideas or suggestions will be appreciated
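A minimal sketch of one way to pick the smallest contour that is still larger than 2000, assuming contours comes from the same cv2.findContours call as above:

import cv2

# Assumes `contours` is the list returned by cv2.findContours in the loop above.
big = [c for c in contours if cv2.contourArea(c) > 2000]
if len(big) > 0:
    smallest = min(big, key=cv2.contourArea)   # min contour that is still > 2000
    largest = max(big, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(smallest)
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)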

How to improve the recognition of the passport?

We need to improve the recognition process on the passport. Please advise how to make the bounding box better (+1000 karma). Any other recommendations are welcome!
During recognition it incorrectly finds contours, it is not clear why, and it paints over the characters, so the characters suffer. Help with the code, please.
# import the required packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import numpy as np

# colour filter parameters
hsv_min = np.array((1, 10, 20), np.uint8)
hsv_max = np.array((26, 238, 255), np.uint8)

# build the argument parser and parse the arguments
if __name__ == '__main__':
    print(__doc__)
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image")
    ap.add_argument("-p", "--preprocess", type=str, default="thresh")
    args = vars(ap.parse_args())

    image = cv2.imread("photo_2019-07-16_10-45-52.jpg")
    # change the colour model from BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # greyscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kernel_size = 5
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    # apply the threshold
    gray, mask = cv2.threshold(gray, 150, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY)
    # find the contours
    contours, gray = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # pick the largest contour
    largest_area = 0
    for cnt in contours:
        if cv2.contourArea(cnt) > largest_area:
            cont = cnt
            largest_area = cv2.contourArea(cnt)
    # find the rectangle (and the corner points of that rectangle) that surrounds the contours / photo
    rect = cv2.minAreaRect(cont)
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    ############ Warp the image into a square #########
    # assign the corner points of the region of interest
    pts1 = np.float32([box[1], box[2], box[3], box[0]])
    # provide the new coordinates of the corner points
    pts2 = np.float32([[0, 0], [50, 50], [50, 50], [0, 0]])
    # define and apply the transformation matrix
    M = cv2.getPerspectiveTransform(pts1, pts2)
    tmp = cv2.warpPerspective(image, M, (500, 500))

    # greyscale
    gray_image2 = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
    # apply the threshold
    gray, mask2 = cv2.threshold(gray_image2, 100, 255, cv2.THRESH_BINARY_INV)
    thresh = cv2.inRange(hsv, hsv_min, hsv_max)
    # find the contours and store them in the variable contours
    gray, contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # draw the contours on top of the image
    cv2.drawContours(image, contours, -1, (255, 0, 0), 1, cv2.LINE_AA, hierarchy, 0)
    cv2.drawContours(image, contours, -1, (50, 10, 0), 1, cv2.LINE_AA, hierarchy, 2)
    # remove noise / close gaps
    kernel = np.ones((5, 5), np.uint8)
    gray = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
    # draw the rectangle on the original image
    cv2.drawContours(image, [box], 0, (255, 0, 0), 2)

    # show the image
    # check whether thresholding should be applied to pre-process the image
    if args["preprocess"] == "thresh":
        gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # check whether a median blur should be applied to remove noise
    elif args["preprocess"] == "blur":
        gray = cv2.medianBlur(gray, 3)

    # write the greyscale image to disk as a temporary file so we can apply OCR to it
    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, image)
    # load the image as a PIL/Pillow image and apply OCR
    pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    text = pytesseract.image_to_string(image, lang='rus+eng')
    os.remove(filename)
    print(text)
    # output file
    os.system('python Test.py > test.txt')

    # show the output images
    cv2.imshow("Image", image)
    # cv2.imshow("Output", gray)
    cv2.waitKey(0)
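One detail that stands out in the warp step above: pts2 contains duplicate corner points ([0, 0] and [50, 50] each appear twice), so getPerspectiveTransform cannot produce a useful mapping. A minimal sketch of mapping the four box corners to four distinct output corners; the output size is a placeholder:

import cv2
import numpy as np

def warp_box(image, box, out_w=500, out_h=500):
    """Warp the region enclosed by the 4 box corners to a full out_w x out_h image."""
    # box is the 4x2 array returned by cv2.boxPoints(cv2.minAreaRect(...))
    pts1 = np.float32(box)
    # four distinct destination corners, in the same order as pts1
    pts2 = np.float32([[0, 0], [out_w, 0], [out_w, out_h], [0, out_h]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    return cv2.warpPerspective(image, M, (out_w, out_h))

The corner order returned by cv2.boxPoints is not guaranteed to match the destination order used here, so the points may still need sorting first (for example with imutils.perspective.order_points).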

How to use connectedComponentsWithStats to remove dots?

With contours, I can do it like this:
if cv2.contourArea(cntr) <= 3:
    cv2.drawContours(img, [cntr], -1, (0, 0, 0), 1)
How can I do it with connectedComponentsWithStats?
If you want to do the same with connectedComponentsWithStats, these are the steps:
import cv2
import numpy as np

# This here is for making a custom image
img = np.zeros((500, 500, 3), dtype=np.uint8)
for i in range(1, 5):
    img = cv2.circle(img, (i * 80, i * 80), 5, (255, 255, 255), -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# CCA from here
labelnum, labelimg, contours, GoCs = cv2.connectedComponentsWithStats(gray)
for label in range(1, labelnum):
    x, y = GoCs[label]
    img = cv2.circle(img, (int(x), int(y)), 1, (0, 0, 255), -1)
    x, y, w, h, size = contours[label]
    if size <= 100:
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 1)
This gives the following image:
Hope it helps!
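Since the question is about removing the dots rather than boxing them, here is a minimal sketch of painting small components black using the same stats output; the input file name and the size threshold of 100 are placeholders:

import cv2
import numpy as np

img = cv2.imread('dots.png')                      # placeholder input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

labelnum, labelimg, stats, centroids = cv2.connectedComponentsWithStats(bw)
for label in range(1, labelnum):                  # label 0 is the background
    if stats[label, cv2.CC_STAT_AREA] <= 100:
        img[labelimg == label] = (0, 0, 0)        # paint the small component black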
