Please help with displaying information above the contour's bounding box: how do I display the size of the box, given that the product is 670 pixels long and its physical length is 4.1 mm? And how do I draw the center axes on the object? OpenCV version 4.7.0.68. Please edit the code.
import numpy as np
import cv2

cap = cv2.VideoCapture(1)
kernel = np.ones((2, 2), np.uint8)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    gray = cv2.medianBlur(gray, 5)  # to remove salt-and-pepper noise
    # To binary: detect white objects
    ret, thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    # To get the outer boundary only
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
    # To strengthen weak pixels
    thresh = cv2.dilate(thresh, kernel, iterations=1)
    # OpenCV 4.x returns (contours, hierarchy)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
        # Find the biggest contour (c) by area
        c = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        # Draw the bounding box of the biggest contour (c) in blue
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(27) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
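To answer the question itself, here is a minimal sketch of the two requested additions. It assumes the calibration 670 px = 4.1 mm holds at the working distance; MM_PER_PX is a helper name introduced here, not part of the original code. The lines belong inside the `if len(contours) > 0:` block, after `x, y, w, h = cv2.boundingRect(c)`:

# Assumes 670 px corresponds to 4.1 mm at the working distance
MM_PER_PX = 4.1 / 670.0

w_mm = w * MM_PER_PX
h_mm = h * MM_PER_PX
# Display the size above the bounding box
label = "{:.2f} x {:.2f} mm".format(w_mm, h_mm)
cv2.putText(frame, label, (x, max(y - 10, 0)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
# Draw the center axes of the object
cx, cy = x + w // 2, y + h // 2
cv2.line(frame, (x, cy), (x + w, cy), (0, 0, 255), 1)   # horizontal axis
cv2.line(frame, (cx, y), (cx, y + h), (0, 0, 255), 1)   # vertical axis
cv2.circle(frame, (cx, cy), 3, (0, 0, 255), -1)         # center point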
Input
I have the following depth images of type uint16, obtained from an Intel RealSense L515 camera, which is supposed to have an average depth accuracy < 5 mm at 1 m.
Goal
I want to quantify the depth of the blocks inside this image to get a discrete representation of the blocks inside my region of interest of 23 x 11 block positions, such as
P_x1_y1: z = 1 (one block), P_x2_y2: z = 2 (two blocks), up to 5 blocks (as in the image center).
The ROI RGB Image can clarify my aim (but it is not used as an input):
What I have tried so far:
Obtaining the ROI:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import matplotlib.pyplot as plt
def get_roi(d1, output_size=(736, 384), ratio=(0.77, 0.54), shift=(0, 80), verbose=False):
    """
    Function: get_roi, to find and resize the ROI.
    ---
    Parameters:
    #param: d1, nd array, depth image.
    #param: output_size, tuple, the output ROI size.
    #param: ratio, tuple, the ratio of the ROI to the detected zone.
    #param: shift, tuple, the shift in pixels to align the ROI.
    #param: verbose, bool, to visualize the result.
    ---
    #return: roi, nd array, ROI resized.
    """
    d = d1.copy()
    th = cv2.threshold(d, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    th = th.astype(np.uint8)
    contours = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    cnt_thresh = 10000
    fx, fy = ratio
    x_shift, y_shift = shift
    for i, cnt in enumerate(contours):
        if cv2.contourArea(cnt) > cnt_thresh:
            x, y, w, h = cv2.boundingRect(cnt)
            cx = x + w // 2 + x_shift
            cy = y + h // 2 + y_shift
            nw = int(fx * w)
            nh = int(fy * h)
            # cv2.rectangle(d1, (cx - nw // 2, cy - nh // 2), (cx + nw // 2, cy + nh // 2), color=0)
            d_roi = d1[cy - nh // 2:cy + nh // 2, cx - nw // 2:cx + nw // 2]
            roi = cv2.resize(d_roi, output_size)
    # Visualize results
    if verbose:
        plt.imshow(roi)
        plt.show()
    return roi
Finding the mode (most frequent) non-zero value of each cell in the grid:
def mode(arr):
    """
    Function: mode, to find the mode of an array.
    ---
    Parameters:
    #param: arr, nd array, any.
    ---
    #return: the mode value (whatever int/float/etc) of this array.
    """
    vals, counts = np.unique(arr, return_counts=True)
    if 0 in vals:
        z_idx = np.where(vals == 0)
        vals = np.delete(vals, z_idx)
        counts = np.delete(counts, z_idx)
    index = np.argmax(counts)
    return vals[index]
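A quick sanity check of mode() (the sample values are made up): zeros, which mark invalid depth pixels, are ignored:

# Hypothetical cell: zeros are dropped before taking the most frequent value
cell = np.array([0, 0, 371, 371, 371, 372])
print(mode(cell))  # -> 371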
Quantifying the values of each cell:
def mapVal(val):
    """
    Function: mapVal, to map depth values.
    ---
    Parameters:
    #param: val, int, any.
    ---
    #return: int, one of the specific values 0, 50, 100, 150, 200, 250.
    """
    if val <= 183:
        return 0
    elif val <= 230:
        return 50
    elif val <= 295:
        return 100
    elif val <= 390:
        return 150
    elif val <= 470:
        return 200
    else:
        return 250
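As an aside, the same banding can be written as a single table lookup; a sketch using NumPy's digitize, equivalent to the chain above (mapVal_np is a name introduced here, not from the original code):

# right=True makes each band inclusive of its upper edge,
# matching val <= 183, val <= 230, etc.
LEVELS = np.array([0, 50, 100, 150, 200, 250])
BINS = [183, 230, 295, 390, 470]

def mapVal_np(val):
    return LEVELS[np.digitize(val, BINS, right=True)]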
Gridding the ROI into cells and applying a linear correction for the static depth error:
def gridWorkspace(roi, gridSize=(23, 11), shift=[0, 5], verbose=False):
    """
    Function: gridWorkspace, to grid the ROI into cells and quantify them.
    ---
    Parameters:
    #param: roi, nd array, cropped region of interest.
    #param: gridSize, tuple, length/width of the workspace.
    #param: shift, to make static error compensation for alignment.
    #param: verbose, boolean, to show the output of the function.
    ---
    #return: None.
    """
    # Store a deep copy for results:
    roi_copy = roi.copy()
    # Divide the image into a grid:
    verticalCells = gridSize[1]
    horizontalCells = gridSize[0]
    # Cell dimensions
    bigRectWidth = roi_copy.shape[1]
    bigRectHeight = roi_copy.shape[0]
    cellWidth = bigRectWidth // horizontalCells
    cellHeight = bigRectHeight // verticalCells
    x_shift, y_shift = shift
    # Correction values
    origin = mode(roi[y_shift:cellHeight + y_shift, x_shift:cellWidth + x_shift])
    x_max = mode(roi[y_shift:y_shift + cellHeight, x_shift + (horizontalCells - 1) * cellWidth:x_shift + horizontalCells * cellWidth])
    y_max = mode(roi[y_shift + (verticalCells - 1) * cellHeight:y_shift + verticalCells * cellHeight, x_shift:x_shift + cellWidth])
    print("origin= {}, x_max= {}, y_max= {}".format(origin, x_max, y_max))
    x_corr = (int(x_max) - int(origin)) // horizontalCells
    y_corr = (int(y_max) - int(origin)) // verticalCells
    print("x_corr = {}, y_corr = {}".format(x_corr, y_corr))
    # Loop through the vertical dimension:
    for j in range(verticalCells):
        # Cell starting y position:
        yo = j * cellHeight + y_shift
        # Loop through the horizontal dimension:
        for i in range(horizontalCells):
            # Cell starting x position:
            xo = i * cellWidth + x_shift
            # Cell coordinates:
            cX = int(xo)
            cY = int(yo)
            # Quantify the current cell:
            # print(i, j, mode(roi[cY:cY + cellHeight, cX:cX + cellWidth]))
            roi_copy[cY:cY + cellHeight, cX:cX + cellWidth] = mapVal(mode(roi[cY:cY + cellHeight, cX:cX + cellWidth]) - j * y_corr - i * x_corr)  # mapVal(371 - mode(roi[cY:cY + cellHeight, cX:cX + cellWidth]))
            # Draw the cell
            cv2.rectangle(roi_copy, (cX, cY), (cX + cellWidth, cY + cellHeight), (100, 100, 255), 1)
    # Visualize results
    if verbose:
        plt.imshow(roi_copy)
        plt.show()
So when I try:
path = ""
imName = "d1.png"
d1 = cv2.imread(path+imName, -1)
roi = get_roi(d1, verbose=False)
roi = np.max(roi) - roi
roi[roi<0] = 0
roi[roi>500] = 0
gridWorkspace(roi, verbose=True)
I get this result:
Can you please tell me what I can do to improve my segmentation? Thanks in advance.
Square detection in an image using cv2.rectangle()
Using drawContours(), I can see multiple points.
Original image
I am able to detect two out of three squares.
The problem is that this is just a part of a very wide image, and only one square goes undetected: one big green box is missing.
Can you help in square detection?
I have used the below code:
import cv2
import numpy as np

def getContours(img, imgContour):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    # image_number = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        # cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 7)
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        print(len(approx))
        # if len(approx) == 4:
        x, y, w, h = cv2.boundingRect(cnt)
        ar = w / h
        if 0.95 <= ar <= 1.05:
            cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 5)

def auto_canny(image, sigma=0.33):
    # compute the median of the single-channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged

imagePath = "image3.jpg"
image = cv2.imread(imagePath)
imgContour = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
auto = auto_canny(blurred)
getContours(auto, imgContour)
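For completeness, a small display snippet helps inspect why one square is missed, by comparing the edge map with the detections (window names here are arbitrary, not from the original post):

# Show the Canny edge map next to the annotated image
cv2.imshow("edges", auto)
cv2.imshow("squares", imgContour)
cv2.waitKey(0)
cv2.destroyAllWindows()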
After making a circular ROI in an image, how can I get information (average, standard deviation, variance) from that image region using a script?
Can I link the position in the circular ROI with the original image?
This task is unfortunately not as straightforward as one would hope.
While scripting supports a convenient shortcut to restrict image operations to rectangular ROIs (using the img[] notation), there is nothing like that for irregular ROIs.
In such a case, one has to manually create a binary mask of the ROI and perform the wanted operations on it. The example script at the bottom of this post shows how the average value of an irregular ROI may be computed.
CreateImageWithROI() Creates a test image with two ROIs on it
GetFirstIrregularROIOfImage() just returns the first found, irregular ROI of an image
GetROIMean() is the actual example
The command ROIAddToMask() is used to create the mask. Note that there is also a similar command which would perform the action with all ROIs of an image display at once: ImageDisplayAccumulateROIsToMask()
So far, so good.
However, it turns out that the newly introduced Circular ROIs do not yet support the mask-creation commands correctly (Tested with GMS 3.1).
Instead, they always use the bounding rectangle of the ROI:
It is therefore necessary to go even one step back and read the ROI's coordinates to create a mask from it manually. Get the ROI's bounding-box and create a mask using an icol and irow expression for an ellipse. In the example below:
GetFirstOvalROIOfImage() just returns the first found, oval ROI of an image
MyAddOvalROIToMask() is the manual mask creation for oval ROIs
Example code:
image CreateImageWithROI()
{
    // Create and show image
    number sx = 256, sy = 256
    image img := RealImage( "Image", 4, sx, sy )
    img = sin( 0.1 * iradius ) * cos( 7 * itheta )
    img.ShowImage()

    // Create an irregular, closed ROI
    ROI myIrRoi = NewROI()
    myIrRoi.ROIAddVertex( 0.3 * sx, 0.1 * sy )
    myIrRoi.ROIAddVertex( 0.7 * sx, 0.2 * sy )
    myIrRoi.ROIAddVertex( 0.5 * sx, 0.6 * sy )
    myIrRoi.ROIAddVertex( 0.1 * sx, 0.8 * sy )
    myIrRoi.ROISetIsClosed(1)
    myIrRoi.ROISetVolatile(0)

    // Create an oval ROI
    ROI myOvalROI = NewROI()
    myOvalROI.ROISetOval( 0.7 * sy, 0.7 * sx, 0.9 * sy, 0.8 * sx )
    myOvalROI.ROISetVolatile(0)

    // Add ROIs
    imageDisplay disp = img.ImageGetImageDisplay( 0 )
    disp.ImageDisplayAddROI( myIrRoi )
    disp.ImageDisplayAddROI( myOvalROI )
    return img
}
ROI GetFirstIrregularROIOfImage( image img )
{
    if ( img.ImageIsValid() )
    {
        if ( 0 != img.ImageCountImageDisplays() )
        {
            imageDisplay disp = img.ImageGetImageDisplay( 0 )
            number nRois = disp.ImageDisplayCountROIs()
            for ( number i = 0; i < nRois; i++ )
            {
                ROI testROI = disp.ImageDisplayGetRoi( i )
                number isIrregularClosed = 1
                isIrregularClosed *= testROI.ROIIsClosed();
                isIrregularClosed *= !testROI.ROIIsOval();
                isIrregularClosed *= !testROI.ROIIsRectangle();
                isIrregularClosed *= ( 2 < testROI.ROICountVertices());
                if ( isIrregularClosed )
                    return testROI
            }
        }
    }
    Throw( "No irregular ROI found" )
}
ROI GetFirstOvalROIOfImage( image img )
{
    if ( img.ImageIsValid() )
    {
        if ( 0 != img.ImageCountImageDisplays() )
        {
            imageDisplay disp = img.ImageGetImageDisplay( 0 )
            number nRois = disp.ImageDisplayCountROIs()
            for ( number i = 0; i < nRois; i++ )
            {
                ROI testROI = disp.ImageDisplayGetRoi( i )
                if ( testROI.ROIIsOval() )
                    return testROI
            }
        }
    }
    Throw( "No oval ROI found" )
}
void MyAddOvalROIToMask( image img, ROI ovalROI )
{
    number top, left, bottom, right
    ovalROI.ROIGetOval( top, left, bottom, right )
    number sx = ( right - left )
    number sy = ( bottom - top )
    number cx = sx/2    // Used as both center x coordinate and x radius!
    number cy = sy/2    // Used as both center y coordinate and y radius!
    // Create mask of just the rect area
    image maskCut := RealImage( "", 4, sx, sy )
    maskCut = ( ((cx-icol)/cx)**2 + ((cy-irow)/cy)**2 <= 1 ) ? 1 : 0
    // Apply mask to image
    img[top, left, bottom, right] = maskCut
}
number GetROIMean( image img, ROI theRoi )
{
    if ( !img.ImageIsValid() ) Throw( "Invalid image in GetROIMean()" )
    if ( !theRoi.ROIIsValid() ) Throw( "Invalid roi in GetROIMean()" )

    // Create a binary mask of "img" size using the ROI's coordinates
    image mask = img * 0;    // image of same size as "img" with 0 values
    number sx, sy
    img.GetSize( sx, sy )

    // Oval ROIs are not supported correctly by the command,
    // hence check and compute the mask manually.
    if ( theRoi.ROIIsOval() )
        MyAddOvalROIToMask( mask, theRoi )
    else
        theRoi.ROIAddToMask( mask, 0, 0, sx, sy )

    if ( TwoButtonDialog( "Show mask?", "Yes", "No" ) )
        mask.ShowImage()

    // Compute the mean value as a sum over the masked points
    number maskedPoints = sum( mask )
    number maskedSum
    if ( 0 < maskedPoints )
        maskedSum = sum( mask * img ) / maskedPoints
    else
        maskedSum = sum( img )
    return maskedSum
}
Result( "\n Testing irregular and oval ROIs on image.\n" )
image testImg := CreateImageWithROI()
ROI testROIir = GetFirstIrregularROIOfImage( testImg )
number ROIirMean = GetROIMean( testImg, testROIir )
Result( "\n Mean value (irregular ROI): "+ ROIirMean )
ROI testROIoval = GetFirstOvalROIOfImage( testImg )
number ROIovalMean = GetROIMean( testImg, testROIoval )
Result( "\n Mean value (oval ROI) : "+ ROIovalMean )
I would like to do a cumulative sum along the x or y direction of the image data.
Is there any function in DM-scripting like "cumsum" in Matlab?
Thanks!
For example, for an image of 4x4 pixels, the pixel values are
1 2 3 4
2 3 4 5
3 4 5 6
4 5 6 7
The cumulative sum along the x direction will result in:
1 1+2=3 1+2+3=6 1+2+3+4=10
2 5 9 14
3 7 12 18
4 9 15 22
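(For comparison, this is exactly what NumPy's cumsum produces along axis 1; a quick Python check, outside DM-script:)

import numpy as np

a = np.arange(1, 5)[None, :] + np.arange(0, 4)[:, None]  # the 4x4 example above
print(np.cumsum(a, axis=1))
# [[ 1  3  6 10]
#  [ 2  5  9 14]
#  [ 3  7 12 18]
#  [ 4  9 15 22]]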
There are different ways to achieve this, but potentially the fastest and easiest is to create a "fully binned" version of the image.
image img := GetFrontImage()
number sizeX, sizeY
img.GetSize( sizeX, sizeY )
image vSum = Rebin( img, 1, sizeY )
image hSum = Rebin( img, sizeX, 1 )
vSum.SetName( "vertical sum" )
vSum.ShowImage()
hSum.SetName( "horizontal sum" )
hSum.ShowImage()
If you want a 2D image as a result, where each pixel holds the sum of all pixels to its left, you can do this by adding up offset images:
image img := GetFrontImage()
number sizeX, sizeY
img.GetSize( sizeX, sizeY )
image hCumSum := img.ImageClone()
for( number x = 1; x < sizeX; x++ )
{
    hCumSum += offset( img, -x, 0 )
}
hCumSum.SetName( "horizontal sum (cumulative)" )
hCumSum.ShowImage()
Alternatively, you can create an expression using intrinsic variables as in
image img := GetFrontImage()
image hCumSum := 0 * img.ImageClone()
hCumSum += img[icol,irow] + hCumSum[ icol - 1, irow ]
hCumSum.SetName( "horizontal sum (cumulative)" )
hCumSum.ShowImage()
GMS 3.4 also offers a dedicated, speed-optimized command:
RealImage Project( BasicImage img, Number axis )
RealImage Project( BasicImage img, Number axis, Boolean rescale )
void Project( BasicImage img, BasicImage dst, Number axis )
void Project( BasicImage img, BasicImage dst, Number axis, Boolean rescale )
Another way to do the projection is by matrix multiplication: multiplying a 2-D image by a 1-D matrix of ones projects the image onto a 1-D accumulation.
number d0, d1
image HProject, VProject, ones, img
img:=getfrontImage()
img.getSize(d0,d1)
ones:=exprSize(1,d0,1)
HProject=MatrixMultiply(img,ones)
HProject.rotateLeft()
HProject.showImage()
ones:=exprSize(d1,1,1)
VProject=MatrixMultiply(ones,img)
VProject.showImage()
I also have one:
image cumsum(image img)
// computes the cumulative sum along the x direction
{
    number sx, sy
    img.GetSize(sx, sy)
    for(number i = 1; i < sx; i++)
    {
        img[0, i, sy, i+1] = img[0, i-1, sy, i] + img[0, i, sy, i+1]
    }
    return img
}
image im=getfrontimage()
im=im.cumsum()
im.showimage()
I asked a similar question here, but that one is focused more on Tesseract.
I have a sample image as below. I would like to make the white square my Region of Interest and then crop out that part (square) and create a new image with it. I will be working with different images so the square won't always be at the same location in all images. So I will need to somehow detect the edges of the square.
What are some pre-processing methods I can perform to achieve the result?
Using your test image, I was able to remove all the noise with a simple erosion operation.
After that, a simple iteration on the Mat to find the corner pixels is trivial, and I talked about that in this answer. For testing purposes, we can draw green lines between those points to display the area we are interested in on the original image:
At the end, I set the ROI in the original image and crop out that part.
The final result is displayed on the image below:
I wrote a sample code that performs this task using the C++ interface of OpenCV. I'm confident in your skills to translate this code to Python. If you can't do it, forget the code and stick with the roadmap I shared in this answer.
#include <cv.h>
#include <highgui.h>

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    std::cout << "Original image size: " << img.size() << std::endl;

    // Convert RGB Mat to GRAY
    cv::Mat gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    std::cout << "Gray image size: " << gray.size() << std::endl;

    // Erode image to remove unwanted noise
    int erosion_size = 5;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_CROSS,
                          cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1),
                          cv::Point(erosion_size, erosion_size));
    cv::erode(gray, gray, element);

    // Scan the image searching for points and store them in a vector
    std::vector<cv::Point> points;
    cv::Mat_<uchar>::iterator it = gray.begin<uchar>();
    cv::Mat_<uchar>::iterator end = gray.end<uchar>();
    for (; it != end; it++)
    {
        if (*it)
            points.push_back(it.pos());
    }

    // From the points, figure out the size of the ROI
    int left, right, top, bottom;
    for (int i = 0; i < points.size(); i++)
    {
        if (i == 0) // initialize corner values
        {
            left = right = points[i].x;
            top = bottom = points[i].y;
        }
        if (points[i].x < left)
            left = points[i].x;
        if (points[i].x > right)
            right = points[i].x;
        if (points[i].y < top)
            top = points[i].y;
        if (points[i].y > bottom)
            bottom = points[i].y;
    }

    std::vector<cv::Point> box_points;
    box_points.push_back(cv::Point(left, top));
    box_points.push_back(cv::Point(left, bottom));
    box_points.push_back(cv::Point(right, bottom));
    box_points.push_back(cv::Point(right, top));

    // Compute minimal bounding box for the ROI
    // Note: for some unknown reason, width/height of the box are switched.
    cv::RotatedRect box = cv::minAreaRect(cv::Mat(box_points));
    std::cout << "box w:" << box.size.width << " h:" << box.size.height << std::endl;

    // Draw bounding box in the original image (debugging purposes)
    //cv::Point2f vertices[4];
    //box.points(vertices);
    //for (int i = 0; i < 4; ++i)
    //{
    //    cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, CV_AA);
    //}
    //cv::imshow("Original", img);
    //cv::waitKey(0);

    // Set the ROI to the area defined by the box
    // Note: because the width/height of the box are switched,
    // they were switched manually in the code below:
    cv::Rect roi;
    roi.x = box.center.x - (box.size.height / 2);
    roi.y = box.center.y - (box.size.width / 2);
    roi.width = box.size.height;
    roi.height = box.size.width;
    std::cout << "roi @ " << roi.x << "," << roi.y << " "
              << roi.width << "x" << roi.height << std::endl;

    // Crop the original image to the defined ROI
    cv::Mat crop = img(roi);

    // Display cropped ROI
    cv::imshow("Cropped ROI", crop);
    cv::waitKey(0);

    return 0;
}
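Since the answer invites translating the code to Python, here is a rough, hedged Python/OpenCV sketch of the same pipeline. It condenses the pixel scan into np.nonzero and skips the minAreaRect step, so treat it as an outline rather than a literal translation:

import sys
import cv2
import numpy as np

img = cv2.imread(sys.argv[1])
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Erode to remove small noise, as in the C++ version
erosion_size = 5
element = cv2.getStructuringElement(
    cv2.MORPH_CROSS,
    (2 * erosion_size + 1, 2 * erosion_size + 1),
    (erosion_size, erosion_size))
gray = cv2.erode(gray, element)

# Bounding box of all remaining non-zero pixels
ys, xs = np.nonzero(gray)
left, right = xs.min(), xs.max()
top, bottom = ys.min(), ys.max()

# Crop the ROI out of the original image
crop = img[top:bottom + 1, left:right + 1]
cv2.imshow("Cropped ROI", crop)
cv2.waitKey(0)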
Seeing that the text is the only large blob, and everything else is barely larger than a pixel, a simple morphological opening should suffice.
You can do this in OpenCV or with ImageMagick.
Afterwards the white rectangle should be the only thing left in the image. You can find it with OpenCV's findContours, with the cvBlobs library for OpenCV, or with ImageMagick's -crop function.
Here is your image with 2 steps of erosion followed by 2 steps of dilation applied:
You can simply plug this image into the OpenCV findContours function, as in the Squares tutorial example, to get the position; a sketch of this pipeline follows below.
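A minimal Python/OpenCV sketch of the opening-then-findContours approach described above (the file names and kernel size are assumptions, not from the original answer):

import cv2
import numpy as np

img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # assumed input file
_, bw = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)

# Morphological opening: 2 steps of erosion followed by 2 steps of dilation
kernel = np.ones((3, 3), np.uint8)
opened = cv2.erode(bw, kernel, iterations=2)
opened = cv2.dilate(opened, kernel, iterations=2)

# The white rectangle should now be the dominant contour
contours, _ = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
crop = img[y:y + h, x:x + w]
cv2.imwrite("cropped.png", crop)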
Input image:
# objective:
# 1) compress large images to less than 1000x1000
# 2) identify regions of interest
# 3) save ROIs in top-to-bottom order
import cv2
import os

def get_contour_precedence(contour, cols):
    tolerance_factor = 10
    origin = cv2.boundingRect(contour)
    return ((origin[1] // tolerance_factor) * tolerance_factor) * cols + origin[0]

# Load image, grayscale, Gaussian blur, adaptive threshold
image = cv2.imread('./images/sample_0.jpg')

# Compress the image if the image size is > 1000x1000
height, width, color = image.shape  # unpack the (height, width, colour) tuple returned by image.shape
while width > 1000:
    height = height / 2
    width = width / 2
    print(int(height), int(width))
height = int(height)
width = int(width)
image = cv2.resize(image, (width, height))

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9, 9), 0)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 30)

# Dilate to combine adjacent text contours
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
ret, thresh3 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
dilate = cv2.dilate(thresh, kernel, iterations=4)

# Find contours, highlight text areas, and extract ROIs
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cnts = cv2.findContours(thresh3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# wrap in list() so .sort() works on the tuple returned by findContours
cnts = list(cnts[0] if len(cnts) == 2 else cnts[1])

# Order contours top to bottom
cnts.sort(key=lambda x: get_contour_precedence(x, image.shape[1]))

# Delete previous ROI images in the roi folder to avoid stale results
dir = './roi/'
for f in os.listdir(dir):
    os.remove(os.path.join(dir, f))

ROI_number = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area > 10000:
        x, y, w, h = cv2.boundingRect(c)
        # cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 3)
        cv2.rectangle(image, (x, y), (x + w, y + h), (100, 100, 100), 1)
        # use the code below to write the ROI when the results are good
        ROI = image[y:y+h, x:x+w]
        cv2.imwrite('roi/ROI_{}.jpg'.format(ROI_number), ROI)
        ROI_number += 1

cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('image', image)
cv2.waitKey()
Output: ROI detection result.