I would like to resize, and specifically shrink, a mask (a 2D array of 1s and 0s) so that any pixel in the low-resolution mask that maps to a group of pixels in the high-resolution (original) mask containing at least one 1 is itself set to 1 (example at the bottom).
I've tried cv2.resize() with cv2.INTER_MAX, but it returned an error:
error: OpenCV(4.6.0) /io/opencv/modules/imgproc/src/resize.cpp:3927: error: (-5:Bad argument) Unknown interpolation method in function 'resize'
Neither Pillow's Image nor scipy seems to offer an interpolation method that does this.
I'm looking for an implementation of shrink_max() as defined below:
>>> orig_mask = [[1,0,0],[0,0,0],[0,0,0]]
>>> orig_mask
[[1,0,0]
,[0,0,0]
,[0,0,0]]
>>> mini_mask = shrink_max(orig_mask, (2,2))
>>> mini_mask
[[1,0]
,[0,0]]
>>> mini_mask = shrink_max(orig_mask, (1,1))
>>> mini_mask
[[1]]
I'm not aware of a direct method, but try this for shrinking the mask to half size, i.e. each low-res pixel maps to four original pixels (adapt the ratio to your needs):
import numpy as np
orig_mask = np.array([[1,0,0],[0,0,0],[0,0,0]])
# first make the original mask divisible by 2
pad_row = orig_mask.shape[0] % 2
pad_col = orig_mask.shape[1] % 2
# i.e. pad the right and bottom of the mask with zeros
orig_mask_padded = np.pad(orig_mask, ((0,pad_row), (0,pad_col)))
# get the new shape
new_rows = orig_mask_padded.shape[0] // 2
new_cols = orig_mask_padded.shape[1] // 2
# group the original pixels by fours and max each group
shrunk_mask = orig_mask_padded.reshape(new_rows, 2, new_cols, 2).max(axis=(1,3))
print(shrunk_mask)
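For the example mask this prints:
[[1 0]
 [0 0]]
which matches the desired 2x2 mini_mask from the question.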
See this question on working with submatrices: Numpy: efficiently sum sub matrix m of M
Here's the complete function for shrinking to any desired shape:
def shrink_max(mask, shrink_to_shape):
    r, c = shrink_to_shape
    m, n = mask.shape
    # pad the bottom/right with zeros so both dimensions are divisible by the target shape
    padded_mask = np.pad(mask, ((0, -m % r), (0, -n % c)))
    pr, pc = padded_mask.shape
    # group the pixels into (pr // r, pc // c) blocks and take the max of each block
    return padded_mask.reshape(r, pr // r, c, pc // c).max(axis=(1, 3))
For example print(shrink_max(orig_mask, (2,1))) returns:
[[1]
[0]]
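As an alternative, if scikit-image is available, skimage.measure.block_reduce performs the same block-wise max directly. A sketch for the half-size case (note it takes a block size rather than a target shape, so for an arbitrary target shape you would still compute the block size and padding as above):
import numpy as np
from skimage.measure import block_reduce

orig_mask = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
# reduce 2x2 blocks with max; pixels missing at the border are padded with cval=0
mini_mask = block_reduce(orig_mask, block_size=(2, 2), func=np.max, cval=0)
print(mini_mask)  # [[1 0]
                  #  [0 0]]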
I'm new to machine learning and PyTorch. I'm using the imgaug library for image augmentation (https://github.com/aleju/imgaug)
I have this code:
class ImgAugTransform:
    def __init__(self):
        self.aug = seq = iaa.Sequential(
            [
                # Apply the following augmenters to most images
                iaa.Fliplr(0.5),  # horizontally flip 50% of all images
                iaa.Flipud(0.2),  # vertically flip 20% of all images
                random_aug_use(iaa.CropAndPad(  # crop/pad images by -10% to 20% of their height/width
                    percent=(-0.1, 0.2),
                    pad_mode=ia.ALL,
                    pad_cval=(0., 255)
                )),
                random_aug_use(iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},  # scale images to 80-120% of their size, individually per axis
                    translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},  # translate by -20 to +20 percent (per axis)
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                    mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                ))
            ],
            random_order=True)

    def __call__(self, img):
        img = np.array(img)
        return self.aug.augment_image(img)
train_transforms = ImgAugTransform()
train_dataset = torchvision.datasets.ImageFolder(train_dir, train_transforms)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, num_workers=batch_size)
Now I can't do this:
X_batch, y_batch = next(iter(train_dataloader))
I get error:
ValueError: some of the strides of a given numpy array are negative. This is currently not supported, but will be added in future releases.
I came across this error as well.
The solution that worked for me was:
def __call__(self, img):
    img = np.array(img)
    return self.aug.augment_image(img).copy()
But, if you're composing imgaug with torchvision.transforms you can do something like:
def __call__(self, img):
    img = self.aug.augment_image(np.array(img))
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(self.normalization[0],
                                         self.normalization[1]),
    ])
    return transforms(img.copy())
You should make your augmented numpy arrays contiguous again.
Try modifying your augmenter code to:
def __call__(self, img):
    img = np.array(img)
    return np.ascontiguousarray(self.aug.augment_image(img))
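For background, the error typically comes from augmenters that flip or mirror the image: they return numpy views with a negative stride, which torch.from_numpy() cannot wrap. A minimal sketch in plain numpy (no imgaug) showing the same situation and the fix:
import numpy as np

img = np.arange(12, dtype=np.uint8).reshape(3, 4)
flipped = img[:, ::-1]                    # a horizontal flip as a view: one stride is negative
print(flipped.strides)                    # (4, -1)
print(flipped.flags['C_CONTIGUOUS'])      # False -> torch.from_numpy() raises the ValueError above
fixed = np.ascontiguousarray(flipped)     # or flipped.copy()
print(fixed.strides, fixed.flags['C_CONTIGUOUS'])  # (4, 1) True -> safe to convert to a tensor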
I have to detect mice in a cage; the input images look like the following:
At the moment I am using cv2.createBackgroundSubtractorMOG2() on the video stream to find the area containing the mice and afterwards a Canny edge detector to extract their contours.
However, this is not working that well: the more the mice move, the better it works, but I suspect there is a better approach to detecting them.
Does anyone have a different idea for how to detect the mice?
Thanks in advance.
After subtracting the background, you could use a threshold to remove noise. Try saving the subtracted image and seeing what it looks like. Here's a script I use to tweak filter parameters (run it with the subtracted image):
import cv2
import numpy as np
screenshot_path = 'screenshot.bmp'
def nothing(x):
    pass
# Creating a window for later use
cv2.namedWindow('result', cv2.WINDOW_NORMAL)
cv2.namedWindow('trackbar', cv2.WINDOW_NORMAL)
# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100
# Creating track bar
cv2.createTrackbar('h', 'trackbar', 0, 180, nothing)
cv2.createTrackbar('s', 'trackbar', 0, 255, nothing)
cv2.createTrackbar('v', 'trackbar', 164, 255, nothing)
cv2.createTrackbar('h2', 'trackbar', 120, 180, nothing)
cv2.createTrackbar('s2', 'trackbar', 12, 255, nothing)
cv2.createTrackbar('v2', 'trackbar', 253, 255, nothing)
frame = cv2.imread(screenshot_path)
# converting to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
while (1):
    # get info from the track bars and apply it to the result
    h = cv2.getTrackbarPos('h', 'trackbar')
    s = cv2.getTrackbarPos('s', 'trackbar')
    v = cv2.getTrackbarPos('v', 'trackbar')
    h2 = cv2.getTrackbarPos('h2', 'trackbar')
    s2 = cv2.getTrackbarPos('s2', 'trackbar')
    v2 = cv2.getTrackbarPos('v2', 'trackbar')
    # Normal masking algorithm
    lower = np.array([h, s, v])
    upper = np.array([h2, s2, v2])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)
    print(h, s, v, h2, s2, v2)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
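Once you have reasonable threshold values, a rough pipeline could be: background subtraction, a binary threshold, a morphological opening to remove speckle, then contours. A sketch (not tuned for your footage; 'mice.avi' is a placeholder path):
import cv2

cap = cv2.VideoCapture('mice.avi')  # placeholder path to your video
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=False)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fg = subtractor.apply(frame)
    # keep only confident foreground, then remove speckle noise
    _, fg = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)
    fg = cv2.morphologyEx(fg, cv2.MORPH_OPEN, kernel, iterations=2)
    # OpenCV 4.x returns (contours, hierarchy)
    contours, _ = cv2.findContours(fg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:  # ignore tiny blobs; tune for your resolution
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('detection', frame)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()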
If that doesn't work, I would use an object tracker like CSRT:
# python opencv_object_tracking.py
# python opencv_object_tracking.py --video dashcam_boston.mp4 --tracker csrt
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
help="OpenCV object tracker type")
args = vars(ap.parse_args())
# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]
# if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# function to create our object tracker
if int(major) == 3 and int(minor) < 3:
    tracker = cv2.Tracker_create(args["tracker"].upper())
# otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
# appropriate object tracker constructor:
else:
    # initialize a dictionary that maps strings to their corresponding
    # OpenCV object tracker implementations
    OPENCV_OBJECT_TRACKERS = {
        "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        "mosse": cv2.TrackerMOSSE_create
    }

    # grab the appropriate object tracker using our dictionary of
    # OpenCV object tracker objects
    tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# initialize the FPS throughput estimator
fps = None
# loop over frames from the video stream
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame

    # check to see if we have reached the end of the stream
    if frame is None:
        break

    # resize the frame (so we can process it faster) and grab the
    # frame dimensions
    frame = imutils.resize(frame, width=500)
    (H, W) = frame.shape[:2]

    # check to see if we are currently tracking an object
    if initBB is not None:
        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(frame)

        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h),
                          (0, 255, 0), 2)

        # update the FPS counter
        fps.update()
        fps.stop()

        # initialize the set of information we'll be displaying on
        # the frame
        info = [
            ("Tracker", args["tracker"]),
            ("Success", "Yes" if success else "No"),
            ("FPS", "{:.2f}".format(fps.fps())),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                               showCrosshair=True)

        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        tracker.init(frame, initBB)
        fps = FPS().start()

    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()

# otherwise, release the file pointer
else:
    vs.release()
# close all windows
cv2.destroyAllWindows()
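Note that the factory functions above follow the OpenCV 3.x API. In recent 4.x builds some of the older trackers (e.g. MOSSE, MedianFlow, Boosting, TLD) have moved into cv2.legacy, while CSRT is still available as cv2.TrackerCSRT_create (with opencv-contrib-python installed). A stripped-down CSRT loop might look like this:
import cv2

cap = cv2.VideoCapture(0)                      # or a video file path
ok, frame = cap.read()
# let the user draw the initial box around a mouse
bbox = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)

tracker = cv2.TrackerCSRT_create()             # needs opencv-contrib-python
tracker.init(frame, bbox)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    success, box = tracker.update(frame)
    if success:
        x, y, w, h = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()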
For my project I want to save the bounding boxes found by the Object Detection API as .jpg images to feed into another CNN for further classification.
Here is my code (derived from EdjeElectronics GitHub):
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = '_model_ssd_v2'
IMAGE_NAME = 'image.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'_data','label_map.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,"_images",IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

    sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visulaize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.3)
# All the results have been drawn on image. Now display the image.
# cv2.imshow('Object detector', cv2.resize(image, (int(2592/2),int(1944/2))))
# # Press any key to close the image
# cv2.waitKey(0)
# # Clean up
# cv2.destroyAllWindows()
cv2.imwrite("C:/tensorflow/models/research/object_detection/_images/test1.jpg", image)
A similar question was asked here, but I don't know how to apply it with the TensorFlow Object Detection API.
Thank You!
I've found the functions draw_bounding_boxes_on_image and draw_bounding_boxes_on_image_array in vis_util; since the image here is a numpy array, use the _array variant. Try this:
# create a white background image with the same shape as image
white_bg_img = 255 * np.ones(image.shape, np.uint8)

vis_util.draw_bounding_boxes_on_image_array(
    white_bg_img,
    np.squeeze(boxes),
    color='red',
    thickness=4)
cv2.imwrite("bounding_boxes.jpg", white_bg_img)
To save the image regions inside the bounding boxes as separate files:
boxes = np.squeeze(boxes)
height, width = image.shape[:2]
for i in range(len(boxes)):
    # boxes are normalized [ymin, xmin, ymax, xmax]; scale them to pixel coordinates
    ymin = int(boxes[i, 0] * height)
    xmin = int(boxes[i, 1] * width)
    ymax = int(boxes[i, 2] * height)
    xmax = int(boxes[i, 3] * width)
    roi = image[ymin:ymax, xmin:xmax].copy()
    cv2.imwrite("box_{}.jpg".format(str(i)), roi)
The saved files will be named box_0.jpg, box_1.jpg, ...
I followed this link and it worked. Add the following code:
min_score_thresh = 0.60
true_boxes = boxes[0][scores[0] > min_score_thresh]
for i in range(true_boxes.shape[0]):
    ymin = int(true_boxes[i, 0] * height)
    xmin = int(true_boxes[i, 1] * width)
    ymax = int(true_boxes[i, 2] * height)
    xmax = int(true_boxes[i, 3] * width)
    roi = image[ymin:ymax, xmin:xmax].copy()
    cv2.imwrite("box_{}.jpg".format(str(i)), roi)
Make sure height and width are set to the true height and width of the image.
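With the OpenCV image used in the question, those dimensions can be read straight off the array:
height, width = image.shape[:2]  # rows and columns of the array returned by cv2.imread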
This will work (again with height and width set to the real image dimensions):
box = np.squeeze(boxes)
for i in range(len(box)):
    ymin = int(box[i, 0] * height)
    xmin = int(box[i, 1] * width)
    ymax = int(box[i, 2] * height)
    xmax = int(box[i, 3] * width)
    print(ymin, xmin, ymax, xmax)
    roi = image[ymin:ymax, xmin:xmax].copy()
    cv2.imwrite("box_{}.jpg".format(str(i)), roi)
I need some direction on the best programming approach to overlay a live video from a webcam (or a pre-loaded video) with a paintbrush, i.e. drawing lines, circles and such to mark areas in the video while it is playing.
Right now I'm using OpenCV + Tkinter; is there a better way to approach this?
Thanks
EDIT: I'm trying to paint directly on the video window but can't get it to work. Here is my code:
import cv2
import numpy as np
def interactive_drawing(event, x, y, flags, param):
    global ix, iy, drawing, mode

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y

    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.line(frame, (ix, iy), (x, y), (0, 0, 255), 10)
                ix = x
                iy = y
                print(x, y)

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.line(frame, (ix, iy), (x, y), (0, 0, 255), 10)
            ix = x
            iy = y
    return x, y
drawing=False # true if mouse is pressed
mode=True # if True, draw rectangle. Press 'm' to toggle to curve
cap = cv2.VideoCapture('track.avi')
while(cap.isOpened()):
    ret, frame = cap.read()
    if frame is None:
        break
    # cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
    # cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.setMouseCallback('frame', interactive_drawing)
    cv2.imshow('frame', frame)
    if cv2.waitKey(15) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# mouse callback function
##img = np.zeros((512,512,3), np.uint8)
##
##cv2.namedWindow('begueradj')
##cv2.setMouseCallback('begueradj',interactive_drawing)
##while(1):
## cv2.imshow('begueradje',img)
## k=cv2.waitKey(1)&0xFF
## if k==27:
## break
##cv2.destroyAllWindows()
I would only use OpenCV for that purpose, as it already provides drawing functions (see the short sketch after this list) such as:
Lines
Rectangles
Circles
Ellipses
Polygons
etc.
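For reference, these drawing calls are all one-liners that modify the frame in place. A minimal sketch on a blank image:
import cv2
import numpy as np

canvas = np.zeros((480, 640, 3), np.uint8)                      # a blank BGR "frame"
cv2.line(canvas, (50, 50), (300, 50), (0, 0, 255), 3)           # red line
cv2.circle(canvas, (200, 240), 60, (0, 255, 0), 2)              # green circle
cv2.rectangle(canvas, (350, 100), (550, 300), (255, 0, 0), 2)   # blue rectangle
cv2.ellipse(canvas, (320, 400), (100, 40), 0, 0, 360, (0, 255, 255), 2)  # yellow ellipse
cv2.imshow("drawing", canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()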
EDIT: This code snippet should help you get started. The important point here is that you have to save the drawing elements in a variable (curves in my case) and draw them on each new frame:
import cv2
import numpy as np
def interactive_drawing(event, x, y, flags, param):
    global drawing, mode

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True

    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                curves[len(curves) - 1].append((x, y))  # append new points to the last list of curves

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            curves.append([])  # adding a new list to curves
    return x, y

def draw_curves(myArray):
    for j in range(0, len(myArray)):
        for i in range(1, len(myArray[j])):
            cv2.line(frame, myArray[j][i - 1], myArray[j][i], (0, 0, 255), 10)
drawing=False # true if mouse is pressed
mode=True # if True, draw rectangle. Press 'm' to toggle to curve
cap = cv2.VideoCapture(0) #cap = cv2.VideoCapture('track.avi')
curves = [[]] # initializing curves list with an empty list
cv2.namedWindow('frame')
cv2.setMouseCallback('frame',interactive_drawing)
while(cap.isOpened()):
    ret, frame = cap.read()
    if frame is None:
        break
    draw_curves(curves)
    cv2.imshow('frame', frame)
    if cv2.waitKey(15) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()