I am working on a VCS (vehicle counting system) project. The scope of the project is to classify and count vehicles. I have built a custom model with Faster R-CNN using the TensorFlow Object Detection API. The model contains only 7 classes, such as car, motorbike, and bicycle. The model works well, but the problem is counting: it is very hard to count vehicles across video frames. I researched this on the internet and tried a lot, but I could not find any useful information. There are some projects on GitHub that use tracking methods.
I want to draw a horizontal line in the frame and increment the count when a vehicle touches it. I don't know the algorithm behind this; I have heard that centroid tracking could help me.
My question is: how do I count vehicles when they cross the horizontal line? I have linked a sample image below.
Sample_Image
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
VIDEO_NAME = 'Video_105.mp4'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 7
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)
while video.isOpened():
    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
    # i.e. a single-column array, where each item in the column has the pixel RGB value
    ret, frame = video.read()
    if not ret:
        break
    frame_expanded = np.expand_dims(frame, axis=0)
    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})
    # Draw the results of the detection (aka 'visualize the results')
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.90)
    # Draw the counting line and collect the boxes above the score threshold
    cv2.line(frame, (1144, 568), (1723, 664), (0, 0, 255), 2)  # Line
    count = 0  # placeholder for the vehicle count (not incremented anywhere yet)
    min_score_thresh = 0.90
    final_score = np.squeeze(scores)
    bboxes = np.squeeze(boxes)[final_score > min_score_thresh]
    im_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    im_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    final_box = []
    for box in bboxes:
        ymin, xmin, ymax, xmax = box
        # coordinates are normalized: scale y by the frame height and x by the frame width
        print("Ymin:{} Xmin:{} Ymax:{} Xmax:{}".format(
            ymin * im_height, xmin * im_width, ymax * im_height, xmax * im_width))
        final_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height])
    # print(final_box)
    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)
    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break
# Clean up
video.release()
cv2.destroyAllWindows()
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
tracker = cv2.TrackerCSRT_create()
vs = cv2.VideoCapture("Video.mp4")
initBB = None
detec = []
def pega_centro(x, y, w, h):
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy
roi = 480
counter = 0
offset = 6
# loop over frames from the video stream
while vs.isOpened():
    ret, frame = vs.read()
    cv2.line(frame, (769, roi), (1298, roi), (255, 0, 0), 3)
    # check to see if we are currently tracking an object
    if initBB is not None:
        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(frame)
        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h),
                          (0, 255, 0), 2)
            cX = int((x + x + w) / 2.0)
            cY = int((y + y + h) / 2.0)
            cv2.circle(frame, (cX, cY), 3, (0, 0, 255), -1)
            c = pega_centro(x, y, w, h)
            detec.append(c)
            for (x, y) in detec:
                if y < (roi + offset) and y > (roi - offset):
                    counter += 1
                    print(counter)
                    cv2.line(frame, (769, roi), (1298, roi), (0, 0, 255), 3)
                    detec.remove((x, y))
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                               showCrosshair=True)
        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        tracker.init(frame, initBB)
        fps = FPS().start()
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
else:
    vs.release()
    cv2.destroyAllWindows()
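For reference, here is a minimal sketch of the centroid-tracking / line-crossing idea mentioned above. It assumes per-frame detection centroids are already available (for example, derived from final_box in the first script); the matching threshold, line position, and helper names are assumptions, not part of the original code.
import math

LINE_Y = 568        # y position of the counting line (adjust to your video)
MATCH_DIST = 50     # max pixel distance to treat a centroid as the same vehicle

next_id = 0
tracks = {}         # id -> (cx, cy) centroid from the previous frame
counted = set()     # ids that have already been counted
count = 0

def update_counter(centroids):
    """Match this frame's centroids to existing tracks and count downward line crossings."""
    global next_id, count, tracks
    new_tracks = {}
    for (cx, cy) in centroids:
        # nearest-neighbour match against the previous frame's centroids
        best_id, best_d = None, MATCH_DIST
        for tid, (px, py) in tracks.items():
            d = math.hypot(cx - px, cy - py)
            if d < best_d:
                best_id, best_d = tid, d
        if best_id is None:          # unmatched detection -> start a new track
            best_id = next_id
            next_id += 1
            tracks[best_id] = (cx, cy)
        prev_y = tracks[best_id][1]
        # count once, when the centroid moves from above the line to below it
        if best_id not in counted and prev_y < LINE_Y <= cy:
            count += 1
            counted.add(best_id)
        new_tracks[best_id] = (cx, cy)
    tracks = new_tracks
Each frame, compute the centroid of every detected box as ((xmin + xmax) / 2, (ymin + ymax) / 2) and pass the list to update_counter; because each vehicle keeps its ID between frames, it is counted only once, when its centroid crosses the line.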
Related
I have to detect mice in a cage; the input images look like the following:
At the moment I am using cv.createBackgroundSubtractorMOG2() on the video stream to find the area containing the mice, and afterwards a Canny edge detector to extract their contours.
However, this is not working that well. The more the mice move, the better it works, but I suspect there could be a better approach to detecting them.
Does anyone have a different idea how to detect the mice?
Thanks in advance.
After subtracting the background, you could use a threshold to remove noise. Try saving the subtracted image and seeing what it looks like. Here's a script I use to tweak filter parameters (run it with the subtracted image):
import cv2
import numpy as np
screenshot_path = 'screenshot.bmp'
def nothing(x):
    pass
# Creating a window for later use
cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
cv2.namedWindow('trackbar', cv2.WINDOW_NORMAL)
# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100
# Creating track bar
cv2.createTrackbar('h', 'trackbar', 0, 180, nothing)
cv2.createTrackbar('s', 'trackbar', 0, 255, nothing)
cv2.createTrackbar('v', 'trackbar', 164, 255, nothing)
cv2.createTrackbar('h2', 'trackbar', 120, 180, nothing)
cv2.createTrackbar('s2', 'trackbar', 12, 255, nothing)
cv2.createTrackbar('v2', 'trackbar', 253, 255, nothing)
frame = cv2.imread(screenshot_path)
# converting to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
while True:
    # get info from track bar and apply to result
    h = cv2.getTrackbarPos('h', 'trackbar')
    s = cv2.getTrackbarPos('s', 'trackbar')
    v = cv2.getTrackbarPos('v', 'trackbar')
    h2 = cv2.getTrackbarPos('h2', 'trackbar')
    s2 = cv2.getTrackbarPos('s2', 'trackbar')
    v2 = cv2.getTrackbarPos('v2', 'trackbar')
    # Normal masking algorithm
    lower = np.array([h, s, v])
    upper = np.array([h2, s2, v2])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)
    print(h, s, v, h2, s2, v2)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
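Before switching approaches entirely, here is a rough sketch of combining the MOG2 subtractor you already use with a fixed threshold, a morphological open, and an area filter. The file name and all numeric values are placeholders, not tuned for your footage:
import cv2

cap = cv2.VideoCapture("mice.mp4")                     # placeholder input video
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    fg = subtractor.apply(frame)
    # drop shadow pixels (value 127) and weak foreground, then remove speckle noise
    _, mask = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # keep only blobs large enough to be a mouse ([-2] works for OpenCV 3.x and 4.x)
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    for c in contours:
        if cv2.contourArea(c) > 500:                   # area threshold is a guess
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("mice", frame)
    if cv2.waitKey(1) & 0xFF == 27:                    # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()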
If that doesn't work, I would use an object tracker API like CSRT
# python opencv_object_tracking.py
# python opencv_object_tracking.py --video dashcam_boston.mp4 --tracker csrt
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
help="OpenCV object tracker type")
args = vars(ap.parse_args())
# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]
# if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# function to create our object tracker
if int(major) == 3 and int(minor) < 3:
tracker = cv2.Tracker_create(args["tracker"].upper())
# otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
# appropriate object tracker constructor:
else:
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create,
"kcf": cv2.TrackerKCF_create,
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create,
"medianflow": cv2.TrackerMedianFlow_create,
"mosse": cv2.TrackerMOSSE_create
}
# grab the appropriate object tracker using our dictionary of
# OpenCV object tracker objects
tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
vs = cv2.VideoCapture(args["video"])
# initialize the FPS throughput estimator
fps = None
# loop over frames from the video stream
while True:
# grab the current frame, then handle if we are using a
# VideoStream or VideoCapture object
frame = vs.read()
frame = frame[1] if args.get("video", False) else frame
# check to see if we have reached the end of the stream
if frame is None:
break
# resize the frame (so we can process it faster) and grab the
# frame dimensions
frame = imutils.resize(frame, width=500)
(H, W) = frame.shape[:2]
# check to see if we are currently tracking an object
if initBB is not None:
# grab the new bounding box coordinates of the object
(success, box) = tracker.update(frame)
# check to see if the tracking was a success
if success:
(x, y, w, h) = [int(v) for v in box]
cv2.rectangle(frame, (x, y), (x + w, y + h),
(0, 255, 0), 2)
# update the FPS counter
fps.update()
fps.stop()
# initialize the set of information we'll be displaying on
# the frame
info = [
("Tracker", args["tracker"]),
("Success", "Yes" if success else "No"),
("FPS", "{:.2f}".format(fps.fps())),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the 's' key is selected, we are going to "select" a bounding
# box to track
if key == ord("s"):
# select the bounding box of the object we want to track (make
# sure you press ENTER or SPACE after selecting the ROI)
initBB = cv2.selectROI("Frame", frame, fromCenter=False,
showCrosshair=True)
# start OpenCV object tracker using the supplied bounding box
# coordinates, then start the FPS throughput estimator as well
tracker.init(frame, initBB)
fps = FPS().start()
# if the `q` key was pressed, break from the loop
elif key == ord("q"):
break
# if we are using a webcam, release the pointer
if not args.get("video", False):
vs.stop()
# otherwise, release the file pointer
else:
vs.release()
# close all windows
cv2.destroyAllWindows()
For my project I want to save the bounding boxes found by the Object Detection API as .jpg files to feed into another CNN for further classification.
Here is my code (derived from EdjeElectronics GitHub):
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = '_model_ssd_v2'
IMAGE_NAME = 'image.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'_data','label_map.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,"_images",IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visulaize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.3)
# All the results have been drawn on image. Now display the image.
# cv2.imshow('Object detector', cv2.resize(image, (int(2592/2),int(1944/2))))
# # Press any key to close the image
# cv2.waitKey(0)
# # Clean up
# cv2.destroyAllWindows()
cv2.imwrite("C:/tensorflow/models/research/object_detection/_images/test1.jpg", image)
A similar question was asked here, but I don't know how to apply it with the TensorFlow Object Detection API.
Thank You!
I've found the function draw_bounding_boxes_on_image in the vis_util. Try this:
# create a white background image with the same shape as image
white_bg_img = 255 * np.ones(image.shape, np.uint8)
vis_util.draw_bounding_boxes_on_image(
    white_bg_img,
    np.squeeze(boxes),
    color='red',
    thickness=4)
cv2.imwrite("bounding_boxes.jpg", white_bg_img)
To crop the image region inside each bounding box (the coordinates are normalized, so scale them by the image size):
boxes = np.squeeze(boxes)
im_height, im_width = image.shape[:2]
for i in range(len(boxes)):
    # boxes are normalized [ymin, xmin, ymax, xmax]; convert to pixel indices
    ymin = int(boxes[i, 0] * im_height)
    xmin = int(boxes[i, 1] * im_width)
    ymax = int(boxes[i, 2] * im_height)
    xmax = int(boxes[i, 3] * im_width)
    roi = image[ymin:ymax, xmin:xmax].copy()
    cv2.imwrite("box_{}.jpg".format(str(i)), roi)
The saved files will be named box_1.jpg, box_2.jpg, and so on.
I followed this link and it worked. Add the following code:
min_score_thresh = 0.60
true_boxes = boxes[0][scores[0] > min_score_thresh]
for i in range(true_boxes.shape[0]):
    ymin = int(true_boxes[i, 0] * height)
    xmin = int(true_boxes[i, 1] * width)
    ymax = int(true_boxes[i, 2] * height)
    xmax = int(true_boxes[i, 3] * width)
    roi = image[ymin:ymax, xmin:xmax].copy()
    cv2.imwrite("box_{}.jpg".format(str(i)), roi)
Make sure you define the true height and width of the image.
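For example, they can be read directly from the image array (assuming image is the NumPy array loaded with cv2.imread earlier):
height, width = image.shape[:2]   # pixel height and width of the loaded image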
This will work:
box = np.squeeze(boxes)
for i in range(len(box)):
    ymin = int(box[i, 0] * height)
    xmin = int(box[i, 1] * width)
    ymax = int(box[i, 2] * height)
    xmax = int(box[i, 3] * width)
    print(ymin, xmin, ymax, xmax)
    roi = image[ymin:ymax, xmin:xmax].copy()
So this is what I have now:
As you can see, the neural style transfer is only applied over the area covered by the detection box. I am trying to put the transformed picture (which will always be smaller than 1200 x 900, since the frame is 1200 x 900) onto a black picture with dimensions 1200 x 900 so that I can save the video file.
My box is measured with startX, endX, startY, and endY. The way I am trying to put the stylized picture over the background right now is black_background[startY:endY, startX:endX] = output, where output also has the size (endY - startY, endX - startX).
This is not working; any insights? Also, for some reason, when I do black_background[startY:endY, startX:endX] = output, there is often a broadcasting issue that is a few pixels off, like not being able to add (859, 100, 3) to (860, 100, 3). Is there a non-buggy solution to the black background issue? I feel like manually doing black_background[startY:endY, startX:endX] = output is awkward.
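For reference, one way to avoid the off-by-one mismatch is to resize the stylized patch to the exact shape of the destination slice before assigning it. This is only a sketch, using the same startX/endX/startY/endY and output variables as the code below:
# force the patch to exactly match the destination slice (cv2.resize takes (width, height))
patch = cv2.resize(output, (endX - startX, endY - startY))
# note: output is a float image in [0, 1]; scale it to [0, 255] before pasting into a uint8 image
black_background[startY:endY, startX:endX] = patch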
Here's my full code; I marked the if block that actually matters with -----. Thank you!
from __future__ import print_function
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
from imutils import paths
import itertools
# We need to input model prototxt
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
ap.add_argument("-nm", "--neuralmodels", required=True,
help="path to directory containing neural style transfer models")
args = vars(ap.parse_args())
# we should identify the class first, and then transfer that block
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
DetectionNet = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# grab the paths to all neural style transfer models in our 'models'
# directory, provided all models end with the '.t7' file extension
modelPaths = paths.list_files(args["neuralmodels"], validExts=(".t7",))
modelPaths = sorted(list(modelPaths))
# generate unique IDs for each of the model paths, then combine the
# two lists together
models = list(zip(range(0, len(modelPaths)), (modelPaths)))
# use the cycle function of itertools that can loop over all model
# paths, and then when the end is reached, restart again
modelIter = itertools.cycle(models)
(modelID, modelPath) = next(modelIter)
NTSnet = cv2.dnn.readNetFromTorch(modelPath)
# initialize the video stream, allow the cammera sensor to warmup,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=1).start()
fps = FPS().start()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_video = cv2.VideoWriter('output.avi', fourcc, 20.0, (1200, 900))
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=1200, height=900)
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
DetectionNet.setInput(blob)
detections = DetectionNet.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# `detections`, then compute the (x, y)-coordinates of
# the bounding box for the object
idx = int(detections[0, 0, i, 1])
if(CLASSES[idx] == "person" and confidence > .90):
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the prediction on the frame
label = "{}: {:.2f}%".format("PERSON",
confidence * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
# print box area in background
newimage = frame[startY:endY, startX:endX]
(h, w) = newimage.shape[:2]
#print(h,w)
#print(startX, endX, startY, endY)
noise_picture = cv2.imread('white_noise.jpg')
black_background = cv2.imread('black.png')
-------------------------------------------------------------------
if(h > 0 and w > 0):
# to_be_transformed is the detection box area
# resize that area for MobileNetSSD
#to_be_transformed = imutils.resize(to_be_transformed, height=450)
(height_orig, width_orig) = noise_picture.shape[:2]
noise_picture[startY:endY, startX:endX] = newimage
noise_picture = imutils.resize(noise_picture, height=450)
# run it through the network, output is the image
(h, w) = noise_picture.shape[:2]
# print(h, w)
blob2 = cv2.dnn.blobFromImage(noise_picture, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
NTSnet.setInput(blob2)
output = NTSnet.forward()
output = output.reshape((3, output.shape[2], output.shape[3]))
output[0] += 103.939
output[1] += 116.779
output[2] += 123.680
output /= 255.0
output = output.transpose(1, 2, 0)
# set the 600 x 450 back to the original size
black_background = imutils.resize(black_background, width=1200, height = 900)
output = imutils.resize(output, width=1200)
#black_background[startY:endY, startX:endX] = output[startY:endY, startX:endX]
output = output[startY:endY, startX:endX]
(h2, w2) = output.shape[:2]
if(h2>0 and w2>0 ):
cv2.imshow('hmm', output)
black_background[startY:endY, startX:endX] = output
cv2.imshow("uh", black_background)
#output_video.write(black_background)
#output_video.write(background)
---------------------------------------------------------------
# show the output frame, which is the whole thing
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# update the FPS counter
fps.update()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
Oh man, this is the second time I've made this mistake. You have to multiply by 255 when you are adding your output picture to your background. This is really weird: it seems like imread works if you only put numbers in [0, 1], but once you have a value that goes over 1, it treats the range as [0, 255]; don't take my word for it, though.
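In other words, the style-transfer output is a float array in roughly [0, 1], while the background loaded with cv2.imread is uint8 in [0, 255]. A small sketch of the conversion before pasting, using the same variable names as the code above:
# scale the float [0, 1] style-transfer output up to [0, 255] and cast to uint8
patch = (output * 255).clip(0, 255).astype("uint8")
black_background[startY:endY, startX:endX] = patch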
I am trying to recognize objects in real time using the TensorFlow Object Detection API and OpenCV, with the ssd_mobilenet_v1_coco_11_06_2017 model on a GPU.
# Which model to use.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# ## Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def detect_objects(image_np, sess, detection_graph):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
rect_points, class_names, class_colors = draw_boxes_and_labels(
boxes=np.squeeze(boxes),
classes=np.squeeze(classes).astype(np.int32),
scores=np.squeeze(scores),
category_index=category_index,
min_score_thresh=.5
)
return dict(rect_points=rect_points, class_names=class_names, class_colors=class_colors)
# Loading the model
# Load a (frozen) Tensorflow model into memory.
def initiateModel(input_queue, output_queue ):
# Load a (frozen) Tensorflow model into memory.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.8)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=True), graph=detection_graph)
while True:
frame = input_queue.get()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
output_queue.put(detect_objects(frame_rgb, sess, detection_graph))
sess.close()
if __name__ == '__main__':
input_queue = Queue(5)
output_queue = Queue()
for i in range(1):
t = Thread(target=initiateModel, args=(input_queue, output_queue ))
t.daemon = True
t.start()
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
frameRate=0
start_time = time.time()
while True:
frame = video_capture.read()
frame = cv2.resize(frame, (640,480))
height, width, channels = frame.shape
#print("height,width : ", height, width)
input_queue.put(frame)
if output_queue.empty():
pass # fill up queue
else:
font = cv2.FONT_HERSHEY_SIMPLEX
data = output_queue.get()
rec_points = data['rect_points']
class_names = data['class_names']
class_colors = data['class_colors']
for point, name, color in zip(rec_points, class_names, class_colors):
cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
(int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3)
cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
(int(point['xmin'] * args.width) + len(name[0]) * 6,
int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA)
cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font,
0.3, (0, 0, 0), 1)
cv2.namedWindow("Video", cv2.WINDOW_NORMAL)
cv2.imshow('Video', frame)
if (time.time() >= start_time+1):
print ("Frame Rate : ", frameRate)
start_time = time.time()
frameRate=0
else:
frameRate=frameRate+1
fps.update()
#print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
video_capture.stop()
cv2.destroyAllWindows()
I am getting around 17-18 FPS on a single GPU (NVIDIA 1070, 6 GB).
The GPU uses most of its dedicated memory, but overall GPU utilization is still only around 30-32%.
How can I increase GPU utilization to improve performance?
I need to achieve around 60 FPS with a custom object detector that detects only particular patterned objects.
- Python 3.6
- TensorFlow 1.11 with GPU support
- OpenCV 3.4.2
I am working with the TensorFlow Object Detection API, and I have already trained it on my dataset; it works fine. But I have to crop the detected object and do some preprocessing on it. That seems easy, because TensorFlow already draws a green box around the detected object. When I try to find the coordinates of the object, it gives me numbers in the range 0 to 1. When I use those coordinates to crop the image with OpenCV, I have to multiply them by the picture's height and width, but it crops the wrong region.
Tensorflow.org says that I can use the "tf.image.crop_and_resize" function, but I can't get it to run in my own code.
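For what it's worth, here is a rough sketch of how tf.image.crop_and_resize can be called in TF 1.x: it expects a batched float image and normalized [ymin, xmin, ymax, xmax] boxes, which matches what the detector returns. The helper name and crop size are assumptions about how it might be wired in, not tested against your model:
import numpy as np
import tensorflow as tf

def crop_detections(frame, norm_boxes, crop_size=(224, 224)):
    """Crop normalized [ymin, xmin, ymax, xmax] boxes out of a single frame."""
    batch = np.expand_dims(frame.astype(np.float32), axis=0)      # [1, H, W, 3]
    box_ind = np.zeros(len(norm_boxes), dtype=np.int32)           # all boxes belong to image 0
    crops = tf.image.crop_and_resize(batch, norm_boxes, box_ind, crop_size)
    with tf.Session() as sess:
        return sess.run(crops)                                    # [N, 224, 224, 3] float32
In practice you would build this once and reuse the session, or simply slice the frame with NumPy as in the solution code further down.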
This is my run_inference_for_single_image function, which returns output_dict:
def run_inference_for_single_image(image, graph):
with graph.as_default():
#with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
This is my video capture function. It crops the wrong coordinates.
video = cv2.VideoCapture(0)
ret = video.set(3,1080)
ret = video.set(4,720)
while(True):
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
ret,frame = video.read()
frame = cv2.flip(frame, 1)
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.50)
# Draw the results of the detection (aka 'visulaize the results')
output_dict = run_inference_for_single_image(frame, detection_graph)
max_boxes_to_draw = output_dict['detection_boxes'].shape[0]
for i in range(min(max_boxes_to_draw, output_dict['detection_boxes'].shape[0])):
if output_dict['detection_scores'][i] > 0.95:
if output_dict['detection_classes'][i] in category_index.keys():
class_name = category_index[output_dict['detection_classes'][i]]['name']
print(output_dict['detection_boxes'][i])
crop_img = frame[int((output_dict['detection_boxes'][i][0]) * 720): int(
(output_dict['detection_boxes'][i][2]) * 720),
int((output_dict['detection_boxes'][i][1]) * 1080):int(
(output_dict['detection_boxes'][i][3]) * 1080)]
cv2.imshow("asdasd", crop_img)
print(class_name)
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
It might be related to output_dict.
class_name = category_index[output_dict['detection_classes'][i]]['name'] => this line gives me the name of the class, and it works well.
I found an answer to my question. This is the solution code:
while(True):
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
ret,frame = video.read()
frame = cv2.flip(frame, 1)
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.50)
# Draw the results of the detection (aka 'visulaize the results')
output_dict = run_inference_for_single_image(frame, detection_graph)
max_boxes_to_draw = output_dict['detection_boxes'].shape[0]
for i in range(min(max_boxes_to_draw, output_dict['detection_boxes'].shape[0])):
if output_dict['detection_scores'][i] > 0.80:
if output_dict['detection_classes'][i] in category_index.keys():
class_name = category_index[output_dict['detection_classes'][i]]['name']
#print(output_dict['detection_boxes'][i])
ymin = boxes[0, i, 0]
xmin = boxes[0, i, 1]
ymax = boxes[0, i, 2]
xmax = boxes[0, i, 3]
im_width = 1280
im_height = 720
(xminn, xmaxx, yminn, ymaxx) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
crop_img=tf.image.crop_to_bounding_box(frame,int(yminn), int(xminn), int(ymaxx-yminn), int(xmaxx-xminn))
crop_img=frame[int(yminn):int(ymaxx),int(xminn):int(xmaxx)]
# print(session.run(file))
"""crop_img = frame[int((output_dict['detection_boxes'][i][0]) * 720): int(
(output_dict['detection_boxes'][i][2]) * 720),
int((output_dict['detection_boxes'][i][1]) * 1080):int(
(output_dict['detection_boxes'][i][3]) * 1080)]"""
cv2.imshow("asdsda",crop_img)
#print(class_name)
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
im_width = 1280 doesn't mean much to me, but it works in my project. Thanks for the help.
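If hard-coding im_width = 1280 and im_height = 720 ever becomes a problem, the actual frame size can be read from the frame itself (or from the capture object) instead, for example:
im_height, im_width = frame.shape[:2]                    # actual frame size in pixels
# or, equivalently:
# im_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
# im_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))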