Is it possible to reduce the image quality of a Motion JPEG file produced using OpenCV?

I am writing a Python script that records video from a USB webcam and stores the resulting file locally on a hard drive. Currently a 60-second file is 27 MB.
The format is Motion JPEG (MJPG) because the target being observed is in moving water, and moving water confuses interframe video compression. I'd like to reduce the size of the overall file by lowering the quality of the individual JPEG images in the file. Can this be done in OpenCV? Below is what I have so far. Thanks in advance.
#!/usr/bin/env python3
import time
import cv2
import imutils

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Unable to read camera feed")

capture_duration = 60  # seconds
sleep_duration = 0
frame_width = 320
frame_height = 240
frames_per_sec = 1

out = cv2.VideoWriter('C:\\videoPy\\LZ\\outputvideo.avi',
                      cv2.VideoWriter_fourcc(*'MJPG'),
                      frames_per_sec, (frame_width, frame_height))

start_time = time.time()
while int(time.time() - start_time) < capture_duration:
    ret, frame = cap.read()
    if ret:
        frame = imutils.resize(frame, width=320)
        out.write(frame)
        time.sleep(sleep_duration)
        cv2.imshow('frame', frame)
        # Press Q on the keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
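One hedged answer: depending on your OpenCV version and the backend behind VideoWriter, you may be able to set the writer's quality property directly; where that is not supported, you can degrade each frame yourself by round-tripping it through the JPEG codec at a lower quality before writing, which discards detail so the MJPG stream compresses smaller. A minimal sketch (the quality value 50 is an arbitrary assumption to tune):

import cv2

cap = cv2.VideoCapture(0)
out = cv2.VideoWriter('outputvideo.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                      1, (320, 240))

# Option 1: ask the writer for lower quality (0-100); silently ignored
# if the backend does not support the property.
out.set(cv2.VIDEOWRITER_PROP_QUALITY, 50)

# Option 2: re-encode the frame as a lower-quality JPEG before writing.
ret, frame = cap.read()
if ret:
    frame = cv2.resize(frame, (320, 240))
    ok, buf = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 50])
    if ok:
        out.write(cv2.imdecode(buf, cv2.IMREAD_COLOR))

cap.release()
out.release()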

Related

The function "hog.detectMultiScale(frame)" doesn't return anything

I wrote this and it works, but the last function (hog.detectMultiScale) doesn't return anything. I don't know why.
import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
cv2.startWindowThread()

# open video stream
cap = cv2.VideoCapture("E:\\3.mp4")
# the output will be written to output.avi
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                      15., (640, 480))

while True:
    # capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break
    # resizing for faster detection
    frame = cv2.resize(frame, (640, 480))
    # returns the bounding boxes for the detected objects
    (boxes, weights) = hog.detectMultiScale(frame)
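One hedged guess: detectMultiScale does return (an empty tuple) when it finds no people, and with the default parameters it often finds nothing on raw video frames; it is also worth checking that cap.read() actually succeeded. A sketch with explicit detection parameters (the values and the test image people.jpg are untuned assumptions):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

frame = cv2.imread('people.jpg')  # hypothetical test image
frame = cv2.resize(frame, (640, 480))
(boxes, weights) = hog.detectMultiScale(frame, winStride=(8, 8),
                                        padding=(8, 8), scale=1.05)
# draw the detections so you can see whether anything was found
for (x, y, w, h) in boxes:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
print(len(boxes), 'detections')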

Frame is not responding while imageAi detection

When I run the program, the frame hangs and stops responding. I am using the imageAI package for video detection. What I want to do is show the video stream while the video detection is running.
Any idea?
import cv2
from imageai.Detection import VideoObjectDetection
from matplotlib import pyplot as plt

camera = cv2.VideoCapture(0)

while True:
    _, frame = camera.read()
    cv2.imshow("frame", frame)
    plt.show()
    k = cv2.waitKey(60) & 0xff
    if k == 27:
        break

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolo.h5")
detector.loadModel()

cv2.imshow("frame", frame)
plt.show()

video_path = detector.detectObjectsFromVideo(camera_input=camera,
                                             output_file_path="traffic_detected",
                                             frames_per_second=20,
                                             log_progress=True,
                                             minimum_percentage_probability=30)
print(video_path)
# cv2.imshow('frame', frame)
camera.release()
cv2.destroyAllWindows()
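A hedged suggestion: detectObjectsFromVideo is a blocking call that consumes the camera until it finishes, so an imshow window opened beforehand stops being serviced and appears to hang. If I read the imageAI docs correctly, you can display frames while detection runs by passing a per-frame callback and asking for the detected frame back; treat the callback signature below as an assumption to verify against your imageAI version:

import cv2
from imageai.Detection import VideoObjectDetection

camera = cv2.VideoCapture(0)
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolo.h5")
detector.loadModel()

def show_frame(frame_number, output_array, output_count, detected_frame):
    # called once per processed frame; display it and pump the GUI event loop
    cv2.imshow("frame", detected_frame)
    cv2.waitKey(1)

detector.detectObjectsFromVideo(camera_input=camera,
                                output_file_path="traffic_detected",
                                frames_per_second=20,
                                per_frame_function=show_frame,
                                return_detected_frame=True,
                                minimum_percentage_probability=30,
                                log_progress=True)
camera.release()
cv2.destroyAllWindows()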

I use the OpenCV example code to capture video; I don't know why it's reversed

This is my code. I'm trying to capture the video and write it to disk, but it comes out wrong.
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = cap.read()
    if ret:
        frame = cv2.flip(frame, 0)  # flips every frame upside down
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
The window shows the video reversed. How can I turn it back?
Thanks!
Don't use the flip call; remove this line:
frame = cv2.flip(frame, 0)
Check the cv2.flip docs: a flipCode of 0 flips the image around the x-axis, i.e. vertically.
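For reference, the flipCode argument to cv2.flip: 0 flips around the x-axis (upside down, which is what the loop above does to every frame), a positive value flips around the y-axis (mirror), and a negative value flips around both (a 180-degree rotation). A quick check, assuming a hypothetical test.jpg:

import cv2

img = cv2.imread('test.jpg')     # hypothetical test image
upside_down = cv2.flip(img, 0)   # around the x-axis
mirrored = cv2.flip(img, 1)      # around the y-axis
rotated_180 = cv2.flip(img, -1)  # both axes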

OpenCV - Fixing foreground in Background subtractor MOG

How can I fix the foreground using the MOG background subtractor in opencv-python? I'm trying to get a more stable foreground that keeps showing once the subtractor has correctly separated foreground from background (for example, holding the foreground for about 5 seconds).
Here is my code:
cap = cv2.VideoCapture(0)
history = 500  # or whatever you want it to be
accelerate = 5
fgbg = cv2.createBackgroundSubtractorMOG2(history=history, varThreshold=20,
                                          detectShadows=True)
count = 0
while True:
    for i in range(accelerate):  # skip frames to speed things up
        ret, frame = cap.read()
    fgmask = fgbg.apply(frame, learningRate=1.0 / history)
    imageproc(fgmask, count)  # user-defined processing helper
    # time.sleep(5)
    k = cv2.waitKey(0) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
import time
import cv2

cap = cv2.VideoCapture('video.avi')
# MOG lives in the contrib package: pip install opencv-contrib-python
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    time.sleep(5)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
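One hedged idea for holding the foreground: once the subtractor has produced a usable mask, you can stop the background model from adapting by calling apply() with learningRate=0, and keep showing the last good mask for your 5-second window. A minimal sketch; the pixel-count test for a "good" mask and the 5-second hold are assumptions:

import time
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=20,
                                          detectShadows=True)
held_mask, hold_until = None, 0.0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if time.time() < hold_until:
        fgbg.apply(frame, learningRate=0)   # freeze the background model
        fgmask = held_mask                  # keep showing the frozen mask
    else:
        fgmask = fgbg.apply(frame)
        if cv2.countNonZero(fgmask) > 500:  # crude "good mask" test (assumed)
            held_mask, hold_until = fgmask, time.time() + 5
    cv2.imshow('frame', fgmask)
    if cv2.waitKey(30) & 0xff == 27:
        break
cap.release()
cv2.destroyAllWindows()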

Background subtraction in opencv2

I am trying to detect foreground motion using opencv2 by removing static (mostly) BG elements. The method I am using is based on taking the mean of a series of images representing the background, then calculating one standard deviation above and below that mean, and using that window to detect foreground motion.
This mechanism reportedly works well for moderately noisy environments like waving trees in the BG.
The desired output is a mask that can be used in a subsequent operation so as to minimise further processing. Specifically, I am going to use optical flow detection within that region.
cv2 has made this much easier and the code is much simpler to read and understand. Thanks cv2 and numpy.
But I am having difficulty doing the correct FG detection.
Ideally I also want to erode/dilate the BG mean so as to eliminate 1-pixel noise.
The code is all together, so you have a number of frames at the start (BGsample) to gather the BG data before FG detection starts. The only dependencies are opencv2 (> 2.3.1) and numpy (which should be included in opencv > 2.3.1).
import cv2
import numpy as np

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)  # webcam
    cv2.namedWindow("input")
    cv2.namedWindow("sig2")
    cv2.namedWindow("detect")
    BGsample = 20  # number of frames to gather BG samples from at start of capture
    success, img = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))
    # could use img.shape[:-1] instead  # cut off extra channels
    if success:
        acc = np.zeros((height, width), np.float32)    # 32-bit accumulator
        sqacc = np.zeros((height, width), np.float32)  # 32-bit accumulator
        for i in range(20): a = cap.read()  # dummy reads to warm up the sensor
        # gather BG samples
        for i in range(BGsample):
            success, img = cap.read()
            frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.accumulate(frame, acc)
            cv2.accumulateSquare(frame, sqacc)
        #
        M = acc / float(BGsample)
        sqaccM = sqacc / float(BGsample)
        M2 = M * M
        sig2 = sqaccM - M2  # variance = E[x^2] - (E[x])^2
        # have BG samples now
        # start FG detection
        key = -1
        while key < 0:
            success, img = cap.read()
            frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Ideally we create a mask for future use that is B/W for FG objects
            # (using erode or dilate to remove noise)
            # this isn't quite right
            level = M + sig2 - frame
            grey = cv2.morphologyEx(level, cv2.MORPH_DILATE,
                                    cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                                    iterations=2)
            cv2.imshow("input", frame)
            cv2.imshow("sig2", sig2 / 60)
            cv2.imshow("detect", grey / 20)
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()
I don't think you need to manually compute the mean and standard deviation; use cv2.meanStdDev instead. In the code below, I'm using your average background matrix computed from
M = acc/float(BGsample)
So, now we can compute the mean and standard deviation of the average background image, and finally inRange is used to pull out the range that you wanted (i.e., the mean +/- 1 standard deviation).
(mu, sigma) = cv2.meanStdDev(M)
fg = cv2.inRange(M, (mu[0] - sigma[0]), (mu[0] + sigma[0]))
# proceed with morphological clean-up here...
Hope that helps!
My best guess so far: using detectmin and detectmax to coerce the floating-point sigma into 8-bit grayscale for cv2.inRange to use.
It seems to work OK, but I was hoping for better... plenty of holes in valid FG data.
I suppose it would work better in RGB instead of grayscale.
I can't get noise reduction using dilate or erode to work.
Any improvements?
import cv2
import numpy as np

if __name__ == '__main__':
    cap = cv2.VideoCapture(1)
    cv2.namedWindow("input")
    #cv2.namedWindow("sig2")
    cv2.namedWindow("detect")
    BGsample = 20  # number of frames to gather BG samples from at start of capture
    success, img = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))
    if success:
        acc = np.zeros((height, width), np.float32)    # 32-bit accumulator
        sqacc = np.zeros((height, width), np.float32)  # 32-bit accumulator
        for i in range(20): a = cap.read()  # dummy reads to warm up the sensor
        # gather BG samples
        for i in range(BGsample):
            success, img = cap.read()
            frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.accumulate(frame, acc)
            cv2.accumulateSquare(frame, sqacc)
        #
        M = acc / float(BGsample)
        sqaccM = sqacc / float(BGsample)
        M2 = M * M
        sig2 = sqaccM - M2
        # have BG samples now
        # calculate upper and lower bounds of detection window around mean.
        # coerce into 8-bit image space for the cv2.inRange compare
        detectmin = cv2.convertScaleAbs(M - sig2)
        detectmax = cv2.convertScaleAbs(M + sig2)
        # start FG detection
        key = -1
        while key < 0:
            success, img = cap.read()
            frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            level = cv2.inRange(frame, detectmin, detectmax)
            cv2.imshow("input", frame)
            #cv2.imshow("sig2", M/200)
            cv2.imshow("detect", level)
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()
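Regarding the dilate/erode trouble: morphological opening usually removes 1-pixel speckle, but it has to run on the 8-bit mask that comes out of cv2.inRange, not on the floating-point accumulators. A short sketch (kernel size and iteration counts are assumptions to tune):

import cv2
import numpy as np

# stand-in for the 8-bit mask produced by cv2.inRange above
level = np.zeros((240, 320), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
clean = cv2.morphologyEx(level, cv2.MORPH_OPEN, kernel)                 # drop speckle
clean = cv2.morphologyEx(clean, cv2.MORPH_CLOSE, kernel, iterations=2)  # fill small holes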
