I've followed this tutorial and managed to get face detection with age and gender estimation working, like in this video.
The problem I'm facing now is that the application's window is very small, and I can't find a way to resize it (you can see the window at the bottom right of this image).
The code of the application can be found here.
Solution: Set whatever size you want; this is how to resize the OpenCV window for a live cam:
import cv2

def main():
    windowName = "Main"
    cv2.namedWindow(windowName)
    cap = cv2.VideoCapture(0)

    # current capture resolution (property 3 = frame width, 4 = frame height)
    print('Width :' + str(cap.get(3)))
    print('Height :' + str(cap.get(4)))

    # request a different capture resolution
    cap.set(3, 620)
    cap.set(4, 720)

    if cap.isOpened():
        ret, frame = cap.read()
    else:
        ret = False

    while ret:
        ret, frame = cap.read()
        if not ret:  # stop if the camera stops delivering frames
            break
        output = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # grayscale copy (not displayed)
        cv2.imshow(windowName, frame)
        if cv2.waitKey(1) == 27:  # Esc to quit
            break

    cv2.destroyAllWindows()
    cap.release()

if __name__ == "__main__":
    main()
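If what you actually want to change is the display window itself rather than the capture resolution, a minimal sketch (the window name and size here are just placeholders) is to create the window with cv2.WINDOW_NORMAL and call cv2.resizeWindow:

import cv2

windowName = "Main"
# WINDOW_NORMAL makes the window resizable; the default WINDOW_AUTOSIZE
# always snaps the window back to the size of the frames
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, 1280, 720)  # width, height in pixels

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow(windowName, frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()

With the default WINDOW_AUTOSIZE flag, resizing by hand has no effect, which is why the window always stays as small as the frames.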
Related
When I try to play a video using OpenCV, the video comes out rotated so that its long side is displayed horizontally. How can I make the video display in its original orientation?
import sys
import numpy as np
import cv2

cap = cv2.VideoCapture('/Users/junsuk/Desktop/python/ch13/testbar.mp4')
if not cap.isOpened():
    print('Video open failed!')
    sys.exit()

# CSRT tracker (requires opencv-contrib-python)
tracker = cv2.TrackerCSRT_create()

ret, frame = cap.read()
cv2.imwrite('/Users/junsuk/Desktop/python/ch10/photo.jpg', frame)
if not ret:
    print('Frame read failed!')
    sys.exit()

newdraw = cv2.imread('/Users/junsuk/Desktop/python/ch10/photo.jpg')
cv2.line(newdraw, (50, 50), (200, 50), (0, 0, 255), 5)
cv2.imwrite('/Users/junsuk/Desktop/python/ch10/photo.jpg', newdraw)

# select the region of interest to track
rc = cv2.selectROI('frame', frame)
print(rc)
tracker.init(frame, rc)

while True:
    ret, frame = cap.read()
    if not ret:
        print('Frame read failed!')
        sys.exit()
    ret, rc = tracker.update(frame)
    rc = tuple([int(_) for _ in rc])
    cv2.rectangle(frame, rc, (0, 0, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(25) == 45:  # '-' (ASCII 45) to quit
        break

cv2.destroyAllWindows()
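One way to fix the orientation (a sketch, assuming the clip is rotated by 90 degrees; pick the rotation constant that matches your video) is to rotate each frame right after reading it, inside the while loop:

    ret, frame = cap.read()
    if not ret:
        break
    # choose cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE or cv2.ROTATE_180
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

Newer OpenCV builds can also apply the rotation metadata stored in the file automatically (the cv2.CAP_PROP_ORIENTATION_AUTO capture property), but whether that works depends on the OpenCV version and the capture backend.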
This code shows the whole video frame by frame, but at the end it also returns None for the last frame:
cap = cv2.VideoCapture("demo.mp4")
while(cap.isOpened()):
    status, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2_imshow(gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
error: OpenCV(4.1.2) /io/opencv/modules/imgproc/src/color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cvtColor'
This is the error that is shown.
It happens because cap.read() returns (False, None) once the video runs out of frames, so cvtColor is called on an empty image. It can be solved by modifying the code slightly to make sure the frame is not empty:
# cv2_imshow comes from Google Colab: from google.colab.patches import cv2_imshow
cap = cv2.VideoCapture("demo.mp4")
while(cap.isOpened()):
    status, frame = cap.read()
    if status:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2_imshow(gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
When I run the program, the frame window hangs and stops responding. I am using the imageAI package for video detection. What I want to do is show the video stream while the video detection is running.
Any idea?
import cv2
from imageai.Detection import VideoObjectDetection
from matplotlib import pyplot as plt

camera = cv2.VideoCapture(0)

# preview loop: show the camera feed until Esc is pressed
while (True):
    _, frame = camera.read()
    cv2.imshow("frame", frame)
    plt.show()
    k = cv2.waitKey(60) & 0xff
    if k == 27:
        break

# detection only starts after the preview loop has been exited
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolo.h5")
detector.loadModel()

cv2.imshow("frame", frame)
plt.show()

video_path = detector.detectObjectsFromVideo(camera_input=camera,
                                             output_file_path="traffic_detected",
                                             frames_per_second=20, log_progress=True,
                                             minimum_percentage_probability=30)
print(video_path)

# cv2.imshow('frame',frame)
camera.release()
cv2.destroyAllWindows()
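The window hangs because detectObjectsFromVideo only starts after the preview loop has been left, and as far as I can tell it then processes the stream on its own and does not return (or display anything) until it finishes. A minimal sketch of the usual alternative is to keep a single display loop and do the per-frame work inside it; process_frame here is a hypothetical placeholder for whatever per-frame detection you run:

import cv2

def process_frame(frame):
    # hypothetical placeholder: run your per-frame detection here
    # and draw the results onto the frame before returning it
    return frame

camera = cv2.VideoCapture(0)
while True:
    ok, frame = camera.read()
    if not ok:
        break
    frame = process_frame(frame)     # detection happens inside the loop
    cv2.imshow("frame", frame)       # waitKey below keeps the window responsive
    if cv2.waitKey(1) & 0xff == 27:  # Esc to quit
        break

camera.release()
cv2.destroyAllWindows()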
This is my code. I'm trying to capture the video and write it to a file, but it's not working as expected.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while(1):
    ret, frame = cap.read()
    if ret == True:
        frame = cv2.flip(frame, 0)
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
The window shows the video upside down. How can I turn it back?
Thanks!
Don't use the flip command; remove this line:
frame = cv2.flip(frame, 0)
Check the cv2.flip docs: a flipCode of 0 flips the frame vertically (around the x-axis), which is exactly why the recorded video comes out upside down.
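For reference, a quick sketch of what the flipCode argument to cv2.flip does (the image path is just a placeholder):

import cv2

img = cv2.imread('photo.jpg')     # any test image
upside_down = cv2.flip(img, 0)    # flipCode 0: flip vertically (around the x-axis)
mirrored = cv2.flip(img, 1)       # flipCode 1: flip horizontally (around the y-axis)
rotated_180 = cv2.flip(img, -1)   # flipCode -1: flip around both axes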
How can I freeze the foreground when using the MOG background subtractor in opencv-python? I'm trying to get a more stable foreground mask that keeps showing the foreground once it has been correctly separated from the background (for example, hold the foreground for about 5 seconds).
Here is my code:
cap = cv2.VideoCapture(0)
history = 500  # or whatever you want it to be
accelerate = 5
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=20, detectShadows=True)
count = 0

while(1):
    # skip ahead a few frames (note: (1, accelerate) iterates over the two
    # values 1 and 5 only; range(1, accelerate) may have been intended)
    for i in (1, accelerate):
        ret, frame = cap.read()
    fgmask = fgbg.apply(frame, learningRate=1.0/history)
    imageproc(fgmask, count)  # imageproc is defined elsewhere (own processing/display function)
    # time.sleep(5)
    k = cv2.waitKey(0) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
import numpy as np
import cv2
import time

cap = cv2.VideoCapture('video.avi')
# MOG lives in the bgsegm module of opencv-contrib-python
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    time.sleep(5)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
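One way to get a steadier foreground (a sketch, with warmup_frames and the kernel size as assumptions you would tune for your scene): let the MOG2 model learn during a warm-up period, then freeze it by calling apply with learningRate=0, and clean the mask with a morphological opening:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=20, detectShadows=True)
kernel = np.ones((5, 5), np.uint8)

warmup_frames = 150  # assumed warm-up length: let the background model settle first
frame_index = 0

while True:
    ret, frame = cap.read()
    if not ret:
        break
    if frame_index < warmup_frames:
        fgmask = fgbg.apply(frame)                  # default learning rate while warming up
    else:
        fgmask = fgbg.apply(frame, learningRate=0)  # freeze the learned background model
    # remove speckle noise so the foreground mask holds its shape better
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    cv2.imshow('fgmask', fgmask)
    frame_index += 1
    if cv2.waitKey(30) & 0xff == 27:
        break

cap.release()
cv2.destroyAllWindows()

With learningRate=0 the background model stops updating, so an object that stays in the scene keeps being reported as foreground instead of being gradually absorbed into the background.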