Camera remains active after VideoCapture destructor is called - opencv

I've run this sample code:
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0);
if(!cap.isOpened()) return -1;
Mat frame, edges;
namedWindow("edges",1);
for(;;)
{
cap >> frame;
cvtColor(frame, edges, CV_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(30) >= 0) break;
}
return 0;
}
It works fine, but after the application closes, the camera remains active. I know this because the flash LED stays on until I kill the HPMediaSmartWebcam.exe process.
How do I close the camera after I've finished using VideoCapture?

According to the docs, the camera will be deinitialized automatically in the class destructor; the destructor calls the virtual function cv::VideoCapture::release(). Run the camera for a fixed number of frames and then check whether the LED of the webcam goes off or not:
int frames = 0;
while (frames != 1000)
{
    // do frame capture from webcam and image processing...
    ++frames;
}
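If the LED stays on even after the VideoCapture object goes out of scope, you can release the device explicitly rather than relying on the destructor. A minimal sketch (the frame count and window name are arbitrary):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    cv::Mat frame;
    for (int i = 0; i < 100; ++i)   // grab a fixed number of frames
    {
        cap >> frame;
        if (frame.empty()) break;
        cv::imshow("frame", frame);
        if (cv::waitKey(30) >= 0) break;
    }

    cap.release();                  // explicitly close the camera device
    cv::destroyAllWindows();
    return 0;                       // if the LED still stays on, the driver process holds the device
}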

Related

A simple frame-differencing program is very slow

The background scene often evolves over time because, for instance, the lighting conditions might change (for example, from sunrise to sunset), or because objects could be added to or removed from the background.
Therefore, it is necessary to dynamically build a model of the background scene.
Based on the above, I wrote a simple frame-differencing program. It works well, but it's very slow.
How can I make it faster? Any suggestions?
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/video/tracking.hpp>

using namespace cv;
using namespace std;
int main()
{
    cv::Mat gray;       // current gray-level image
    cv::Mat background; // accumulated background
    cv::Mat backImage;  // background image
    cv::Mat foreground; // foreground image
    // learning rate in background accumulation
    double learningRate;
    int threshold;      // threshold for foreground extraction

    cv::VideoCapture capture("video.mp4");
    // check if video successfully opened
    if (!capture.isOpened())
        return 0;

    // current video frame
    cv::Mat frame;
    double rate = capture.get(CV_CAP_PROP_FPS);
    int delay = 1000 / rate;

    // foreground binary image
    //cv::Mat foreground;
    cv::Mat output;
    bool stop(false);

    while (!stop) {
        if (!capture.read(frame))
            break;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        cv::namedWindow("back");
        cv::imshow("back", gray);

        // initialize background to 1st frame
        if (background.empty())
            gray.convertTo(background, CV_32F);

        // convert background to 8U
        background.convertTo(backImage, CV_8U);
        // compute difference between image and background
        cv::absdiff(backImage, gray, foreground);
        // apply threshold to foreground image
        cv::threshold(foreground, output, 10, 255, cv::THRESH_BINARY_INV);
        // accumulate background
        cv::accumulateWeighted(gray, background, 0.01, output);

        cv::namedWindow("out");
        cv::imshow("out", output);
        if (cv::waitKey(delay) >= 0)
            stop = true;
    }
}
I modified and corrected some parts of your code:
In the while loop you call cv::namedWindow("back") and cv::namedWindow("out"); this only needs to be done once, before the loop.
You use if (background.empty()) to check whether the matrix is empty; this is only needed on the first iteration, because on later iterations the matrix is already filled. To keep the first iteration from failing, initialize background to zeros before the loop with background = cv::Mat::zeros(rows, cols, CV_32F), using the type and size that will be required inside the while loop. This does not affect the accumulation operation.
Here is the updated code:
int main()
{
    cv::Mat gray;       // current gray-level image
    cv::Mat background; // accumulated background
    cv::Mat backImage;  // background image
    cv::Mat foreground; // foreground image
    // learning rate in background accumulation
    double learningRate;
    int threshold;      // threshold for foreground extraction

    cv::VideoCapture capture("C:/Users/Pedram91/Pictures/Camera Roll/videoplayback.mp4"); // alternatives: C:/Users/Pedram91/Downloads/Video/videoplayback.mp4, C:/FLIR.mp4
    // check if video successfully opened
    if (!capture.isOpened())
        return 0;

    // current video frame
    cv::Mat frame;
    double rate = capture.get(CV_CAP_PROP_FPS);
    int delay = 1000 / rate;

    // foreground binary image
    //cv::Mat foreground;
    cv::Mat output;
    bool stop(false);

    cv::namedWindow("back"); // this goes here: you only need to call it once
    cv::namedWindow("out");  // this goes here: you only need to call it once

    int rows = capture.get(CV_CAP_PROP_FRAME_HEIGHT); // frame height = number of rows
    int cols = capture.get(CV_CAP_PROP_FRAME_WIDTH);  // frame width  = number of columns
    background = cv::Mat::zeros(rows, cols, CV_32F);  // this removes the need for "if (background.empty())" in the while loop

    while (!stop) {
        if (!capture.read(frame))
            break;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        cv::imshow("back", gray);

        // background is already initialized before the loop,
        // so the per-frame re-initialization is no longer needed:
        // if (background.empty())
        //     gray.convertTo(background, CV_32F);

        // convert background to 8U
        background.convertTo(backImage, CV_8U);
        // compute difference between image and background
        cv::absdiff(backImage, gray, foreground);
        // apply threshold to foreground image
        cv::threshold(foreground, output, 10, 255, cv::THRESH_BINARY_INV);
        // accumulate background
        cv::accumulateWeighted(gray, background, 0.01, output);

        cv::imshow("out", output);
        if (cv::waitKey(delay) >= 0)
            stop = true;
    }
}
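On the speed question itself: if plain frame differencing remains too slow or too noisy, OpenCV's built-in background subtractor (already pulled in through background_segm.hpp) is another option to try. A rough sketch assuming the OpenCV 2.x-style API, with a placeholder video path:

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>

int main()
{
    cv::VideoCapture capture("video.mp4");      // placeholder path
    if (!capture.isOpened()) return 0;

    cv::BackgroundSubtractorMOG2 mog;           // adaptive Gaussian-mixture background model
    cv::Mat frame, foreground;

    cv::namedWindow("out");
    while (capture.read(frame))
    {
        mog(frame, foreground, 0.01);           // update the model and get the foreground mask
        cv::imshow("out", foreground);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}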

Gray image from webcam with OpenCV C program, but C++ program works perfectly

I am trying to get a live webcam stream. Here is my C program:
int main()
{
    cvNamedWindow("Webcam feed", 1);
    printf("Checking if camera is working\n");

    CvCapture *cap = cvCaptureFromCAM(0);
    if (!cap)
    {
        printf("Error opening camera\n");
        return -1;
    }
    printf("yes it is in loop");

    IplImage *frame = cvQueryFrame(cap);
    if (!frame)
    {
        printf("Error in capturing frames from webcam\n");
        return -1;
    }
    cvSaveImage("C:/Users/shru/Desktop/mee.jpg", frame);

    int key = cvWaitKey(10);        /* key must be declared */
    if ((char)key == 27)            /* C-style cast instead of char(key) */
    {
        return -1;
    }

    cvReleaseCapture(&cap);
    cvDestroyWindow("Webcam feed"); /* window name must match cvNamedWindow */
    return 0;
}
And here is my C++ program:
int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) // check if we succeeded
        return -1;

    for (;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        //cvtColor(frame, edges, COLOR_BGR2GRAY);
        //GaussianBlur(edges, edges, Size(7, 7), 1.5, 1.5);
        //Canny(edges, edges, 0, 30, 3);
        imshow("Webcam feed", frame);
        if (waitKey(30) >= 0) break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
But the problem is that the C version shows a grey screen while the C++ program works as expected. Am I doing something wrong, or is there a different issue? I am using the OpenCV 3.0 alpha version with Visual Studio 2013.
If you are using OpenCV 3.0, you should not be using the C API. It is deprecated; either use an old version of OpenCV (if you need the C API) or just use C++. There is no way of solving this issue whilst using 3.0.
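For reference, a minimal C++ sketch that does what the C program attempts (grab a single frame, save it, and show it); the output file name is just a placeholder:

#include <opencv2/opencv.hpp>
#include <cstdio>

int main()
{
    cv::VideoCapture cap(0);           // open the default camera
    if (!cap.isOpened())
    {
        std::printf("Error opening camera\n");
        return -1;
    }

    cv::Mat frame;
    cap >> frame;                      // grab a single frame
    if (frame.empty())
    {
        std::printf("Error in capturing frames from webcam\n");
        return -1;
    }

    cv::imwrite("frame.jpg", frame);   // placeholder output path
    cv::imshow("Webcam feed", frame);
    cv::waitKey(0);
    return 0;
}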

Is it possible to have a square resolution with a webcam video stream using OpenCV?

I wrote a simple OpenCV program that grabs my webcam video stream and displays it in a simple window. I wanted to set the capture to a 256x256 resolution, but it changed it to 320x240.
Here's my source code:
#include <iostream>
#include <opencv/cv.h>
#include <opencv/highgui.h>

using namespace std;

int main(int argc, char** argv)
{
    char key;
    cvNamedWindow("Camera_Output", cv::WINDOW_NORMAL);

    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 256);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 256);

    while (1) {
        IplImage *frame = cvQueryFrame(capture);
        cvShowImage("Camera_Output", frame);
        key = cvWaitKey(10);
        if (key == 27) {
            break;
        }
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("Camera_Output");
    return 0;
}
The output resolution is 320x240, and I want a 256x256 resolution. I think it's not possible because the camera manages its output video stream buffer and has to keep the same aspect ratio (width/height). What do you think about this idea?
Is there a function that can force a square resolution using OpenCV?
Thanks a lot in advance for your help.
Seems like your video source does not handle a 256x256 resolution. If you want to display it at that size, you will have to crop the image yourself before displaying it.
Simple, you can do this by:
VideoCapture cap;
cap.open(0); // open your web-camera
cap.set(CV_CAP_PROP_FRAME_WIDTH, 256);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 256);
If this doesn't work, you need to resize it manually by calling cv::resize().
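If the set() calls are ignored by the driver, a hedged sketch of cropping a centered square from each frame and resizing it to 256x256 (the window name is arbitrary):

#include <algorithm>
#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    cv::Mat frame, square;
    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;

        // take the largest centered square, then scale it to 256x256
        int side = std::min(frame.cols, frame.rows);
        cv::Rect roi((frame.cols - side) / 2, (frame.rows - side) / 2, side, side);
        cv::resize(frame(roi), square, cv::Size(256, 256));

        cv::imshow("Camera_Output_256", square);
        if (cv::waitKey(10) == 27) break;   // Esc to quit
    }
    return 0;
}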

Why is the speed of AVI video increasing when reading it using OpenCV?

I am trying to read an AVI file using OpenCV. After getting the capture, the problem comes from the condition I give to the while loop, which governs how many times queryFrame is done.
There are 1251 frames in total in the video.
When I use while (counter <= number_of_frames), the video runs fine and,
when I use while (cvQueryFrame(capture)), the video runs fine till about the 200-250th frame, then suddenly it starts running faster and finishes by the 625th frame. I printed the FPS; it remains the same all the time.
Why is this happening?
Please help!
Try the following.
C-style reading:
#include <iostream>
#include <opencv2/video/video.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

int main()
{
    CvCapture *video;
    video = cvCreateFileCapture("ADDRESS TO THE FILE");

    IplImage *frame;
    while (true)
    {
        frame = cvQueryFrame(video);
        if (frame == NULL)   // cvQueryFrame returns NULL at the end of the video
        {
            std::cout << "END OF VIDEO" << std::endl;
            break;
        }
        cvShowImage("VIDEO", frame);
        cvWaitKey(25);       // since most of the videos run at 25 fps
    }
    return 0;
}
C++ style:
int main()
{
    VideoCapture video("ADDRESS OF VIDEO");
    Mat frame;
    while (true)
    {
        video >> frame;
        if (frame.empty())   // no more frames to read
        {
            std::cout << "END OF VIDEO FILE" << std::endl;
            break;
        }
        imshow("VIDEO", frame);
        waitKey(25);
    }
    return 0;
}
Try this and check whether it gives a uniform rate of play.
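The hard-coded waitKey(25) assumes a 25 fps file; a hedged variant of the C++ loop that reads the frame rate stored in the file and derives the delay from it (falling back to roughly 25 fps if the property is unavailable):

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture video("ADDRESS OF VIDEO");
    if (!video.isOpened()) return -1;

    double fps = video.get(CV_CAP_PROP_FPS);             // frame rate stored in the file
    int delay = (fps > 0) ? cvRound(1000.0 / fps) : 40;  // ms per frame, ~25 fps fallback

    cv::Mat frame;
    while (true)
    {
        video >> frame;
        if (frame.empty())
        {
            std::cout << "END OF VIDEO FILE" << std::endl;
            break;
        }
        cv::imshow("VIDEO", frame);
        if (cv::waitKey(delay) >= 0) break;
    }
    return 0;
}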

OpenCV VideoWriter, I don't know why it doesn't work

I've just written a first program with VideoCapture and VideoWriter. I copied the source from the wiki and changed only the video file name, but it produced an error.
Here is the source from the wiki.
The OpenCV version is 2.1 and the compiler is Visual C++ 2008 Express.
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
VideoCapture capture(1); // open the default camera
if( !capture.isOpened() ) {
printf("Camera failed to open!\n");
return -1;
}
Mat frame;
capture >> frame; // get first frame for size
// record video
VideoWriter record("RobotVideo.avi", CV_FOURCC('D','I','V','X'), 30, frame.size(), true);
if( !record.isOpened() ) {
printf("VideoWriter failed to open!\n");
return -1;
}
namedWindow("video",1);
for(;;)
{
// get a new frame from camera
capture >> frame;
// show frame on screen
imshow("video", frame);
// add frame to recorded video
record << frame;
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
// the recorded video will be closed automatically in the VideoWriter destructor
return 0;
}
Starting from that source, I changed two parts. One is the VideoCapture (I don't have a tuner card or camera). The original line is
VideoCapture capture(1); // open the default camera
and changed to
VideoCapture capture("C:/Users/Public/Videos/Sample Videos/WildlifeTest.wmv");
And the other is for VideoWriter:
// record video
VideoWriter record("RobotVideo.avi", CV_FOURCC('D','I','V','X'), 30, frame.size(), true);
and changed to
VideoWriter record("C:/Users/Public/Videos/Sample Videos/WildlifeRec.wmv",
CV_FOURCC('W','M','V','1'), 30,frame.size(), true);
and the part where the error occurs is:
// add frame to recorded video
record << frame;
Please show me what my mistake is!
P.S.
When I delete the line record << frame;, it works well, so I think the error is caused by that line. I also found that even without my changes, the wiki source program produces the same error.
The first error that I see is the file paths. You have to give them like this: C:\\Users\\....
Please make sure your opencv_ffmpegXXX.dll is present and working correctly.
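A common cause of a crash at record << frame is a VideoWriter that silently failed to open (for example, a codec/container combination the backend cannot write) or an empty first frame giving a zero output size. A minimal defensive sketch, assuming a video file as input and a DIVX-encoded .avi as output; the paths and codec choice are just placeholders:

#include "cv.h"
#include "highgui.h"
#include <cstdio>

using namespace cv;

int main()
{
    VideoCapture capture("C:/Users/Public/Videos/Sample Videos/WildlifeTest.wmv");
    if (!capture.isOpened()) { printf("Input failed to open!\n"); return -1; }

    Mat frame;
    capture >> frame;                       // first frame gives the output size
    if (frame.empty()) { printf("No frames in input!\n"); return -1; }

    // DIVX into an .avi container is widely supported by the FFmpeg backend
    VideoWriter record("WildlifeRec.avi", CV_FOURCC('D','I','V','X'),
                       30, frame.size(), true);
    if (!record.isOpened()) { printf("VideoWriter failed to open!\n"); return -1; }

    while (!frame.empty())
    {
        record << frame;                    // only write valid, non-empty frames
        capture >> frame;
    }
    return 0;
}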

Resources