I built OpenCV 2.4.8 (x86) with Visual Studio Express 2012, with Media Foundation support, and then wrote a simple program to test the camera. Everything is running on a Microsoft Surface Pro 2.
Problem 1: the image shown in the window is upside down.
Problem 2: setting the image width and height does not work.
The code is pasted below; thank you for your help!
#include <iostream>
#include <opencv2/opencv.hpp>

bool quitNow = false;

// quit on a left-button double click
void mouseCallBackFunc(int event, int x, int y, int flags, void* userdata){
    if (event == CV_EVENT_LBUTTONDBLCLK) quitNow = true;
}

int main()
{
    int key;
    std::string winName = "show cam";
    cv::namedWindow(winName, 1);
    cv::setMouseCallback(winName, mouseCallBackFunc);
    cv::Mat img;
    cv::VideoCapture vc(1);
    if (vc.isOpened()){
        std::cout << "cam open good\n";
    } else {
        std::cout << "cam open bad\n";
        vc.release();
        return 2;
    }
    if (!vc.set(CV_CAP_PROP_FRAME_HEIGHT, 720)) return 3;
    // note: the second call must set the width, not the height again
    if (!vc.set(CV_CAP_PROP_FRAME_WIDTH, 1280)) return 4;
    vc >> img;
    std::cout << std::endl << "size is " << img.size() << std::endl;
    do
    {
        vc >> img;
        if (img.empty()) return 9;
        cv::imshow(winName, img);
        key = cv::waitKey(10) & 0xff;
    } while (!quitNow);
    return 0;
}
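If the driver ignores the requested resolution, one hedged workaround is to ask the capture object what size it actually negotiated, and for the upside-down image to flip each frame vertically before showing it. This is only a sketch of the display loop, assuming the same vc and winName as above:

// Query the size the backend actually accepted
// (set() may silently keep the default resolution).
std::cout << "negotiated size: "
          << vc.get(CV_CAP_PROP_FRAME_WIDTH) << "x"
          << vc.get(CV_CAP_PROP_FRAME_HEIGHT) << std::endl;

cv::Mat frame, flipped;
do {
    vc >> frame;
    if (frame.empty()) break;
    cv::flip(frame, flipped, 0);       // flip around the x-axis to undo the upside-down image
    cv::imshow(winName, flipped);
} while ((cv::waitKey(10) & 0xff) != 27); // Esc to quit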
Can anyone tell me what's wrong with my code? I can use my webcam from other code, so it is not a driver-support problem. In the code below, I have to try the camera index in a loop to get the camera to activate (the LED indicator turns on); simply calling "CvCapture* camera = cvCaptureFromCAM(0)" does not work, which is weird. Even then, I only get a grey screen. Why?
#include "highgui.h"
#include "cv.h"
int main(int grac, char** grav)
{
CvCapture* camera;
int index;
for (index = -1; index <1; index++)
{
camera = cvCaptureFromCAM(index);
if (camera)
{
printf("%d\n", index);
IplImage* f;
cvNamedWindow("camera", CV_WINDOW_AUTOSIZE);
while (1)
{
f = cvQueryFrame(camera);
cvShowImage("camera", f);
char c = cvWaitKey(33);
if (c == 27)break;
}
}
}
cvReleaseCapture(&camera);
cvDestroyAllWindows;
}
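One thing worth checking, as a hedged guess rather than a confirmed fix: some webcams deliver NULL or washed-out frames for the first few grabs while auto-exposure settles, which can look like a grey screen. A minimal sketch that skips NULL frames and discards a handful of warm-up frames (the count of 10 is an arbitrary assumption) could look like this:

CvCapture* cap = cvCaptureFromCAM(0);
if (cap)
{
    int warmup;
    // throw away a few frames so auto-exposure can settle (10 is arbitrary)
    for (warmup = 0; warmup < 10; warmup++)
        cvQueryFrame(cap);

    cvNamedWindow("camera", CV_WINDOW_AUTOSIZE);
    while (1)
    {
        IplImage* f = cvQueryFrame(cap);
        if (!f) break;                 // stop if the driver stops delivering frames
        cvShowImage("camera", f);
        if ((cvWaitKey(33) & 0xff) == 27) break;
    }
    cvReleaseCapture(&cap);
    cvDestroyAllWindows();
}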
I'm a beginner in C++ and OpenCV. As you may know, Stasm is a C++ software library for finding features in faces, introduced at http://www.milbo.users.sonic.net/stasm. I want to use the Stasm 4.1 library for image recognition. I have read the Stasm manual, but I don't understand how to add "stasm_lib.h" to my project. Please guide me step by step for locating face landmarks. When I build and run, I get this error:
fatal error C1083: Cannot open include file: 'stasm_lib.h': No such file or directory
#include <stdio.h>
#include <stdlib.h>
#include "opencv/highgui.h"
#include "stasm_lib.h"

int main()
{
    static const char* path = "../data/testface.jpg";
    cv::Mat_<unsigned char> img(cv::imread(path, CV_LOAD_IMAGE_GRAYSCALE));
    if (!img.data)
    {
        printf("Cannot load %s\n", path);
        exit(1);
    }
    int foundface;
    float landmarks[2 * stasm_NLANDMARKS]; // x,y coords
    if (!stasm_search_single(&foundface, landmarks,
                             (char*)img.data, img.cols, img.rows, path, "../data"))
    {
        printf("Error in stasm_search_single: %s\n", stasm_lasterr());
        exit(1);
    }
    if (!foundface)
        printf("No face found in %s\n", path);
    else
    {
        // draw the landmarks on the image as white dots
        stasm_force_points_into_image(landmarks, img.cols, img.rows);
        for (int i = 0; i < stasm_NLANDMARKS; i++)
            img(cvRound(landmarks[i*2+1]), cvRound(landmarks[i*2])) = 255;
    }
    cv::imshow("stasm minimal", img);
    cv::waitKey();
    return 0;
}
Thanks a lot
I have calibrated and stereo-rectified images in MATLAB using Caltech's toolbox (http://www.vision.caltech.edu/bouguetj/calib_doc/). I tried computing the disparity in MATLAB, but it is not returning good results, so now I would like to try it in OpenCV. I could not find any OpenCV sample code for disparity on their website, so this is the code I have found so far (it comes from http://www.jayrambhia.com/blog/disparity-maps/):
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/contrib/contrib.hpp"
#include <stdio.h>
#include <string.h>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
Mat img1, img2, g1, g2;
Mat disp, disp8;
//char* method = argv[3];
char* method = "SGBM";
//img1 = imread(argv[1]);
//img2 = imread(argv[2]);
img1 = imread("leftImage.jpg");
img2 = imread("rightImage.jpg");
cvtColor(img1, g1, CV_BGR2GRAY);
cvtColor(img2, g2, CV_BGR2GRAY);
if (!(strcmp(method, "BM")))
{
StereoBM sbm;
sbm.state->SADWindowSize = 9;
sbm.state->numberOfDisparities = 112;
sbm.state->preFilterSize = 5;
sbm.state->preFilterCap = 61;
sbm.state->minDisparity = -39;
sbm.state->textureThreshold = 507;
sbm.state->uniquenessRatio = 0;
sbm.state->speckleWindowSize = 0;
sbm.state->speckleRange = 8;
sbm.state->disp12MaxDiff = 1;
sbm(g1, g2, disp);
}
else if (!(strcmp(method, "SGBM")))
{
StereoSGBM sbm;
sbm.SADWindowSize = 3;
sbm.numberOfDisparities = 144;
sbm.preFilterCap = 63;
sbm.minDisparity = -39;
sbm.uniquenessRatio = 10;
sbm.speckleWindowSize = 100;
sbm.speckleRange = 32;
sbm.disp12MaxDiff = 1;
sbm.fullDP = false;
sbm.P1 = 216;
sbm.P2 = 864;
sbm(g1, g2, disp);
}
normalize(disp, disp8, 0, 255, CV_MINMAX, CV_8U);
imshow("left", img1);
imshow("right", img2);
imshow("disp", disp8);
waitKey(0);
return(0);
}
And this is the error I get:
Unhandled exception at 0x000007FEFD4D940D in OPEN_CV_TEST.exe: Microsoft C++ exception: cv::Exception at memory location 0x0000000000149260.
I am new to C++ and there is no description of the procedure to run the code, so I just put the left and right images in the \x64\Debug folder of my project and ran the code in MS Visual Studio 2012 on Windows 7 64-bit. I created the project earlier, ran a sample test, and it worked, so now I am just copying the above code into the main C++ source file. I assume there should not be any library or header files missing.
Also please note that I do not need to rectify the images, and I do not need stereo matching either right now.
Any help is greatly appreciated.
I figured it out! It was the "imread" function in OpenCV that was causing the problem; I used "cvLoadImage" instead. I also put the images in the project folder right next to the CPP files, and in the Debug folders as well. It is working fine now. Apparently the "imread" function is a known problem in OpenCV!
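For anyone hitting the same cv::Exception: cvtColor throws when it is handed an empty Mat, and imread returns an empty Mat (rather than throwing) when the file cannot be found, which often comes down to the path or working directory. A minimal guard, assuming the same file names as in the code above, is to check the images before using them:

img1 = imread("leftImage.jpg");
img2 = imread("rightImage.jpg");
if (img1.empty() || img2.empty())
{
    // imread returns an empty Mat instead of throwing when the file is not found
    cout << "Could not load the input images - check the paths/working directory" << endl;
    return -1;
}
cvtColor(img1, g1, CV_BGR2GRAY);
cvtColor(img2, g2, CV_BGR2GRAY);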
First time diving into OF on iOS... exciting! As a first run I'm trying to port an app I made before into an iOS app. It's a pretty simple Rutt-Etra-like effect on video coming in from the camera. I have it working as a Mac app, but I can't seem to get it displaying properly on my iPhone. The mesh is drawing, but I don't think I'm getting pixel values from my camera into vidPixels in order to change the color of my mesh. I'm basing this on the videoGrabberExample in OF iOS 0072. I'm on a MacBook Pro, 10.7.5, running Xcode 4.5.2.
Can anyone give this a look and let me know if I'm doing something wrong? :) Thanks so much in advance.
Code:
testApp.mm
#include "testApp.h"
#include "ofGLUtils.h"
#include "ofGLRenderer.h"
//--------------------------------------------------------------
void testApp::setup(){
ofxiPhoneSetOrientation(OFXIPHONE_ORIENTATION_LANDSCAPE_RIGHT);
ofSetFrameRate(30);
grabber.initGrabber(480, 360);
yStep = 5;
xStep = 5;
// drawRuttEtra = false;
ofBackground(0, 0, 0);
}
//--------------------------------------------------------------
void testApp::update(){
//ofBackground(255,255,255);
grabber.update();
if(grabber.isFrameNew()){
vidPixels = grabber.getPixelsRef();
}
}
//--------------------------------------------------------------
void testApp::draw(){
glEnable(GL_DEPTH_TEST);
ofMesh mesh;
int rowCount = 0;
for (int y = 0; y<grabber.height; y+=yStep){
ofNoFill();
mesh.setMode(OF_PRIMITIVE_LINE_STRIP);
if (rowCount % 2 == 0) {
for (int x = 0; x < grabber.width; x += xStep){
ofColor curColor = vidPixels.getColor(x, y);
mesh.addColor(ofColor(curColor));
mesh.addVertex(ofVec3f(x,y, curColor.getBrightness() * 0.3));
}
} else {
for (int x = grabber.width-1; x >= 0; x -= xStep){
ofColor curColor = vidPixels.getColor(x, y);
mesh.addColor(ofColor(curColor));
mesh.addVertex(ofVec3f(x,y, curColor.getBrightness() * 0.3)); }
}
rowCount++;
}
mesh.draw();
// grabber.draw(0,0);
}
testApp.h
#pragma once

#include "ofMain.h"
#include "ofxiPhone.h"
#include "ofxiPhoneExtras.h"

class testApp : public ofxiPhoneApp{
public:
    void setup();
    void update();
    void draw();
    void exit();

    void touchDown(ofTouchEventArgs & touch);
    void touchMoved(ofTouchEventArgs & touch);
    void touchUp(ofTouchEventArgs & touch);
    void touchDoubleTap(ofTouchEventArgs & touch);
    void touchCancelled(ofTouchEventArgs & touch);

    void lostFocus();
    void gotFocus();
    void gotMemoryWarning();
    void deviceOrientationChanged(int newOrientation);

    ofVideoGrabber grabber;
    ofTexture tex;
    unsigned char * pix;

    //rutt etra effect
    int yStep;
    int xStep;
    bool drawRuttEtra;
    ofPixels vidPixels;
};
main.mm
#include "ofMain.h"
#include "testApp.h"
int main(){
ofSetupOpenGL(1024,768, OF_FULLSCREEN); // <-------- setup the GL context
ofRunApp(new testApp);
}
I tried doing some debugging to see what might be happening.
I wanted to make sure isFrameNew() works. Trying:
if(grabber.isFrameNew()){
    cout << "i'm grabbing new pixels!" << endl;
    vidPixels = grabber.getPixelsRef();
}
prints "i'm grabbing new pixels!", so that if block is working.
In my double for loop, if I cout the value of vidPixels.getColor(x,y):
cout << vidPixels.getColor(x,y) << endl;
I get all 255... which makes me think grabber.getPixelsRef() isn't working the way I thought it would.
Any ideas?
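One thing worth ruling out, as a hedged guess rather than a confirmed fix: on iOS the grabber may deliver its first frame a little after setup(), so draw() can run while vidPixels is still empty, and the grabber's actual capture size can differ from the 480x360 you request. A small sketch of the kind of guard I mean, driving the loops from the pixel buffer's own size instead of grabber.width/height (serpentine row traversal from the original omitted for brevity):

void testApp::draw(){
    // skip drawing until the grabber has actually filled vidPixels
    if (!vidPixels.isAllocated()) return;

    ofMesh mesh;
    mesh.setMode(OF_PRIMITIVE_LINE_STRIP);

    // use the pixel buffer's own size in case it differs from grabber.width/height
    for (int y = 0; y < vidPixels.getHeight(); y += yStep){
        for (int x = 0; x < vidPixels.getWidth(); x += xStep){
            ofColor curColor = vidPixels.getColor(x, y);
            mesh.addColor(ofColor(curColor));
            mesh.addVertex(ofVec3f(x, y, curColor.getBrightness() * 0.3));
        }
    }
    mesh.draw();
}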
I've been trying to capture video from a camera and write it to an AVI file. I'm using Qt 4.8.2 with MSVC 2010 (x86) on Windows 7. I have two versions of the code: one using cv::Mat and the other using IplImage*. However, only the IplImage* version is working. Here's my code using cv::Mat:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

int main() {
    VideoCapture* capture2 = new VideoCapture( CV_CAP_DSHOW );
    Size size2 = Size(640, 480);
    int codec = CV_FOURCC('M', 'J', 'P', 'G');
    VideoWriter* writer2 = new VideoWriter("video.avi", codec, 15, size2);
    int a = 100;
    Mat frame2;
    while ( a > 0 ) {
        capture2->read(frame2);
        writer2->write(frame2);
        a--;
    }
    writer2->release();
    capture2->release();
    return 0;
}
And here's the code using IplImage*:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main() {
    CvCapture* capture = cvCaptureFromCAM( CV_CAP_DSHOW );
    CvSize size = cvSize(640, 480);
    int codec = CV_FOURCC('M', 'J', 'P', 'G');
    CvVideoWriter* writer = cvCreateVideoWriter("video.avi", codec, 15, size);
    int a = 100;
    while ( a > 0 ) {
        IplImage* frame = cvQueryFrame( capture );
        cvWriteToAVI(writer, frame);
        a--;
    }
    cvReleaseVideoWriter(&writer);
    cvReleaseCapture( &capture );
    return 0;
}
It's basically the same, or at least it looks like the same thing to me. It reads 100 frames and should write them into "video.avi". It compiles and runs without errors, but the cv::Mat version doesn't write anything, and the IplImage* version works perfectly.
Does someone have any idea on what's going on?
The syntax in the OpenCV C++ reference is a bit different; here is working code in C++.
I just added imshow and waitKey for checking; you can remove them if you want.
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

int main()
{
    VideoCapture* capture2 = new VideoCapture(CV_CAP_DSHOW);
    Size size2 = Size(640, 480);
    int codec = CV_FOURCC('M', 'J', 'P', 'G');

    // Unlike in C, here we use an object of the class VideoWriter
    VideoWriter writer2("video_.avi", codec, 15.0, size2, true);
    writer2.open("video_.avi", codec, 15.0, size2, true);
    if (writer2.isOpened())
    {
        int a = 100;
        Mat frame2;
        while (a > 0)
        {
            capture2->read(frame2);
            imshow("live", frame2);
            waitKey(100);
            writer2.write(frame2);
            a--;
        }
    }
    else
    {
        cout << "ERROR while opening" << endl;
    }
    // No need to release the writer; its destructor will be called automatically
    capture2->release();
    return 0;
}
I had the same problem over and over again, and none of the solutions I found online helped.
Strangely enough, the problem (identified purely by trial and error) was with write permissions. Everything worked after I ran sudo chmod u+rwx on the Python script.
I had the same problem, and after a while I realized that the input frames weren't the same size as the output. Resizing the input frames may help you:
capture2->read(frame2);
cv::resize(frame2, frame2, cv::Size(640, 480));  // match the size the VideoWriter was opened with
writer2->write(frame2);