How to initialize multiple OpenNI sensors with OpenCV

I'd like to use two Asus Xtion Pro sensors with OpenCV (2.4.4), and I'm not sure how to initialize both devices.
I can initialize one like so:
VideoCapture capture;
capture.open(CV_CAP_OPENNI);
How can I initialize two VideoCapture instances for two separate sensors?

Turns out it's as simple as this:
VideoCapture sensor1;
sensor1.open(CV_CAP_OPENNI_ASUS);

VideoCapture sensor2;
sensor2.open(CV_CAP_OPENNI_ASUS + 1);
A very basic runnable example is:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    cout << "opening device(s)" << endl;

    VideoCapture sensor1;
    sensor1.open(CV_CAP_OPENNI_ASUS);
    VideoCapture sensor2;
    sensor2.open(CV_CAP_OPENNI_ASUS + 1);   // device index is added to the base constant

    if( !sensor1.isOpened() )
    {
        cout << "Can not open capture object 1." << endl;
        return -1;
    }

    for(;;)
    {
        Mat depth1, depth2;
        if( !sensor1.grab() )
        {
            cout << "Sensor1 can not grab images." << endl;
            return -1;
        }
        else if( sensor1.retrieve( depth1, CV_CAP_OPENNI_DEPTH_MAP ) ) imshow("depth1", depth1);

        if( !sensor2.grab() )
        {
            cout << "Sensor2 can not grab images." << endl;
            return -1;
        }
        else if( sensor2.retrieve( depth2, CV_CAP_OPENNI_DEPTH_MAP ) ) imshow("depth2", depth2);

        if( waitKey( 30 ) == 27 ) break;   // ESC quits
    }
    return 0;
}
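One note on the example above: only sensor1 is guarded with isOpened(); it is worth checking sensor2 the same way before grabbing. Since the device index is simply added to the base constant, opening more than two sensors also generalizes naturally. A minimal sketch of that idea (a hypothetical helper of my own, not part of the original answer, assuming OpenCV 2.4.x):

#include "opencv2/highgui/highgui.hpp"
#include <vector>
using namespace cv;

// hypothetical helper: open up to maxDevices Xtion sensors by index offset
std::vector<VideoCapture> openXtionSensors(int maxDevices)
{
    std::vector<VideoCapture> sensors;
    for (int i = 0; i < maxDevices; ++i)
    {
        VideoCapture cap(CV_CAP_OPENNI_ASUS + i);  // index i selects the i-th device
        if (!cap.isOpened())
            break;                                 // stop at the first device that fails to open
        sensors.push_back(cap);                    // VideoCapture copies share the underlying capture
    }
    return sensors;
}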

Related

Video capturing works in the example code, but doesn't in my own project

I built OpenCV with OpenNI2 using CMake, and I succeeded in running the 'openni_capture' example that is included in OpenCV.sln. It clearly shows the video being captured. I'm using an Orbbec Astra camera.
But when I create my own project, copy and paste the same code, and run it, it says 'can not open a capture object' even though it builds successfully.
The code is shown below. The problem is that 'capture.isOpened()' is TRUE in the example project but FALSE in my own project, which has exactly the same code.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=1.f, float V=1.f )
{
    .
    .
    .
}

static float getMaxDisparity( VideoCapture& capture )
{
    .
    .
    .
}

static void printCommandLineParams()
{
    cout << "-cd= Colorized disparity? (0 or 1; 1 by default) Ignored if disparity map is not selected to show." << endl;
    cout << "-fmd= Fixed max disparity? (0 or 1; 0 by default) Ignored if disparity map is not colorized (-cd 0)." << endl;
    cout << "-mode= image mode: resolution and fps, supported three values: 0 - CAP_OPENNI_VGA_30HZ, 1 - CAP_OPENNI_SXGA_15HZ," << endl;
    cout << " 2 - CAP_OPENNI_SXGA_30HZ (0 by default). Ignored if rgb image or gray image are not selected to show." << endl;
    cout << "-m= Mask to set which output images are need. It is a string of size 5. Each element of this is '0' or '1' and" << endl;
    cout << " determine: is depth map, disparity map, valid pixels mask, rgb image, gray image need or not (correspondently), ir image" << endl;
    cout << " By default -m=010100 i.e. disparity map and rgb image will be shown." << endl;
    cout << "-r= Filename of .oni video file. The data will grabbed from it." << endl;
}

static void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool& isFixedMaxDisp, int& imageMode, bool retrievedImageFlags[],
                              string& filename, bool& isFileReading )
{
    filename.clear();
    cv::CommandLineParser parser(argc, argv, "{h help||}{cd|0|}{fmd|0|}{mode|-1|}{m|000100|}{r||}");
    if (parser.has("h"))
    {
        help();
        printCommandLineParams();
        exit(0);
    }
    isColorizeDisp = (parser.get<int>("cd") != 0);
    isFixedMaxDisp = (parser.get<int>("fmd") != 0);
    imageMode = parser.get<int>("mode");
    int flags = parser.get<int>("m");
    isFileReading = parser.has("r");
    if (isFileReading)
        filename = parser.get<string>("r");
    if (!parser.check())
    {
        parser.printErrors();
        help();
        exit(-1);
    }
    if (flags % 1000000 == 0)
    {
        cout << "No one output image is selected." << endl;
        exit(0);
    }
    for (int i = 0; i < 6; i++)
    {
        retrievedImageFlags[5 - i] = (flags % 10 != 0);
        flags /= 10;
    }
}
int main( int argc, char* argv[] )
{
    bool isColorizeDisp, isFixedMaxDisp;
    int imageMode;
    bool retrievedImageFlags[6];
    string filename;
    bool isVideoReading;
    parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );

    cout << "Device opening ..." << endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
    {
        capture.open( CAP_OPENNI2 );
        if( !capture.isOpened() )
        {
            capture.open( CAP_OPENNI );
        }
    }
    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }
    .
    .
    .
In VC++ Directories -> Include Directories I added:
C:\OpenCV_end\Source\opencv-3.4.0\build\install\include and C:\Program Files\OpenNI2\Include
In VC++ Directories -> Library Directories I added:
C:\OpenCV_end\Source\opencv-3.4.0\build\install\x64\vc14\lib and C:\Program Files\OpenNI2\Lib
In Linker -> Input I added:
opencv_world340d.lib and OpenNI2.lib
I also copied the DLL files into the folder that contains my project source: opencv_world340d.dll and all the files from C:\Program Files\OpenNI2\Redist.
But it still doesn't work. Please help me.
Thank you.
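One thing that is not in the original post, but may help narrow this down: printing the build information of the OpenCV library the project actually links against shows whether that particular build was compiled with OpenNI2 support. A minimal sketch, assuming the OpenCV 3.4 API:

#include "opencv2/core.hpp"
#include <iostream>

int main()
{
    // The "Video I/O" section of this report lists whether OpenNI2 was enabled
    // in the OpenCV build this executable is linked against.
    std::cout << cv::getBuildInformation() << std::endl;
    return 0;
}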

Frames lost while processing video with OpenCV

I am capturing a video at 30 fps, but when I process the video with OpenCV for ArUco marker detection I am losing almost half of the frames. For a 5 min video I expect 5x60x30 = 9000 frames, but I am getting only about 4500. I tried different resolutions and fps while recording, but the problem persists. My code is as follows. I later want to sync the video with the audio recorded from the camera, so even just knowing which frames are being lost would solve my problem. Any pointers or suggestions are welcome.
#include "opencv2\core.hpp"
#include "opencv2\imgcodecs.hpp"
#include "opencv2\imgproc.hpp"
#include "opencv2\highgui.hpp"
#include "opencv2\aruco.hpp"
#include "opencv2\calib3d.hpp"
#include <time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include "stdafx.h"
using namespace std;
using namespace cv;
#define _CRT_SECURE_NO_WARNINGS 1
int startWebcamMonitoring() // (const Mat& cameraMatrix, const Mat& distanceCoefficients, float arucoSquareDimensions)
{
    Mat frame4;
    Scalar_<double> borderColor, borderColor2, borderColor3;
    vector<int> markerIds;
    vector< vector<Point2f> > markerCorners, rejectedCandidates;
    aruco::DetectorParameters parameters;
    Ptr<aruco::Dictionary> makerDiktionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);

    VideoCapture cap("sample.mp4");
    double fps = cap.get(CV_CAP_PROP_FPS);
    cout << "Frames per second : " << fps << endl;

    while (true)
    {
        cap >> sample;
        if (!cap.read(frame4))
            break;

        aruco::detectMarkers(frame4, makerDiktionary, markerCorners, markerIds);
        aruco::drawDetectedMarkers(frame4, markerCorners, markerIds, borderColor);
        aruco::estimatePoseSingleMarkers(markerCorners, arucoSquareDimension,
            cameraMatrix, distanceCoefficients, rotationVectors, translationVectors);

        Mat rotationMatrix;
        for (int i = 0; i < markerIds.size(); i++)
        {
            aruco::drawAxis(frame4, cameraMatrix, distanceCoefficients, rotationVectors[i], translationVectors[i], 0.01f);
            Rodrigues(rotationVectors[i], rotationMatrix);
            time_t current = time(0);
            cout << " Translation " << translationVectors[i] << " ID" << markerIds[i] << " Euler angles " << 180 / 3.1415*rotationMatrixToEulerAngles(rotationMatrix) << "current time " << ctime(&current) << endl;
        }
        freopen("output_sample", "a", stdout);
        imshow("recording", frame4);
        if (waitKey(30) >= 0) break;
    }
The problem is:
cap >> sample;
if (!cap.read(frame4))
    break;
The program is reading a frame from the source twice in every iteration.
You should remove the cap >> sample; line and it will be fine.
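Put differently, only one call should pull a frame from the stream in each iteration. A minimal sketch of the corrected loop, keeping the same variable names as the question:

while (true)
{
    if (!cap.read(frame4))       // single read per iteration
        break;                   // stop when the stream runs out of frames

    // ... ArUco detection, pose estimation and logging on frame4 as before ...

    imshow("recording", frame4);
    if (waitKey(30) >= 0) break;
}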

How to initialize a cv::Mat using a vector of floats?

Is there a way of initializing an OpenCV cv::Mat using a vector<float> object?
Or do I need to loop over every entry of the vector and write it into the cv::Mat object?
I wrote the following test code for myself (incorporating @Miki's comment) to understand the details.
You will understand it well when you test it.
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
    vector<float> vec{ 0.1, 0.9, 0.2, 0.8, 0.3, 0.7, 0.4, 0.6, 0.5, 1 };

    Mat m1( vec );                                        // column Mat wrapping the vector's data
    imshow("m1", m1);
    waitKey();

    Mat m2( 1, (int)vec.size(), CV_32FC1, vec.data() );   // 1-row Mat over the same buffer
    imshow("m2", m2);
    waitKey();

    Mat1f m3( (int)vec.size(), 1, vec.data() );           // typed column Mat over the same buffer
    imshow("m3", m3);
    waitKey();

    Mat1f m4( 1, (int)vec.size(), vec.data() );           // typed row Mat over the same buffer
    imshow("m4", m4);
    waitKey();

    cout << "as seen below all Mat and vector use same data" << endl;
    cout << vec[0] << endl;
    m1 *= 2;
    cout << vec[0] << endl;
    m2 *= 2;
    cout << vec[0] << endl;
    m3 *= 2;
    cout << vec[0] << endl;
    m4 *= 2;
    cout << vec[0] << endl;

    return 0;
}
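One caveat worth adding (my note, not part of the original answer): all of the constructions above wrap the vector's buffer rather than copying it, so the Mats dangle once vec is destroyed. If the Mat should own its data, request a copy:

Mat m5( vec, true );             // copyData = true: m5 gets its own buffer
Mat m6 = Mat( vec ).clone();     // same effect via clone()
m5 *= 2;                         // vec[0] is unaffected this time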

OpenCV Capture from Camera Results in Noise-like Image

I am using OpenCV on an embedded target board (FriendlyARM mini6410, ARM1176 processor running Linux kernel 2.6.38).
I compiled the OpenCV 2.4.4 library using the toolchain provided for the board, found on the FTP (please see the FriendlyARM website). I disabled GTK and ffmpeg and enabled V4L. The library compiled successfully.
Then I wrote this code:
#include <opencv.hpp>
#include <highgui/highgui.hpp>
#include <imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    int i;
    cout << "initialise" << endl;
    IplImage* img = 0;

    cout << "capturing ..." << endl;
    CvCapture* capture = cvCaptureFromCAM(2);
    cout << "get here" << endl;
    if(!capture){
        cout << "not capture" << endl;
        return -1;
    }
    cout << "captured" << endl;

    img = cvQueryFrame(capture);
    IplImage* img1 = cvCreateImage(cvGetSize(img), 8, 3);
    // cvCvtColor(img, img1, CV_RGB2GRAY);
    cvCopy(img, img1);
    cvSaveImage("cam_snap.jpg", img1);

    cvReleaseImage( &img1 );
    cvReleaseImage( &img );
    cvReleaseCapture( &capture );
    cout << "exit" << endl;
    return 0;
}
The code builds successfully. I run the .elf executable on the target board, connected to the camera (PS3 Eye), but the resulting image looks like a broken television (noise-like):
On my host machine, the resulting image is as expected (the scene in front of the camera). Can you suggest what went wrong, or where I should start debugging?
You should check your depth and channels. It is probably a matter of alignment; also be aware that your image is probably in BGR, not RGB.
And you should use cv::Mat instead of IplImage in C++, and VideoCapture instead of CvCapture.
This sample code should work. (Not tested on the same architecture as yours.)
#include <opencv.hpp>
#include <highgui/highgui.hpp>
#include <imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture capture = cv::VideoCapture(0);
    cout << "get here" << endl;
    if(!capture.isOpened()) // check if we succeeded
        return -1;
    cout << "captured" << endl;

    Mat img;
    capture >> img;
    imwrite("./test.png", img);
    capture.release();
    cout << "exit" << endl;
    return 0;
}
Hope it helped.
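As a quick follow-up on the depth/channels remark above, the capture's actual output can be inspected like this (a sketch of my own, using the same include style and C++ API as the sample above):

#include <opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    VideoCapture capture(0);
    if(!capture.isOpened())
        return -1;

    Mat img;
    capture >> img;
    // the usual 8-bit BGR camera frame reports 3 channels and depth 0 (CV_8U)
    cout << "channels: " << img.channels()
         << ", depth: " << img.depth()
         << ", size: " << img.cols << "x" << img.rows << endl;
    return 0;
}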
Okay, confirmed: the mini6410 runs USB 1.0, and the PS3 Eye needs USB 2.0. I tried the program with a standard webcam (a Chinese product, an iTech PC camera) and it works wonderfully; the saved image shows the scene in front of the camera.

Grabbing pixel data from a video stream

I am having trouble understanding certain code. I am sorry if this comes off as stupid, but I have code to capture video from my webcam and I want to get the RGB values from a frame. If this is impossible, would I have to save a frame as a picture and then get the values from it?
const char window_name[] = "Webcam";

int main(int argc, char* argv[])
{
    /* attempt to capture from any connected device */
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);
    if(!capture)
    {
        printf("Failed to initialise webcam\n");
        return -1;
    }

    /* create the output window */
    cvNamedWindow(window_name, CV_WINDOW_NORMAL);

    do
    {
        /* attempt to grab a frame */
        IplImage *frame = cvQueryFrame(capture);
        if(!frame)
        {
            printf("Failed to get frame\n");
            break;
        }

        COLORREF myColAbsolute = GetPixel(frame, 10, 10); // error in saying frame is not compatible with HDC.
        cout << "Red - " << (int)GetRValue(myColAbsolute) << endl;
        cout << "Green - " << (int)GetGValue(myColAbsolute) << endl;
        cout << "Blue - " << (int)GetBValue(myColAbsolute) << endl;

        /* show the frame */
        cvShowImage(window_name, frame);
Ha! (Obviously caught by a copy & paste bummer.)
GetPixel() is a Windows function, not an OpenCV one. The same goes for GetRValue() and its sisters.
You'd use them in the native Win32 API to get a pixel from an HDC, but that won't work with opencv/highgui, since neither HDC nor HWND is exposed.
Since you're obviously a beginner (nothing wrong with that, again!), let me try to talk you out of using the old OpenCV 1.0 API (IplImages, cv*Functions) as well; you should be using the new one (cv::Mat, functions in the cv:: namespace) instead.
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
using namespace std;
int main()
{
    Mat frame;
    namedWindow("video", 1);
    VideoCapture cap(0);

    while ( cap.isOpened() )
    {
        cap >> frame;
        if(frame.empty()) break;

        int x = 3, y = 5;
        // Ladies and Gentlemen, the PIXEL!
        Vec3b pixel = frame.at<Vec3b>(y, x); // row,col, not x,y!
        cerr << "b:" << int(pixel[0]) << " g:" << int(pixel[1]) << " r:" << int(pixel[2]) << endl;

        imshow("video", frame);
        if(waitKey(30) >= 0) break;
    }
    return 0;
}
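If you need the values of every pixel rather than a single one, the same at<Vec3b>() access works in a loop. A sketch that drops into the while loop above (remember that OpenCV stores channels in BGR order):

// iterate over every pixel of the current (BGR) frame
for (int row = 0; row < frame.rows; ++row)
{
    for (int col = 0; col < frame.cols; ++col)
    {
        Vec3b px = frame.at<Vec3b>(row, col);          // (row, col) == (y, x)
        int blue = px[0], green = px[1], red = px[2];  // BGR channel order
        // ... use red / green / blue here ...
    }
}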
