Finding camera and distortion matrix using cvCalibrateCamera2() - opencv

I was trying to find the camera matrix and distortion coefficients using cvCalibrateCamera2. There were no compilation errors, but when I try to execute the program it gives:
OpenCV error: Sizes of input arguments do not match( both matrices must have the same number of points) in cvConvertPointsHomogenous, file /build/buildd/opencv-2.3.1/modules/calib3d/src/fundam.cpp
The size of the matrix storing the object points is 4 x 3 and that of the matrix storing the image points is 4 x 2. What could be wrong?
Now I made certain changes to my code.
This is the code that I am using:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//function to call cvCalibrateCamera2()
void calibrate(CvMat* object_points, CvMat* image_points, CvMat* intrinsic,CvMat* distortion)
{
const int point_count= object_points->rows;
const int image_count=object_points->rows/point_count;
CvMat* const full_object_points = cvCreateMat(image_count*point_count,3,CV_32FC1);
CvMat* const point_counts= cvCreateMat(image_count,1,CV_32SC1);
for(int i =0; i<image_count;i++)
{
CV_MAT_ELEM(*point_counts,float , i,0)= point_count;
for(int j=0;j<point_count;j++)
{
for(int k=0; k<3;k++){
CV_MAT_ELEM(*full_object_points,float,i*point_count+j,k)=CV_MAT_ELEM(*object_points,float,j,k);
}
}
}
cvCalibrateCamera2(full_object_points,image_points,point_counts,cvSize(1,1),intrinsic, distortion,NULL,NULL,0);
}
int main()
{
const float points[][2]={{1,2},{0,0},{3,5},{5,2}};
const int image_count=5;
const int point_count=sizeof(points)/sizeof(points[1]);
CvMat* const object_points=cvCreateMat(point_count,3,CV_32FC1);
for(int i=0; i<point_count;i++)
{
CV_MAT_ELEM(*object_points, float, i,0)=points[i][0];
CV_MAT_ELEM(*object_points, float, i,1)=points[i][1];
CV_MAT_ELEM(*object_points, float, i,2)=0;
}
CvMat* const image_points=cvCreateMat(image_count*point_count,2,CV_32FC1);
CvMat* const intrinsic=cvCreateMat(3,3,CV_32FC1);
CvMat* const distortion=cvCreateMat(5,1,CV_32FC1);
calibrate(object_points,image_points,intrinsic,distortion);
}
On execution I am getting the following error:
OpenCV Error: Bad argument (The total number of matrix elements is not divisible by the new number of rows) in cvReshape, file /build/buildd/opencv-2.3.1/modules/core/src/array.cpp, line 2755
terminate called after throwing an instance of 'cv::Exception'
what(): /build/buildd/opencv-2.3.1/modules/core/src/array.cpp:2755: error: (-5) The total number of matrix elements is not divisible by the new number of rows in function cvReshape
Aborted (core dumped)

The error means exactly what it says: it needs as many object points as image points. If you try to calibrate with, say, 10 sets of image points but only 9 sets of object points, this error is returned.
I'd recommend using the C++ OpenCV API, as it accepts vectors of points and matrices as input. Checking the length of such vectors is far easier than checking matrix sizes.
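For illustration, a minimal sketch of the C++ API: the pattern, the ground-truth camera, and the view poses below are made up, and the image points are synthesized with projectPoints purely so the example runs end to end. The point of the sketch is the shape of the inputs: one inner vector per view, with matching outer and inner sizes.

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <vector>

int main()
{
    // A small planar grid of object points (z = 0).
    std::vector<cv::Point3f> obj;
    for (int y = 0; y < 3; y++)
        for (int x = 0; x < 4; x++)
            obj.push_back(cv::Point3f((float)x, (float)y, 0));

    // Made-up ground-truth camera, used only to synthesize detections.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);
    cv::Mat dist = cv::Mat::zeros(5, 1, CV_64F);

    std::vector<std::vector<cv::Point3f> > objectPoints;
    std::vector<std::vector<cv::Point2f> > imagePoints;
    for (int v = 0; v < 3; v++) {
        // Tilt each view differently so the calibration is well posed.
        cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1 * (v + 1), 0.2, 0);
        cv::Mat tvec = (cv::Mat_<double>(3, 1) << -1.5, -1.0, 8.0 + v);
        std::vector<cv::Point2f> img;
        cv::projectPoints(obj, rvec, tvec, K, dist, img);
        objectPoints.push_back(obj);  // same pattern in every view
        imagePoints.push_back(img);   // sizes match view for view
    }

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, cv::Size(640, 480),
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
    std::cout << "RMS reprojection error: " << rms << "\n" << cameraMatrix << std::endl;
    return 0;
}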

Related

cudaMemcpy invalid argument: in simple vector example

The following example:
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <math.h>

#define N 100
#define t_num 256

int main(){
    int vector_one_h[t_num], vector_one_g[t_num];

    cudaError_t err = cudaMalloc((void**)&vector_one_g, t_num * sizeof(int));
    printf("Cuda malloc vector swap one: %s \n", cudaGetErrorString(err));
    printf("Device Vector: %p \n:", vector_one_g);

    for(int m = 0; m < t_num; m++){
        vector_one_h[m] = rand() % N;
    }

    err = cudaMemcpy(vector_one_g, vector_one_h, t_num * sizeof(int), cudaMemcpyHostToDevice);
    printf("Cuda mem copy vector swap one: %s \n", cudaGetErrorString(err));
}
Will return:
Cuda malloc vector swap one: no error
Device Vector: 0x7ffcf028eea0
:Cuda mem copy vector swap one: invalid argument
So why is cudaMemcpy receiving an invalid argument?
From the documentation for cudaMemcpy() I thought the problem might be that I need to pass the second argument as an address, &vector_one_h, but placing that in the code returns the exact same error.
Also, while there are many posts about cudaMemcpy invalid-argument errors, I believe this is not a duplicate: most of the other questions have very complicated examples, while this one is simple and minimal.
Try changing the first line to:
int vector_one_h[t_num], *vector_one_g;
BTW, prefixing an array name with & has no effect on the address value that gets passed: by the rules of C, an array name already decays to a pointer to the array's first element.
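Put together, a corrected version of the example might look like the following; the key change is that the device buffer is declared as a plain pointer, which cudaMalloc then fills in (the cudaFree and return are my additions):

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define N 100
#define t_num 256

int main(){
    int vector_one_h[t_num], *vector_one_g;  // device buffer is a pointer, not an array

    // cudaMalloc writes the device address into vector_one_g.
    cudaError_t err = cudaMalloc((void**)&vector_one_g, t_num * sizeof(int));
    printf("Cuda malloc vector swap one: %s \n", cudaGetErrorString(err));

    for(int m = 0; m < t_num; m++){
        vector_one_h[m] = rand() % N;
    }

    // Both pointers are now valid, so the copy succeeds.
    err = cudaMemcpy(vector_one_g, vector_one_h, t_num * sizeof(int), cudaMemcpyHostToDevice);
    printf("Cuda mem copy vector swap one: %s \n", cudaGetErrorString(err));

    cudaFree(vector_one_g);
    return 0;
}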

OpenCV and DAISY descriptors

I am trying to do feature matching between 2 perspectives of the same image using DAISY and the FlannBasedMatcher.
I don't think there is even a single match that is correct.
Note: I also get different results each time I run the program but I think this is expected behaviour as explained here: FlannBasedMatcher returning different results
So what am I doing wrong? Why are these matches so bad?
Input Images
Wrong & non-deterministic results
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <vector>
#include <stdio.h>

using namespace cv;
using std::vector;

const float nn_match_ratio = 0.7f;     // Nearest neighbor matching ratio
const float keypoint_diameter = 15.0f;

int main(int argc, char ** argv){

    // Load images
    Mat img1 = imread(argv[1]);
    Mat img2 = imread(argv[2]);

    vector<KeyPoint> keypoints1, keypoints2;

    // Add every pixel to the list of keypoints for each image
    for (float xx = keypoint_diameter; xx < img1.size().width - keypoint_diameter; xx++) {
        for (float yy = keypoint_diameter; yy < img1.size().height - keypoint_diameter; yy++) {
            keypoints1.push_back(KeyPoint(xx, yy, keypoint_diameter));
            keypoints2.push_back(KeyPoint(xx, yy, keypoint_diameter));
        }
    }

    Mat desc1, desc2;
    Ptr<cv::xfeatures2d::DAISY> descriptor_extractor = cv::xfeatures2d::DAISY::create();

    // Compute DAISY descriptors for both images
    descriptor_extractor->compute(img1, keypoints1, desc1);
    descriptor_extractor->compute(img2, keypoints2, desc2);

    vector< vector<DMatch> > matches;

    // For each descriptor in image1, find the 2 closest matches in image2
    // (note: couldn't get the BF matcher to work here at all)
    FlannBasedMatcher flannmatcher;
    flannmatcher.add(desc1);
    flannmatcher.train();
    flannmatcher.knnMatch(desc2, matches, 2);

    // Ignore matches with high ambiguity -- i.e. second closest match not much worse than first.
    // Push all remaining matches into the DMatch vector "good_matches" so we can draw them using drawMatches.
    int num_good = 0;
    vector<KeyPoint> matched1, matched2;
    vector<DMatch> good_matches;

    for (int i = 0; i < matches.size(); i++) {
        DMatch first  = matches[i][0];
        DMatch second = matches[i][1];
        if (first.distance < nn_match_ratio * second.distance) {
            matched1.push_back(keypoints1[first.queryIdx]);
            matched2.push_back(keypoints2[first.trainIdx]);
            good_matches.push_back(DMatch(num_good, num_good, 0));
            num_good++;
        }
    }

    Mat res;
    drawMatches(img1, matched1, img2, matched2, good_matches, res);
    imwrite("_res.png", res);
    return 0;
}
Sorry, I found my bug: I have the indexes reversed in the lines that read:
matched1.push_back(keypoints1[first.queryIdx]);
matched2.push_back(keypoints2[first.trainIdx]);
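Since desc1 was added to the matcher (making it the train set) and desc2 was the query, the corrected lines should presumably be:

matched1.push_back(keypoints1[first.trainIdx]);
matched2.push_back(keypoints2[first.queryIdx]);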
How can I get the coordinates of the matches found in the two images, that is, the coordinates of the matched points in the first image and in the second?
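The coordinates can be read straight off the filtered KeyPoint vectors; a minimal sketch, assuming the matched1/matched2 vectors built in the loop above:

// For the i-th good match, the point in the first image is matched1[i].pt
// and the corresponding point in the second image is matched2[i].pt.
for (size_t i = 0; i < matched1.size(); i++) {
    Point2f p1 = matched1[i].pt;  // coordinates in image 1
    Point2f p2 = matched2[i].pt;  // coordinates in image 2
    std::cout << p1 << " <-> " << p2 << std::endl;
}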

split a BGR matrix without using the split() function

I am programming with Visual Studio 2012 and the OpenCV library, version 2.4.6.
Can someone help me split a BGR image into three images, one for each channel?
I know there is the split() function in OpenCV, but it gives me an unhandled exception, probably because I have a 64-bit processor with the 32-bit library, or perhaps it's the version of the library. So I want to know how to iterate over the pixel values of a BGR matrix without using split().
Thanks in advance.
If you don't want to use split(), you can read each B, G, R pixel value from your source image and write it to the corresponding destination image, each of which should be single-channel.
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main( int argc, const char** argv ){
    Mat src = imread("ball.jpg", 1);

    // One single-channel destination image per channel.
    Mat r(src.rows, src.cols, CV_8UC1);
    Mat g(src.rows, src.cols, CV_8UC1);
    Mat b(src.rows, src.cols, CV_8UC1);

    for(int i = 0; i < src.rows; i++){
        for(int j = 0; j < src.cols; j++){
            // OpenCV stores color pixels in B, G, R order.
            Vec3b pixel = src.at<Vec3b>(i, j);
            b.at<uchar>(i, j) = pixel[0];
            g.at<uchar>(i, j) = pixel[1];
            r.at<uchar>(i, j) = pixel[2];
        }
    }

    imshow("src", src);
    imshow("r", r);
    imshow("g", g);
    imshow("b", b);
    waitKey(0);
    return 0;
}

cvSobel problems - opencv

I've got the code below:
// Image Processing.cpp : Defines the entry point for the console application.
//
// Save an available image.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"

/*
The purpose of this program is to show an example of THRESHOLDING.
*/

int _tmain(int argc, _TCHAR* argv[])
{
    IplImage* src = cvLoadImage("D:\\document\\Study\\university of technology\\semester_8\\Computer Vision\\Pics for test\\black-white 4.jpg");
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
    IplImage* temp1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* temp2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    cvCvtColor(src, temp1, CV_RGB2GRAY);
    cvSobel(temp1, temp2, 0, 1, 3);
    cvMerge(temp2, temp2, temp2, NULL, dst);

    cvNamedWindow("src", 1);
    cvNamedWindow("dst", 1);
    cvShowImage("src", src);
    cvShowImage("dst", temp2);
    cvWaitKey(0);

    cvReleaseImage(&src);
    //cvReleaseImage(&dst);
    cvDestroyAllWindows();
    return 0;
}
When I run it, there's a warning, as shown in the picture below, but if I click the "Continue" button anyway, the result is displayed!
I hope someone can give me an explanation!
The result is correct; the description of the program is not. Your xorder=0 and yorder=1, which means you are taking the first derivative in the y-direction. The white pixels in the image correspond to boundaries that a vertical derivative can detect, i.e., boundaries that are close to horizontal. This is why the vertical lines are barely ever detected.
cvSobel by itself has NOTHING to do with thresholding. cvSobel is a function used for finding edges and contours. Thresholding is most commonly an operation that creates a black-and-white image from a greyscale image; it is also called image binarization.
If you want to threshold an image, start with cvThreshold and cvAdaptiveThreshold.
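For reference, here is a minimal thresholding sketch in the same C-style API (the input file name and the threshold of 128 are placeholders, not values from the question):

#include "cv.h"
#include "highgui.h"

int main()
{
    // Load a hypothetical image directly as greyscale.
    IplImage* gray = cvLoadImage("input.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* bin = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);

    // Fixed threshold: pixels above 128 become 255, the rest become 0.
    cvThreshold(gray, bin, 128, 255, CV_THRESH_BINARY);

    // Alternative: a locally adaptive threshold (block size 11, offset 5).
    //cvAdaptiveThreshold(gray, bin, 255, CV_ADAPTIVE_THRESH_MEAN_C,
    //                    CV_THRESH_BINARY, 11, 5);

    cvNamedWindow("bin", 1);
    cvShowImage("bin", bin);
    cvWaitKey(0);

    cvReleaseImage(&gray);
    cvReleaseImage(&bin);
    cvDestroyAllWindows();
    return 0;
}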
I've fixed it; the key change is writing the Sobel result into a 16-bit (IPL_DEPTH_16S) temporary and converting back to 8-bit with cvConvertScale. Here is my code:
// Image Processing.cpp : Defines the entry point for the console application.
//
// Save an available image.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"

/*
The purpose of this program is to show an example of the Sobel method.
*/

int _tmain(int argc, _TCHAR* argv[])
{
    IplImage* src = cvLoadImage("D:\\document\\Study\\university of technology\\semester_8\\Computer Vision\\Pics for test\\black-white 4.jpg");
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* dst_x = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* dst_y = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* temp1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* temp2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_16S, 1);

    cvCvtColor(src, temp1, CV_RGB2GRAY);
    cvSobel(temp1, temp2, 0, 1, 3);
    cvConvertScale(temp2, dst_y, 1.0, 0);
    cvSobel(temp1, temp2, 1, 0, 3);
    cvConvertScale(temp2, dst_x, 1.0, 0);

    // Don't take the derivative in x and y at the same time; take each
    // derivative separately and then combine them with cvAdd.
    //cvSobel(temp1, temp2, 1, 1, 3);
    //cvConvertScale(temp2, dst, 1.0, 0);
    cvAdd(dst_x, dst_y, dst, NULL);

    cvNamedWindow("src", 1);
    cvNamedWindow("dst", 1);
    cvNamedWindow("dst_x", 1);
    cvNamedWindow("dst_y", 1);
    cvShowImage("src", src);
    cvShowImage("dst", dst);
    cvShowImage("dst_x", dst_x);
    cvShowImage("dst_y", dst_y);
    cvWaitKey(0);

    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    cvReleaseImage(&temp1);
    cvReleaseImage(&temp2);
    cvDestroyAllWindows();
    return 0;
}

How to obtain a contour from points with OpenCV

I'm trying to obtain a ROI from an image using VC++ and OpenCV.
I managed to display an image, get the coordinates of a point when I click on it, store these coordinates in a vector and draw lines between these points on my image.
Here is my code:
// Includes
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;

static int app;
static vector<Point2f> cont(6);
static Mat img = imread("C:/img.jpg", 0);

void on_mouse(int, int, int, int, void*);

int main()
{
    app = 0;
    namedWindow("myWindow", CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback("myWindow", on_mouse, 0);
    imshow("myWindow", img);
    waitKey(0);
}

void on_mouse(int evt, int x, int y, int flags, void* param)
{
    if(evt == CV_EVENT_LBUTTONDOWN)
    {
        Point pt(x, y);
        if(app < 6)
        {
            cont[app] = pt;
            app++;
        }
        cout << "Coordinates of point pt: " << x << "," << y << endl;
        for (int i = 0; i < 6; i++)
        {
            cout << cont[i] << endl;
        }
    }
    if(evt == CV_EVENT_RBUTTONDOWN)
    {
        for (int j = 0; j < 5; j++)
        {
            line(img, cont[j], cont[j+1], CV_RGB(255, 0, 0), 2);
        }
        line(img, cont[5], cont[0], CV_RGB(255, 0, 0), 2);
        imshow("myWindow", img);
    }
}
What I would like to obtain is a vector that contains the coordinates of all the points of the contour, and ultimately a binary matrix the size of my image that contains 0 if the pixel is not inside the contour and 1 otherwise.
Thanks for your help.
Make a single-element vector<vector<Point>> and then use drawContours with CV_FILLED. Then you will have the binary matrix you wanted.
I don't have an IDE at hand right now, but the code will be like the following:
vector< vector<Point> > contours;
vector<Point> cont_int(cont.begin(), cont.end()); // your cont holds Point2f; drawContours needs integer Points
contours.push_back(cont_int);
Mat output = Mat::zeros(img.rows, img.cols, CV_8UC1); // your img; start from all zeros
drawContours(output, contours, 0, Scalar(1), CV_FILLED); // now you have the binary image
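Any pixel inside the filled contour is then non-zero, so membership can be tested directly (a usage sketch, assuming the output matrix above):

// Is pixel (x, y) inside the contour? Note the (row, col) = (y, x) order.
bool inside = output.at<uchar>(y, x) != 0;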
