I'm writing a function to convert a 2D STL vector into an OpenCV Mat. OpenCV supports Mat initialization from a vector via Mat(vector), but this time I'm trying a 2D vector and it's not working.
The function is simple:
template <class NumType>
Mat Vect2Mat(vector<vector<NumType>> vect)
{
    Mat mtx = Mat(vect.size(), vect[0].size(), CV_64F, 0); // don't need to init??
    //Mat mtx;

    // copy data
    for (int i = 0; i < vect.size(); i++)
        for (int j = 0; j < vect[i].size(); j++)
        {
            mtx.at<NumType>(i, j) = vect[i][j];
            //cout << vect[i][j] << " ";
        }

    return mtx;
}
So is there a way to initialize Mat mtx according to NumType? The type argument is always a fixed constant such as CV_32F or CV_64F, which is very restrictive.
Thank you!
I think I've found the answer in the OpenCV documentation. The technique is called "class traits" and uses the DataType class.
It looks like this:
Mat mtx = Mat::zeros(vect.size(), vect[0].size(), DataType<NumType>::type);
For example:
template <class NumType>
cv::Mat Vect2Mat(std::vector<std::vector<NumType>> vect)
{
    cv::Mat mtx = cv::Mat::zeros(vect.size(), vect[0].size(), cv::DataType<NumType>::type);
    //Mat mtx;

    // copy data
    for (int i = 0; i < vect.size(); i++)
        for (int j = 0; j < vect[i].size(); j++)
        {
            mtx.at<NumType>(i, j) = vect[i][j];
            //cout << vect[i][j] << " ";
        }

    return mtx;
}
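For completeness, a minimal usage sketch (untested, with placeholder data) showing that the template argument is deduced from the vector's element type:
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    // placeholder data, just for illustration
    std::vector<std::vector<float>> data = { {1.0f, 2.0f}, {3.0f, 4.0f} };

    cv::Mat m = Vect2Mat(data); // NumType deduced as float -> CV_32F
    std::cout << m << std::endl;
    return 0;
}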
I am trying to build a test project to compare the OpenCV solvePnP implementation with the OpenGV one.
The OpenCV one is documented here:
https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#solvepnp
and the OpenGV one here:
https://laurentkneip.github.io/opengv/page_how_to_use.html
Using the OpenCV example code, I find a chessboard in an image and construct the matching 3D points. I run the OpenCV PnP, then set up the OpenGV solver. The OpenCV PnP runs fine and prints the values:
//rotation
-0.003040771263293328, 0.9797142824436152, -0.2003763421317906;
0.0623096853748876, 0.2001735322445355, 0.977777101438374]
//translation
[-12.06549797067309;
-9.533070368412945;
37.6825295047483]
I test by reprojecting the 3D points, and it looks good.
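(For context, a minimal sketch of that reprojection check, assuming the objectPoints, imagePoints, rvec, tvec, cameraMatrix and distCoeffs from the code below, could look like this; it is not my exact test code:)
// Reproject the chessboard model with the cv::solvePnP result and
// compare against the detected corners.
std::vector<cv::Point2f> reprojected;
cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, reprojected);

double meanErr = 0.0;
for (size_t k = 0; k < imagePoints.size(); ++k) {
    double dx = imagePoints[k].x - reprojected[k].x;
    double dy = imagePoints[k].y - reprojected[k].y;
    meanErr += std::sqrt(dx * dx + dy * dy);
}
meanErr /= imagePoints.size();
std::cout << "mean reprojection error (px): " << meanErr << std::endl;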
The GV PnP, however, prints nan for all values. I have tried to follow the example code, but I must be making a mistake somewhere. The code is:
int main(int argc, char **argv) {
    cv::Mat matImg = cv::imread("chess.jpg");
    cv::Size boardSize(8, 6);

    //Construct the chessboard model
    double squareSize = 2.80;
    std::vector<cv::Point3f> objectPoints;
    for (int i = 0; i < boardSize.height; i++) {
        for (int j = 0; j < boardSize.width; j++) {
            objectPoints.push_back(
                cv::Point3f(double(j * squareSize), float(i * squareSize), 0));
        }
    }

    cv::Mat rvec, tvec;
    cv::Mat cameraMatrix, distCoeffs;
    cv::FileStorage fs("CalibrationData.xml", cv::FileStorage::READ);
    fs["cameraMatrix"] >> cameraMatrix;
    fs["dist_coeffs"] >> distCoeffs;

    //Found chessboard corners
    std::vector<cv::Point2f> imagePoints;
    bool found = cv::findChessboardCorners(matImg, boardSize, imagePoints, cv::CALIB_CB_FAST_CHECK);

    if (found) {
        cv::drawChessboardCorners(matImg, boardSize, cv::Mat(imagePoints), found);

        //SolvePnP
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

        drawAxis(matImg, cameraMatrix, distCoeffs, rvec, tvec, squareSize);
    }

    //cv to matrix
    cv::Mat R;
    cv::Rodrigues(rvec, R);
    std::cout << "results from cv:" << R << tvec << std::endl;

    //START OPEN GV
    //vars
    bearingVectors_t bearingVectors;
    points_t points;
    rotation_t rotation;

    //add points to the gv type
    for (int i = 0; i < objectPoints.size(); ++i)
    {
        point_t pnt;
        pnt.x() = objectPoints[i].x;
        pnt.y() = objectPoints[i].y;
        pnt.z() = objectPoints[i].z;
        points.push_back(pnt);
    }

    /*
    K is the common 3x3 camera matrix that you can compose with cx, cy, fx, and fy.
    You put the image point into homogeneous form (append a 1),
    multiply it with the inverse of K from the left, which gives you a normalized image point (a spatial direction vector).
    You normalize that to norm 1.
    */

    //to homogeneous
    std::vector<cv::Point3f> imagePointsH;
    convertPointsToHomogeneous(imagePoints, imagePointsH);

    //multiply by K.Inv
    for (int i = 0; i < imagePointsH.size(); i++)
    {
        cv::Point3f pt = imagePointsH[i];

        cv::Mat ptMat(3, 1, cameraMatrix.type());
        ptMat.at<double>(0, 0) = pt.x;
        ptMat.at<double>(1, 0) = pt.y;
        ptMat.at<double>(2, 0) = pt.z;

        cv::Mat dstMat = cameraMatrix.inv() * ptMat;

        //store as bearing vector
        bearingVector_t bvec;
        bvec.x() = dstMat.at<double>(0, 0);
        bvec.y() = dstMat.at<double>(1, 0);
        bvec.z() = dstMat.at<double>(2, 0);
        bvec.normalize();

        bearingVectors.push_back(bvec);
    }

    //create a central absolute adapter
    absolute_pose::CentralAbsoluteAdapter adapter(
        bearingVectors,
        points,
        rotation);

    size_t iterations = 50;
    std::cout << "running epnp (all correspondences)" << std::endl;
    transformation_t epnp_transformation;
    for (size_t i = 0; i < iterations; i++)
        epnp_transformation = absolute_pose::epnp(adapter);

    std::cout << "results from epnp algorithm:" << std::endl;
    std::cout << epnp_transformation << std::endl << std::endl;

    return 0;
}
Where am I going wrong in setting up the OpenGV PnP solver?
Years later, I had this same issue and solved it. To convert OpenCV points to OpenGV bearing vectors, you can do this:
bearingVectors_t bearingVectors;
std::vector<cv::Point2f> dd2; // the 2D image points to convert (left empty in this snippet)
const int N1 = static_cast<int>(dd2.size());
cv::Mat points1_mat = cv::Mat(dd2).reshape(1);
// first rectify points and construct homogeneous points
// construct homogeneous points
cv::Mat ones_col1 = cv::Mat::ones(N1, 1, CV_32F);
cv::hconcat(points1_mat, ones_col1, points1_mat);
// undistort points
cv::Mat points1_rect = points1_mat * cameraMatrix.inv();
// compute bearings
points2bearings3(points1_rect, &bearingVectors);
using this function for the final conversion:
// Convert a set of points to bearings
// points   Matrix of size Nx3 with the set of points.
// bearings Vector of bearings.
void points2bearings3(const cv::Mat& points,
                      opengv::bearingVectors_t* bearings) {
  double l;
  cv::Vec3f p;
  opengv::bearingVector_t bearing;
  for (int i = 0; i < points.rows; ++i) {
    p = cv::Vec3f(points.row(i));
    l = std::sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]);
    for (int j = 0; j < 3; ++j) bearing[j] = p[j] / l;
    bearings->push_back(bearing);
  }
}
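As a side note, an alternative sketch (untested here, assuming the same cameraMatrix and distCoeffs as above and the usual OpenCV/OpenGV headers) is to let cv::undistortPoints produce normalized image coordinates directly, which also handles lens distortion and removes the explicit K-inverse multiplication:
// Sketch: build OpenGV bearing vectors from OpenCV pixel coordinates.
// cv::undistortPoints with no R/P arguments returns normalized image
// coordinates, i.e. points already mapped through K^-1 and undistorted.
opengv::bearingVectors_t bearingsFromPixels(const std::vector<cv::Point2f>& pixels,
                                            const cv::Mat& cameraMatrix,
                                            const cv::Mat& distCoeffs)
{
    std::vector<cv::Point2f> normalized;
    cv::undistortPoints(pixels, normalized, cameraMatrix, distCoeffs);

    opengv::bearingVectors_t bearings;
    for (const auto& n : normalized)
    {
        opengv::bearingVector_t b;
        b << n.x, n.y, 1.0;   // homogeneous direction in the camera frame
        b.normalize();        // OpenGV expects unit-length bearing vectors
        bearings.push_back(b);
    }
    return bearings;
}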
I have an image data set that I would like to partition into k clusters. I am trying to use the OpenCV implementation of k-means clustering.
First, I store my Mat images in a vector of Mat and then try to use the kmeans function. However, I am getting an assertion error.
Should the images be stored in a different kind of structure? I have read the k-means documentation and I don't seem to understand what I am doing wrong. This is my code:
Thank you in advance,
vector<Mat> images;
string folder = "D:\\football\\positive_clustering\\";
string mask = "*.bmp";
vector<string> files = getFileList(folder + mask);
for (int i = 0; i < files.size(); i++)
{
    Mat img = imread(folder + files[i]);
    images.push_back(img);
}
cout << "Vector of positive samples created" << endl;

int k = 10;
cv::Mat bestLabels;
cv::kmeans(images, k, bestLabels, TermCriteria(), 3, KMEANS_PP_CENTERS);

//have a look
vector<cv::Mat> clusterViz(bestLabels.rows);
for (int i = 0; i < bestLabels.rows; i++)
{
    clusterViz[bestLabels.at<int>(i)].push_back(cv::Mat(images[bestLabels.at<int>(i)]));
}
namedWindow("clusters", WINDOW_NORMAL);
for (int i = 0; i < clusterViz.size(); i++)
{
    cv::imshow("clusters", clusterViz[i]);
    cv::waitKey();
}
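(For what it's worth, cv::kmeans expects a single CV_32F matrix with one sample per row rather than a vector<Mat> of images. A sketch of that data preparation, assuming all images have the same size and that flattened raw pixels are an acceptable feature vector, might look like this:)
// Pack each image into one row of a CV_32F sample matrix for cv::kmeans.
cv::Mat samples(static_cast<int>(images.size()),
                images[0].rows * images[0].cols * images[0].channels(),
                CV_32F);
for (size_t i = 0; i < images.size(); i++)
{
    cv::Mat row;
    images[i].reshape(1, 1).convertTo(row, CV_32F); // 1 row, 1 channel, float
    row.copyTo(samples.row(static_cast<int>(i)));
}

cv::Mat labels, centers;
cv::kmeans(samples, k, labels,
           cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
           3, cv::KMEANS_PP_CENTERS, centers);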
Hello and thanks for your help.
I would like to test the use of shapes for matching in OpenCV and have managed to do the matching part.
To locate the rotated shape, I thought the AffineTransformer class would be the right choice. As I don't know how the matching works internally, it would be nice if someone had a link where the procedure is described.
As shawshank mentioned, my following code throws an "Assertion failed" error because the variable matches is empty when it is passed to the estimateTransformation function. Does anybody know how to use this function correctly, and what it actually does?
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;

bool rotateImage(Mat src, Mat &dst, double angle)
{
    // get rotation matrix for rotating the image around its center
    cv::Point2f center(src.cols/2.0, src.rows/2.0);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // determine bounding rectangle
    cv::Rect bbox = cv::RotatedRect(center, src.size(), angle).boundingRect();
    // adjust transformation matrix
    rot.at<double>(0,2) += bbox.width/2.0 - center.x;
    rot.at<double>(1,2) += bbox.height/2.0 - center.y;
    cv::warpAffine(src, dst, rot, bbox.size());
    return 1;
}

static vector<Point> sampleContour( const Mat& image, int n=300 )
{
    vector<vector<Point>> contours;
    vector<Point> all_points;
    findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i=0; i<contours.size(); i++)
    {
        for (size_t j=0; j<contours[i].size(); j++)
        {
            all_points.push_back(contours[i][j]);
        }
    }
    int dummy=0;
    for (int add=(int)all_points.size(); add<n; add++)
    {
        all_points.push_back(all_points[dummy++]);
    }
    // shuffle
    random_shuffle(all_points.begin(), all_points.end());
    vector<Point> sampled;
    for (int i=0; i<n; i++)
    {
        sampled.push_back(all_points[i]);
    }
    return sampled;
}

int main(void)
{
    Mat img1, img2;
    vector<Point> img1Points, img2Points;
    float distSC, distHD;

    // read images
    string img1Path = "testimage.jpg";
    img1 = imread(img1Path, IMREAD_GRAYSCALE);
    rotateImage(img1, img2, 45);
    imshow("original", img1);
    imshow("transformed", img2);
    waitKey();

    // Contours
    img1Points = sampleContour(img1);
    img2Points = sampleContour(img2);

    //Calculate Distances
    Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
    Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();

    distSC = mysc->computeDistance( img1Points, img2Points );
    distHD = myhd->computeDistance( img1Points, img2Points );

    cout << distSC << endl << distHD << endl;

    vector<DMatch> matches;
    Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
    transformerHD->estimateTransformation(img1Points, img2Points, matches);

    return 0;
}
I have used the AffineTransformer class on a 2D image. Below is basic code which will give you an idea of what it does.
// My OpenCV AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>

using namespace cv;
using namespace std;

int arrSize = 10;
int sourcePx[] = {154,155,159,167,182,209,238,265,295,316};
int sourcePy[] = {190,222,252,285,314,338,344,340,321,290};
int tgtPx[]    = {120,127,137,150,188,230,258,285,305,313};
int tgtPy[]    = {207,245,275,305,336,345,342,332,305,274};

int main()
{
    // Prepare 'vector of points' from above hardcoded points
    int sInd = 0, eInd = arrSize;
    vector<Point2f> sourceP; for(int i=sInd; i<eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    vector<Point2f> tgtP;    for(int i=sInd; i<eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));

    // Create object of AffineTransformer
    bool fullAffine = true; // change its value and see difference in result
    auto aft = cv::createAffineTransformer(fullAffine);

    // Prepare vector<cv::DMatch> - this is just mapping of corresponding points indices
    std::vector<cv::DMatch> matches;
    for(int i=0; i<sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));

    // Read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;

    // estimate points transformation
    aft->estimateTransformation(sourceP, tgtP, matches);

    // apply transformation
    aft->applyTransformation(sourceP, tgtP);

    // warp image
    aft->warpImage(srcImg, tgtImg);

    // show generated output
    imshow("warped output", tgtImg);
    waitKey(0);

    return 0;
}
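If you just want to inspect the 2x3 affine matrix itself, a quick cross-check I would suggest (outside the shape module, assuming OpenCV 3.2+ and the same sourceP, tgtP and srcImg as above) is cv::estimateAffine2D from calib3d:
// Sketch: estimate the same point-to-point affine transform with calib3d,
// which returns the 2x3 matrix directly so it can be printed or compared.
cv::Mat inliers;
cv::Mat A = cv::estimateAffine2D(sourceP, tgtP, inliers); // 2x3, CV_64F
std::cout << "estimated affine matrix:\n" << A << std::endl;

// The same matrix can be applied to an image with warpAffine:
cv::Mat warped;
cv::warpAffine(srcImg, warped, A, srcImg.size());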
I have a simple question. I want to create a normal 2D matrix to use as a bin for integer counts and increment some of its elements, but it doesn't work. Why does it just print some unknown symbols?
Here is my code:
Mat img = imread("img.jpg", 0);
Mat bin = Mat::zeros(img.size(), CV_8U); //also tried 8UC1
for (size_t i = 0; i < 100; i++)
{
    bin.at<uchar>(i, 50) = 200;
    cout << bin.at<uchar>(i, 50) << endl;
    //(bin.at<uchar>(i,50))++; //if above statement works then I will use this incrementer
}
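(For reference: std::cout prints a uchar as a character, so the value 200 comes out as an odd symbol rather than a number. A minimal sketch of a workaround, assuming the same bin matrix as above, is to cast to int before printing:)
for (size_t i = 0; i < 100; i++)
{
    bin.at<uchar>(i, 50) = 200;
    // cast so cout prints the numeric value, not the character with that code
    cout << static_cast<int>(bin.at<uchar>(i, 50)) << endl; // prints 200
    ++bin.at<uchar>(i, 50); // incrementing the element works as usual
}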
I have a scanner which is big enough to scan multiple pictures at once. Unfortunately, all the pictures are stored in one jpg file, separated only by white borders. Is there any way to automatically find the sub-images and store them in separate files? I was thinking about using OpenCV to get the job done, but I can't find the right functions. Does anybody know which OpenCV function would work, or if there is any other approach (using Linux)?
Thanks,
Konstantin
My quick and dirty solution that worked with my images looks like this. I hope people with a similar problem can use it as a starting point for how to use OpenCV.
// g++ `pkg-config --cflags --libs opencv` parse.cpp
// include standard OpenCV headers, same as before
#include <cv.h>
#include <highgui.h>
#include <stdio.h>

// all the new API is put into "cv" namespace. Export its content
using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    string imagename = argc > 1 ? argv[1] : "lena.jpg";
    // the newer cvLoadImage alternative with MATLAB-style name
    Mat imgf = imread("original/"+imagename);
    if( !imgf.data ) // check if the image has been loaded properly
        return -1;

    int border = 1000;
    Mat img(imgf.rows+2*border, imgf.cols+2*border, CV_8UC3, Scalar(255,255,255));
    for (int i=0; i<imgf.cols; ++i) {
        for (int j=0; j<imgf.rows; ++j) {
            img.at<Vec3b>(j+border,i+border) = imgf.at<Vec3b>(j,i);
        }
    }
    cout << "created border\n";

    Mat mask;
    img.copyTo(mask);
    Scalar diff(2,2,2);
    floodFill(mask, Point(0,0), Scalar(0,0,255), NULL, diff, diff);
    cout << "flood filled\n";
    imwrite("flood.png", mask);

    for (int i=0; i<mask.cols; ++i) {
        for (int j=0; j<mask.rows; ++j) {
            if(mask.at<Vec3b>(j,i) != Vec3b(0,0,255)) {
                mask.at<Vec3b>(j,i) = Vec3b(0,0,0);
            } else {
                mask.at<Vec3b>(j,i) = Vec3b(255,255,255);
            }
        }
    }
    cvtColor( mask, mask, CV_RGB2GRAY );
    cout << "mask created\n";
    imwrite("binary.png", mask);

    Mat sobelX;
    Mat sobelY;
    Mat sobel;
    Sobel(mask, sobelX, CV_16S, 1, 0);
    Sobel(mask, sobelY, CV_16S, 0, 1);
    sobel = abs(sobelX) + abs(sobelY);
    for (int i=0; i<mask.cols; ++i) {
        for (int j=0; j<mask.rows; ++j) {
            mask.at<char>(j,i) = abs(sobelX.at<short>(j,i)) + abs(sobelY.at<short>(j,i));
        }
    }
    threshold(mask, mask, 127, 255, THRESH_BINARY);
    cout << "sobel done\n";
    imwrite("sobel.png", mask);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(mask, contours, hierarchy,
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    imwrite("contours.png", mask);
    cout << "contours done\n";

    // iterate through all the top-level contours
    int idx = 0;
    for( ; idx >= 0; idx = hierarchy[idx][0] )
    {
        RotatedRect box = minAreaRect(contours[idx]);
        if(box.size.width > 100 && box.size.height > 100) {
            Mat rot = getRotationMatrix2D(box.center, box.angle, 1.0);
            Mat rotimg;
            warpAffine(img, rotimg, rot, Size(img.cols, img.rows));
            imwrite("rotimg.png", rotimg);

            Mat subimg(box.size.width, box.size.height, CV_8UC3);
            getRectSubPix(rotimg, box.size, box.center, subimg);

            stringstream name;
            name << "subimg_" << imagename << "_" << idx << ".png";
            cout << name.str() << "\n";
            imwrite(name.str(), subimg);
        }
    }

    imwrite("img.png", img);
    imwrite("mask.png", mask);
    cout << "Done\n";
    return 0;
}