I have RGB data as an rs2::frame. I convert it to a cv::Mat and send it over a TCP connection; on the server (receiver) side I store the buffer in a cv::Mat. My question is: how can I convert a cv::Mat back to an rs2::frame on the receiver side, so that I can use SDK functions that expect the rs2::frame type?
You need to simulate a software device in order to have an rs2::frame.
Following this example you can write your own class that creates the synthetic streams, taking data from the cv::Mat instances.
For example, here's something I have done to solve the problem.
rsImageConverter.h
#pragma once
#include <librealsense2/rs.hpp>
#include <librealsense2/hpp/rs_internal.hpp>
class rsImageConverter
{
public:
rsImageConverter(int w, int h, int bpp);
bool convertFrame(uint8_t* depth_data, uint8_t* color_data);
rs2::frame getDepth() const;
rs2::frame getColor() const;
private:
int w = 640;
int h = 480;
int bpp = 2;
rs2::software_device dev;
rs2::software_sensor depth_sensor;
rs2::software_sensor color_sensor;
rs2::stream_profile depth_stream;
rs2::stream_profile color_stream;
rs2::syncer syncer;
rs2::frame depth;
rs2::frame color;
int ind = 0;
};
rsImageConverter.cpp
#include "rsimageconverter.h"
rsImageConverter::rsImageConverter(int w, int h, int bpp) :
w(w),
h(h),
bpp(bpp),
depth_sensor(dev.add_sensor("Depth")), // initializing depth sensor
color_sensor(dev.add_sensor("Color")) // initializing color sensor
{
rs2_intrinsics depth_intrinsics{ w, h, (float)(w / 2), (float)(h / 2), (float) w , (float) h , RS2_DISTORTION_BROWN_CONRADY ,{ 0,0,0,0,0 } };
depth_stream = depth_sensor.add_video_stream({ RS2_STREAM_DEPTH, 0, 0,
w, h, 60, bpp,
RS2_FORMAT_Z16, depth_intrinsics });
depth_sensor.add_read_only_option(RS2_OPTION_DEPTH_UNITS, 0.001f); // setting depth units option to the virtual sensor
rs2_intrinsics color_intrinsics = { w, h,
(float)w / 2, (float)h / 2,
(float)w / 2, (float)h / 2,
RS2_DISTORTION_BROWN_CONRADY ,{ 0,0,0,0,0 } };
color_stream = color_sensor.add_video_stream({ RS2_STREAM_COLOR, 0, 1, w,
h, 60, 3, // RGB8 is 3 bytes per pixel; the bpp member applies to the depth stream
RS2_FORMAT_RGB8, color_intrinsics });
dev.create_matcher(RS2_MATCHER_DLR_C); // create the matcher with the RGB frame
depth_sensor.open(depth_stream);
color_sensor.open(color_stream);
depth_sensor.start(syncer);
color_sensor.start(syncer);
depth_stream.register_extrinsics_to(color_stream, { { 1,0,0,0,1,0,0,0,1 },{ 0,0,0 } });
}
bool rsImageConverter::convertFrame(uint8_t* depth_data, uint8_t* color_data)
{
depth_sensor.on_video_frame({ depth_data, // Frame pixels
[](void*) {}, // Custom deleter (if required)
w*bpp, bpp, // Stride and Bytes-per-pixel
(rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // Timestamp, Frame# for potential sync services
depth_stream });
color_sensor.on_video_frame({ color_data, // Frame pixels from capture API
[](void*) {}, // Custom deleter (if required)
w*3, 3, // Stride and bytes-per-pixel for RGB8 (3 bytes per pixel)
(rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // Timestamp, Frame# for potential sync services
color_stream });
ind++;
rs2::frameset fset = syncer.wait_for_frames();
depth = fset.first_or_default(RS2_STREAM_DEPTH);
color = fset.first_or_default(RS2_STREAM_COLOR);
return (depth && color); // return true if everything went well
}
rs2::frame rsImageConverter::getDepth() const
{
return depth;
}
rs2::frame rsImageConverter::getColor() const
{
return color;
}
And then you can use it like this (assuming depth and rgb are two cv::Mat instances, where depth has been converted to CV_16U and rgb is CV_8UC3, already converted from BGR to RGB):
rsImageConverter* converter = new rsImageConverter(640, 480, 2); // bpp = 2 refers to the Z16 depth stream
...
if(converter->convertFrame(depth.data, rgb.data))
{
rs2::frame rs2depth = converter->getDepth();
rs2::frame rs2rgb = converter->getColor();
... // Here you use these frames
}
By the way, I designed this class to handle both depth and RGB. To convert only one of them, you can simply pass an empty frame as the other argument, or adapt the class.
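For completeness, here is a minimal sketch of how the two cv::Mat inputs could be prepared on the receiver side before calling convertFrame (the buffer names are hypothetical; the essential points are the CV_16U depth type and the BGR to RGB conversion):
cv::Mat depth(480, 640, CV_16U, depth_buffer); // Z16 depth received over TCP (depth_buffer is hypothetical)
cv::Mat bgr(480, 640, CV_8UC3, color_buffer);  // OpenCV's default channel order is BGR (color_buffer is hypothetical)
cv::Mat rgb;
cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);     // librealsense expects RGB8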
I'm using KNN to classify images. Now my problem is how to draw the results.
See the OpenCV documentation for KNN (CvKNearest).
I'm using the function find_nearest, whose signature looks like this:
C++: float CvKNearest::find_nearest(const Mat& samples, int k, Mat& results, Mat& neighborResponses, Mat& dists)
Where the parameters are:
samples : Input samples stored by rows. It is a single-precision floating-point matrix of size number_of_samples × number_of_features.
k : Number of used nearest neighbors. It must satisfy the constraint k ≤ CvKNearest::get_max_k().
results : Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with number_of_samples elements.
neighbors : Optional output pointers to the neighbor vectors themselves. It is an array of k*samples->rows pointers.
neighborResponses : Optional output values for corresponding neighbors. It is a single-precision floating-point matrix of size number_of_samples × k.
dists : Optional output distances from the input vectors to the corresponding neighbors. It is a single-precision floating-point matrix of size number_of_samples × k.
A possible implementation would look like this:
#include "ml.h"
#include "highgui.h"
int main( int argc, char** argv )
{
const int K = 10;
int i, j, k, accuracy;
float response;
int train_sample_count = 100;
CvRNG rng_state = cvRNG(-1);
CvMat* trainData = cvCreateMat( train_sample_count, 2, CV_32FC1 );
CvMat* trainClasses = cvCreateMat( train_sample_count, 1, CV_32FC1 );
IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
float _sample[2];
CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
cvZero( img );
CvMat trainData1, trainData2, trainClasses1, trainClasses2;
// form the training samples
cvGetRows( trainData, &trainData1, 0, train_sample_count/2 );
cvRandArr( &rng_state, &trainData1, CV_RAND_NORMAL, cvScalar(200,200), cvScalar(50,50) );
cvGetRows( trainData, &trainData2, train_sample_count/2, train_sample_count );
cvRandArr( &rng_state, &trainData2, CV_RAND_NORMAL, cvScalar(300,300), cvScalar(50,50) );
cvGetRows( trainClasses, &trainClasses1, 0, train_sample_count/2 );
cvSet( &trainClasses1, cvScalar(1) );
cvGetRows( trainClasses, &trainClasses2, train_sample_count/2, train_sample_count );
cvSet( &trainClasses2, cvScalar(2) );
// learn classifier
CvKNearest knn( trainData, trainClasses, 0, false, K );
CvMat* nearests = cvCreateMat( 1, K, CV_32FC1);
for( i = 0; i < img->height; i++ )
{
for( j = 0; j < img->width; j++ )
{
sample.data.fl[0] = (float)j;
sample.data.fl[1] = (float)i;
// estimate the response and get the neighbors' labels
response = knn.find_nearest(&sample,K,0,0,nearests,0);
// compute the number of neighbors representing the majority
for( k = 0, accuracy = 0; k < K; k++ )
{
if( nearests->data.fl[k] == response)
accuracy++;
}
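// highlight the pixel according to the predicted class and the confidence
// given by the share of agreeing neighbors (drawing step as in the
// original OpenCV kNN sample; the colors are the sample's defaults)
cvSet2D( img, i, j, response == 1 ?
(accuracy > 5 ? CV_RGB(180,0,0) : CV_RGB(180,120,0)) :
(accuracy > 5 ? CV_RGB(0,180,0) : CV_RGB(120,120,0)) );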
}
}
// display the classification map and clean up (window name follows the OpenCV kNN sample)
cvNamedWindow( "classifier result", 1 );
cvShowImage( "classifier result", img );
cvWaitKey(0);
cvReleaseMat( &trainClasses );
cvReleaseMat( &trainData );
return 0;
}
Now back to the problem. I want to use the function drawMatches (see the OpenCV documentation for its description). This function expects its input as a vector of DMatch. As you can see, knn.find_nearest does not return anything of this type. Do you have any suggestions on how to convert between them?
Thanks in advance!
I am new to OpenCV and want to develop a program that takes the camera input and compares it with a known image of an object, which is provided as a .jpg image. If the webcam input matches the stored image above a certain level of accuracy, a message should be displayed saying that the required object has been found.
E.g.: if I hold a computer cable in front of the webcam, it needs to be detected and compared to the image of the computer cable I have fed into the program.
I've tried many techniques and find template matching to be effective, as mentioned in the following link:
Real-time template matching - OpenCV, C++
However, after drawing the rectangle and getting the roiImg, I want to compare its likeness with a known image on my disk (in the OpenCV working directory). For this I am trying to convert the roiImg and my other images to HSV format and get four values according to the comparison methods.
I have tried to combine the two codes, but it doesn't seem to work: roiImg is created at runtime and cannot be compared with the other two images loaded via imread.
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <sstream>
using namespace cv;
using namespace std;
Point point1, point2; /* vertical points of the bounding box */
int drag = 0;
Rect rect; /* bounding box */
Mat img, roiImg; /* roiImg - the part of the image in the bounding box */
int select_flag = 0;
bool go_fast = false;
Mat mytemplate;
Mat src_base, hsv_base;
Mat src_test1, hsv_test1;
Mat src_test2, hsv_test2;
Mat hsv_half_down;
///------- template matching -----------------------------------------------------------------------------------------------
Mat TplMatch( Mat &img, Mat &mytemplate )
{
Mat result;
matchTemplate( img, mytemplate, result, CV_TM_SQDIFF_NORMED );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
return result;
}
///------- Localizing the best match with minMaxLoc ------------------------------------------------------------------------
Point minmax( Mat &result )
{
double minVal, maxVal;
Point minLoc, maxLoc, matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
matchLoc = minLoc;
return matchLoc;
}
///------- tracking --------------------------------------------------------------------------------------------------------
void track()
{
if (select_flag)
{
//roiImg.copyTo(mytemplate);
// select_flag = false;
go_fast = true;
}
// imshow( "mytemplate", mytemplate ); waitKey(0);
Mat result = TplMatch( img, mytemplate );
Point match = minmax( result );
rectangle( img, match, Point( match.x + mytemplate.cols , match.y + mytemplate.rows ), CV_RGB(255, 255, 255), 2 ); // thickness must be a positive integer
std::cout << "match: " << match << endl;
/// latest match is the new template
Rect ROI = cv::Rect( match.x, match.y, mytemplate.cols, mytemplate.rows );
roiImg = img( ROI );
roiImg.copyTo(mytemplate);
imshow( "roiImg", roiImg ); //waitKey(0);
//Compare the roiImg with a known image to calculate resemblance
/* Reference results from the OpenCV histogram comparison tutorial:
Method          Base-Base   Base-Half   Base-Test 1   Base-Test 2
Correlation     1.000000    0.930766    0.182073      0.120447
Chi-square      0.000000    4.940466    21.184536     49.273437
Intersection    24.391548   14.959809   3.889029      5.775088
Bhattacharyya   0.000000    0.222609    0.646576      0.801869
For the Correlation and Intersection methods, the higher the metric, the more accurate the match.
As we can see, the match base-base is the highest of all, as expected, and the match base-half is
the second best match (as we predicted). For the other two metrics, the lower the result, the
better the match. The matches of test 1 and test 2 with respect to the base are worse, which
again was expected. */
src_base = roiImg.clone(); // use the in-memory ROI directly; imread("roiImg") would look for a file on disk and fail
src_test1 = imread("Samarth.jpg");
src_test2 = imread("Samarth2.jpg");
//double l2_norm = cvNorm( src_base, src_test1 );
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 50 bins for hue and 60 for saturation
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
// Use the 0-th and 1-st channels
int channels[] = { 0, 1 };
/// Histograms
MatND hist_base;
MatND hist_half_down;
MatND hist_test1;
MatND hist_test2;
/// Calculate the histograms for the HSV images
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( "Done \n" );
}
///------- MouseCallback function ------------------------------------------------------------------------------------------
void mouseHandler(int event, int x, int y, int flags, void *param)
{
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/// left button clicked. ROI selection begins
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/// mouse dragged. ROI being selected
Mat img1 = img.clone();
point2 = Point(x, y);
rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
imshow("image", img1);
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
point2 = Point(x, y);
rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
drag = 0;
roiImg = img(rect);
roiImg.copyTo(mytemplate);
// imshow("MOUSE roiImg", roiImg); waitKey(0);
}
if (event == CV_EVENT_LBUTTONUP)
{
/// ROI selected
select_flag = 1;
drag = 0;
}
}
///------- Main() ----------------------------------------------------------------------------------------------------------
int main()
{
int k;
///open webcam
VideoCapture cap(0);
if (!cap.isOpened())
return 1;
/* ///open video file
VideoCapture cap;
cap.open( "Wildlife.wmv" );
if ( !cap.isOpened() )
{ cout << "Unable to open video file" << endl; return -1; }*/
/*
/// Set video to 320x240
cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);*/
cap >> img;
GaussianBlur( img, img, Size(7,7), 3.0 );
imshow( "image", img );
while (1)
{
cap >> img;
if ( img.empty() )
break;
// Flip the frame horizontally and add blur
cv::flip( img, img, 1 );
GaussianBlur( img, img, Size(7,7), 3.0 );
if ( rect.width == 0 && rect.height == 0 )
cvSetMouseCallback( "image", mouseHandler, NULL );
else
track();
imshow("image", img);
// waitKey(100); k = waitKey(75);
k = waitKey(go_fast ? 30 : 10000);
if (k == 27)
break;
}
return 0;
}
If you want to detect an object in a live feed, detecting it in every frame is not efficient. Detect the object once at the start, then track it afterwards.
So this process involves both detection and tracking.
For detection you have to segment the object from the rest of the scene. OpenCV provides many algorithms for segmenting an object from the background based on color (color-based detection). Besides color, you can also use the object's shape (shape-based segmentation). A minimal color-based example is sketched below.
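This sketch assumes frame is the current BGR camera frame; the HSV range is an arbitrary placeholder you would tune for your object:
cv::Mat hsv, mask;
cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
cv::inRange(hsv, cv::Scalar(35, 50, 50), cv::Scalar(85, 255, 255), mask); // keep green-ish pixels (placeholder range)
std::vector<std::vector<cv::Point> > contours;
cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE); // the object is e.g. the largest contour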
You can use the Lucas-Kanade (LK) optical flow algorithm as a starting point for tracking; see the sketch after this paragraph.
Additionally, you can use template matching, CAMShift, or the median flow tracker to obtain quick results. Which of the above algorithms works best depends on the scale changes of the object and the lighting changes in the feed. OpenCV has sample programs for all of these algorithms.
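A minimal LK tracking sketch between two consecutive frames (prevGray and currGray are assumed to be grayscale frames, with the points seeded inside the detected object region):
std::vector<cv::Point2f> prevPts, currPts;
cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 10); // seed corners to track
std::vector<uchar> status;
std::vector<float> err;
cv::calcOpticalFlowPyrLK(prevGray, currGray, prevPts, currPts, status, err);
std::vector<cv::Point2f> tracked;
for (size_t i = 0; i < status.size(); ++i)
    if (status[i]) // keep only the points that were tracked successfully
        tracked.push_back(currPts[i]);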
Given a set of 2D points, how can I apply the opposite of undistortPoints?
I have the camera intrinsics and distCoeffs and would like to (for example) create a square, and distort it as if the camera had viewed it through the lens.
I have found a 'distort' patch here: http://code.opencv.org/issues/1387 but it seems this is only good for images; I want to work on sparse points.
This question is rather old but since I ended up here from a google search without seeing a neat answer I decided to answer it anyway.
There is a function called projectPoints that does exactly this. The C version is used internally by OpenCV when estimating camera parameters with functions like calibrateCamera and stereoCalibrate.
EDIT:
To use 2D points as input, we can set all z-coordinates to 1 with convertPointsToHomogeneous and use projectPoints with no rotation and no translation (note that the points must be in normalized camera coordinates rather than raw pixel coordinates for the distortion model to apply correctly):
cv::Mat points2d = ...;
cv::Mat points3d;
cv::Mat distorted_points2d;
convertPointsToHomogeneous(points2d, points3d);
projectPoints(points3d, cv::Vec3f(0,0,0), cv::Vec3f(0,0,0), camera_matrix, dist_coeffs, distorted_points2d);
A simple solution is to use initUndistortRectifyMap to obtain a map from undistorted coordinates to distorted ones:
cv::Mat K = ...; // 3x3 intrinsic parameters
cv::Mat D = ...; // 4x1 or similar distortion parameters
int W = 640; // image width
int H = 480; // image height
cv::Mat mapx, mapy;
cv::initUndistortRectifyMap(K, D, cv::Mat(), K, cv::Size(W, H),
CV_32F, mapx, mapy);
float distorted_x = mapx.at<float>(y, x);
float distorted_y = mapy.at<float>(y, x);
Edit, to clarify that the code is correct:
Quoting the documentation of initUndistortRectifyMap:
for each pixel (u, v) in the destination (corrected and rectified) image, the function computes the corresponding coordinates in the source image (that is, in the original image from the camera):
map_x(u,v) = x''f_x + c_x
map_y(u,v) = y''f_y + c_y
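Note that mapx and mapy give the distorted coordinates only at integer pixel locations. For sparse points at subpixel positions you can interpolate the maps bilinearly, for example with getRectSubPix (a sketch; px and py are a hypothetical subpixel point in undistorted coordinates):
float px = 123.4f, py = 56.7f; // hypothetical subpixel point
cv::Mat patchX, patchY;
cv::getRectSubPix(mapx, cv::Size(1, 1), cv::Point2f(px, py), patchX); // a 1x1 patch is a bilinear sample
cv::getRectSubPix(mapy, cv::Size(1, 1), cv::Point2f(px, py), patchY);
float distorted_x = patchX.at<float>(0, 0);
float distorted_y = patchY.at<float>(0, 0);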
undistortPoints is essentially the reverse of projectPoints.
In my case I would like to do the following:
Undistort points:
int undistortPoints(const vector<cv::Point2f> &uv, vector<cv::Point2f> &xy, const cv::Mat &M, const cv::Mat &d)
{
cv::undistortPoints(uv, xy, M, d, cv::Mat(), M);
return 0;
}
This will undistort the points to coordinates very close to the original image coordinates, but without distortion, which matches the default behavior of the cv::undistort() function for images.
Redistort points:
int distortPoints(const vector<cv::Point2f> &xy, vector<cv::Point2f> &uv, const cv::Mat &M, const cv::Mat &d)
{
vector<cv::Point2f> xy2;
vector<cv::Point3f> xyz;
cv::undistortPoints(xy, xy2, M, cv::Mat());
for (cv::Point2f p : xy2)
    xyz.push_back(cv::Point3f(p.x, p.y, 1));
cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);
cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);
cv::projectPoints(xyz, rvec, tvec, M, d, uv);
return 0;
}
The slightly tricky thing here is to first project the points onto the z=1 plane with a linear camera model (removing the intrinsics without applying any distortion). After that, you project them with the original camera model, which re-applies both the intrinsics and the distortion.
I found these useful, I hope it also works for you.
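For example, a quick round-trip check with these two helpers (M and d are assumed to hold your calibrated camera matrix and distortion coefficients):
std::vector<cv::Point2f> uv, xy, uv2;
uv.push_back(cv::Point2f(320.f, 240.f));
uv.push_back(cv::Point2f(100.f, 50.f));
undistortPoints(uv, xy, M, d); // remove distortion (still pixel coordinates)
distortPoints(xy, uv2, M, d);  // re-apply distortion; uv2 should be close to uv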
I have had exactly the same need.
Here is a possible solution :
void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
const cv::Mat & cameraMatrix, const cv::Mat & distorsionMatrix)
{
dst.clear();
double fx = cameraMatrix.at<double>(0,0);
double fy = cameraMatrix.at<double>(1,1);
double ux = cameraMatrix.at<double>(0,2);
double uy = cameraMatrix.at<double>(1,2);
double k1 = distorsionMatrix.at<double>(0, 0);
double k2 = distorsionMatrix.at<double>(0, 1);
double p1 = distorsionMatrix.at<double>(0, 2);
double p2 = distorsionMatrix.at<double>(0, 3);
double k3 = distorsionMatrix.at<double>(0, 4);
//BOOST_FOREACH(const cv::Point2d &p, src)
for (unsigned int i = 0; i < src.size(); i++)
{
const cv::Point2d &p = src[i];
double x = p.x;
double y = p.y;
double xCorrected, yCorrected;
//Step 1 : apply distortion
{
double r2 = x*x + y*y;
//radial distortion
xCorrected = x * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
yCorrected = y * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
//tangential distortion
//The "Learning OpenCV" book is wrong here !!!
//False equations from the "Learning OpenCV" book:
//xCorrected = xCorrected + (2. * p1 * y + p2 * (r2 + 2. * x * x));
//yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x);
//Correct formulae found at: http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/parameters.html
xCorrected = xCorrected + (2. * p1 * x * y + p2 * (r2 + 2. * x * x));
yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x * y);
}
//Step 2 : ideal coordinates => actual coordinates
{
xCorrected = xCorrected * fx + ux;
yCorrected = yCorrected * fy + uy;
}
dst.push_back(cv::Point2d(xCorrected, yCorrected));
}
}
void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
const cv::Matx33d & cameraMatrix, const cv::Matx<double, 1, 5> & distorsionMatrix)
{
cv::Mat cameraMatrix2(cameraMatrix);
cv::Mat distorsionMatrix2(distorsionMatrix);
return MyDistortPoints(src, dst, cameraMatrix2, distorsionMatrix2);
}
void TestDistort()
{
cv::Matx33d cameraMatrix = 0.;
{
//cameraMatrix Init
double fx = 1000., fy = 950.;
double ux = 324., uy = 249.;
cameraMatrix(0, 0) = fx;
cameraMatrix(1, 1) = fy;
cameraMatrix(0, 2) = ux;
cameraMatrix(1, 2) = uy;
cameraMatrix(2, 2) = 1.;
}
cv::Matx<double, 1, 5> distorsionMatrix;
{
//distortion init
const double k1 = 0.5, k2 = -0.5, k3 = 0.000005, p1 = 0.07, p2 = -0.05;
distorsionMatrix(0, 0) = k1;
distorsionMatrix(0, 1) = k2;
distorsionMatrix(0, 2) = p1;
distorsionMatrix(0, 3) = p2;
distorsionMatrix(0, 4) = k3;
}
std::vector<cv::Point2d> distortedPoints;
std::vector<cv::Point2d> undistortedPoints;
std::vector<cv::Point2d> redistortedPoints;
distortedPoints.push_back(cv::Point2d(324., 249.)); // equal to the optical center
distortedPoints.push_back(cv::Point2d(340., 200));
distortedPoints.push_back(cv::Point2d(785., 345.));
distortedPoints.push_back(cv::Point2d(0., 0.));
cv::undistortPoints(distortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);
MyDistortPoints(undistortedPoints, redistortedPoints, cameraMatrix, distorsionMatrix);
cv::undistortPoints(redistortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);
//Poor man's unit test ensuring we have an accuracy that is better than 0.001 pixel
for (unsigned int i = 0; i < undistortedPoints.size(); i++)
{
cv::Point2d dist = redistortedPoints[i] - distortedPoints[i];
double norm = sqrt(dist.dot(dist));
std::cout << "norm = " << norm << std::endl;
assert(norm < 1E-3);
}
}
For those still searching, here is a simple python function that will distort points back:
import cv2
import numpy as np

def distortPoints(undistortedPoints, k, d):
undistorted = np.float32(undistortedPoints[:, np.newaxis, :])
kInv = np.linalg.inv(k)
for i in range(len(undistorted)):
srcv = np.array([undistorted[i][0][0], undistorted[i][0][1], 1])
dstv = kInv.dot(srcv)
undistorted[i][0][0] = dstv[0]
undistorted[i][0][1] = dstv[1]
distorted = cv2.fisheye.distortPoints(undistorted, k, d)
return distorted
Example:
undistorted = np.array([(639.64, 362.09), (234, 567)])
distorted = distortPoints(undistorted, camK, camD)
print(distorted)
This question and its related questions on SO have been around for nearly a decade, but there still isn't an answer that satisfies the criteria below, so I'm proposing a new answer that:
- uses methods readily available in OpenCV,
- works for points, not images (and also for points at subpixel locations),
- can be used beyond fisheye distortion models,
- does not involve manual interpolation or maps, and
- can be used in the context of rectification.
Preliminaries
It is important to distinguish between ideal coordinates (also called 'normalized' or 'sensor' coordinates), which are the input variables to the distortion model ('x' and 'y' in the OpenCV docs), and observed coordinates (also called 'image' coordinates, 'u' and 'v' in the OpenCV docs). Ideal coordinates have been normalized by the intrinsic parameters: they are scaled by the focal length and relative to the image centroid at (cx,cy). This is important to point out because the undistortPoints() method can return either ideal or observed coordinates, depending on the input arguments.
undistortPoints() can essentially do any combination of two things: remove distortions and apply a rotational transformation with the output either being in ideal or observed coordinates, depending on if a projection mat (InputArray P) is provided in the input. The input coordinates (InputArray src) for undistortPoints() is always in observed or image coordinates.
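For instance (a sketch; M is the camera matrix and D the distortion coefficients, src holds observed/image coordinates):
std::vector<cv::Point2f> src(1, cv::Point2f(320.f, 240.f)), ideal, observed;
cv::undistortPoints(src, ideal, M, D); // no P given: output in ideal coordinates
cv::undistortPoints(src, observed, M, D, cv::noArray(), M); // P = M: output back in observed coordinates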
At a high level undistortPoints() converts the input coordinates from observed to ideal coordinates and uses an iterative process to remove distortions from the ideal or normalized points. The reason the process is iterative is because the OpenCV distortion model is not easy to invert analytically.
In the example below, we use undistortPoints() twice. First, we apply a reverse rotational transformation to undo image rectification. This step can be skipped if you are not working with rectified images. The output of this first step is in observed coordinates so we use undistortPoints() again to convert these to ideal coordinates. The conversion to ideal coordinates makes setting up the input for projectPoints() easier (which we use to apply the distortions). With the ideal coordinates, we can simply convert them to homogeneous by appending a 1 to each point. This is equivalent to projecting the points to a plane in 3D world coordinates with a linear camera model.
Currently, there isn't a method in OpenCV to apply distortions to a set of ideal coordinates (with the exception of fisheye distortions using distort()), so we employ the projectPoints() method, which can apply distortions as well as transformations as part of its projection algorithm. The tricky part about using projectPoints() is that its input is in terms of world or model coordinates in 3D, which is why we homogenized the output of the second use of undistortPoints(). By using projectPoints() with dummy, zero-valued rotation (InputArray rvec) and translation vectors (InputArray tvec), the result is simply a distorted set of coordinates, which is conveniently output in observed or image coordinates.
Some helpful links
Difference between undistortPoints() and projectPoints() in OpenCV
https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#ga55c716492470bfe86b0ee9bf3a1f0f7e
Re-distort points with camera intrinsics/extrinsics
https://stackoverflow.com/questions/28678985/exact-definition-of-the-matrices-in-opencv-stereorectify#:~:text=Normally%20the%20definition%20of%20a,matrix%20with%20the%20extrinsic%20parameters
https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765
https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga617b1685d4059c6040827800e72ad2b6
Does OpenCV's undistortPoints also rectify them?
Removing distortions in rectified image coordinates
Before providing the solution for recovering the original image coordinates with distortions, here is a short snippet to convert from the original distorted image coordinates to the corresponding rectified, undistorted coordinates, which can be used for testing the reverse solution below.
The rotation matrix R1 and the projection matrix P1 come from stereoRectify(). The intrinsic parameters M1 and distortion parameters D1 come from stereoCalibrate().
const size_t img_w = 2448;
const size_t img_h = 2048;
const size_t num_rand_pts = 100;
// observed coordinates of the points in the original
// distorted image (used as a benchmark for testing)
std::vector<cv::Point2f> benchmark_obs_dist_points;
// undistorted and rectified observed coordinates
std::vector<cv::Point2f> obs_rect_undist_points;
// initialize with uniform random numbers
cv::RNG rng( 0xFFFFFFFF );
for(size_t i =0;i<num_rand_pts;++i)
benchmark_obs_dist_points.push_back(
cv::Point2f(rng.uniform(0.0,(double)img_w),
rng.uniform(0.0,(double)img_h))
);
// undistort and rectify
cv::undistortPoints(benchmark_obs_dist_points,obs_rect_undist_points,
M1,D1,R1,P1);
Re-distorting and unrectifying points to recover the original image coordinates
We will need three mats to reverse the rectification: the inverse of the rectification rotation matrix from stereoRectify R1, and two others to 'swap' the P1 and M1 projections that happen in undistortPoints(). P1_prime is the rotation matrix sub-portion of the projection matrix, and M1_prime embeds the intrinsic matrix M1 into a 3x4 projection matrix with no translation. Note this only works if the output of stereoRectify has no translation, i.e. the last column of P1 is zeros, which can easily be verified.
assert(cv::norm(P1(cv::Rect(3,0,1,3))) == 0.0);
// create a 3x3 shallow copy of the rotation matrix portion of the projection P1
cv::Mat P1_prime = P1(cv::Rect(0,0,3,3));
// create a 3x4 projection matrix that embeds the intrinsic
// matrix M1, with no translation
cv::Mat M1_prime = cv::Mat::zeros(3,4,CV_64F);
M1.copyTo(M1_prime(cv::Rect(0,0,3,3)));
With these mats, the reversal can proceed as follows
// reverse the image rectification transformation
// (result will still be undistorted)
std::vector<cv::Point2f> obs_undist_points;
cv::undistortPoints(obs_rect_undist_points,obs_undist_points,
P1_prime,cv::Mat(),R1.inv(),M1_prime);
// convert the image coordinates into sensor or normalized or ideal coordinates
// (again, still undistorted)
std::vector<cv::Point2f> ideal_undist_points;
cv::undistortPoints(obs_undist_points,ideal_undist_points,M1,cv::Mat());
// artificially project the ideal 2d points to a plane in world coordinates
// using a linear camera model (z=1)
std::vector<cv::Point3f> world_undist_points;
for (cv::Point2f pt : ideal_undist_points)
world_undist_points.push_back(cv::Point3f(pt.x,pt.y,1));
// add the distortions back in to get the original coordinates
cv::Mat rvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero rotation vec
cv::Mat tvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero translation vec
std::vector<cv::Point2f> obs_dist_points;
cv::projectPoints(world_undist_points,rvec,tvec,M1,D1,obs_dist_points);
To test the results, we can compare them to the benchmark values
for(size_t i=0;i<num_rand_pts;++i)
std::cout << "benchmark_x: " << benchmark_obs_dist_points[i].x
<< " benchmark_y: " << benchmark_obs_dist_points[i].y
<< " computed_x: " << obs_dist_points[i].x
<< " computed_y: " << obs_dist_points[i].y
<< " diff_x: "
<< std::abs(benchmark_obs_dist_points[i].x-obs_dist_points[i].x)
<< " diff_y: "
<< std::abs(benchmark_obs_dist_points[i].y-obs_dist_points[i].y)
<< std::endl;
This is main.cpp. It is self-contained and needs nothing but OpenCV. I don't remember where I found it, but it works; I used it in my project. The program consumes a set of standard chessboard images and generates XML/YAML files with all the distortion parameters of the camera.
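When using an image list as input, the list file follows the FileStorage sequence format that readStringList() below expects, something like this (the paths are placeholders):
<?xml version="1.0"?>
<opencv_storage>
<images>
"images/left01.jpg"
"images/left02.jpg"
</images>
</opencv_storage>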
#include <iostream>
#include <sstream>
#include <time.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
using namespace cv;
using namespace std;
static void help()
{
cout << "This is a camera calibration sample." << endl
<< "Usage: calibration configurationFile" << endl
<< "Near the sample file you'll find the configuration file, which has detailed help of "
"how to edit it. It may be any OpenCV supported file format XML/YAML." << endl;
}
class Settings
{
public:
Settings() : goodInput(false) {}
enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};
void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{" << "BoardSize_Width" << boardSize.width
<< "BoardSize_Height" << boardSize.height
<< "Square_Size" << squareSize
<< "Calibrate_Pattern" << patternToUse
<< "Calibrate_NrOfFrameToUse" << nrFrames
<< "Calibrate_FixAspectRatio" << aspectRatio
<< "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
<< "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint
<< "Write_DetectedFeaturePoints" << bwritePoints
<< "Write_extrinsicParameters" << bwriteExtrinsics
<< "Write_outputFileName" << outputFileName
<< "Show_UndistortedImage" << showUndistorsed
<< "Input_FlipAroundHorizontalAxis" << flipVertical
<< "Input_Delay" << delay
<< "Input" << input
<< "}";
}
void read(const FileNode& node) //Read serialization for this class
{
node["BoardSize_Width" ] >> boardSize.width;
node["BoardSize_Height"] >> boardSize.height;
node["Calibrate_Pattern"] >> patternToUse;
node["Square_Size"] >> squareSize;
node["Calibrate_NrOfFrameToUse"] >> nrFrames;
node["Calibrate_FixAspectRatio"] >> aspectRatio;
node["Write_DetectedFeaturePoints"] >> bwritePoints;
node["Write_extrinsicParameters"] >> bwriteExtrinsics;
node["Write_outputFileName"] >> outputFileName;
node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
node["Input_FlipAroundHorizontalAxis"] >> flipVertical;
node["Show_UndistortedImage"] >> showUndistorsed;
node["Input"] >> input;
node["Input_Delay"] >> delay;
interprate();
}
void interprate()
{
goodInput = true;
if (boardSize.width <= 0 || boardSize.height <= 0)
{
cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl;
goodInput = false;
}
if (squareSize <= 10e-6)
{
cerr << "Invalid square size " << squareSize << endl;
goodInput = false;
}
if (nrFrames <= 0)
{
cerr << "Invalid number of frames " << nrFrames << endl;
goodInput = false;
}
if (input.empty()) // Check for valid input
inputType = INVALID;
else
{
if (input[0] >= '0' && input[0] <= '9')
{
stringstream ss(input);
ss >> cameraID;
inputType = CAMERA;
}
else
{
if (readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
else
inputType = VIDEO_FILE;
}
if (inputType == CAMERA)
inputCapture.open(cameraID);
if (inputType == VIDEO_FILE)
inputCapture.open(input);
if (inputType != IMAGE_LIST && !inputCapture.isOpened())
inputType = INVALID;
}
if (inputType == INVALID)
{
cerr << " Inexistent input: " << input << endl;
goodInput = false;
}
flag = 0;
if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CV_CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CV_CALIB_FIX_ASPECT_RATIO;
calibrationPattern = NOT_EXISTING;
if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD;
if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
if (calibrationPattern == NOT_EXISTING)
{
cerr << " Inexistent camera calibration mode: " << patternToUse << endl;
goodInput = false;
}
atImageList = 0;
}
Mat nextImage()
{
Mat result;
if( inputCapture.isOpened() )
{
Mat view0;
inputCapture >> view0;
view0.copyTo(result);
}
else if( atImageList < (int)imageList.size() )
result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);
return result;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.clear();
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
public:
Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern
float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
int nrFrames; // The number of frames to use from the input for calibration
float aspectRatio; // The aspect ratio
int delay; // In case of a video input
bool bwritePoints; // Write detected feature points
bool bwriteExtrinsics; // Write extrinsic parameters
bool calibZeroTangentDist; // Assume zero tangential distortion
bool calibFixPrincipalPoint;// Fix the principal point at the center
bool flipVertical; // Flip the captured images around the horizontal axis
string outputFileName; // The name of the file where to write
bool showUndistorsed; // Show undistorted images after calibration
string input; // The input ->
int cameraID;
vector<string> imageList;
int atImageList;
VideoCapture inputCapture;
InputType inputType;
bool goodInput;
int flag;
private:
string patternToUse;
};
static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
if(node.empty())
x = default_value;
else
x.read(node);
}
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints );
int main(int argc, char* argv[])
{
// help();
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
vector<vector<Point2f> > imagePoints;
Mat cameraMatrix, distCoeffs;
Size imageSize;
int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
clock_t prevTimestamp = 0;
const Scalar RED(0,0,255), GREEN(0,255,0);
const char ESC_KEY = 27;
for(int i = 0;;++i)
{
Mat view;
bool blinkOutput = false;
view = s.nextImage();
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
}
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
vector<Point2f> pointBuf;
bool found;
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
default:
found = false;
break;
}
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
}
if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
blinkOutput = s.inputCapture.isOpened();
}
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
//----------------------------- Output Text ------------------------------------------------
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
{
if(s.showUndistorsed)
msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}
putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
if( blinkOutput )
bitwise_not(view, view);
//------------------------- Video capture output undistorted ------------------------------
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
if( key == 'u' && mode == CALIBRATED )
s.showUndistorsed = !s.showUndistorsed;
if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
}
// -----------------------Show the undistorted image for the image list ------------------------
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
for(int i = 0; i < (int)s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = (char)waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
return 0;
}
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for( i = 0; i < (int)objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints);
}
static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();
switch(patternType)
{
case Settings::CHESSBOARD:
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
break;
default:
break;
}
}
static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
vector<float>& reprojErrs, double& totalAvgErr)
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
distCoeffs = Mat::zeros(8, 1, CV_64F);
vector<vector<Point3f> > objectPoints(1);
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
//Find intrinsic and extrinsic camera parameters
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
return ok;
}
// Print camera parameters to the output file
static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints,
double totalAvgErr )
{
FileStorage fs( s.outputFileName, FileStorage::WRITE );
time_t tm;
time( &tm );
struct tm *t2 = localtime( &tm );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
fs << "calibration_Time" << buf;
if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nrOfFrames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_Width" << imageSize.width;
fs << "image_Height" << imageSize.height;
fs << "board_Width" << s.boardSize.width;
fs << "board_Height" << s.boardSize.height;
fs << "square_Size" << s.squareSize;
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
fs << "FixAspectRatio" << s.aspectRatio;
if( s.flag )
{
sprintf( buf, "flags: %s%s%s%s",
s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CV_CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CV_CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
}
fs << "flagValue" << s.flag;
fs << "Camera_Matrix" << cameraMatrix;
fs << "Distortion_Coefficients" << distCoeffs;
fs << "Avg_Reprojection_Error" << totalAvgErr;
if( !reprojErrs.empty() )
fs << "Per_View_Reprojection_Errors" << Mat(reprojErrs);
if( !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
for( int i = 0; i < (int)rvecs.size(); i++ )
{
Mat r = bigmat(Range(i, i+1), Range(0,3));
Mat t = bigmat(Range(i, i+1), Range(3,6));
CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
//*.t() is MatExpr (not Mat) so we can use assignment operator
r = rvecs[i].t();
t = tvecs[i].t();
}
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "Extrinsic_Parameters" << bigmat;
}
if( !imagePoints.empty() )
{
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
for( int i = 0; i < (int)imagePoints.size(); i++ )
{
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat imgpti(imagePoints[i]);
imgpti.copyTo(r);
}
fs << "Image_points" << imagePtMat;
}
}
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;
if( ok )
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
return ok;
}
I am working on an OpenCV project and am using cvMatchTemplate to locate part of an image. I then use cvMinMaxLoc to find the maximum-response area and therefore the best match. My problem is that cvMinMaxLoc only returns one max location, whereas there may be multiple matches in one image.
Is there any way to return all the max locations above a particular threshold?
I.e.
for each location > threshold
add location to array
I'm new to OpenCV and don't know if something like this already exists, but so far I haven't been able to find anything.
Any help greatly appreciated
I modified the matchTemplate tutorial to get you started. It basically uses a queue to track the top X match points, and later plots all of them. Hope that is helpful!
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
#include <limits>
#include <queue>
using namespace cv;
using namespace std;
void maxLocs(const Mat& src, queue<Point>& dst, size_t size)
{
float maxValue = -1.0f * numeric_limits<float>::max();
float* srcData = reinterpret_cast<float*>(src.data);
for(int i = 0; i < src.rows; i++)
{
for(int j = 0; j < src.cols; j++)
{
if(srcData[i*src.cols + j] > maxValue)
{
maxValue = srcData[i*src.cols + j];
dst.push(Point(j, i));
// pop the smaller one off the end if we reach the size threshold.
if(dst.size() > size)
{
dst.pop();
}
}
}
}
}
/// Global Variables
Mat img; Mat templ; Mat result;
string image_window = "Source Image";
string result_window = "Result window";
int match_method;
int max_Trackbar = 5;
/// Function Headers
void MatchingMethod( int, void* );
int main(int argc, char* argv[])
{
/// Load image and template
img = imread( "dogs.jpg", 1 );
templ = imread( "dog_templ.jpg", 1 );
/// Create windows
namedWindow( image_window, CV_WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE );
/// Create Trackbar
string trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
MatchingMethod( 0, 0 );
waitKey(0);
return 0;
}
/**
* @function MatchingMethod
* @brief Trackbar callback
*/
void MatchingMethod( int, void* )
{
/// Source image to display
Mat img_display;
img.copyTo( img_display );
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 ); // Mat::create takes (rows, cols)
/// Do the Matching and Normalize
matchTemplate( img, templ, result, match_method );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{
result = 1.0 - result;
}
// get the top 100 maximums...
queue<Point> locations;
maxLocs(result, locations, 100);
/// Show me what you got
while(!locations.empty())
{
Point matchLoc = locations.front();
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
locations.pop();
}
imshow( image_window, img_display );
imshow( result_window, result );
return;
}
Try cvThreshold(src, dst, threshold, 255, CV_THRESH_BINARY).
This returns an image in dst with all pixels above the threshold set to white (255) and all others to black. You would then iterate through the pixels: every pixel greater than 0 is a location you want. Something like this:
unsigned char* data = (unsigned char*)dst->imageData; // unsigned: with plain (signed) char, 255 would read as -1
for (int i = 0; i < dst->height; i++)
{
    for (int j = 0; j < dst->width; j++)
    {
        if (data[i * dst->widthStep + j] > 0) // widthStep accounts for row padding
        {
            // copy the location (j, i) into your array
        }
    }
}
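Another common approach (a sketch, assuming a CV_32F result matrix from matchTemplate where higher means a better match) is to call minMaxLoc in a loop and suppress the neighborhood of each maximum, so you don't collect clusters of near-duplicate locations:
std::vector<cv::Point> matchLocations(cv::Mat result, cv::Size templSize, double threshold)
{
    // note: this modifies 'result' in place (cv::Mat copies share pixel data);
    // pass result.clone() if you need to keep the original
    std::vector<cv::Point> locations;
    for (;;)
    {
        double maxVal;
        cv::Point maxLoc;
        cv::minMaxLoc(result, 0, &maxVal, 0, &maxLoc);
        if (maxVal < threshold)
            break;
        locations.push_back(maxLoc);
        // zero out a template-sized region around the match so the
        // next iteration finds the next-best location
        cv::rectangle(result, cv::Rect(maxLoc.x - templSize.width / 2,
                                       maxLoc.y - templSize.height / 2,
                                       templSize.width, templSize.height),
                      cv::Scalar(0), CV_FILLED);
    }
    return locations;
}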