I have binarized images like this one:
I need to determine the center and radius of the inner solid disk. As you can see, it is surrounded by a textured area that touches it, so simple connected-component detection doesn't work. However, there is an empty margin along a large part of the perimeter.
A possible fix would be to erode until all the texture disappears or disconnects from the disk, but this can be time consuming and the number of iterations needed is uncertain. (In addition, in some unlucky cases there are tiny holes in the disk, which would grow with erosion.)
Any better suggestion to address this problem in a robust and fast way? (I tagged OpenCV, but this is not mandatory; what matters is the approach.)
You can:
Invert the image
Find the largest axis-aligned rectangle containing only zeros (I used my C++ code from this answer); the algorithm is pretty fast.
Get the center and radius of the circle from the rectangle
Code:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// https://stackoverflow.com/a/30418912/5008845
cv::Rect findMaxRect(const cv::Mat1b& src)
{
cv::Mat1f W(src.rows, src.cols, float(0));
cv::Mat1f H(src.rows, src.cols, float(0));
cv::Rect maxRect(0,0,0,0);
float maxArea = 0.f;
for (int r = 0; r < src.rows; ++r)
{
for (int c = 0; c < src.cols; ++c)
{
if (src(r, c) == 0)
{
H(r, c) = 1.f + ((r>0) ? H(r-1, c) : 0);
W(r, c) = 1.f + ((c>0) ? W(r, c-1) : 0);
}
float minw = W(r,c);
for (int h = 0; h < H(r, c); ++h)
{
minw = std::min(minw, W(r-h, c));
float area = (h+1) * minw;
if (area > maxArea)
{
maxArea = area;
maxRect = cv::Rect(cv::Point(c - minw + 1, r - h), cv::Point(c+1, r+1));
}
}
}
}
return maxRect;
}
int main()
{
cv::Mat1b img = cv::imread("path/to/img", cv::IMREAD_GRAYSCALE);
// Binarize the image (in case it is not strictly 0/255)
img = img > 127;
cv::Rect r = findMaxRect(~img);
cv::Point center ( std::round(r.x + r.width / 2.f), std::round(r.y + r.height / 2.f));
int radius = std::sqrt(r.width*r.width + r.height*r.height) / 2;
cv::Mat3b out;
cv::cvtColor(img, out, cv::COLOR_GRAY2BGR);
cv::rectangle(out, r, cv::Scalar(0, 255, 0));
cv::circle(out, center, radius, cv::Scalar(0, 0, 255));
return 0;
}
My method is to use morph-open, findContours, and minEnclosingCircle, as follows:
#!/usr/bin/python3
# 2018/11/29 20:03
import cv2
fname = "test.png"
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_OPEN, kernel, iterations = 3)
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = max(cnts, key=cv2.contourArea)
pt, r = cv2.minEnclosingCircle(cnt)
pt = (int(pt[0]), int(pt[1]))
r = int(r)
print("center: {}\nradius: {}".format(pt, r))
The final result:
center: (184, 170)
radius: 103
My second attempt at this case. This time I am using a morphological opening operation (erosion followed by dilation) to weaken the noise and preserve the signal. This is followed by a simple threshold and a connected-component analysis. I hope this code runs faster.
Using this method, I can find the centroid with sub-pixel accuracy:
('center : ', (184.12244328746746, 170.59771290442544))
Radius is derived from the area of the circle.
('radius : ', 101.34704439389715)
Here is the full code:
import cv2
import numpy as np
# load image in grayscale
image = cv2.imread('radius.png',0)
r,c = image.shape
# remove noise
blurred = cv2.blur(image,(5,5))
# Morphological opening (erosion followed by dilation) to suppress the texture
morph = cv2.erode(blurred,None,iterations = 3)
morph = cv2.dilate(morph,None,iterations = 3)
cv2.imshow("morph",morph)
cv2.waitKey(0)
# Get the strong signal
th, th_img = cv2.threshold(morph,200,255,cv2.THRESH_BINARY)
cv2.imshow("th_img",th_img)
cv2.waitKey(0)
# Get connected components
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(th_img)
print(num_labels)
print(stats)
# display labels
labels_disp = np.uint8(255*labels/np.max(labels))
cv2.imshow("labels",labels_disp)
cv2.waitKey(0)
# Find the label of the component at the image center
cnt_label = labels[r//2, c//2]
# Find circle center and radius
# Radius derived from the component area
area = stats[cnt_label][4]
radius = np.sqrt(area / np.pi)  # alternative: (stats[cnt_label][2] + stats[cnt_label][3]) / 4
cnt_pt = ((centroids[cnt_label][0]),(centroids[cnt_label][1]))
print('center : ',cnt_pt)
print('radius : ',radius)
# Display final result
edges_color = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
cv2.circle(edges_color,(int(cnt_pt[0]),int(cnt_pt[1])),int(radius),(0,0,255),1)
cv2.circle(edges_color,(int(cnt_pt[0]),int(cnt_pt[1])),5,(0,0,255),-1)
x1 = stats[cnt_label][0]
y1 = stats[cnt_label][1]
w1 = stats[cnt_label][2]
h1 = stats[cnt_label][3]
cv2.rectangle(edges_color,(x1,y1),(x1+w1,y1+h1),(0,255,0))
cv2.imshow("edges_color",edges_color)
cv2.waitKey(0)
Here is an example using Hough circles. It can work if you set the minimum and maximum radius to a proper range.
import cv2
import numpy as np
# load image in grayscale
image = cv2.imread('radius.png',0)
r , c = image.shape
# remove noise
dst = cv2.blur(image,(5,5))
# Morphological opening (erosion followed by dilation)
dst = cv2.erode(dst,None,iterations = 3)
dst = cv2.dilate(dst,None,iterations = 3)
# Find Hough Circle
circles = cv2.HoughCircles(dst
,cv2.HOUGH_GRADIENT
,2
,minDist = 0.5* r
,param2 = 150
,minRadius = int(0.5 * r / 2.0)
,maxRadius = int(0.75 * r / 2.0)
)
# Display
edges_color = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
for i in circles[0]:
    print(i)
    cv2.circle(edges_color, (int(i[0]), int(i[1])), int(i[2]), (0, 0, 255), 1)
cv2.imshow("edges_color",edges_color)
cv2.waitKey(0)
Here is the result
[185. 167. 103.6]
Have you tried something along the lines of the Circle Hough Transform?
I see that OpenCV has its own implementation. Some preprocessing (median filtering?) might be necessary here, though.
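For reference, here is a minimal sketch of that idea with the Python bindings (the blur size, accumulator threshold and radius range are guesses that would need tuning for the actual image; the file name is a placeholder):
import cv2

img = cv2.imread("disk.png", cv2.IMREAD_GRAYSCALE)  # placeholder file name
img = cv2.medianBlur(img, 5)                        # median filter to suppress the texture
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=2, minDist=img.shape[0],
                           param1=100, param2=100, minRadius=50, maxRadius=150)
if circles is not None:
    cx, cy, r = circles[0][0]  # strongest circle found
    print("center:", (cx, cy), "radius:", r)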
Here is a simple approach:
Erode the image (using a large, circular structuring element), then find the centroid of the result. This should be really close to the centroid of the central disk.
Compute the mean as a function of the radius of the original image, using the computed centroid as the center.
The output looks like this:
From here, determining the radius is quite simple.
Here is the code. I'm using PyDIP (we don't yet have a binary distribution, you'll need to download and build from sources):
import matplotlib.pyplot as pp
import PyDIP as dip
import numpy as np
img = dip.Image(pp.imread('/home/cris/tmp/FDvQm.png')[:,:,0])
b = dip.Erosion(img, 30)
c = dip.CenterOfMass(b)
rmean = dip.RadialMean(img, center=c)
pp.plot(rmean)
r = np.argmax(rmean < 0.5)
Here, r is 102, the radius in integer pixels; I'm sure it's possible to interpolate to improve precision. c is [184.02, 170.45].
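The interpolation mentioned above could be as simple as locating where the radial mean profile crosses 0.5 between two integer radii. A minimal numpy sketch, assuming rmean can be indexed like a 1D array as in the code above:
import numpy as np

def subpixel_radius(rmean, threshold=0.5):
    # first integer radius where the profile drops below the threshold
    r0 = int(np.argmax(np.asarray(rmean) < threshold))
    y_prev, y_next = rmean[r0 - 1], rmean[r0]
    # linear interpolation of the crossing between r0-1 and r0
    return (r0 - 1) + (y_prev - threshold) / (y_prev - y_next)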
I use the OpenCV function projectPoints() to rotate, translate and project a set of 3D points, and solvePnP() to recover this rotation and translation. This works well when the lens distortion coefficients are all zero but fails otherwise. It takes as little distortion as this to fail completely:
distCoeffs << 0.0, 0.01, 0.0, 0.0, 0.0;
The code is below:
#include <iostream>
#include "opencv.hpp"
using namespace std;
using namespace cv;
#define DEG2RAD (3.14159265/180.0)
#define RAD2DEG (1.0/DEG2RAD)
int main() {
const int npoints = 10; // number of points
// extrinsic
const Point3f tvec(10, 20, 30);
Point3f rvec(3, 5, 7);
cout << "Finding extrinsic parameters (PnP)" << endl;
cout<<"Test transformations: ";
cout<<"Rotation: "<<rvec<<"; translation: "<<tvec<<endl;
rvec*=DEG2RAD;
// intrinsic
Mat_<double> cameraMatrix(3, 3);
cameraMatrix << 300., 0., 200., 0, 300., 100., 0., 0., 1.;
Mat_<double> distCoeffs(1, 5); // (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
//distCoeffs << 1.2, 0.2, 0., 0., 0.; // non-zero distortion
distCoeffs << 0.0, 0.0, 0.0, 0.0, 0.0; // zero distortion
cout<<"distrotion coeff: "<<distCoeffs<<endl;
cout<<"============= Running PnP..."<<endl;
vector<Point3f> objPts(npoints);
vector<Point2f> imagePoints(npoints);
Mat rvec_est, tvec_est;
randu(Mat(objPts), 0.0f, 100.0f);
// project
projectPoints(Mat(objPts), Mat(rvec), Mat(tvec), cameraMatrix, distCoeffs, Mat(imagePoints));
// extrinsic
solvePnP(objPts, imagePoints, cameraMatrix, distCoeffs, rvec_est, tvec_est);
cout<<"Rotation: "<<rvec_est*RAD2DEG<<endl;
cout<<"Translation "<<tvec_est<<endl;
return 0;
}
When all distortion coefficients are 0 the result is OK:
Finding extrinsic parameters (PnP)
Test transformations: Rotation: [3, 5, 7]; translation: [10, 20, 30]
distortion coeff: [0, 0, 0, 0, 0]
============= Running PnP...
Rotation: [2.999999581709123; 4.999997813985293; 6.999999826089725]
Translation [9.999999792663072; 19.99999648222693; 29.99999699621362]
However when they aren't zero the result is totally wrong:
Finding extrinsic parameters (PnP)
Test transformations: Rotation: [3, 5, 7]; translation: [10, 20, 30]
distortion coeff: [1.2, 0.2, 0, 0, 0]
============= Running PnP...
Rotation: [-91.56479629305277; -124.3631985067845; -74.46486950666471]
Translation [-69.72473511009439; -117.7463271636532; -87.27777166027946]
Since people asked, I am adding intermediate input - some 3D points and their projections for non-zero distortion coefficients. My camera matrix was
cameraMatrix << 300., 0., 200., 0, 300., 100., 0., 0., 1.;
3d points [53.0283, 19.9259, 40.1059]; 2D projection [1060.34, 700.59]
3d points [81.4385, 43.7133, 24.879]; 2D projection [6553.88, 5344.22]
3d points [77.3105, 76.2094, 30.7794]; 2D projection [5143.32, 6497.12]
3d points [70.2432, 47.8447, 79.219]; 2D projection [771.497, 611.726]
Another interesting observation: applying undistortPoints when distCoeffs are non-zero doesn't really work (but it does produce identical 2D points when the distortion coefficients are all 0):
cout<<"applying undistort..."<<endl;
vector<Point2f> imagePointsUndistort(npoints);
undistortPoints(Mat(imagePoints), Mat(imagePointsUndistort), cameraMatrix, distCoeffs);
for (int i=0; i<4; i++)
cout<<"2d original "<<imagePoints[i]<<"; 2d undistort "<<imagePointsUndistort[i]<<endl;
applying undistort...
2d original [1060.34, 700.59]; 2d undistort [0, 0]
2d original [6553.88, 5344.22]; 2d undistort [0, 0]
2d original [5143.32, 6497.12]; 2d undistort [0, 0]
2d original [771.497, 611.726]; 2d undistort [0, 0]
The reason why I tried undistortPoints() is that if one undoes the effect of the known intrinsic parameters, PnP becomes just a minimum direction problem of the form Ax=0. It needs a minimum of 6 points for an approximate linear solution, which is probably further improved with LMA (flags=CV_ITERATIVE). Technically there are only 6 DOF and thus 3 points required, so other methods (flags=CV_P3P, CV_EPNP) take fewer points. Anyway, regardless of the method or number of points, the result is still invalid with non-zero distortion coefficients. The last thing I will try is to put all points on a 3D plane. It still fails:
for (int i=0; i<npoints; i++)
objPts[i].z=0.0f;
Finding extrinsic parameters (PnP)
Test transformations: Rotation: [3, 5, 7]; translation: [10, 20, 30]
distortion coeff: [1.2, 0.2, 0, 0, 0]
============= Running PnP...
Rotation: [-1830.321574903016; 2542.206083947917; 2532.255948350521]
Translation [1407.918216894239; 1391.373407846455; 556.7108606094299]
How to make your code work?
I am able to reproduce the described behavior using the code you provided; however, either of the following two options solves the problem:
Replace const Point3f tvec(10, 20, 30); by const Point3f tvec(10, 20, N); where N is much lower than 0 (e.g. -300) or much larger than 100 (e.g. 300).
Replace your call to solvePnP by a call to solvePnPRansac.
Why does each of these changes fix the undesired behavior?
First, consider what your original code requests from the solvePnP function. You are using a rotation of rather small magnitude, hence, for simplicity of the explanation, I will assume that the rotation is the identity. Then the camera is positioned at world coordinates X=10, Y=20 and Z=30, and you generate object points with world coordinates (X,Y,Z) drawn uniformly in [0,100]³. Hence the camera is in the middle of the possible range of the object points, as illustrated in the following picture:
This means that object points may be generated very close to the focal plane (i.e. the plane going through the optical center, perpendicular to the optical axis). The projection onto the camera image for such object points is undefined. However, in practice the non-linear optimization in undistortPoints is unstable even for object points that are merely close to the focal plane. This instability causes the iterative algorithm in undistortPoints to diverge, except when the coefficients are all zero, since in that case the initial values remain strictly constant during the estimation.
Hence, the two possible solutions to avoid this behavior are the following:
Avoid generating object points near the focal plane of the camera, i.e. change the translation vector or the range of the coordinates of the object points.
Eliminate the object points that are too close to the focal plane of the camera, for which the undistortion estimate diverged (outliers), before the PnP estimation, for example using solvePnPRansac (see the sketch after this list).
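For illustration, here is a sketch of the second option using the Python bindings with the question's numbers (the C++ call is analogous); solvePnPRansac treats the points whose undistortion diverged as outliers and estimates the pose from the remaining inliers:
import cv2
import numpy as np

camera_matrix = np.array([[300., 0., 200.], [0., 300., 100.], [0., 0., 1.]])
dist_coeffs = np.array([1.2, 0.2, 0., 0., 0.])
rvec = np.deg2rad([3., 5., 7.]).reshape(3, 1)
tvec = np.array([[10.], [20.], [30.]])
obj_pts = np.random.uniform(0, 100, (10, 3)).astype(np.float32)

# Project with distortion, exactly as in the question
img_pts, _ = cv2.projectPoints(obj_pts, rvec, tvec, camera_matrix, dist_coeffs)

# RANSAC rejects the diverged points and solves PnP on the inliers
ok, rvec_est, tvec_est, inliers = cv2.solvePnPRansac(obj_pts, img_pts, camera_matrix, dist_coeffs)
print("rotation (deg):", np.rad2deg(rvec_est).ravel())
print("translation:", tvec_est.ravel())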
Details about why undistortPoints fails:
NB: as we know the 3D world points, I used the following call to obtain the true undistorted coordinates, independently of the result of undistortPoints:
cv::projectPoints(obj_pts, rvec, tvec, cv::Mat_<double>::eye(3,3), cv::Mat_<double>::zeros(5,1), true_norm_pts);
The following function is a simplified version of what undistortPoints is doing:
void simple_undistort_point(const cv::Mat &img_pt,
const cv::Mat_<double> &K,
const cv::Mat_<double> &D,
cv::Mat &norm_pt)
{
// Define temporary variables
double k[8]={D.at<double>(0),
D.at<double>(1),
D.at<double>(2),
D.at<double>(3),
D.at<double>(4)},
fx, fy, ifx, ify, cx, cy;
fx = K.at<double>(0,0);
fy = K.at<double>(1,1);
ifx = 1./fx;
ify = 1./fy;
cx = K.at<double>(0,2);
cy = K.at<double>(1,2);
// Cancel distortion iteratively
const int iters = 5;
double x, y, x0, y0;
x0=x=(img_pt.at<double>(0)-cx)*ifx;
y0=y=(img_pt.at<double>(1)-cy)*ify;
for(int j = 0; j < iters; ++j)
{
double r2 = x*x + y*y;
double icdist = 1/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
x = (x0 - deltaX)*icdist;
y = (y0 - deltaY)*icdist;
}
// Store result
norm_pt.create(1,2,CV_64F);
norm_pt.at<double>(0) = x;
norm_pt.at<double>(1) = y;
}
If you add code to check how x and y change with each iteration, you'll see that the iterative optimization diverges due to r2 being very large at the beginning. Here is a log example:
#0: [2.6383300, 1.7651500] r2=10.0766000, icdist=0.0299408, deltaX=0, deltaY=0
#1: [0.0789937, 0.0528501] r2=0.00903313, icdist=0.9892610, deltaX=0, deltaY=0
#2: [2.6100000, 1.7462000] r2=9.86128000, icdist=0.0309765, deltaX=0, deltaY=0
#3: [0.0817263, 0.0546783] r2=0.00966890, icdist=0.9885120, deltaX=0, deltaY=0
#4: [2.6080200, 1.7448800] r2=9.84637000, icdist=0.0310503, deltaX=0, deltaY=0
end: [0.0819209, 0.0548085]
true: [0.9327440, 0.6240440]
When r2 is large, r2*r2*r2 is huge hence icdist is very small, hence the next iteration starts with a very small r2. When r2 is very small, icdist is close to 1, hence x and y are respectively set to x0 and y0 and we are back with a large r2, etc.
So why is r2 so large in the first place? Because the points may be generated close to the focal plane, in which case they are far from the optical axis (hence a very large r2). See the following log example:
img_pt#0=[991.4992804037340, 629.5460091483255], r2=10.07660, norm(cv_undist-true)=1.0236800
img_pt#1=[5802.666489402056, 4402.387472311543], r2=554.4490, norm(cv_undist-true)=2.1568300
img_pt#2=[5040.551339386630, 5943.173381042060], r2=639.7070, norm(cv_undist-true)=2.1998700
img_pt#3=[741.9742544382640, 572.9513930063181], r2=5.749100, norm(cv_undist-true)=0.8158670
img_pt#4=[406.9101658356062, 403.0152736214052], r2=1.495890, norm(cv_undist-true)=0.1792810
img_pt#5=[516.2079583447821, 1038.026553216831], r2=10.88760, norm(cv_undist-true)=1.0494500
img_pt#6=[1876.220394606081, 8129.280202695572], r2=747.5450, norm(cv_undist-true)=2.2472900
img_pt#7=[236.9935231831764, 329.3418854620716], r2=0.599625, norm(cv_undist-true)=0.0147487
img_pt#8=[1037.586015858139, 1346.494838992490], r2=25.05890, norm(cv_undist-true)=1.2998400
img_pt#9=[499.9808133105154, 715.6213031242644], r2=5.210870, norm(cv_undist-true)=0.7747020
You can see that for most points, r2 is very large, except for a few (#3, #4 & #7) which are also those associated with the best undistortion accuracy.
This problem is due to the particular undistortion algorithm implemented in OpenCV, which has been chosen for its efficiency. Other non-linear optimization algorithms (e.g. Levenberg-Marquardt) would be more accurate but also much slower, and would definitely be overkill in most applications.
Let me go through the OpenCV sources. But first I present a "pure" OpenCV function that works as the sources do (please read below to see how I got to this point), merged with your code, to show it behaves like the library one:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define DEG2RAD (3.14159265/180.0)
#define RAD2DEG (1.0/DEG2RAD)
Point2f Project(Point3f p, double R[], double t[], double k[], double fx, double fy, double cx, double cy) {
double X = p.x, Y = p.y, Z = p.z;
double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
double r2, r4, r6, a1, a2, a3, cdist, icdist2;
double xd, yd;
z = z ? 1./z : 1;
x *= z; y *= z;
r2 = x*x + y*y;
r4 = r2*r2;
r6 = r4*r2;
a1 = 2*x*y;
a2 = r2 + 2*x*x;
a3 = r2 + 2*y*y;
cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;
double xRet = xd*fx + cx;
double yRet = yd*fy + cy;
return Point2f(xRet, yRet);
}
int main() {
const int npoints = 10; // number of points
// extrinsic
const Point3f tvec(10, 20, 30);
Point3f rvec(3, 5, 7);
cout << "Finding extrinsic parameters (PnP)" << endl;
cout<<"Test transformations: ";
cout<<"Rotation: "<<rvec<<"; translation: "<<tvec<<endl;
rvec*=DEG2RAD;
// intrinsic
Mat_<double> cameraMatrix(3, 3);
cameraMatrix << 300., 0., 200., 0, 300., 100., 0., 0., 1.;
Mat_<double> distCoeffs(1, 5); // (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
distCoeffs << 1.2, 0.2, 0., 0., 0.; // non-zero distortion
//distCoeffs << 0.0, 0.0, 0.0, 0.0, 0.0; // zero distortion
//distCoeffs << 1.8130418031666484e+000, -1.3285019729932657e+001, -1.6921715019797313e-002, -1.3327183367510961e-001, -5.2725832482783389e+001;
cout<<"distrotion coeff: "<<distCoeffs<<endl;
cout<<"============= Running PnP..."<<endl;
vector<Point3f> objPts(npoints);
vector<Point2f> imagePoints(npoints);
Mat rvec_est, tvec_est;
randu(Mat(objPts), 0.0f, 100.0f);
// project
projectPoints(Mat(objPts), Mat(rvec), Mat(tvec), cameraMatrix, distCoeffs, Mat(imagePoints));
std::cout << objPts << std::endl;
std::cout << imagePoints << std::endl;
double R[9];
Mat matR( 3, 3, CV_64F, R);
Mat_<double> m(1,3);
m << (double)rvec.x, (double)rvec.y, (double)rvec.z;
Rodrigues(m, matR);
std::cout << matR << std::endl;
double t[3] = {tvec.x, tvec.y, tvec.z};
double k[8] = {1.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
double fx = 300, fy = 300, cx = 200, cy = 100;
for(int i=0;i<objPts.size();i++)
std::cout << Project(objPts[i], R, t, k, fx, fy, cx, cy) << "; ";
std::cout << std::endl;
// extrinsic
solvePnP(objPts, imagePoints, cameraMatrix, distCoeffs, rvec_est, tvec_est);
cout<<"Rotation: "<<rvec_est*RAD2DEG<<endl;
cout<<"Translation "<<tvec_est<<endl;
return 0;
}
R is the rotation, t the translation, k the distortion. Look at the r2 computation: it is x*x + y*y, but x, y is the position (scaled by z, though) just after applying the translation and rotation. And this r stands (as Wikipedia says) for the "square distance in the image projected by an ideal pinhole model". We can say the projectPoints implementation is OK.
How I got this result:
I dug into version 2.4.8. If you go to calibration.cpp in the calib3d module, start with:
void cv::projectPoints( InputArray _opoints,
InputArray _rvec,
InputArray _tvec,
InputArray _cameraMatrix,
InputArray _distCoeffs,
OutputArray _ipoints,
OutputArray _jacobian,
double aspectRatio )
{
Mat opoints = _opoints.getMat();
int npoints = opoints.checkVector(3), depth = opoints.depth();
CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
_ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
CvMat c_imagePoints = _ipoints.getMat();
CvMat c_objectPoints = opoints;
Mat cameraMatrix = _cameraMatrix.getMat();
Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
CvMat c_cameraMatrix = cameraMatrix;
CvMat c_rvec = rvec, c_tvec = tvec;
double dc0buf[5]={0};
Mat dc0(5,1,CV_64F,dc0buf);
Mat distCoeffs = _distCoeffs.getMat();
if( distCoeffs.empty() )
distCoeffs = dc0;
CvMat c_distCoeffs = distCoeffs;
int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
if( _jacobian.needed() )
{
// cut out, we dont use this part
}
cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
&c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
}
Nothing special, right? No content manipulation at all. Let's go deeper:
CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
const CvMat* r_vec,
const CvMat* t_vec,
const CvMat* A,
const CvMat* distCoeffs,
CvMat* imagePoints, CvMat* dpdr,
CvMat* dpdt, CvMat* dpdf,
CvMat* dpdc, CvMat* dpdk,
double aspectRatio )
{
Ptr<CvMat> matM, _m;
Ptr<CvMat> _dpdr, _dpdt, _dpdc, _dpdf, _dpdk;
int i, j, count;
int calc_derivatives;
const CvPoint3D64f* M;
CvPoint2D64f* m;
double r[3], R[9], dRdr[27], t[3], a[9], k[8] = {0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
// some code not important ...
if( r_vec->rows == 3 && r_vec->cols == 3 )
{
_r = cvMat( 3, 1, CV_64FC1, r );
cvRodrigues2( r_vec, &_r );
cvRodrigues2( &_r, &matR, &_dRdr );
cvCopy( r_vec, &matR );
}
else
{
_r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
cvConvert( r_vec, &_r );
cvRodrigues2( &_r, &matR, &_dRdr );
}
The last part is important, because we use cv::Rodrigues to create a rotation matrix from the rotation vector. Later in the function we also set up the translation, but there is still no data manipulation. Going further into cvProjectPoints2:
fx = a[0]; fy = a[4];
cx = a[2]; cy = a[5];
if( fixedAspectRatio )
fx = fy*aspectRatio;
if( distCoeffs )
{
if( !CV_IS_MAT(distCoeffs) ||
(CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&
CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
(distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
(distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
_k = cvMat( distCoeffs->rows, distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
cvConvert( distCoeffs, &_k );
}
Here we set the focal lengths and principal point coordinates from the camera matrix. We also set the array k, which holds the distortion coefficients. Now we have finished setting up the variables; let's go to the computations:
double X = M[i].x, Y = M[i].y, Z = M[i].z;
double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
double r2, r4, r6, a1, a2, a3, cdist, icdist2;
double xd, yd;
z = z ? 1./z : 1;
x *= z; y *= z;
r2 = x*x + y*y;
r4 = r2*r2;
r6 = r4*r2;
a1 = 2*x*y;
a2 = r2 + 2*x*x;
a3 = r2 + 2*y*y;
cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;
m[i].x = xd*fx + cx; // here projection
m[i].y = yd*fy + cy;
And we have exactly the same function as the one I presented at the top.
I use OpenCV to undistort a set of points after camera calibration.
The code follows.
const int npoints = 2; // number of points specified
// Points initialization.
// Only 2 points in this example; in real code they are read from a file.
float input_points[npoints][2] = {{0,0}, {2560, 1920}};
CvMat * src = cvCreateMat(1, npoints, CV_32FC2);
CvMat * dst = cvCreateMat(1, npoints, CV_32FC2);
// fill src matrix
float * src_ptr = (float*)src->data.ptr;
for (int pi = 0; pi < npoints; ++pi) {
for (int ci = 0; ci < 2; ++ci) {
*(src_ptr + pi * 2 + ci) = input_points[pi][ci];
}
}
cvUndistortPoints(src, dst, &camera1, &distCoeffs1);
After the code above, dst contains the following numbers:
-8.82689655e-001 -7.05507338e-001 4.16228324e-001 3.04863811e-001
which are too small in comparison with the numbers in src.
At the same time, if I undistort the whole image via the call:
cvUndistort2( srcImage, dstImage, &camera1, &dist_coeffs1 );
I receive a good undistorted image, which means that the pixel coordinates are not modified as drastically as the separate points.
How can I obtain the same undistortion for specific points as for images?
Thanks.
The points should be "unnormalized" using the camera matrix.
More specifically, after the call to cvUndistortPoints the following transformation should also be applied:
double fx = CV_MAT_ELEM(camera1, double, 0, 0);
double fy = CV_MAT_ELEM(camera1, double, 1, 1);
double cx = CV_MAT_ELEM(camera1, double, 0, 2);
double cy = CV_MAT_ELEM(camera1, double, 1, 2);
float * dst_ptr = (float*)dst->data.ptr;
for (int pi = 0; pi < npoints; ++pi) {
float& px = *(dst_ptr + pi * 2);
float& py = *(dst_ptr + pi * 2 + 1);
// perform transformation.
// In fact this is equivalent to multiplication to camera matrix
px = px * fx + cx;
py = py * fy + cy;
}
More info on the camera matrix can be found in OpenCV's 'Camera Calibration and 3D Reconstruction' documentation.
UPDATE:
The following C++ function call should work as well:
std::vector<cv::Point2f> inputDistortedPoints = ...
std::vector<cv::Point2f> outputUndistortedPoints;
cv::Mat cameraMatrix = ...
cv::Mat distCoeffs = ...
cv::undistortPoints(inputDistortedPoints, outputUndistortedPoints, cameraMatrix, distCoeffs, cv::noArray(), cameraMatrix);
It may be your matrix size :)
OpenCV expects a vector of points - a column or a row matrix with two channels. But because your input matrix has only 2 points, and the number of channels is also 1, it cannot figure out whether the input is a row or a column.
So, fill a longer input mat with bogus values, and keep only the first results:
const int npoints = 4; // number of points specified
// Points initialization.
// Only 2 points in this example; in real code they are read from a file.
float input_points[npoints][2] = {{0,0}, {2560, 1920}}; // the rest will be set to 0
CvMat * src = cvCreateMat(1, npoints, CV_32FC2);
CvMat * dst = cvCreateMat(1, npoints, CV_32FC2);
// fill src matrix
float * src_ptr = (float*)src->data.ptr;
for (int pi = 0; pi < npoints; ++pi) {
for (int ci = 0; ci < 2; ++ci) {
*(src_ptr + pi * 2 + ci) = input_points[pi][ci];
}
}
cvUndistortPoints(src, dst, &camera1, &distCoeffs1);
EDIT
While OpenCV specifies that undistortPoints accepts only 2-channel input, it actually accepts:
a 1-column, 2-channel, multi-row mat, or (and this case is not documented)
a 2-column, multi-row, 1-channel mat, or
a multi-column, 1-row, 2-channel mat
(as seen in undistort.cpp, line 390)
But a bug inside (or a lack of available info) makes it wrongly mix the second case with the third one when the number of columns is 2. So your data is considered a 2-column, 2-row, 1-channel mat.
I also ran into this problem, and it took me some time to research it and finally understand it.
Formula
As you can see from the formula above, in the OpenCV model the distortion operation is applied before the camera matrix, so the processing order is:
image_distorted -> camera_matrix -> undistort function -> camera_matrix -> back to image_undistorted.
So you need a small fix to apply camera1 again:
Mat eye3 = Mat::eye(3, 3, CV_64F);
cvUndistortPoints(src, dst, &camera1, &distCoeffs1, &eye3,&camera1);
Otherwise, if the last two parameters are empty, the points are projected to normalized image coordinates.
See the source: opencv-3.4.0-src\modules\imgproc\src\undistort.cpp:297, cvUndistortPointsInternal()
I have a piece of code for rotating and translating an image:
Point2f pt(0, in.rows);
double angle = atan(trans.c / trans.b) * 180 / M_PI;
Mat r = getRotationMatrix2D(pt, -angle, 1.0);
warpAffine(in, out, r, in.size(), interpolation); /* rotation */
Mat t = (Mat_<double>(2, 3) << 1, 0, trans.a, 0, 1, -trans.d);
warpAffine(out, out, t, in.size(), interpolation); /* translation */
The problem is that I'm doing this in two passes. So if I have an angle of 90 degrees, for example, the first "out" image will be empty because all the data are out of bounds. Is there a way to do it in one pass, in order to avoid losing my data and getting a black image?
I think that the best thing would be to combine r and t in one matrix but I'm a little lost.
Best regards,
Here is an example of how to combine two homographies by simple multiplication, and how to extract an affine transformation from a 3x3 homography.
int main(int argc, char* argv[])
{
cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
// create two 3x3 identity homography matrices
cv::Mat homography1 = cv::Mat::eye(3, 3, CV_64FC1);
cv::Mat homography2 = cv::Mat::eye(3, 3, CV_64FC1);
double alpha1 = -13; // degrees
double t1_x = -86; // pixel
double t1_y = -86; // pixel
double alpha2 = 21; // degrees
double t2_x = 86; // pixel
double t2_y = 86; // pixel
// hope there is no error in the signs:
// combine homography1
homography1.at<double>(0, 0) = cos(CV_PI*alpha1 / 180);
homography1.at<double>(0, 1) = -sin(CV_PI*alpha1 / 180);
homography1.at<double>(1, 0) = sin(CV_PI*alpha1 / 180);
homography1.at<double>(1, 1) = cos(CV_PI*alpha1 / 180);
homography1.at<double>(0, 2) = t1_x;
homography1.at<double>(1, 2) = t1_y;
// compose homography2
homography2.at<double>(0, 0) = cos(CV_PI*alpha2 / 180);
homography2.at<double>(0, 1) = -sin(CV_PI*alpha2 / 180);
homography2.at<double>(1, 0) = sin(CV_PI*alpha2 / 180);
homography2.at<double>(1, 1) = cos(CV_PI*alpha2 / 180);
homography2.at<double>(0, 2) = t2_x;
homography2.at<double>(1, 2) = t2_y;
cv::Mat affine1 = homography1(cv::Rect(0, 0, 3, 2));
cv::Mat affine2 = homography2(cv::Rect(0, 0, 3, 2));
cv::Mat dst1;
cv::Mat dst2;
cv::warpAffine(input, dst1, affine1, input.size());
cv::warpAffine(input, dst2, affine2, input.size());
cv::Mat combined_homog = homography1*homography2;
cv::Mat combined_affine = combined_homog(cv::Rect(0, 0, 3, 2));
cv::Mat dst_combined;
cv::warpAffine(input, dst_combined, combined_affine, input.size());
cv::imshow("input", input);
cv::imshow("dst1", dst1);
cv::imshow("dst2", dst2);
cv::imshow("combined", dst_combined);
cv::waitKey(0);
return 0;
}
In this example, an image is first rotated and translated to the left, then to the right. If the two transformations are performed one after the other, significant image areas would get lost. Instead, if they are combined by homography multiplication, it is as if the full operation were done in a single step, without losing image parts in the intermediate step.
input:
if the image is first transformed with H1, then with H2:
if the image is transformed with the combination H1*H2 directly:
One typical application of this homography combination is to first translate the image center to the origin, then rotate, then translate back to the original position. This has the same effect as rotating the image around its center, as sketched below.
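For example, that rotation about the image center can be built as T(center) * R * T(-center) and applied with a single warp. A minimal Python/numpy sketch of the composition (the file name is a placeholder):
import cv2
import numpy as np

img = cv2.imread("Lenna.png")   # placeholder input image
h, w = img.shape[:2]
cx, cy = w / 2.0, h / 2.0
alpha = np.deg2rad(21)          # example angle

def translation(tx, ty):
    return np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]], dtype=np.float64)

rotation = np.array([[np.cos(alpha), -np.sin(alpha), 0],
                     [np.sin(alpha),  np.cos(alpha), 0],
                     [0, 0, 1]], dtype=np.float64)

# translate the center to the origin, rotate, translate back -- one combined homography
H = translation(cx, cy) @ rotation @ translation(-cx, -cy)
out = cv2.warpAffine(img, H[:2, :], (w, h))  # single warp, nothing lost in an intermediate image
cv2.imshow("rotated about center", out)
cv2.waitKey(0)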