Image lens distortion correction

I am using an Aptina 5MP sensor with a fish-eye lens to capture images.
I am using the following algorithm to correct the lens distortion:
http://www.tannerhelland.com/4743/simple-algorithm-correcting-lens-distortion/
It is not correcting the image properly.
Any help would be appreciated.
//code----
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <math.h>

using namespace cv;
using namespace std;

// globals
Mat src, dst;
Mat map_x, map_y;

#define REMAP_WINDOW "Remap Circle"

void make_circle_map(float, float, float, float);

int main(int argc, char** argv) {
    // load image
    src = imread(argv[1], 1);
    float qvDepth = atof(argv[2]);
    float fixStrength = atof(argv[3]);
    float fixZoom = atof(argv[4]);
    float lensRadius = atof(argv[5]);

    // create destination and the maps
    dst.create(src.size(), src.type());
    map_x.create(src.size(), CV_32FC1);
    map_y.create(src.size(), CV_32FC1);

    // create window
    // namedWindow(REMAP_WINDOW, CV_WINDOW_AUTOSIZE);

    make_circle_map(qvDepth, fixStrength, fixZoom, lensRadius);
    remap(src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));

    //imshow(REMAP_WINDOW, dst);
    imwrite("got1.jpg", dst);

    // while(27 != waitKey()) {
    //     just wait
    // }
    // cvDestroyWindow(REMAP_WINDOW);
    return 0;
}

void make_circle_map(float qvDepth, float fixStrength, float fixZoom, float lensRadius) {
    //ApplyLensCorrection(double fixStrength, double fixZoom, double lensRadius, long long edgeHandling, long long superSamplingAmount)
    cout << "qvDepth :" << qvDepth << " fixStrength :" << fixStrength
         << " fixZoom :" << fixZoom << " lensRadius :" << lensRadius << endl;
    //float qvDepth = 32;//24;
    //float fixStrength = 4.5; // has to be utilized further
    //float fixZoom = 0.5;
    //float lensRadius = 2;

    long tWidth = 1944;
    long tHeight = 2580;

    // calculate the center of the image
    //double midX = 0;
    //double midY = 0;
    double midX = (double)src.cols / 2;
    double midY = (double)src.rows / 2;

    // rotation values
    double theta = 0;
    double sRadius = 0;
    double sRadius2 = 0;
    double sDistance = 0;
    double radius = 0;
    double j = 0;
    double k = 0;

    // X and Y values, remapped around a center point of (0, 0)
    double nX = 0;
    double nY = 0;
    double QuickVal = 0;
    float ssX;
    float ssY;

    // source X and Y values, which may or may not be used as part of a bilinear interpolation function
    double srcX = 0;
    double srcY = 0;

    sRadius = sqrt(tWidth * tWidth + tHeight * tHeight) / 2;
    cout << "sRadius :" << sRadius << endl;

    double refDistance = 0; //modified 0 to 2
    if (fixStrength == 0)
    {
        fixStrength = 0.00000001;
    }
    refDistance = sRadius * 2 / fixStrength;
    sRadius = sRadius * (lensRadius / 100);
    sRadius2 = sRadius * sRadius;
    cout << "refDistance :" << refDistance << " sRadius :" << sRadius << " sRadius2 :" << sRadius2 << endl;

    float sampleIndex = 1; // has to be changed in future
    for (int x = 0; x <= tWidth; x++)
    {
        QuickVal = x * qvDepth;
        for (int y = 0; y <= tHeight; y++)
        {
            // remap the coordinates around a center point of (0, 0)
            nX = x - midX;
            nY = y - midY;
            // offset the pixel amount by the supersampling lookup table
            for (int ii = 1; ii < 4; ii++)
            {
                j = nX + ii;
                k = nY + ii;
                // calculate distance automatically
                sDistance = (j * j) + (k * k);
                //cout << "nx :" << nX << " ny :" << nY << " j :" << j << " k :" << k << " sDistance :" << sDistance << " sRadius2 :" << sRadius2 << endl;
                if (sDistance <= sRadius2)
                {
                    sDistance = sqrt(sDistance);
                    radius = sDistance / refDistance;
                    if (radius == 0)
                    {
                        theta = 1;
                    }
                    else
                    {
                        theta = atan(radius) / radius;
                    }
                    //srcX = midX + theta * j * fixZoom;
                    //srcY = midY + theta * k * fixZoom;
                    map_x.at<float>(x, y) = midX + cos(fabs(theta)) * j * fixZoom;
                    map_y.at<float>(x, y) = midY + sin(fabs(theta)) * k * fixZoom;
                }
                else
                {
                    map_x.at<float>(x, y) = x + cos(fabs(theta)); //* fixZoom;//x;
                    map_y.at<float>(x, y) = y + sin(fabs(theta)); //* fixZoom;//y;
                }
            }
        }
    }
}

To fix it, replace the lines

map_x.at<float>(x,y) = midX + cos(fabs(theta)) * j * fixZoom;
map_y.at<float>(x,y) = midY + sin(fabs(theta)) * k * fixZoom;

with

map_x.at<float>(x,y) = midX + theta * j * fixZoom;
map_y.at<float>(x,y) = midY + theta * k * fixZoom;

and in the else branch use

map_x.at<float>(x,y) = x;
map_y.at<float>(x,y) = y;

Invoke it as: executable [image name] BPP correction-parameter zoom-parameter applied-ratio, for example:
./lensdistortcorrect image.jpg 24 6.2 2.2 100
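As an aside, if you can calibrate the camera, you may not need a hand-rolled map at all. Below is a minimal sketch (not the algorithm from the link) using OpenCV's fisheye module, available in OpenCV 3 and later; the intrinsic matrix K and the distortion coefficients D are placeholders that would come from cv::fisheye::calibrate, not real calibration data.

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d.hpp>

cv::Mat undistortFisheye(const cv::Mat& distorted)
{
    // Placeholder intrinsics: fx, fy, cx, cy would come from a real calibration.
    cv::Matx33d K(500, 0,   distorted.cols / 2.0,
                  0,   500, distorted.rows / 2.0,
                  0,   0,   1);
    cv::Vec4d D(0.1, 0.01, 0.0, 0.0); // k1..k4, placeholders
    cv::Mat undistorted;
    cv::fisheye::undistortImage(distorted, undistorted, K, D, K); // Knew = K keeps the original scale
    return undistorted;
}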

Related

I am facing difficulties in removing black borders from stabilized images received after warping in OpenCV

Right now I am using this code to remove the black borders, but in most cases I am still left with some black areas after using it.
Am I doing something wrong here?
void cropImage(const vector<cv::Mat>& input, vector<cv::Mat>& output){
    // CHECK(!input.empty());
    const int imgWidth = input[0].cols;
    const int imgHeight = input[0].rows;

    // mark pixels that are black in any frame
    Mat cropMask(imgHeight, imgWidth, CV_32F, Scalar::all(0));
    for(auto y=0; y<imgHeight; ++y){
        for(auto x=0; x<imgWidth; ++x){
            bool has_black = false;
            for(size_t v=0; v<input.size(); ++v){
                if(input[v].at<Vec3b>(y,x) == Vec3b(0,0,0)){
                    has_black = true;
                    break;
                }
            }
            if(has_black)
                cropMask.at<float>(y,x) = -1000;
            else
                cropMask.at<float>(y,x) = 1;
        }
    }

    Mat integralImage;
    cv::integral(cropMask, integralImage, CV_32F);

    // coarse grid search for the rectangle with the best mask sum
    cv::Vec4i roi;
    //int x11=0,x22=0,y11=0,y22=0;
    float optValue = -1000.0f * imgWidth * imgHeight;
    const int stride = 20;
    for(auto x1=0; x1<imgWidth; x1+=stride) {
        for (auto y1 = 0; y1 < imgHeight; y1+=stride) {
            for (auto x2 = x1 + stride; x2 < imgWidth; x2+=stride) {
                for (auto y2 = y1 + stride; y2 < imgHeight; y2+=stride) {
                    float curValue = integralImage.at<float>(y2, x2) + integralImage.at<float>(y1, x1)
                                   - integralImage.at<float>(y2, x1) - integralImage.at<float>(y1, x2);
                    if(curValue > optValue){
                        optValue = curValue;
                        roi = cv::Vec4i(x1,y1,x2,y2);
                    }
                }
            }
        }
    }

    output.resize(input.size());
    for(size_t i=0; i<output.size(); ++i){
        output[i] = input[i].colRange(roi[0],roi[2]).rowRange(roi[1],roi[3]).clone();
        cv::resize(output[i], output[i], cv::Size(imgWidth, imgHeight));
    }
}
This code also seems slow. Is there a faster method to achieve the same result? Thanks in advance.
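One faster possibility: instead of the O((W·H/stride)²) grid search above, build the same validity mask and then find the largest all-valid rectangle with the classic histogram-plus-monotonic-stack technique, which is O(W·H) overall. Below is a minimal sketch under that assumption; largestValidRect is a hypothetical helper, not part of the original code.

#include <opencv2/core/core.hpp>
#include <stack>
#include <vector>

// Largest axis-aligned rectangle containing only nonzero ("valid") pixels.
// mask: CV_8U, nonzero = pixel is non-black in every frame.
cv::Rect largestValidRect(const cv::Mat& mask)
{
    const int W = mask.cols, H = mask.rows;
    std::vector<int> height(W + 1, 0); // histogram heights; height[W] stays 0 as a sentinel
    cv::Rect best(0, 0, 0, 0);
    for (int y = 0; y < H; ++y) {
        const uchar* row = mask.ptr<uchar>(y);
        for (int x = 0; x < W; ++x)
            height[x] = row[x] ? height[x] + 1 : 0;
        // largest rectangle in the current histogram via a monotonic stack
        std::stack<int> st;
        for (int x = 0; x <= W; ++x) {
            while (!st.empty() && height[st.top()] >= height[x]) {
                int h = height[st.top()]; st.pop();
                int left = st.empty() ? 0 : st.top() + 1;
                int w = x - left;
                if (h * w > best.area())
                    best = cv::Rect(left, y - h + 1, w, h);
            }
            st.push(x);
        }
    }
    return best;
}

The returned rectangle can replace the roi found by the nested loops; the cropping and resizing at the end stay the same.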

How to convert cv::Mat to pcl::PointCloud

How do I get from an OpenCV Mat point cloud to a pcl::PointCloud? The color is not important to me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In: cv::Mat
     * Out: pcl::PointCloud
     */

    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);

    for(int i=0; i<OpencVPointCloud.cols; i++)
    {
        //std::cout<<i<<endl;
        pcl::PointXYZ point;
        point.x = OpencVPointCloud.at<float>(0,i);
        point.y = OpencVPointCloud.at<float>(1,i);
        point.z = OpencVPointCloud.at<float>(2,i);

        // when color needs to be added:
        //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
        //point.rgb = *reinterpret_cast<float*>(&rgb);

        point_cloud_ptr->points.push_back(point);
    }
    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    point_cloud_ptr->height = 1;

    return point_cloud_ptr;
}
And the other way around:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr){
    cv::Mat OpenCVPointCloud(3, (int)point_cloud_ptr->points.size(), CV_64FC1);
    for(int i=0; i < (int)point_cloud_ptr->points.size(); i++){
        OpenCVPointCloud.at<double>(0,i) = point_cloud_ptr->points.at(i).x;
        OpenCVPointCloud.at<double>(1,i) = point_cloud_ptr->points.at(i).y;
        OpenCVPointCloud.at<double>(2,i) = point_cloud_ptr->points.at(i).z;
    }
    return OpenCVPointCloud;
}
To convert a range image captured by a Kinect sensor and represented by depthMat to a pcl::PointCloud, you can try this function. The calibration parameters are those used here.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud (new pcl::PointCloud<pcl::PointXYZ>);

    // calibration parameters
    float const fx_d = 5.9421434211923247e+02;
    float const fy_d = 5.9104053696870778e+02;
    float const cx_d = 3.3930780975300314e+02;
    float const cy_d = 2.4273913761751615e+02;

    // walk the depth image pixel by pixel (assumes a continuous single-channel 8-bit depth image)
    unsigned char* p = depthMat.data;
    for (int i = 0; i < depthMat.rows; i++)
    {
        for (int j = 0; j < depthMat.cols; j++)
        {
            float z = static_cast<float>(*p);
            pcl::PointXYZ point;
            point.z = 0.001 * z; // millimeters -> meters
            point.x = point.z * (j - cx_d) / fx_d;
            point.y = point.z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
            ++p;
        }
    }
    ptCloud->width = (int)depthMat.cols;
    ptCloud->height = (int)depthMat.rows;

    return ptCloud;
}
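For completeness, a hypothetical usage sketch for the conversion above; it assumes PCL's CloudViewer is available and that depthMat is in the format the function expects.

#include <pcl/visualization/cloud_viewer.h>

void viewDepthAsCloud(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(depthMat);
    pcl::visualization::CloudViewer viewer("Depth cloud");
    viewer.showCloud(cloud);
    while (!viewer.wasStopped()) {
        // block until the viewer window is closed
    }
}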

Calculate the minimum incircle for a polygon

I work on iOS and have a polygon with geographical coordinates, like (-27.589846, 151.982112), (-27.590174, 151.983045), (-27.590773, 151.982680), (-27.590602, 151.981908).
I want to find its incircle: center and radius.
Is there any way to do that?
Thanks!
You can use this to determine the centroid of a non-self-intersecting polygon:
#include <iostream>

struct Point2D
{
    double x;
    double y;
};

Point2D compute2DPolygonCentroid(const Point2D* vertices, int vertexCount)
{
    Point2D centroid = {0, 0};
    double signedArea = 0.0;
    double x0 = 0.0; // Current vertex X
    double y0 = 0.0; // Current vertex Y
    double x1 = 0.0; // Next vertex X
    double y1 = 0.0; // Next vertex Y
    double a = 0.0;  // Partial signed area

    // For all vertices except the last
    int i = 0;
    for (i = 0; i < vertexCount - 1; ++i)
    {
        x0 = vertices[i].x;
        y0 = vertices[i].y;
        x1 = vertices[i+1].x;
        y1 = vertices[i+1].y;
        a = x0*y1 - x1*y0;
        signedArea += a;
        centroid.x += (x0 + x1)*a;
        centroid.y += (y0 + y1)*a;
    }

    // Do the last vertex
    x0 = vertices[i].x;
    y0 = vertices[i].y;
    x1 = vertices[0].x;
    y1 = vertices[0].y;
    a = x0*y1 - x1*y0;
    signedArea += a;
    centroid.x += (x0 + x1)*a;
    centroid.y += (y0 + y1)*a;

    signedArea *= 0.5;
    centroid.x /= (6.0*signedArea);
    centroid.y /= (6.0*signedArea);

    return centroid;
}

int main()
{
    Point2D polygon[] = {{0.0,0.0}, {0.0,10.0}, {10.0,10.0}, {10.0,0.0}};
    size_t vertexCount = sizeof(polygon) / sizeof(polygon[0]);
    Point2D centroid = compute2DPolygonCentroid(polygon, (int)vertexCount);
    std::cout << "Centroid is (" << centroid.x << ", " << centroid.y << ")\n";
}
To get the radius, determine the distance between the center and each vertex and pick the largest one, as in the sketch below.
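For illustration, here is a small sketch of that last step, reusing the Point2D struct from the snippet above (maxRadius is a hypothetical helper name):

#include <algorithm>
#include <cmath>

// Distance from the center to the farthest vertex, as described above.
double maxRadius(const Point2D* vertices, int vertexCount, const Point2D& center)
{
    double best = 0.0;
    for (int i = 0; i < vertexCount; ++i) {
        double dx = vertices[i].x - center.x;
        double dy = vertices[i].y - center.y;
        best = std::max(best, std::sqrt(dx * dx + dy * dy));
    }
    return best;
}

Note that the farthest vertex gives an enclosing radius; for a true inscribed circle you would instead take the minimum distance from the center to each polygon edge.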

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?
Is RANSAC the most efficient way to fit a line?
RANSAC is not the most efficient, but it is better for a large number of outliers. Here is how to do it using OpenCV:
A useful structure:
struct SLine
{
    SLine() :
        numOfValidPoints(0),
        params(-1.f, -1.f, -1.f, -1.f)
    {}
    cv::Vec4f params; // (cos(t), sin(t), X0, Y0)
    int numOfValidPoints;
};
Total least squares is used to fit a line to the inliers of a successful pair:
cv::Vec4f TotalLeastSquares(
    std::vector<cv::Point>& nzPoints,
    std::vector<int> ptOnLine)
{
    // if there are enough inliers, calculate the model
    float x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0;
    float dx2, dy2, dxy;
    float t;
    for( size_t i = 0; i < nzPoints.size(); ++i )
    {
        x  += ptOnLine[i] * nzPoints[i].x;
        y  += ptOnLine[i] * nzPoints[i].y;
        x2 += ptOnLine[i] * nzPoints[i].x * nzPoints[i].x;
        y2 += ptOnLine[i] * nzPoints[i].y * nzPoints[i].y;
        xy += ptOnLine[i] * nzPoints[i].x * nzPoints[i].y;
        w  += ptOnLine[i];
    }
    x  /= w;
    y  /= w;
    x2 /= w;
    y2 /= w;
    xy /= w;

    // covariance matrix
    dx2 = x2 - x * x;
    dy2 = y2 - y * y;
    dxy = xy - x * y;

    t = (float) atan2( 2 * dxy, dx2 - dy2 ) / 2;
    cv::Vec4f line;
    line[0] = (float) cos( t );
    line[1] = (float) sin( t );
    line[2] = (float) x;
    line[3] = (float) y;
    return line;
}
The actual RANSAC loop:
SLine LineFitRANSAC(
    float t, // distance threshold from the candidate line
    float p, // chance of hitting a valid pair
    float e, // percentage of outliers
    int T,   // number of expected minimum inliers
    std::vector<cv::Point>& nzPoints)
{
    int s = 2; // number of points required by the model
    int N = (int)ceilf(log(1-p)/log(1 - pow(1-e, s))); // number of independent trials

    std::vector<SLine> lineCandidates;
    std::vector<int> ptOnLine(nzPoints.size()); // is inlier
    cv::RNG rng((uint64)-1);
    SLine line;
    for (int i = 0; i < N; i++)
    {
        // pick two points
        int idx1 = (int)rng.uniform(0, (int)nzPoints.size());
        int idx2 = (int)rng.uniform(0, (int)nzPoints.size());
        cv::Point p1 = nzPoints[idx1];
        cv::Point p2 = nzPoints[idx2];

        // points too close - discard
        if (cv::norm(p1 - p2) < t)
        {
            continue;
        }

        // line equation -> (y1 - y2)X + (x2 - x1)Y + x1y2 - x2y1 = 0
        float a = static_cast<float>(p1.y - p2.y);
        float b = static_cast<float>(p2.x - p1.x);
        float c = static_cast<float>(p1.x*p2.y - p2.x*p1.y);
        // normalize them
        float scale = 1.f/sqrt(a*a + b*b);
        a *= scale;
        b *= scale;
        c *= scale;

        // count inliers
        int numOfInliers = 0;
        for (size_t k = 0; k < nzPoints.size(); ++k)
        {
            cv::Point& p0 = nzPoints[k];
            float rho = fabs(a*p0.x + b*p0.y + c); // point-to-line distance
            bool isInlier = rho < t;
            if ( isInlier ) numOfInliers++;
            ptOnLine[k] = isInlier;
        }

        if ( numOfInliers < T)
        {
            continue;
        }

        line.params = TotalLeastSquares( nzPoints, ptOnLine);
        line.numOfValidPoints = numOfInliers;
        lineCandidates.push_back(line);
    }

    // pick the candidate with the most inliers
    int bestLineIdx = 0;
    int bestLineScore = 0;
    for (size_t i = 0; i < lineCandidates.size(); i++)
    {
        if (lineCandidates[i].numOfValidPoints > bestLineScore)
        {
            bestLineIdx = i;
            bestLineScore = lineCandidates[i].numOfValidPoints;
        }
    }

    if ( lineCandidates.empty() )
    {
        return SLine();
    }
    else
    {
        return lineCandidates[bestLineIdx];
    }
}
Take a look at the least-squares method. It's faster and simpler than RANSAC.
Also take a look at OpenCV's fitLine function.
RANSAC performs better when you have a lot of outliers in your data, or a complex hypothesis.
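For reference, a minimal sketch of the fitLine alternative mentioned above; CV_DIST_HUBER selects a robust M-estimator, which tolerates moderate outlier levels without a full RANSAC loop.

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

cv::Vec4f fitLineRobust(const std::vector<cv::Point>& points)
{
    cv::Vec4f line; // (vx, vy, x0, y0): unit direction vector plus a point on the line
    cv::fitLine(points, line, CV_DIST_HUBER,
                0,      // param: 0 picks an optimal value for the chosen distance type
                0.01,   // reps: radius accuracy
                0.01);  // aeps: angle accuracy
    return line;
}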

Angle and Scale Invariant template matching using OpenCV

The function rotates the template image from 0 to 180 (or up to 360) degrees to search for all related matches (at all angles) in the source image, even at a different scale.
The function was written with the OpenCV C interface. When I tried to port it to the OpenCV C++ interface, I got a lot of errors. Could someone please help me port it to the OpenCV C++ interface?
void TemplateMatch()
{
    int i, j, x, y, key;
    double minVal;
    char windowNameSource[] = "Original Image";
    char windowNameDestination[] = "Result Image";
    char windowNameCoefficientOfCorrelation[] = "Coefficient of Correlation Image";
    CvPoint minLoc;
    CvPoint tempLoc;

    IplImage *sourceImage = cvLoadImage("template_source.jpg", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
    IplImage *templateImage = cvLoadImage("template.jpg", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
    IplImage *graySourceImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 1);
    IplImage *grayTemplateImage = cvCreateImage(cvGetSize(templateImage), IPL_DEPTH_8U, 1);
    IplImage *binarySourceImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 1);
    IplImage *binaryTemplateImage = cvCreateImage(cvGetSize(templateImage), IPL_DEPTH_8U, 1);
    IplImage *destinationImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 3);

    cvCopy(sourceImage, destinationImage);
    cvCvtColor(sourceImage, graySourceImage, CV_RGB2GRAY);
    cvCvtColor(templateImage, grayTemplateImage, CV_RGB2GRAY);
    cvThreshold(graySourceImage, binarySourceImage, 200, 255, CV_THRESH_OTSU);
    cvThreshold(grayTemplateImage, binaryTemplateImage, 200, 255, CV_THRESH_OTSU);

    int templateHeight = templateImage->height;
    int templateWidth = templateImage->width;
    float templateScale = 0.5f;

    for(i = 2; i <= 3; i++)
    {
        int tempTemplateHeight = (int)(templateWidth * (i * templateScale));
        int tempTemplateWidth = (int)(templateHeight * (i * templateScale));
        IplImage *tempBinaryTemplateImage = cvCreateImage(cvSize(tempTemplateWidth, tempTemplateHeight), IPL_DEPTH_8U, 1);
        // result size: W - w + 1, H - h + 1
        IplImage *result = cvCreateImage(cvSize(sourceImage->width - tempTemplateWidth + 1, sourceImage->height - tempTemplateHeight + 1), IPL_DEPTH_32F, 1);
        cvResize(binaryTemplateImage, tempBinaryTemplateImage, CV_INTER_LINEAR);

        float degree = 20.0f;
        for(j = 0; j <= 9; j++)
        {
            IplImage *rotateBinaryTemplateImage = cvCreateImage(cvSize(tempBinaryTemplateImage->width, tempBinaryTemplateImage->height), IPL_DEPTH_8U, 1);
            //cvShowImage(windowNameSource, tempBinaryTemplateImage);
            //cvWaitKey(0);

            for(y = 0; y < tempTemplateHeight; y++)
            {
                for(x = 0; x < tempTemplateWidth; x++)
                {
                    rotateBinaryTemplateImage->imageData[y * tempTemplateWidth + x] = 255;
                }
            }

            for(y = 0; y < tempTemplateHeight; y++)
            {
                for(x = 0; x < tempTemplateWidth; x++)
                {
                    float radian = (float)j * degree * CV_PI / 180.0f;
                    int scale = y * tempTemplateWidth + x;
                    int rotateY = - sin(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + cos(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateHeight / 2;
                    int rotateX = cos(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + sin(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateWidth / 2;
                    if(rotateY < tempTemplateHeight && rotateX < tempTemplateWidth && rotateY >= 0 && rotateX >= 0)
                        rotateBinaryTemplateImage->imageData[scale] = tempBinaryTemplateImage->imageData[rotateY * tempTemplateWidth + rotateX];
                }
            }

            //cvShowImage(windowNameSource, rotateBinaryTemplateImage);
            //cvWaitKey(0);

            cvMatchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF_NORMED);
            //cvMatchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF);
            cvMinMaxLoc(result, &minVal, NULL, &minLoc, NULL, NULL);
            printf("%d , %d : %f%%\n", (int)(i * 0.5 * 100), j * 20, (1 - minVal) * 100);

            if(minVal < 0.065) // 1 - 0.065 = 0.935 : 93.5%
            {
                tempLoc.x = minLoc.x + tempTemplateWidth;
                tempLoc.y = minLoc.y + tempTemplateHeight;
                cvRectangle(destinationImage, minLoc, tempLoc, CV_RGB(0, 255, 0), 1, 8, 0);
            }
        }
        //cvShowImage(windowNameSource, result);
        //cvWaitKey(0);
        cvReleaseImage(&tempBinaryTemplateImage);
        cvReleaseImage(&result);
    }

    // cvShowImage(windowNameSource, sourceImage);
    // cvShowImage(windowNameCoefficientOfCorrelation, result);
    cvShowImage(windowNameDestination, destinationImage);
    key = cvWaitKey(0);

    cvReleaseImage(&sourceImage);
    cvReleaseImage(&templateImage);
    cvReleaseImage(&graySourceImage);
    cvReleaseImage(&grayTemplateImage);
    cvReleaseImage(&binarySourceImage);
    cvReleaseImage(&binaryTemplateImage);
    cvReleaseImage(&destinationImage);
    cvDestroyWindow(windowNameSource);
    cvDestroyWindow(windowNameDestination);
    cvDestroyWindow(windowNameCoefficientOfCorrelation);
}
RESULT:
(template image and result image omitted)
The function above puts rectangles around the perfect matches (angle and scale invariant) in the result image.
Now I have been trying to port the code to the C++ interface. If anyone needs more details, please let me know.
C++ Port of above code:
Mat TemplateMatch(Mat sourceImage, Mat templateImage){
    double minVal;
    Point minLoc;
    Point tempLoc;

    Mat graySourceImage = Mat(sourceImage.size(), CV_8UC1);
    Mat grayTemplateImage = Mat(templateImage.size(), CV_8UC1);
    Mat binarySourceImage = Mat(sourceImage.size(), CV_8UC1);
    Mat binaryTemplateImage = Mat(templateImage.size(), CV_8UC1);
    Mat destinationImage = Mat(sourceImage.size(), CV_8UC3);

    sourceImage.copyTo(destinationImage);
    cvtColor(sourceImage, graySourceImage, CV_BGR2GRAY);
    cvtColor(templateImage, grayTemplateImage, CV_BGR2GRAY);
    threshold(graySourceImage, binarySourceImage, 200, 255, CV_THRESH_OTSU);
    threshold(grayTemplateImage, binaryTemplateImage, 200, 255, CV_THRESH_OTSU);

    int templateHeight = templateImage.rows;
    int templateWidth = templateImage.cols;
    float templateScale = 0.5f;

    for(int i = 2; i <= 3; i++){
        int tempTemplateHeight = (int)(templateWidth * (i * templateScale));
        int tempTemplateWidth = (int)(templateHeight * (i * templateScale));
        Mat tempBinaryTemplateImage = Mat(Size(tempTemplateWidth, tempTemplateHeight), CV_8UC1);
        Mat result = Mat(Size(sourceImage.cols - tempTemplateWidth + 1, sourceImage.rows - tempTemplateHeight + 1), CV_32FC1);
        resize(binaryTemplateImage, tempBinaryTemplateImage, Size(tempBinaryTemplateImage.cols, tempBinaryTemplateImage.rows), 0, 0, INTER_LINEAR);

        float degree = 20.0f;
        for(int j = 0; j <= 9; j++){
            Mat rotateBinaryTemplateImage = Mat(Size(tempBinaryTemplateImage.cols, tempBinaryTemplateImage.rows), CV_8UC1);

            for(int y = 0; y < tempTemplateHeight; y++){
                for(int x = 0; x < tempTemplateWidth; x++){
                    rotateBinaryTemplateImage.data[y * tempTemplateWidth + x] = 255;
                }
            }

            for(int y = 0; y < tempTemplateHeight; y++){
                for(int x = 0; x < tempTemplateWidth; x++){
                    float radian = (float)j * degree * CV_PI / 180.0f;
                    int scale = y * tempTemplateWidth + x;
                    int rotateY = - sin(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + cos(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateHeight / 2;
                    int rotateX = cos(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + sin(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateWidth / 2;
                    if(rotateY < tempTemplateHeight && rotateX < tempTemplateWidth && rotateY >= 0 && rotateX >= 0)
                        rotateBinaryTemplateImage.data[scale] = tempBinaryTemplateImage.data[rotateY * tempTemplateWidth + rotateX];
                }
            }

            matchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF_NORMED);
            minMaxLoc(result, &minVal, 0, &minLoc, 0, Mat());
            cout << (int)(i * 0.5 * 100) << " , " << j * 20 << " , " << (1 - minVal) * 100 << endl;

            if(minVal < 0.065){ // 1 - 0.065 = 0.935 : 93.5%
                tempLoc.x = minLoc.x + tempTemplateWidth;
                tempLoc.y = minLoc.y + tempTemplateHeight;
                rectangle(destinationImage, minLoc, tempLoc, CV_RGB(0, 255, 0), 1, 8, 0);
            }
        }
    }
    return destinationImage;
}
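Finally, a hypothetical usage sketch for the port; the file names are placeholders:

#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat source = cv::imread("template_source.jpg");
    cv::Mat templ = cv::imread("template.jpg");
    if (source.empty() || templ.empty())
        return -1;
    cv::Mat annotated = TemplateMatch(source, templ);
    cv::imwrite("matches.jpg", annotated);
    return 0;
}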
