Create a customized sequence of CvPoint in OpenCV

I want to use cvDrawContours to draw my own contours created from a CvSeq (normally, contours are returned by other OpenCV functions). This is my solution, but it does not work :(
IplImage* g_gray = NULL;
CvMemStorage *memStorage = cvCreateMemStorage(0);
CvSeq* seq = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint)*4, memStorage);
CvPoint points[4];
points[0].x = 10;
points[0].y = 10;
points[1].x = 1;
points[1].y = 1;
points[2].x = 20;
points[2].y = 50;
points[3].x = 10;
points[3].y = 10;
cvSeqPush(seq, &points);
g_gray = cvCreateImage( cvSize(300,300), 8, 1 );
cvNamedWindow( "MyContour", CV_WINDOW_AUTOSIZE );
cvDrawContours(g_gray, seq, cvScalarAll(100), cvScalarAll(255), 0, 3);
cvShowImage( "MyContour", g_gray );
cvWaitKey(0);
cvReleaseImage( &g_gray );
cvDestroyWindow("MyContour");
return 0;
I picked the method for creating a customized contour sequence from CvPoint from this post:
OpenCV sequences -- how to create a sequence of point pairs?
For the second try, I did it with the C++ OpenCV API:
vector<vector<Point2i>> contours;
Point2i P;
P.x = 0;
P.y = 0;
contours.push_back(P);
P.x = 50;
P.y = 10;
contours.push_back(P);
P.x = 20;
P.y = 100;
contours.push_back(P);
Mat img = imread(file, 1);
drawContours(img, contours, -1, CV_RGB(0,0,255), 5, 8);
Perhaps I used the data structure incorrectly. The compiler reports errors and does not allow pushing points onto the vector like that. Why?
The error is like this:
Error 2 error C2664: 'std::vector<_Ty>::push_back' : cannot convert parameter 1 from 'cv::Point2i' to 'const std::vector<_Ty> &'

I've finally finished it.
Mat g_gray_cpp = imread(file, 0);
vector<vector<Point2i>> contours;
vector<Point2i> pvect;
Point2i P(0,0);
pvect.push_back(P);
P.x = 50;
P.y = 10;
pvect.push_back(P);
P.x = 20;
P.y = 100;
pvect.push_back(P);
contours.push_back(pvect);
Mat img = imread(file, 1);
drawContours(img, contours, -1, CV_RGB(0,0,255), 5, 8);
namedWindow( "Contours", 0 );
imshow( "Contours", img );
Because 'contours' is a vector<vector<Point2i>>, the argument passed to contours.push_back(var) must itself be a vector<Point2i>.
Thanks! I've learned from this bug.
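Equivalently, with C++11 the whole inner vector can be built inline; a minimal sketch reusing the points and img from the question:
vector<vector<Point2i>> contours;
contours.push_back({ Point2i(0, 0), Point2i(50, 10), Point2i(20, 100) }); // one inner contour at a time
drawContours(img, contours, -1, CV_RGB(0, 0, 255), 5, 8);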

In your first example you created a sequence of point foursomes with a single element. The sequence elem_size should be sizeof(CvPoint) (not multiplied by four), and you should add the points one by one:
CvMemStorage *memStorage = cvCreateMemStorage(0);
// without these flags cvDrawContours() does not treat the sequence
// as a contour and just draws nothing
CvSeq* seq = cvCreateSeq(CV_32SC2 | CV_SEQ_KIND_CURVE,
sizeof(CvSeq), sizeof(CvPoint), memStorage);
// cvSeqPush copies the element it is given, so pass the sequence
// and the address of a CvPoint lvalue
CvPoint p1 = cvPoint(10, 10);
CvPoint p2 = cvPoint(1, 1);
CvPoint p3 = cvPoint(20, 50);
cvSeqPush(seq, &p1);
cvSeqPush(seq, &p2);
cvSeqPush(seq, &p3);
Note you don't need to insert the last point to draw the contour; the contour is closed automatically.
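With the sequence built that way, the drawing part of the original question should work unchanged; a minimal end-to-end sketch (window name as in the question):
IplImage* g_gray = cvCreateImage(cvSize(300, 300), 8, 1);
cvZero(g_gray); // cvCreateImage does not zero the pixels
cvDrawContours(g_gray, seq, cvScalarAll(100), cvScalarAll(255), 0, 3);
cvShowImage("MyContour", g_gray);
cvWaitKey(0);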

Related

Extract Contours path attributes

I am currently working on extracting contour path attributes from a particular image file. I am able to extract contours using the OpenCV function findContours(); the output looks like this:
[98, 81][97, 80][95, 80][94, 79][93, 79][92, 78][91, 78][88, 75][87, 75][85, 73][84, 73][83, 72][82, 72]
But my desired output looks like this:
M 398.7,106.8 c -5.5,-2.7 -20.7,-4.7 -36.1,-4.6 -15.4,0.1
How can I get it?
This is my code:
using namespace cv;
using namespace std;
Mat src_grays;
int threshs = 100;
int max_threshs = 255;
RNG rng(12345);
void thresh_callbacks(int, void* );
int main( )
{
Mat src = imread( "F:/academic/pro4/t/download.jpg" );
imshow("real Image", src);
Mat gray,edge,edges, draw,draws;
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows, z) = src.at<Vec3b>(y,x)[z];
int clusterCount = 5;
Mat labels;
int attempts = 10;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
char filename[80];
sprintf(filename,"F:/academic/pro4/t/seg.png");
imwrite(filename, new_image);
cvtColor(src, gray, CV_BGR2GRAY);
Canny( new_image, edges, 50, 150, 3);
edges.convertTo(draws, CV_8U);
namedWindow("imageAfterSegmnetation", CV_WINDOW_AUTOSIZE);
imshow("imagesAfterCluster", draws);
cvtColor( new_image, src_grays, CV_BGR2GRAY );
blur( src_grays, src_grays, Size(3,3) );
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &threshs, max_threshs, thresh_callbacks );
thresh_callbacks( 0, 0 );
waitKey( 0 );
}
void thresh_callbacks(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_grays, canny_output, threshs, threshs*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for(int i = 0; i < contours.size(); i++)
{
for(int j = 0; j < contours[i].size(); j++)
{
int a = contours[i][j].x;
int b = contours[i][j].y;
std::cout << contours[i][j] << std::endl;
}
printf("%d\n", i);
}
/// Draw contours
int a=contours.size();
for( int i = 0; i<contours.size(); i++ )
{
Mat drawing_i = Mat::zeros( canny_output.size(), CV_8UC3 );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing_i, contours, i, color, 2, 8, hierarchy, 0, Point() );
namedWindow( "Contours_i", CV_WINDOW_AUTOSIZE );
imshow( "Contours_i", drawing_i );
}
}
Note:
I need the contour path, that is, how the contour points are connected. For example, it can be M = moveto, L = lineto, H = horizontal lineto, V = vertical lineto, C = curveto, S = smooth curveto, Q = quadratic Bézier curve, T = smooth quadratic Bézier curveto, A = elliptical arc, Z = closepath, just like an SVG path.
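findContours returns plain polygonal point lists, so the closest direct mapping is the M/L/Z subset of SVG commands; curve commands (C, S, Q, ...) would need a separate curve-fitting step that OpenCV does not provide. A minimal sketch (the helper name contourToSvgPath is hypothetical):
#include <sstream>
#include <string>
// Emit one contour (assumed non-empty) as an SVG path using only M/L/Z.
std::string contourToSvgPath(const std::vector<cv::Point>& contour)
{
    std::ostringstream path;
    path << "M " << contour[0].x << "," << contour[0].y;
    for (size_t j = 1; j < contour.size(); j++)
        path << " L " << contour[j].x << "," << contour[j].y;
    path << " Z"; // contours from findContours are closed
    return path.str();
}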

Approximate photo of a simple drawing using lines

As an input I have a photo of a simple symbol, e.g.: https://www.dropbox.com/s/nrmsvfd0le0bkke/symbol.jpg
I would like to detect the straight lines in it, i.e. the start and end points of each line. In this case, assuming the top left of the symbol is (0,0), the lines would be defined like this:
start end (coordinates of beginning and end of a line)
1. (0,0); (0,10) (vertical line)
2. (0,10); (15, 15)
3. (15,15); (0, 20)
4. (0,20); (0,30)
How can I do it (preferably using OpenCV)? I thought about Hough lines, but they seem to be good for perfect thin straight lines, which is not the case in a drawing. I'll probably work on a binarized image, too.
Give this a try:
Apply a thinning algorithm on the thresholded image.
Find contours.
Run approxPolyDP on the found contours (see the sketch after the references below).
See some references:
approxpolydp-for-edge-maps
Creating Bounding boxes and circles for contours
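A minimal sketch of steps 2 and 3, assuming binary already holds the thinned, thresholded image (the thinning step itself is omitted here):
std::vector<std::vector<cv::Point> > contours;
cv::findContours(binary, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++)
{
    std::vector<cv::Point> poly;
    // epsilon (3.0 px here) controls how aggressively the contour is straightened
    cv::approxPolyDP(contours[i], poly, 3.0, true);
    // consecutive points in poly are the endpoints of the fitted line segments
}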
Maybe you can work on this one.
Assume a perfect binarization:
run HoughLinesP
(not implemented) try to group those detected lines
I used this code:
int main()
{
cv::Mat image = cv::imread("HoughLinesP_perfect.png");
cv::Mat gray;
cv::cvtColor(image,gray,CV_BGR2GRAY);
cv::Mat output; image.copyTo(output);
cv::Mat g_thres = gray == 0;
std::vector<cv::Vec4i> lines;
//cv::HoughLinesP( binary, lines, 1, 2*CV_PI/180, 100, 100, 50 );
// cv::HoughLinesP( h_thres, lines, 1, CV_PI/180, 100, image.cols/2, 10 );
cv::HoughLinesP( g_thres, lines, 1, CV_PI/(4*180.0), 50, image.cols/20, 10 );
for( size_t i = 0; i < lines.size(); i++ )
{
cv::line( output, cv::Point(lines[i][0], lines[i][1]),
cv::Point(lines[i][2], lines[i][3]), cv::Scalar(155,255,155), 1, 8 );
}
cv::imshow("g thres", g_thres);
cv::imwrite("HoughLinesP_out.png", output);
cv::resize(output, output, cv::Size(), 0.5,0.5);
cv::namedWindow("output"); cv::imshow("output", output);
cv::waitKey(-1);
std::cout << "finished" << std::endl;
return 0;
}
EDIT:
updated code with simple line clustering (the minimum_distance function is taken from SO):
float minimum_distance(cv::Point2f v, cv::Point2f w, cv::Point2f p) {
// Return minimum distance between line segment vw and point p
const float l2 = cv::norm(w-v) * cv::norm(w-v); // i.e. |w-v|^2 - avoid a sqrt
if (l2 == 0.0) return cv::norm(p-v); // v == w case
// Consider the line extending the segment, parameterized as v + t (w - v).
// We find projection of point p onto the line.
// It falls where t = [(p-v) . (w-v)] / |w-v|^2
//const float t = dot(p - v, w - v) / l2;
float t = ((p-v).x * (w-v).x + (p-v).y * (w-v).y)/l2;
if (t < 0.0) return cv::norm(p-v); // Beyond the 'v' end of the segment
else if (t > 1.0) return cv::norm(p-w); // Beyond the 'w' end of the segment
const cv::Point2f projection = v + t * (w - v); // Projection falls on the segment
return cv::norm(p - projection);
}
int main()
{
cv::Mat image = cv::imread("HoughLinesP_perfect.png");
cv::Mat gray;
cv::cvtColor(image,gray,CV_BGR2GRAY);
cv::Mat output; image.copyTo(output);
cv::Mat g_thres = gray == 0;
std::vector<cv::Vec4i> lines;
cv::HoughLinesP( g_thres, lines, 1, CV_PI/(4*180.0), 50, image.cols/20, 10 );
float minDist = 100;
std::vector<cv::Vec4i> lines_filtered;
for( size_t i = 0; i < lines.size(); i++ )
{
bool keep = true;
int overwrite = -1;
cv::Point2f a(lines[i][0], lines[i][1]);
cv::Point2f b(lines[i][2], lines[i][3]);
float lengthAB = cv::norm(a-b);
for( size_t j = 0; j < lines_filtered.size(); j++ )
{
cv::Point2f c(lines_filtered[j][0], lines_filtered[j][1]);
cv::Point2f d(lines_filtered[j][2], lines_filtered[j][3]);
float distCDA = minimum_distance(c,d,a);
float distCDB = minimum_distance(c,d,b);
float lengthCD = cv::norm(c-d);
if((distCDA < minDist) && (distCDB < minDist))
{
if(lengthCD >= lengthAB)
{
keep = false;
}
else
{
overwrite = j;
}
}
}
if(keep)
{
if(overwrite >= 0)
{
lines_filtered[overwrite] = lines[i];
}
else
{
lines_filtered.push_back(lines[i]);
}
}
}
for( size_t i = 0; i < lines_filtered.size(); i++ )
{
cv::line( output, cv::Point(lines_filtered[i][0], lines_filtered[i][1]),
cv::Point(lines_filtered[i][2], lines_filtered[i][3]), cv::Scalar(155,255,155), 2, 8 );
}
cv::imshow("g thres", g_thres);
cv::imwrite("HoughLinesP_out.png", output);
cv::resize(output, output, cv::Size(), 0.5,0.5);
cv::namedWindow("output"); cv::imshow("output", output);
cv::waitKey(-1);
std::cout << "finished" << std::endl;
return 0;
}
You should try the Hough Line Transform. Here is an example from this website:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat src = imread("building.jpg", 0);
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
vector<Vec2f> lines;
// detect lines
HoughLines(dst, lines, 1, CV_PI/180, 150, 0, 0 );
// draw lines
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
}
imshow("source", src);
imshow("detected lines", cdst);
waitKey();
return 0;
}
With this you should be able to tweak the parameters and get the properties you are looking for (the vertices).
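One way to get those vertices is to intersect pairs of the detected (rho, theta) lines; a minimal sketch (the helper name intersectHoughLines is hypothetical, and near-parallel pairs should be rejected by checking the determinant first):
// Each Hough line satisfies x*cos(t) + y*sin(t) = r; two lines form a
// 2x2 linear system, solved here with Cramer's rule.
static Point2f intersectHoughLines(float r1, float t1, float r2, float t2)
{
    double d = cos(t1) * sin(t2) - sin(t1) * cos(t2); // equals sin(t2 - t1)
    // callers should skip pairs where fabs(d) is near zero (parallel lines)
    return Point2f((r1 * sin(t2) - r2 * sin(t1)) / d,
                   (r2 * cos(t1) - r1 * cos(t2)) / d);
}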

Sizes of input arguments do not match in cvCalcOpticalFlowBM, OpenCV 2.4.7

I want to calculate optical flow using the cvCalcOpticalFlowBM function in OpenCV 2.4.7.
When I compile the code below, the error message is "Sizes of input arguments do not match() in cvCalcOpticalFlowBM".
I do not understand why. Please help me. Thanks in advance.
#define BS 5
IplImage *imgA = NULL, *imgB = NULL;
IplImage *grayA = NULL, *grayB = NULL;
IplImage *velx = NULL, *vely = NULL;
IplImage *result = NULL;
imgA = cvLoadImage("00.jpg", 1);
imgB = cvLoadImage("01.jpg", 1);
grayA = cvCreateImage(cvGetSize(imgA), IPL_DEPTH_8U, 1);
grayB = cvCreateImage(cvGetSize(imgA), IPL_DEPTH_8U, 1);
cvCvtColor(imgA, grayA, CV_BGR2GRAY);
cvCvtColor(imgB, grayB, CV_BGR2GRAY);
CvSize size = cvGetSize(imgA);
size.width /= BS;
size.height /= BS;
result = cvCreateImage(size, IPL_DEPTH_8U, 1);
for (int i=0; i<size.height; i++) {
for (int j=0; j<size.width; j++) {
cvSet(result, CV_RGB(255,255,255), NULL);
}
}
velx = cvCreateImage(size, IPL_DEPTH_32F, 1);
vely = cvCreateImage(size, IPL_DEPTH_32F, 1);
cvCalcOpticalFlowBM(grayB, grayA, cvSize(BS, BS), cvSize(1, 1), cvSize(1, 1), 0, velx, vely);
//
cvNamedWindow("HorFlowBM", CV_WINDOW_AUTOSIZE);
cvShowImage("HorFlowBM", velx);
cvNamedWindow("VerFlowBM", CV_WINDOW_AUTOSIZE);
cvShowImage("VerFlowBM", vely);
for (int i=0; i<size.height; i+=2) {
for (int j=0; j<size.width; j+=2) {
int dx = (int)cvGetReal2D(velx, i, j);
int dy = (int)cvGetReal2D(vely, i, j);
cvLine(result, cvPoint(j, i), cvPoint(j+dx, i+dy), CV_RGB(0,0,0), 1, 8, 0);
}
}
cvNamedWindow("OpticalFlow", CV_WINDOW_AUTOSIZE);
cvShowImage("OpticalFlow", result);
cvWaitKey(0);
Are you sure that the input images are getting loaded? Try to show them after loading them, i.e. cvShowImage("input1", imgA);. Also, print the size of both images to check that they are the same.
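A quick check using the question's variable names:
cvShowImage("input1", imgA);
cvShowImage("input2", imgB);
printf("imgA: %d x %d, imgB: %d x %d\n",
       imgA->width, imgA->height, imgB->width, imgB->height);
cvWaitKey(0);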
I figured out this error.
The size of velx and vely should be:
CvSize velSize =
{
(grayA->width - BLOCK_SIZE + SHIFT_SIZE)/SHIFT_SIZE,
(grayA->height - BLOCK_SIZE + SHIFT_SIZE)/SHIFT_SIZE
};
The program compiles and runs correctly with this change.
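In terms of the question's code (block size BS = 5, shift size 1), that formula works out to the following sketch:
// (width - blockSize + shiftSize) / shiftSize, with blockSize = BS and shiftSize = 1
CvSize velSize = cvSize(grayA->width - BS + 1,
                        grayA->height - BS + 1);
velx = cvCreateImage(velSize, IPL_DEPTH_32F, 1);
vely = cvCreateImage(velSize, IPL_DEPTH_32F, 1);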

Disparity map colors are backwards in opencv

My problem is that the colors in my disparity map are backwards, i.e. things farther away are lighter than things closer to the camera.
I have tried many things (i.e. convertTo, convertScaleAbs, and various combinations of values in them, etc.) and cannot seem to get the colors in the disparity map to reverse (i.e. be normal - where things closer are lighter than things farther away).
I need some help in doing that.
Also, out of curiosity, how can I change the color space of the disparity map to be like the colorful ones from MATLAB that I see online?
Here's my code and also on pastebin. http://pastebin.com/E3vVN6UU
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
void show(const char* windowname, Mat image)
{
namedWindow(windowname, CV_WINDOW_AUTOSIZE);
imshow(windowname, image);
}
int main()
{
Mat image1, image2;
Mat camMat1 = (Mat_<double>(3,3) << 793.1338, 0, 337.2309, 0, 792.0555, 256.9991, 0, 0, 1);
Mat camMat2 = (Mat_<double>(3,3) << 799.1271, 0, 319.8581, 0, 797.2460, 243.4638, 0, 0, 1);
Mat dispCoeffs1 = (Mat_<double>(1,5) << 0.0033, -0.1320, -0.0019, 0.0026, 0);
Mat dispCoeffs2 = (Mat_<double>(1,5) << -0.0109, -0.0188, -0.0014, -0.0055, 0);
Mat RotMat = (Mat_<double>(3,3) << 0.9998, -0.0023, 0.0221, 0.0022, 1, 0.0031, -0.0221, -0.0031, 0.9998);
Mat TransMat = (Mat_<double>(3,1) << 374.2306, -1.8319, 5.5745);
//Rectify
Mat R1, R2, P1, P2, Q;
stereoRectify(camMat1, dispCoeffs1, camMat2, dispCoeffs2, Size(640,480), RotMat, TransMat, R1, R2, P1, P2, Q, CV_CALIB_ZERO_DISPARITY, 1, Size(640,480));
//Define the mapping to be done
Mat rx1, ry1;
Mat rx2, ry2;
initUndistortRectifyMap(camMat1, dispCoeffs1, R1, P1, Size(640,480), CV_16SC2, rx1, ry1);
initUndistortRectifyMap(camMat2, dispCoeffs2, R2, P2, Size(640,480), CV_16SC2, rx2, ry2);
//SET THE BM STATE VARIABLES BEGIN - DONE GLOBALLY
StereoBM bm;
bm.state->preFilterSize = 31;
bm.state->preFilterCap = 63;
bm.state->SADWindowSize = 9;
bm.state->minDisparity = -128;
//bm.state->disp12MaxDiff = 2;
bm.state->numberOfDisparities = 128;
bm.state->textureThreshold = 50;
bm.state->uniquenessRatio = 15;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 16;
//SET THE BM STATE VARIABLES END
VideoCapture cap3 = VideoCapture(0);
VideoCapture cap4 = VideoCapture(1);
//cap3.set(CV_CAP_PROP_FRAME_WIDTH, 320);
//cap3.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
//cap4.set(CV_CAP_PROP_FRAME_WIDTH, 320);
//cap4.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cap3 >> image1;
cap4 >> image2;
Size imageSize = image1.size();
Mat gray_image1;
Mat gray_image2;
Mat frame1r;
//frame1r.create(image1.size(), CV_8U);
Mat frame2r;
//frame2r.create(image2.size(), CV_8U);
Mat frame1rf;
Mat frame2rf;
//Mat disp(image1.size(), CV_16S);
//Mat vdisp(image1.size(), CV_8U);
Mat disp, vdisp;
//Mat image3d(image1.size(), CV_32FC3);
Mat image3d;
Mat rectified_pair;
rectified_pair.create(imageSize.height, (imageSize.width)*2, CV_8UC3);
//Actually do the mapping -- based on the mapping definition
while(1)
{
bm.state->preFilterSize = 31;
bm.state->preFilterCap = 63;
bm.state->SADWindowSize = 21;
bm.state->minDisparity = -128;
//bm.state->disp12MaxDiff = 2;
bm.state->numberOfDisparities = 64;
bm.state->textureThreshold = 20;
bm.state->uniquenessRatio = 10;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
cvtColor(image1, gray_image1, CV_BGR2GRAY);
cvtColor(image2, gray_image2, CV_BGR2GRAY);
remap(gray_image1, frame1r, rx1, ry1, CV_INTER_LINEAR);
remap(gray_image2, frame2r, rx2, ry2, CV_INTER_LINEAR);
bm(frame1r, frame2r, disp);
normalize(disp, vdisp, 0, 255, NORM_MINMAX, CV_8U);
//convertScaleAbs(vdisp, vdisp, 1, 0);
disp.convertTo(vdisp, CV_8U, 255/(64*16.));
show("disparity", vdisp);
//reprojectImageTo3D(disp, image3d, Q, true);
//show("depth map", image3d);
//display image side by side for rectified window
//copy frame1r to the left side
cvtColor(frame1r, frame1rf, CV_GRAY2BGR);
frame1rf.copyTo(rectified_pair(Rect(0,0,imageSize.width, imageSize.height)));
//copy frame2r to the right side
cvtColor(frame2r, frame2rf, CV_GRAY2BGR);
frame2rf.copyTo(rectified_pair(Rect(imageSize.width,0,imageSize.width, imageSize.height)));
for(int i=0; i<imageSize.height; i+=32)
line(rectified_pair, Point(0,i), Point((imageSize.width)*2, i), CV_RGB(0,255,0));
show("rectified", rectified_pair);
cap3 >> image1;
cap4 >> image2;
if(waitKey(15) == 27)
break;
}
return 0;
}
I'm not using stereo pairs, but I get the same result using a Kinect: far = light, near = dark.
To change this I have used the following:
double min, max;
minMaxLoc(depthImage, &min, &max);
depthImage.convertTo(rImage, CV_8U, -255.0/max, 255);
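Applied to the question's code, the same idea would replace the normalize()/convertTo() pair; a sketch using the question's disp and vdisp:
double minVal, maxVal;
minMaxLoc(disp, &minVal, &maxVal);
// the negative scale flips the mapping: the largest disparity becomes 0 (dark)
// and zero disparity becomes 255 (light)
disp.convertTo(vdisp, CV_8U, -255.0 / maxVal, 255);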
I was facing the same problem. Then I tried swapping the right and left images, and it worked!
Now I am getting the correct image.
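In terms of the question's code, that swap is just reversing the first two arguments to the block matcher:
bm(frame2r, frame1r, disp); // left/right inputs swapped relative to the question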

Drawing spectrum of an image in C++ (fftw, OpenCV)

I'm trying to create a program that will draw a 2D greyscale spectrum of a given image. I'm using the OpenCV and FFTW libraries. Using tips and code from the internet and modifying them, I've managed to load an image, calculate the FFT of the image, and recreate the image from the FFT (it comes out the same). What I'm unable to do is draw the Fourier spectrum itself. Could you please help me?
Here's the code (less important lines removed):
/* Copy input image */
/* Create output image */
/* Allocate input data for FFTW */
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
dft = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
/* Create plans */
plan_f = fftw_plan_dft_2d(w, h, in, dft, FFTW_FORWARD, FFTW_ESTIMATE);
/* Populate input data in row-major order */
for (i = 0, k = 0; i < h; i++)
{
for (j = 0; j < w; j++, k++)
{
in[k][0] = ((uchar*)(img1->imageData + i * img1->widthStep))[j];
in[k][1] = 0.;
}
}
/* forward DFT */
fftw_execute(plan_f);
/* spectrum */
for (i = 0, k = 0; i < h; i++)
{
for (j = 0; j < w; j++, k++)
((uchar*)(img2->imageData + i * img2->widthStep))[j] = sqrt(pow(dft[k][0],2) + pow(dft[k][1],2));
}
cvShowImage("iplimage_dft(): original", img1);
cvShowImage("iplimage_dft(): result", img2);
cvWaitKey(0);
/* Free memory */
}
The problem is in the "Spectrum" section. Instead of a spectrum I get some noise. What am I doing wrong? I would be grateful for your help.
You need to draw the magnitude of the spectrum. Here is the code:
void ForwardFFT(Mat &Src, Mat *FImg)
{
int M = getOptimalDFTSize( Src.rows );
int N = getOptimalDFTSize( Src.cols );
Mat padded;
copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));
// Create a complex representation of the image:
// planes[0] holds the image itself, planes[1] its imaginary part (filled with zeros)
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexImg;
merge(planes, 2, complexImg);
dft(complexImg, complexImg);
// After the transform, the result again consists of a real and an imaginary part
split(complexImg, planes);
// crop the spectrum if it has an odd number of rows or columns
planes[0] = planes[0](Rect(0, 0, planes[0].cols & -2, planes[0].rows & -2));
planes[1] = planes[1](Rect(0, 0, planes[1].cols & -2, planes[1].rows & -2));
Recomb(planes[0],planes[0]);
Recomb(planes[1],planes[1]);
// Normalize the spectrum
planes[0]/=float(M*N);
planes[1]/=float(M*N);
FImg[0]=planes[0].clone();
FImg[1]=planes[1].clone();
}
void ForwardFFT_Mag_Phase(Mat &src, Mat &Mag,Mat &Phase)
{
Mat planes[2];
ForwardFFT(src,planes);
Mag.zeros(planes[0].rows,planes[0].cols,CV_32F);
Phase.zeros(planes[0].rows,planes[0].cols,CV_32F);
cv::cartToPolar(planes[0],planes[1],Mag,Phase);
}
Mat LogMag;
LogMag.zeros(Mag.rows,Mag.cols,CV_32F);
LogMag=(Mag+1);
cv::log(LogMag,LogMag);
//---------------------------------------------------
imshow("Логарифм амплитуды", LogMag);
imshow("Фаза", Phase);
imshow("Результат фильтрации", img);
Can you try to do the IFFT step and see if you recover the original image? Then you can check step by step where your problem is. Another way to find the problem is to run this process on a small matrix predefined by you, calculate its FFT in MATLAB, and compare step by step; it worked for me!
