I'm trying to apply a cuFFT, forward then inverse, to a 2D image. I need the real and imaginary parts as separate outputs so I can compute phase and magnitude images. I haven't been able to recreate the input image, and the phase that comes back is non-zero. In particular, I am unsure whether I'm correctly rebuilding the full-size image from the reduced-size cuFFT complex output, which apparently stores only the left half of the spectrum. Here's my current code:
// Load image
cv::Mat_<float> img;
img = cv::imread(path,0);
if(!img.isContinuous()){
    std::cout<<"Input cv::Mat is not continuous!"<<std::endl;
    return -1;
}
float *h_Data, *d_Data;
h_Data = img.ptr<float>(0);
// Complex device pointers
cufftComplex
    *d_DataSpectrum,
    *d_Result,
    *h_Result;
// Plans for cuFFT execution
cufftHandle
    fftPlanFwd,
    fftPlanInv;
// Image dimensions
const int dataH = img.rows;
const int dataW = img.cols;
const int complexW = dataW/2+1;
// Allocate memory
h_Result = (cufftComplex *)malloc(dataH * complexW * sizeof(cufftComplex));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum, dataH * complexW * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Data, dataH * dataW * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Result, dataH * complexW * sizeof(cufftComplex)));
// Copy image to GPU
checkCudaErrors(cudaMemcpy(d_Data, h_Data, dataH * dataW * sizeof(float), cudaMemcpyHostToDevice));
// Forward FFT
checkCudaErrors(cufftPlan2d(&fftPlanFwd, dataH, dataW, CUFFT_R2C));
checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_Data, (cufftComplex *)d_DataSpectrum));
// Inverse FFT
checkCudaErrors(cufftPlan2d(&fftPlanInv, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftComplex *)d_Result, CUFFT_INVERSE));
// Copy result to host memory
checkCudaErrors(cudaMemcpy(h_Result, d_Result, dataH * complexW * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
// Convert cufftComplex to OpenCV real and imag Mat
Mat_<float> resultReal = Mat_<float>(dataH, dataW);
Mat_<float> resultImag = Mat_<float>(dataH, dataW);
for(int i=0; i<dataH; i++){
    float* rowPtrReal = resultReal.ptr<float>(i);
    float* rowPtrImag = resultImag.ptr<float>(i);
    for(int j=0; j<dataW; j++){
        if(j<complexW){
            rowPtrReal[j] = h_Result[i*complexW+j].x/(dataH*dataW);
            rowPtrImag[j] = h_Result[i*complexW+j].y/(dataH*dataW);
        }else{
            // Right side?
            rowPtrReal[j] = h_Result[i*complexW+(dataW-j)].x/(dataH*dataW);
            rowPtrImag[j] = -h_Result[i*complexW+(dataW-j)].y/(dataH*dataW);
        }
    }
}
// Compute phase and normalize to 8 bit
Mat_<float> resultPhase;
phase(resultReal, resultImag, resultPhase);
cv::subtract(resultPhase, 2*M_PI, resultPhase, (resultPhase > M_PI));
resultPhase = ((resultPhase+M_PI)*255)/(2*M_PI);
Mat_<uchar> normalized = Mat_<uchar>(dataH, dataW);
resultPhase.convertTo(normalized, CV_8U);
// Save phase image
cv::imwrite("cuda_propagation_phase.png",normalized);
// Compute amplitude and normalize to 8 bit
Mat_<float> resultAmplitude;
magnitude(resultReal, resultImag, resultAmplitude);
Mat_<uchar> normalizedAmplitude = Mat_<uchar>(dataH, dataW);
resultAmplitude.convertTo(normalizedAmplitude, CV_8U);
// Save amplitude image
cv::imwrite("cuda_propagation_amplitude.png",normalizedAmplitude);
I'm not sure where my error is. Is that the correct way to get back the whole image from the reduced version (the for loop)?
I think I got it now. The 'trick' is to start with a complex matrix. Starting from a real one, you would need an R2C transform, whose output is reduced in size due to the symmetry of the spectrum, followed by a C2C inverse, which keeps that reduced size instead of restoring the full image. The solution was to create a complex input from the real one by inserting zeros as the imaginary part, then applying two C2C transforms in a row, which both preserve the full image size and make it easy to get the full-sized real and imaginary matrices afterwards:
// Load image
cv::Mat_<float> img;
img = cv::imread(path,0);
if(!img.isContinuous()){
    std::cout<<"Input cv::Mat is not continuous!"<<std::endl;
    return -1;
}
float *h_DataReal = img.ptr<float>(0);
cufftComplex *h_DataComplex;
// Image dimensions
const int dataH = img.rows;
const int dataW = img.cols;
// Convert real input to complex
h_DataComplex = (cufftComplex *)malloc(dataH * dataW * sizeof(cufftComplex));
for(int i=0; i<dataH*dataW; i++){
    h_DataComplex[i].x = h_DataReal[i];
    h_DataComplex[i].y = 0.0f;
}
// Complex device pointers
cufftComplex
    *d_Data,
    *d_DataSpectrum,
    *d_Result,
    *h_Result;
// Plans for cuFFT execution
cufftHandle
    fftPlanFwd,
    fftPlanInv;
// Allocate memory
h_Result = (cufftComplex *)malloc(dataH * dataW * sizeof(cufftComplex));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum, dataH * dataW * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Data, dataH * dataW * sizeof(cufftComplex)));
checkCudaErrors(cudaMalloc((void **)&d_Result, dataH * dataW * sizeof(cufftComplex)));
// Copy image to GPU
checkCudaErrors(cudaMemcpy(d_Data, h_DataComplex, dataH * dataW * sizeof(cufftComplex), cudaMemcpyHostToDevice));
// Forward FFT
checkCudaErrors(cufftPlan2d(&fftPlanFwd, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanFwd, (cufftComplex *)d_Data, (cufftComplex *)d_DataSpectrum, CUFFT_FORWARD));
// Inverse FFT
checkCudaErrors(cufftPlan2d(&fftPlanInv, dataH, dataW, CUFFT_C2C));
checkCudaErrors(cufftExecC2C(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftComplex *)d_Result, CUFFT_INVERSE));
// Copy result to host memory
checkCudaErrors(cudaMemcpy(h_Result, d_Result, dataH * dataW * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
// Convert cufftComplex to OpenCV real and imag Mat
Mat_<float> resultReal = Mat_<float>(dataH, dataW);
Mat_<float> resultImag = Mat_<float>(dataH, dataW);
for(int i=0; i<dataH; i++){
    float* rowPtrReal = resultReal.ptr<float>(i);
    float* rowPtrImag = resultImag.ptr<float>(i);
    for(int j=0; j<dataW; j++){
        rowPtrReal[j] = h_Result[i*dataW+j].x/(dataH*dataW);
        rowPtrImag[j] = h_Result[i*dataW+j].y/(dataH*dataW);
    }
}
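One thing both versions omit is cleanup; a minimal sketch for the plans and buffers above (cufftDestroy and cudaFree are the matching release calls):
checkCudaErrors(cufftDestroy(fftPlanFwd));
checkCudaErrors(cufftDestroy(fftPlanInv));
checkCudaErrors(cudaFree(d_Data));
checkCudaErrors(cudaFree(d_DataSpectrum));
checkCudaErrors(cudaFree(d_Result));
free(h_DataComplex);
free(h_Result);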
This is an old question, but I'd like to provide additional information: an R2C transform preserves the same amount of information as a C2C transform; it just does so with about half as many elements. The R2C (and C2R) transforms take advantage of Hermitian symmetry to reduce the number of elements that are computed and stored in memory (the spectrum of a real input is symmetric, so roughly half of the terms a C2C transform stores are redundant).
To generate 2D images of the real and imaginary components, you could use the R2C transform and then write a kernel that translates the (Nx/2+1) x Ny output array into a pair of arrays of size Nx x Ny, applying the symmetry yourself to write each term to the correct position (a sketch follows below). But using a C2C transform is a bit less code, and more foolproof.
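For reference, a hypothetical sketch of such an expansion kernel (illustrative names, not code from the question). For an H x W real image, cuFFT's R2C output stores cw = W/2+1 complex values per row, and the missing right half follows from Hermitian symmetry, F(i, j) = conj(F((H - i) mod H, W - j)); note that the row index has to be mirrored as well, not just the column:
// Rebuild the full H x W spectrum from the reduced R2C output,
// writing real and imaginary parts to separate full-size arrays.
__global__ void expandR2C(const cufftComplex *half, float *re, float *im, int H, int W)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x; // column
    int i = blockIdx.y * blockDim.y + threadIdx.y; // row
    if (i >= H || j >= W) return;
    int cw = W / 2 + 1;
    if (j < cw) {
        // Left half is stored directly
        cufftComplex v = half[i * cw + j];
        re[i * W + j] = v.x;
        im[i * W + j] = v.y;
    } else {
        // Right half by symmetry: mirror BOTH indices, then conjugate
        cufftComplex v = half[((H - i) % H) * cw + (W - j)];
        re[i * W + j] = v.x;
        im[i * W + j] = -v.y;
    }
}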
I want to pass an OpenCV Mat to a self-written OpenCL kernel for an FPGA (which doesn't support the OpenCV OpenCL module).
Host code:
Mat img = imread( "template.jpg", IMREAD_GRAYSCALE );
Mat output(img.rows, img.cols, CV_8UC1);
// Program, Context already declared
// Create Kernel
cl_kernel kernel = NULL;
kernel = clCreateKernel(program, "copy", &status);
// Create Command Queue and associate it with the device you want to execute on
cl_command_queue cmdQueue;
cmdQueue = clCreateCommandQueue(context,devices[0], 0, &status);
// Buffer, probably I do something wrong here
cl_mem buffer_img = clCreateBuffer(context,CL_MEM_READ_ONLY, sizeof(uint) * img.cols * img.rows, NULL,&status);
cl_mem buffer_outputimg = clCreateBuffer(context,CL_MEM_WRITE_ONLY, sizeof(uint) * img.cols * img.rows,NULL,&status);
status = clEnqueueWriteBuffer(cmdQueue, buffer_img,CL_FALSE,0,sizeof(uint) * img.cols * img.rows,&img,0,NULL,NULL);
// set kernel arguments
status = clSetKernelArg(kernel,0,sizeof(cl_mem),&buffer_img);
status = clSetKernelArg(kernel,1,sizeof(cl_mem),&buffer_outputimg);
size_t globalWorkSize[2];
globalWorkSize[0] = img.cols;
globalWorkSize[1] = img.rows;
status = clEnqueueNDRangeKernel(cmdQueue,kernel,2,NULL, globalWorkSize, NULL,0, NULL,NULL);
clEnqueueReadBuffer(cmdQueue,buffer_outputimg,CL_TRUE,0,sizeof(uint) * img.cols * img.rows, &output, 0, NULL, NULL);
//stop cpu till queue is finish
clFinish(cmdQueue);
Kernel code:
__kernel void copy(__global uchar * input, __global uchar * output)
{
    const int x = get_global_id(0);
    const int y = get_global_id(1);
    // copy
    output[y * get_global_size(0) + x] = input[y * get_global_size(0) + x];
}
When executing it on the FPGA I get a segmentation fault, which is probably due to wrong handling of the OpenCV Mat.
EDIT:
Editing the host code as suggested by api55 solved the problem:
Mat img = imread( "scene.jpg", IMREAD_GRAYSCALE );
Mat output(img.rows, img.cols, CV_8UC1);
// Program, Context already declared
// Create Kernel
cl_kernel kernel = NULL;
kernel = clCreateKernel(program, "copy", &status);
// Create Command Queue and associate it with the device you want to execute on
cl_command_queue cmdQueue;
cmdQueue = clCreateCommandQueue(context,devices[0], 0, &status);
checkError(status, "Failed to create commadnqueue");
// Buffer
cl_mem buffer_img = clCreateBuffer(context,CL_MEM_READ_ONLY, sizeof(uchar) * img.cols * img.rows, NULL,&status);
cl_mem buffer_outputimg = clCreateBuffer(context,CL_MEM_WRITE_ONLY, sizeof(uchar) * img.cols * img.rows,NULL,&status);
checkError(status, "Failed to create buffer_mask");
status = clEnqueueWriteBuffer(cmdQueue, buffer_img,CL_FALSE,0,sizeof(uchar) * img.cols * img.rows,img.data,0,NULL,NULL);
checkError(status, "Failed to enqueue buffer_img");
status = clSetKernelArg(kernel,0,sizeof(cl_mem),&buffer_img);
status = clSetKernelArg(kernel,1,sizeof(cl_mem),&buffer_outputimg);
size_t globalWorkSize[2];
globalWorkSize[0] = img.cols;
globalWorkSize[1] = img.rows;
status = clEnqueueNDRangeKernel(cmdQueue,kernel,2,NULL, globalWorkSize, NULL,0, NULL,NULL);
clEnqueueReadBuffer(cmdQueue,buffer_outputimg,CL_TRUE,0,sizeof(uchar) * img.cols * img.rows, output.data,0,NULL,NULL);
imwrite("output.jpg", output);
I do not have much experience with OpenCL, but I think it is an OpenCV/C++ problem.
The OpenCV Mat data lies in img.data, which is an uchar* of size sizeof(T) * channels * rows * cols.
Usually T is uchar when loading images, and channels is 3 (unless it is a greyscale image). A 3-channel uchar image is 24 bits per pixel and greyscale (as you are loading) is 8 bits per pixel, but you are using uint, which is 32 bits. At some point the copy goes outside the allocated memory and causes the segmentation fault. Also, if you do not pass the data pointer of the structure, you may be copying the header information and just the pointer to the data, not the data itself.
I suggest you change &img in:
status = clEnqueueWriteBuffer(cmdQueue, buffer_img,CL_FALSE,0,sizeof(uint) * img.cols * img.rows,&img,0,NULL,NULL);
to img.data.
Finally, you need to have the correct data type. I am not sure whether OpenCL can use uchar, but if it can't, change the cv::Mat to another type like this:
img.convertTo(img, CV_32S);
After loading the image. This will change it to int (OpenCV does not support matrices of unsigned int), so just make sure to change it accordingly in the other places (i.e. sizeof(uint) becomes sizeof(int)), and if you convert the input, remember to create the output with the same type.
If you prefer float, use CV_32F, and if you like double, CV_64F.
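For example, a sketch of the type-matched setup, assuming the same context, cmdQueue and status variables as in the question:
img.convertTo(img, CV_32S); // each element is now a 32-bit signed int
cl_mem buffer_img = clCreateBuffer(context, CL_MEM_READ_ONLY,
                                   sizeof(cl_int) * img.cols * img.rows, NULL, &status);
status = clEnqueueWriteBuffer(cmdQueue, buffer_img, CL_FALSE, 0,
                              sizeof(cl_int) * img.cols * img.rows,
                              img.data, 0, NULL, NULL);
// ...and the kernel parameters change from __global uchar* to __global int*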
int sizeOfChannel = (_width / 2) * (_height / 2);
double* channel_gr = new double[sizeOfChannel];
// filling the data into channel_gr....
cv::Mat my( _width/2, _height/2, CV_32F,channel_gr);
cv::Mat src(_width/2, _height/2, CV_32F);
for (int i = 0; i < (_width/2) * (_height/2); ++i)
{
    src.at<float>(i) = channel_gr[i];
}
cv::imshow("src",src);
cv::imshow("my",my);
cv::waitKey(0);
I'm wondering why I'm not getting the same image in the 'my' and 'src' imshow windows.
update:
I have changed my array into double*; still the same result.
I think it has something to do with the steps?
my image output
src image output
This one works for me:
int halfWidth = _width/2;
int halfHeight = _height/2;
int sizeOfChannel = halfHeight*halfWidth;
// ******************************* //
// you use CV_32FC1 later so it is single precision float
float* channel_gr = new float[sizeOfChannel];
// filling the data into channel_gr....
for(int i=0; i<sizeOfChannel; ++i) channel_gr[i] = i/(float)sizeOfChannel;
// ******************************* //
// changed row/col ordering, but this shouldn't be important
cv::Mat my( halfHeight , halfWidth , CV_32FC1,channel_gr);
cv::Mat src(halfHeight , halfWidth, CV_32FC1);
// ******************************* //
// changed from 1D indexing to 2D indexing
for(int y=0; y<src.rows; ++y)
    for(int x=0; x<src.cols; ++x)
    {
        int arrayPos = y*halfWidth + x;
        // you have a 2D mat so access it in 2D
        src.at<float>(y,x) = channel_gr[arrayPos];
    }
cv::imshow("src",src);
cv::imshow("my",my);
// check for differences
cv::imshow("diff1 > 0",src-my > 0);
cv::imshow("diff2 > 0",my-src > 0);
cv::waitKey(0);
'my' is an array of floats, but you give it a pointer to an array of doubles. There is no way it can read the data from that array properly.
It seems that the constructor version you are using is
Mat::Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP)
This is from the OpenCV docs. You are creating 'my' as CV_32F (float) but handing it data declared as double; the Mat then reinterprets the raw double bytes as floats rather than converting them, which is worse than a simple precision loss.
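A sketch of the two straightforward fixes, assuming the variables from the question (note that the Mat constructor takes rows first, so height comes before width):
// Option 1: tell OpenCV the truth about the buffer; CV_64F matches double.
cv::Mat my(_height / 2, _width / 2, CV_64F, channel_gr);
// Option 2: if a float Mat is needed downstream, convert explicitly.
cv::Mat my32f;
my.convertTo(my32f, CV_32F);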
I'm very new to working with image processing at a low level and have just had a go at implementing a Gaussian kernel with both GPU and CPU - however, both yield the same output: an image severely skewed by a grid:
I'm aware I could use OpenCV's pre-built functions to handle the filters, but I wanted to learn the methodology behind it, so I built my own.
Convolution kernel:
// Convolution kernel - this manipulates the given channel and writes out a new blurred channel.
void convoluteChannel_cpu(
    const unsigned char* const channel,            // Input channel
    unsigned char* const channelBlurred,           // Output channel
    const size_t numRows, const size_t numCols,    // Channel width/height (rows, cols)
    const float *filter,                           // The weight of sigma, to convolve
    const int filterWidth                          // This is normally a sample of 9
)
{
    // Loop through the image's given R, G or B channel
    for(int rows = 0; rows < (int)numRows; rows++)
    {
        for(int cols = 0; cols < (int)numCols; cols++)
        {
            // Declare new pixel colour value
            float newColor = 0.f;
            // Loop for every row along the stencil size (3x3 matrix)
            for(int filter_x = -filterWidth/2; filter_x <= filterWidth/2; filter_x++)
            {
                // Loop for every col along the stencil size (3x3 matrix)
                for(int filter_y = -filterWidth/2; filter_y <= filterWidth/2; filter_y++)
                {
                    // Clamp to the boundary of the image to ensure we don't access a null index.
                    int image_x = __min(__max(rows + filter_x, 0), static_cast<int>(numRows - 1));
                    int image_y = __min(__max(cols + filter_y, 0), static_cast<int>(numCols - 1));
                    // Assign the new pixel value to the current pixel; numCols and numRows are both 3, so we only
                    // need to use one to find the current pixel index (similar to how we find the thread in a block)
                    float pixel = static_cast<float>(channel[image_x * numCols + image_y]);
                    // Sigma is the new weight to apply to the image; we perform the equation to get a random weighting,
                    // if we don't do this the image will become choppy.
                    float sigma = filter[(filter_x + filterWidth / 2) * filterWidth + filter_y + filterWidth/2];
                    //float sigma = 1 / 81.f;
                    // Set the new pixel value
                    newColor += pixel * sigma;
                }
            }
            // Set the value of the next pixel at the current image index with the newly declared color
            channelBlurred[rows * numCols + cols] = newColor;
        }
    }
}
I call this 3 times from another method which splits the image into respective R, G, B channels, but I don't believe this would cause the image to be so severely mutated.
Has anybody encountered a problem similar to this before, and if so how did you solve it?
EDIT Channel Splitting Func:
void gaussian_cpu(
    const uchar4* const rgbaImage,    // Our input image from the camera
    uchar4* const outputImage,        // The image we are writing back for display
    size_t numRows, size_t numCols,   // Width and Height of the input image (rows/cols)
    const float* const filter,        // The value of sigma
    const int filterWidth             // The size of the stencil (3x3) 9
)
{
    // Build an array to hold each channel for the given image
    unsigned char *r_c = new unsigned char[numRows * numCols];
    unsigned char *g_c = new unsigned char[numRows * numCols];
    unsigned char *b_c = new unsigned char[numRows * numCols];
    // Build arrays for each of the output (blurred) channels
    unsigned char *r_bc = new unsigned char[numRows * numCols];
    unsigned char *g_bc = new unsigned char[numRows * numCols];
    unsigned char *b_bc = new unsigned char[numRows * numCols];
    // Separate the image into R,G,B channels
    for(size_t i = 0; i < numRows * numCols; i++)
    {
        uchar4 rgba = rgbaImage[i];
        r_c[i] = rgba.x;
        g_c[i] = rgba.y;
        b_c[i] = rgba.z;
    }
    // Convolute each of the channels using our array
    convoluteChannel_cpu(r_c, r_bc, numRows, numCols, filter, filterWidth);
    convoluteChannel_cpu(g_c, g_bc, numRows, numCols, filter, filterWidth);
    convoluteChannel_cpu(b_c, b_bc, numRows, numCols, filter, filterWidth);
    // Recombine the channels to build the output image - 255 for alpha as we want 0 transparency
    for(size_t i = 0; i < numRows * numCols; i++)
    {
        uchar4 rgba = make_uchar4(r_bc[i], g_bc[i], b_bc[i], 255);
        outputImage[i] = rgba;
    }
}
EDIT Calling the kernel
while(gpu_frames > 0)
{
    //cout << gpu_frames << "\n";
    camera >> frameIn;
    // Allocate I/O Pointers
    beginStream(&h_inputFrame, &h_outputFrame, &d_inputFrame, &d_outputFrame, &d_redBlurred, &d_greenBlurred, &d_blueBlurred, &_h_filter, &filterWidth, frameIn);
    // Show the source image
    imshow("Source", frameIn);
    g_timer.Start();
    // Allocate mem to GPU
    allocateMemoryAndCopyToGPU(numRows(), numCols(), _h_filter, filterWidth);
    // Apply the gaussian kernel filter and then free any memory ready for the next iteration
    gaussian_gpu(h_inputFrame, d_inputFrame, d_outputFrame, numRows(), numCols(), d_redBlurred, d_greenBlurred, d_blueBlurred, filterWidth);
    // Output the blurred image
    cudaMemcpy(h_outputFrame, d_frameOut, sizeof(uchar4) * numPixels(), cudaMemcpyDeviceToHost);
    g_timer.Stop();
    cudaDeviceSynchronize();
    gpuTime += g_timer.Elapsed();
    cout << "Time for this kernel " << g_timer.Elapsed() << "\n";
    Mat outputFrame(Size(numCols(), numRows()), CV_8UC1, h_outputFrame, Mat::AUTO_STEP);
    clean_mem();
    imshow("Dest", outputFrame);
    // 1ms delay to prevent system from being interrupted whilst drawing the new frame
    waitKey(1);
    gpu_frames--;
}
And then within the beginStream() method, images are converted to uchar4:
// Allocate host variables, casting the frameIn and frameOut vars to uchar4 elements, these will
// later be processed by the kernel
*h_inputFrame = (uchar4 *)frameIn.ptr<unsigned char>(0);
*h_outputFrame = (uchar4 *)frameOut.ptr<unsigned char>(0);
There are several points of confusion in the problem.
At the start of the code it is mentioned that the filter width is 9, thus making it a 9x9 kernel, but in some other comments it is said to be 3. So I am guessing that you are actually using a 9x9 kernel and the filter does have the 81 weights in it.
But the above output can never be due to that confusion alone.
uchar4 is 4 bytes in size. Thus in gaussian_cpu, when splitting the data by running the loop over rgbaImage[i] on an image that does not contain an alpha value (it can be inferred from that loop that alpha is not present), what actually happens is that you are copying R1,G2,B3,R5,G6,B7 and so on to the red channel. Better to first try the code on a grayscale image and make sure you are using uchar instead of uchar4 (a test sketch follows below).
The output image appears to be exactly 1/3rd the width of the original, which supports this assumption.
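A minimal sketch of that grayscale test (hypothetical file name; filter and filterWidth as in the question):
// One channel in, one channel out, no uchar4 involved. If this produces a
// clean blur, the grid artefact comes from the RGB/RGBA channel mismatch.
cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE); // 8-bit, 1 channel
std::vector<unsigned char> blurred(gray.total());
convoluteChannel_cpu(gray.ptr<unsigned char>(0), blurred.data(),
                     gray.rows, gray.cols, filter, filterWidth);
cv::Mat result(gray.rows, gray.cols, CV_8UC1, blurred.data());
cv::imshow("gray blur test", result);
cv::waitKey(0);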
EDIT 1:
Is the input rgbaImage to the gaussian_cpu function RGBA or RGB? VideoCapture must be giving a 3-channel output. The initialization of *h_inputFrame (to uchar4) is itself wrong, as it points to 3-channel data.
Similarly, the output data is four-channel data, but Mat outputFrame is declared as a single channel pointing to this four-channel data. Try declaring Mat outputFrame as 8UC3 type and see the result.
Also, how is the code working? The gaussian_cpu() function has 7 input parameters in the definition, but when you call the function 8 parameters are used. Hope this is just a typo.
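If the capture really delivers 3-channel BGR frames, a hedged sketch of the missing conversion before the uchar4 cast (cv::cvtColor can add the alpha channel):
// Convert the 3-channel BGR frame to 4 channels so the uchar4* cast is valid.
cv::Mat frameRGBA;
cv::cvtColor(frameIn, frameRGBA, cv::COLOR_BGR2RGBA);
*h_inputFrame = (uchar4 *)frameRGBA.ptr<unsigned char>(0);
// The output Mat should then be CV_8UC4 (or be converted back), not CV_8UC1.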
I've been using this web page as a guideline for formant tracking of speech...
http://iitg.vlab.co.in/?sub=59&brch=164&sim=615&cnt=1
It all seems to be going pretty well, except for the last step, which is converting the cepstrum into a smoothed representation for simple peak picking for the formant tracking. The spectrograph looks good, and the cepstrograph (can I say that? :P) also looks good (from what I can tell), but at the final stage the results (the smoothed formant representation) are not what I expected.
I uploaded a sample of each stage as visual images to...
http://imgur.com/a/62duS
This sample is for the speech of the sound 'i' as in 'beed'. According to this site...
http://home.cc.umanitoba.ca/~robh/howto.html#formants
the first formant should come in at around 500 Hz, and the second and third at around 2200 Hz and 2800 Hz respectively. The spectrograph shows something very similar, but in the last stage I am getting results similar to...
F1 - 891
F2 - 1550
F3 - 2329
Any insight would be greatly appreciated. I've been going round in circles on this for some time. My code looks as follows...
// set up fft parameters
UInt32 log2n = 9;
UInt32 n = 512;
UInt32 window = n;
UInt32 halfN = n/2;
UInt32 stride = 1;
FFTSetup setupReal = [appDelegate getFftSetup];
int stepSize = (hpBuffer.sampleCount-window) / quantizeCount;
// calculate volume from raw samples, because it seems more reliable than fft
UInt32 volumeWindow = 128;
volumeBuffer = malloc(sizeof(float)*quantizeCount);
int windowPos = 0;
for (int i=0; i < quantizeCount; i++) {
    windowPos += stepSize;
    float total = 0.0f;
    float max = 0.0f;
    for (int p=windowPos; p < windowPos+volumeWindow; p++) {
        total += sampleBuffer.buffer[p];
        if (sampleBuffer.buffer[p] > max)
            max = sampleBuffer.buffer[p];
    }
    volumeBuffer[i] = max;
}
// normalize volumebuffer
[FloatAudioBuffer normalizePositiveBuffer:volumeBuffer ofSize:quantizeCount];
// allocate memory for complex array
COMPLEX_SPLIT complexArray;
complexArray.realp = (float*)malloc(4096*sizeof(float));
complexArray.imagp = (float*)malloc(4096*sizeof(float));
// allocate some space for temporary hamming buffer
float *hamBuffer = malloc(n*sizeof(float));
// create spectrum and feature buffer
spectrumBuffer = malloc(sizeof(float)*halfN*quantizeCount);
formantBuffer = malloc(sizeof(float)*4096*quantizeCount);
cepstrumBuffer = malloc(sizeof(float)*halfN*quantizeCount);
lowCepstrumBuffer = malloc(sizeof(float)*featureCount*quantizeCount);
featureBuffer = malloc(sizeof(float)*featureCount*quantizeCount);
// create data point for each quantize segment
float TWOPI = 2.0f * M_PI;
for (int s=0; s < quantizeCount; s++) {
    // copy buffer data into a separate array and apply hamming window
    int offset = (int)(s * stepSize);
    for (int i=0; i < n; i++)
        hamBuffer[i] = hpBuffer.buffer[offset+i] * ((1.0f-0.46f) - 0.46f*cos(TWOPI*i/((float)n-1.0f)));
    // configure float array into acceptable input array format (interleaved)
    vDSP_ctoz((COMPLEX*)hamBuffer, 2, &complexArray, 1, halfN);
    // run FFT
    vDSP_fft_zrip(setupReal, &complexArray, stride, log2n, FFT_FORWARD);
    // Absolute square (equivalent to mag^2)
    complexArray.imagp[0] = 0.0f;
    vDSP_zvmags(&complexArray, 1, complexArray.realp, 1, halfN);
    bzero(complexArray.imagp, (halfN) * sizeof(float));
    // scale
    float scale = 1.0f / (2.0f*(float)n);
    vDSP_vsmul(complexArray.realp, 1, &scale, complexArray.realp, 1, halfN);
    // get log of absolute values for passing to inverse FFT for cepstrum
    for (int i=0; i < halfN; i++)
        complexArray.realp[i] = logf(sqrtf(complexArray.realp[i]));
    // save this into spectrum buffer
    memcpy(&spectrumBuffer[s*halfN], complexArray.realp, halfN*sizeof(float));
    // convert spectrum to interleaved ready for inverse fft
    vDSP_ctoz((COMPLEX*)&spectrumBuffer[s*halfN], 2, &complexArray, 1, halfN/2);
    // create cepstrum
    vDSP_fft_zrip(setupReal, &complexArray, stride, log2n-1, FFT_INVERSE);
    // convert interleaved to real and straight into cepstrum buffer
    vDSP_ztoc(&complexArray, 1, (COMPLEX*)&cepstrumBuffer[s*halfN], 2, halfN/2);
    // copy first part of cepstrum into low cepstrum buffer
    memcpy(&lowCepstrumBuffer[s*featureCount], &cepstrumBuffer[s*halfN], featureCount*sizeof(float));
    // make 8192-point array based on the first 15 values
    float *tempArray = malloc(8192*sizeof(float));
    for (int i=0; i < 8192; i++) {
        if (i < 15)
            tempArray[i] = cepstrumBuffer[s*halfN+i];
        else
            tempArray[i] = 0.0f;
    }
    vDSP_ctoz((COMPLEX*)tempArray, 2, &complexArray, 1, 4096);
    float newLog2n = log2f(8192.0f);
    complexArray.imagp[0] = 0.0f;
    vDSP_fft_zrip(setupReal, &complexArray, stride, newLog2n, FFT_FORWARD);
    vDSP_zvmags(&complexArray, 1, complexArray.realp, 1, 4096);
    bzero(complexArray.imagp, (4096) * sizeof(float));
    // scale
    scale = 1.0f / (2.0f*(float)8192);
    vDSP_vsmul(complexArray.realp, 1, &scale, complexArray.realp, 1, 4096);
    // get magnitude
    for (int i=0; i < 4096; i++)
        complexArray.realp[i] = sqrtf(complexArray.realp[i]);
    // write to formant buffer
    memcpy(&formantBuffer[s*4096], complexArray.realp, 4096*sizeof(float));
    // complex array now contains formant spectrum
    // it's large, so get features here!
    // try simple peak picking algorithm for first 3 formants
    int formantIndex = 0;
    float *peaks = malloc(6*sizeof(float));
    for (int i=0; i < 6; i++)
        peaks[i] = 0.0f;
    for (int i=1; i < 4096-1 && formantIndex < 6; i++) {
        if (complexArray.realp[i-1] < complexArray.realp[i] &&
            complexArray.realp[i+1] < complexArray.realp[i])
            peaks[formantIndex++] = i;
    }
}
I'm trying to create a program that will draw a 2D greyscale spectrum of a given image. I'm using the OpenCV and FFTW libraries. Using tips and code from the internet and modifying them, I've managed to load an image, calculate the FFT of the image, and recreate the image from the FFT (it's the same). What I'm unable to do is draw the Fourier spectrum itself. Could you please help me?
Here's the code (less important lines removed):
/* Copy input image */
/* Create output image */
/* Allocate input data for FFTW */
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
dft = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
/* Create plans */
plan_f = fftw_plan_dft_2d(w, h, in, dft, FFTW_FORWARD, FFTW_ESTIMATE);
/* Populate input data in row-major order */
for (i = 0, k = 0; i < h; i++)
{
    for (j = 0; j < w; j++, k++)
    {
        in[k][0] = ((uchar*)(img1->imageData + i * img1->widthStep))[j];
        in[k][1] = 0.;
    }
}
/* forward DFT */
fftw_execute(plan_f);
/* spectrum */
for (i = 0, k = 0; i < h; i++)
{
    for (j = 0; j < w; j++, k++)
        ((uchar*)(img2->imageData + i * img2->widthStep))[j] = sqrt(pow(dft[k][0],2) + pow(dft[k][1],2));
}
cvShowImage("iplimage_dft(): original", img1);
cvShowImage("iplimage_dft(): result", img2);
cvWaitKey(0);
/* Free memory */
}
The problem is in the "Spectrum" section. Instead of a spectrum I get some noise. What am I doing wrong? I would be grateful for your help.
You need to draw the magnitude of the spectrum. Here is the code:
void ForwardFFT(Mat &Src, Mat *FImg)
{
    int M = getOptimalDFTSize( Src.rows );
    int N = getOptimalDFTSize( Src.cols );
    Mat padded;
    copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));
    // Create a complex representation of the image:
    // planes[0] holds the image itself, planes[1] its imaginary part (filled with zeros)
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    // After the transform the result likewise consists of a real and an imaginary part
    split(complexImg, planes);
    // Crop the spectrum if it has an odd number of rows or columns
    planes[0] = planes[0](Rect(0, 0, planes[0].cols & -2, planes[0].rows & -2));
    planes[1] = planes[1](Rect(0, 0, planes[1].cols & -2, planes[1].rows & -2));
    Recomb(planes[0], planes[0]);
    Recomb(planes[1], planes[1]);
    // Normalize the spectrum
    planes[0] /= float(M*N);
    planes[1] /= float(M*N);
    FImg[0] = planes[0].clone();
    FImg[1] = planes[1].clone();
}
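Recomb is not shown above; presumably it swaps the spectrum quadrants so the zero frequency ends up in the center (an fftshift). A sketch of what such a helper typically looks like (it works in place thanks to the temporary):
void Recomb(Mat &src, Mat &dst)
{
    int cx = src.cols / 2;
    int cy = src.rows / 2;
    Mat tmp(src.size(), src.type());
    // swap quadrants diagonally: top-left <-> bottom-right, top-right <-> bottom-left
    src(Rect(0, 0, cx, cy)).copyTo(tmp(Rect(cx, cy, cx, cy)));
    src(Rect(cx, cy, cx, cy)).copyTo(tmp(Rect(0, 0, cx, cy)));
    src(Rect(cx, 0, cx, cy)).copyTo(tmp(Rect(0, cy, cx, cy)));
    src(Rect(0, cy, cx, cy)).copyTo(tmp(Rect(cx, 0, cx, cy)));
    tmp.copyTo(dst);
}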
void ForwardFFT_Mag_Phase(Mat &src, Mat &Mag, Mat &Phase)
{
    Mat planes[2];
    ForwardFFT(src, planes);
    Mag.zeros(planes[0].rows, planes[0].cols, CV_32F);
    Phase.zeros(planes[0].rows, planes[0].cols, CV_32F);
    cv::cartToPolar(planes[0], planes[1], Mag, Phase);
}
Mat LogMag;
LogMag.zeros(Mag.rows,Mag.cols,CV_32F);
LogMag=(Mag+1);
cv::log(LogMag,LogMag);
//---------------------------------------------------
imshow("Логарифм амплитуды", LogMag);
imshow("Фаза", Phase);
imshow("Результат фильтрации", img);
Can you try to do the IFFT step and see if you recover the original image? Then you can check step by step where your problem is. Another way to find the problem is to run this process on a small matrix you predefine, calculate its FFT in MATLAB, and compare step by step; it worked for me! (A sketch of such a check follows below.)
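A minimal sketch of that kind of check with FFTW, using a matrix small enough to verify by hand (the expected output noted below matches MATLAB's fft2 of the same matrix):
#include <stdio.h>
#include <fftw3.h>

int main(void)
{
    /* 2x2 sanity check: fft2([1 2; 3 4]) in MATLAB gives [10 -2; -4 0] */
    fftw_complex *in  = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * 4);
    fftw_complex *out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * 4);
    fftw_plan p = fftw_plan_dft_2d(2, 2, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
    double vals[4] = {1, 2, 3, 4};  /* row-major [1 2; 3 4] */
    for (int k = 0; k < 4; k++) { in[k][0] = vals[k]; in[k][1] = 0.; }
    fftw_execute(p);
    for (int k = 0; k < 4; k++)
        printf("%g %+gi\n", out[k][0], out[k][1]);
    fftw_destroy_plan(p);
    fftw_free(in);
    fftw_free(out);
    return 0;
}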