How to get more precise output out of an FFT? - ios

I am trying to make a colored waveform using the output of the following code. But when I run it, I only get a handful of distinct output frequencies (see the freq variable, which is computed from the bin index, sample rate and FFT size). I'm no math expert; I cobbled this together from existing code and answers.
//
// colored_waveform.c
// MixDJ
//
// Created by Jonathan Silverman on 3/14/19.
// Copyright © 2019 Jonathan Silverman. All rights reserved.
//
#include "colored_waveform.h"
#include "fftw3.h"
#include <math.h>
#include "sndfile.h"
//int N = 1024;
// helper function to apply a windowing function to a frame of samples
void calcWindow(double* in, double* out, int size) {
    for (int i = 0; i < size; i++) {
        double multiplier = 0.5 * (1 - cos(2*M_PI*i/(size - 1)));
        out[i] = multiplier * in[i];
    }
}
// helper function to compute FFT
void fft(double* samples, fftw_complex* out, int size) {
    fftw_plan p;
    p = fftw_plan_dft_r2c_1d(size, samples, out, FFTW_ESTIMATE);
    fftw_execute(p);
    fftw_destroy_plan(p);
}
// find the index of array element with the highest absolute value
// probably want to take some kind of moving average of buf[i]^2
// and return the maximum found
double maxFreqIndex(fftw_complex* buf, int size, float fS) {
    double max_freq = 0;
    double last_magnitude = 0;
    for (int i = 0; i < (size / 2) - 1; i++) {
        double freq = i * fS / size;
        // printf("freq: %f\n", freq);
        double magnitude = sqrt(buf[i][0]*buf[i][0] + buf[i][1]*buf[i][1]);
        if (magnitude > last_magnitude)
            max_freq = freq;
        last_magnitude = magnitude;
    }
    return max_freq;
}
//
//// map a frequency to a color, red = lower freq -> violet = high freq
//int freqToColor(int i) {
//
//}
void generateWaveformColors(const char path[]) {
    printf("Generating waveform colors\n");
    SNDFILE *infile = NULL;
    SF_INFO sfinfo;
    infile = sf_open(path, SFM_READ, &sfinfo);
    sf_count_t numSamples = sfinfo.frames;
    // sample rate
    float fS = 44100;
    // float songLengLengthSeconds = numSamples / fS;
    // printf("seconds: %f", songLengLengthSeconds);
    // size of frame for analysis, you may want to play with this
    float frameMsec = 5;
    // samples in a frame
    int frameSamples = (int)(fS / (frameMsec * 1000));
    // how much overlap each frame, you may want to play with this one too
    int frameOverlap = (frameSamples / 2);
    // color to use for each frame
    // int outColors[(numSamples / frameOverlap) + 1];
    // scratch buffers
    double* tmpWindow;
    fftw_complex* tmpFFT;
    tmpWindow = (double*) fftw_malloc(sizeof(double) * frameSamples);
    tmpFFT = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * frameSamples);
    printf("Processing waveform for colors\n");
    for (int i = 0, outptr = 0; i < numSamples; i += frameOverlap, outptr++)
    {
        double inSamples[frameSamples];
        sf_read_double(infile, inSamples, frameSamples);
        // window another frame for FFT
        calcWindow(inSamples, tmpWindow, frameSamples);
        // compute the FFT on the next frame
        fft(tmpWindow, tmpFFT, frameSamples);
        // which frequency is the highest?
        double freqIndex = maxFreqIndex(tmpFFT, frameSamples, fS);
        printf("%i: ", i);
        printf("Max freq: %f\n", freqIndex);
        // map to color
        // outColors[outptr] = freqToColor(freqIndex);
    }
    printf("Done.");
    sf_close (infile);
}
Here is some of the output:
2094216: Max freq: 5512.500000
2094220: Max freq: 0.000000
2094224: Max freq: 0.000000
2094228: Max freq: 0.000000
2094232: Max freq: 5512.500000
2094236: Max freq: 5512.500000
It only shows a few distinct values, not the wide variety of frequencies I would expect. Or am I wrong? Is there anything wrong with my code that you can see? The color mapping is commented out because I haven't written it yet.

The frequency resolution of an FFT is limited by the length of the data sample you have. The more samples you have, the higher the frequency resolution.
In your specific case you chose frames of 5 milliseconds, which is then transformed to a number of samples on the following line:
// samples in a frame
int frameSamples = (int)(fS / (frameMsec * 1000));
This works out to only 8 samples at the specified 44100 Hz sampling rate. (Note that the expression itself appears inverted: 5 ms of audio at 44100 Hz should be roughly fS * frameMsec / 1000, i.e. about 220 samples, whereas fS / (frameMsec * 1000) yields 8.) The frequency resolution with such a small frame size can be computed to be
44100 / 8
or 5512.5 Hz, a rather poor resolution. Correspondingly, the observed frequencies will always be one of 0, 5512.5, 11025, 16537.5 or 22050 Hz.
To get a higher resolution you should increase the number of samples used for analysis, i.e. correct the frame-size computation and increase frameMsec if needed (as suggested by the comment "size of frame for analysis, you may want to play with this").
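For illustration, here is a minimal sketch of the corrected frame-size computation and the bin resolution it yields; the 20 ms frame length is just an example value, not something taken from the question.
#include <stdio.h>

int main(void) {
    float fS = 44100.0f;      // sample rate in Hz
    float frameMsec = 20.0f;  // analysis frame length in milliseconds (example value)
    // samples per frame: fS * frameMsec / 1000, not fS / (frameMsec * 1000)
    int frameSamples = (int)(fS * frameMsec / 1000.0f);
    // the frequency resolution of an N-point FFT is fS / N
    float binResolution = fS / frameSamples;
    printf("frameSamples = %d, bin resolution = %.2f Hz\n", frameSamples, binResolution); // 882, 50.00 Hz
    return 0;
}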

Related

Efficiently generate a sine wave in iOS

What is the most efficient way of generating a sine wave for a device running iOS? For the purposes of the exercise, assume a frequency of 440 Hz, a sampling rate of 44100 Hz and 1024 samples.
A vanilla C implementation looks something like this:
#include <math.h>

#define SAMPLES 1024
#define TWO_PI (3.14159 * 2)
#define FREQUENCY 440
#define SAMPLING_RATE 44100

int main(int argc, const char * argv[]) {
    float samples[SAMPLES];
    float phaseIncrement = TWO_PI * FREQUENCY / SAMPLING_RATE;
    float currentPhase = 0.0;
    for (int i = 0; i < SAMPLES; i++) {
        samples[i] = sin(currentPhase);
        currentPhase += phaseIncrement;
    }
    return 0;
}
To take advantage of the Accelerate Framework and the vecLib vvsinf function the loop can be changed to only do the addition.
#include <Accelerate/Accelerate.h>

#define SAMPLES 1024
#define TWO_PI (3.14159 * 2)
#define FREQUENCY 440
#define SAMPLING_RATE 44100

int main(int argc, const char * argv[]) {
    float samples[SAMPLES] __attribute__ ((aligned));
    float results[SAMPLES] __attribute__ ((aligned));
    float phaseIncrement = TWO_PI * FREQUENCY / SAMPLING_RATE;
    float currentPhase = 0.0;
    for (int i = 0; i < SAMPLES; i++) {
        samples[i] = currentPhase;
        currentPhase += phaseIncrement;
    }
    // vvsinf takes the element count by pointer
    const int sampleCount = SAMPLES;
    vvsinf(results, samples, &sampleCount);
    return 0;
}
But is just applying the vvsinf function as far as I should go in terms of efficiency?
I don't really understand the Accelerate framework well enough to know whether I can also replace the loop. Is there a vecLib or vDSP function I can use?
For that matter, is it possible to use an entirely different algorithm to fill a buffer with a sine wave?
Given that you are computing the sine of a phase argument which increases in fixed increments, it is generally much faster to implement the signal generation with a recurrence equation as described in this "How to Create Oscillators in Software" post and some more in this "DSP Trick: Sinusoidal Tone Generator" post, both on dspguru:
y[n] = 2*cos(w)*y[n-1] - y[n-2]
Note that this recurrence equation can be subject to numerical roundoff error accumulation, so you should avoid computing too many samples at a time (your selection of SAMPLES == 1024 should be fine). The recurrence equation can be used once you have obtained the first two values y[0] and y[1] (the initial conditions). Since you are generating with an initial phase of 0, those are simply:
samples[0] = 0;
samples[1] = sin(phaseIncrement);
or more generally with an arbitrary initial phase (particularly useful to reinitialize the recurrence equation every so often to avoid the numerical roundoff error accumulation I mentioned earlier):
samples[0] = sin(initialPhase);
samples[1] = sin(initialPhase+phaseIncrement);
The recurrence equation can then be implemented directly with:
float scale = 2*cos(phaseIncrement);
// initialize first 2 samples for the 0 initial phase case
samples[0] = 0;
samples[1] = sin(phaseIncrement);
for (int i = 2; i < SAMPLES; i ++){
samples[i] = scale * samples[i-1] - samples[i-2];
}
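Since reinitializing the recurrence every so often is what keeps the roundoff error bounded, here is a minimal sketch of block-wise generation that re-seeds from sin() at each block boundary; the block size and the generateBlock/generateTone names are my own choices, not part of the answer above.
#include <math.h>

#define BLOCK_SIZE 1024

static void generateBlock(float *out, int count, float phaseIncrement, float initialPhase) {
    float scale = 2.0f * cosf(phaseIncrement);
    out[0] = sinf(initialPhase);
    if (count < 2) return;
    out[1] = sinf(initialPhase + phaseIncrement);
    for (int i = 2; i < count; i++) {
        out[i] = scale * out[i - 1] - out[i - 2];
    }
}

// fill a longer buffer block by block, restarting the recurrence from the exact phase at each boundary
void generateTone(float *out, int totalSamples, float phaseIncrement) {
    float phase = 0.0f;
    for (int start = 0; start < totalSamples; start += BLOCK_SIZE) {
        int count = totalSamples - start;
        if (count > BLOCK_SIZE) count = BLOCK_SIZE;
        generateBlock(out + start, count, phaseIncrement, phase);
        phase += count * phaseIncrement;           // advance to the next block's starting phase
        phase = fmodf(phase, 2.0f * (float)M_PI);  // keep the sinf() argument small
    }
}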
Note that this implementation could be vectorized by computing multiple tones (each with the same frequency, but with larger phase increments between samples) with appropriate relative phase shifts, then interleaving the results to obtain the original tone (e.g. computing sin(4*w*n), sin(4*w*n+w), sin(4*w*n+2*w) and sin(4*w*n+3*w)). This would however make the implementation a lot more obscure, for a relatively small gain.
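As a rough illustration of that interleaving idea (my own sketch, in plain C rather than actual SIMD code): the four sub-sequences share the recurrence coefficient 2*cos(4*w) and are advanced in lock-step, which is the part that would map to vector instructions.
#include <math.h>

#define LANES 4

void generateToneInterleaved(float *out, int totalSamples, float w /* phase increment per sample */) {
    const float scale = 2.0f * cosf(LANES * w);  // coefficient of the 4x-decimated recurrence
    float prev2[LANES], prev1[LANES];
    for (int m = 0; m < LANES; m++) {
        prev2[m] = sinf(m * w);              // y_m[0] = sin(m*w)
        prev1[m] = sinf(m * w + LANES * w);  // y_m[1] = sin((m+4)*w)
    }
    int n = 0;
    // emit the first two interleaved "rows" directly from the initial conditions
    for (int k = 0; k < 2 * LANES && n < totalSamples; k++, n++) {
        out[n] = (k < LANES) ? prev2[k] : prev1[k - LANES];
    }
    // then advance all four lanes together; each pass of the inner loop produces 4 output samples
    while (n < totalSamples) {
        for (int m = 0; m < LANES && n < totalSamples; m++, n++) {
            float cur = scale * prev1[m] - prev2[m];
            prev2[m] = prev1[m];
            prev1[m] = cur;
            out[n] = cur;
        }
    }
}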
Alternatively, the equation can be implemented by making use of vDSP_deq22:
// setup dummy array which will hold zeros as input
float nullInput[SAMPLES];
memset(nullInput, 0, SAMPLES * sizeof(float));
// setup filter coefficients
float coefficients[5];
coefficients[0] = 0;
coefficients[1] = 0;
coefficients[2] = 0;
coefficients[3] = -2*cos(phaseIncrement);
coefficients[4] = 1.0;
// initialize first 2 samples for the 0 initial phase case
samples[0] = 0;
samples[1] = sin(phaseIncrement);
// with a zero input, the difference equation reduces to samples[n] = 2*cos(w)*samples[n-1] - samples[n-2]
vDSP_deq22(nullInput, 1, coefficients, samples, 1, SAMPLES-2);
If efficiency is required, you could pre-load a 440 Hz sine waveform look-up table (44100 / 440 samples long) and loop around it without further mapping, or pre-load a 1 Hz sine waveform look-up table (44100 samples, one full cycle) and loop around it by skipping samples to reach 440 Hz, just as you did by incrementing a phase counter. Using look-up tables should be faster than computing sin().
Method A (using a 440 Hz sine waveform):
#define SAMPLES 1024
#define FREQUENCY 440
#define SAMPLING_RATE 44100
#define WAVEFORM_LENGTH (SAMPLING_RATE / FREQUENCY)

int main(int argc, const char * argv[]) {
    // one period of a 440 Hz sine, filled by a helper such as LoadSinWaveForm
    float waveform[WAVEFORM_LENGTH];
    LoadSinWaveForm(waveform);

    float samples[SAMPLES] __attribute__ ((aligned));
    // the table already contains sine values, so no further mapping is needed
    for (int i = 0; i < SAMPLES; i++) {
        samples[i] = waveform[i % WAVEFORM_LENGTH];
    }
    return 0;
}
Method B (using a 1 Hz sine waveform):
#define SAMPLES 1024
#define FREQUENCY 440
#define SAMPLING_RATE 44100
#define WAVEFORM_LENGTH SAMPLING_RATE // since it's 1 Hz

int main(int argc, const char * argv[]) {
    // one full cycle of a 1 Hz sine, one entry per output sample, filled by a helper such as LoadSinWaveForm
    float waveform[WAVEFORM_LENGTH];
    LoadSinWaveForm(waveform);

    float samples[SAMPLES] __attribute__ ((aligned));
    // the phase counter is an index into the table: each output sample skips
    // FREQUENCY entries of the 1 Hz table to produce a 440 Hz tone
    float phaseIncrement = (float)FREQUENCY;
    float currentPhase = 0.0f;
    for (int i = 0; i < SAMPLES; i++) {
        samples[i] = waveform[(int)currentPhase % WAVEFORM_LENGTH];
        currentPhase += phaseIncrement;
    }
    return 0;
}
Please note that:
Method A is susceptible to frequency inaccuracy because it assumes the sampling rate is an exact integer multiple of the frequency, which is not always true. That means you may end up with 441 Hz instead of 440 Hz, or with 440 Hz plus a glitch at the loop point.
Method B is susceptible to aliasing as the frequency goes up and gets closer to the Nyquist frequency, but it's a good trade-off between performance, quality and memory consumption when synthesizing reasonably low frequencies such as the one in your example. One way to soften both trade-offs is sketched below.
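The following is my own sketch (not part of the original answer) of a common middle ground: a fractional phase accumulator with linear interpolation between table entries, which keeps the exact requested frequency while smoothing the stair-stepping of a plain indexed lookup. The table size is an arbitrary choice.
#include <math.h>

#define TABLE_SIZE 4096
#define SAMPLING_RATE 44100
#define FREQUENCY 440

static float table[TABLE_SIZE + 1]; // one extra entry so interpolation never wraps mid-pair

static void initTable(void) {
    for (int i = 0; i <= TABLE_SIZE; i++) {
        table[i] = sinf(2.0f * (float)M_PI * i / TABLE_SIZE);
    }
}

static void generate(float *out, int count) {
    float phase = 0.0f;                                               // position measured in table entries
    const float inc = (float)TABLE_SIZE * FREQUENCY / SAMPLING_RATE;  // entries to advance per output sample
    for (int i = 0; i < count; i++) {
        int idx = (int)phase;
        float frac = phase - idx;
        out[i] = table[idx] + frac * (table[idx + 1] - table[idx]);   // linear interpolation
        phase += inc;
        if (phase >= TABLE_SIZE) phase -= TABLE_SIZE;
    }
}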

Goertzel algorithm giving infinite result

I have a 20 Hz sine wave with an amplitude of 1 that I created using Audacity. It is only 500 ms long.
I am using the following algorithm to detect the frequency.
All I want is to detect whether the tone's amplitude passes a threshold and get a positive result for the 20 Hz component.
static float goertzel_mag(int numSamples, int TARGET_FREQUENCY, int SAMPLING_RATE, float* data)
{
    int k, i;
    float floatnumSamples;
    float omega, sine, cosine, coeff, q0, q1, q2, magnitude, real, imag;
    float scalingFactor = numSamples / 2.0;
    floatnumSamples = (float) numSamples;
    k = (int) (0.5 + ((floatnumSamples * TARGET_FREQUENCY) / SAMPLING_RATE));
    omega = (2.0 * M_PI * k) / floatnumSamples;
    sine = sin(omega);
    cosine = cos(omega);
    coeff = 2.0 * cosine;
    q0 = 0;
    q1 = 0;
    q2 = 0;
    for (i = 0; i < numSamples; i++)
    {
        q0 = coeff * q1 - q2 + data[i];
        q2 = q1;
        q1 = q0;
    }
    // calculate the real and imaginary results
    // scaling appropriately
    real = (q1 - q2 * cosine) / scalingFactor;
    imag = (q2 * sine) / scalingFactor;
    magnitude = sqrtf(real*real + imag*imag);
    return magnitude;
}
Call the function:
// If there's more packets, read them
inCompleteAQBuffer->mAudioDataByteSize = numBytes;
CheckError(AudioQueueEnqueueBuffer(inAQ,
                                   inCompleteAQBuffer,
                                   (sound->packetDescs ? nPackets : 0),
                                   sound->packetDescs),
           "couldn't enqueue buffer");
sound->packetPosition += nPackets;
NSLog(@"number of packets %i", nPackets);
float *data = (float*)inCompleteAQBuffer->mAudioData;
int nn = sizeof(data)/sizeof(float);
float gort = goertzel_mag(nn, 20, 44100, data);
NSLog(@"gort:%f", gort);
if (gort == INFINITY)
    NSLog(@"positive infinity");
break point inside of the function
output
number of packets 8192
gort:36029896530591744.000000
number of packets 8192
gort:inf
positive infinity
number of packets 5666
gort:inf
positive infinity
Why am I getting an inf result? I don't know how to interpret the return value; I understand the magnitude always has to be a positive value, but since I created the file with an amplitude of 1, shouldn't I be getting results between 0 and 1?
EDIT:
Audio info
afinfo 500ms.aiff
File: 500ms.aiff
File type ID: AIFF
Num Tracks: 1
----
Data format: 1 ch, 44100 Hz, 'lpcm' (0x0000000E) 16-bit big-endian signed integer
no channel layout.
estimated duration: 0.500000 sec
audio bytes: 44100
audio packets: 22050
bit rate: 705600 bits per second
packet size upper bound: 2
maximum packet size: 2
audio data file offset: 54
optimized
source bit depth: I16
I think one problem is with these lines:
float *data=(float*)inCompleteAQBuffer->mAudioData;
int nn = sizeof(data)/sizeof(float);
which I believe is intended to tell you the number of samples. I don't have the information or resources to reproduce your code, but can reproduce the bug with this:
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    float *data = malloc(sizeof(float) * 10);
    printf("Sizeof 'data' = %d\n", sizeof(data));
    return 0;
}
Program output
Sizeof 'data' = 4
which on my 32-bit compilation is the size of the array pointer, not the array. And using sizeof(*data) won't get you anywhere since that just tells you the size of the data type float, not the array.
There is no way you can ascertain the size of the array, or the number of elements, from its pointer, so my answer is, sadly, that you need more information, perhaps numBytes? Or numBytes/sizeof(float)? A minimal sketch of that fix follows.
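For concreteness, here is how the suggested fix might look at the call site, deriving the element count from the buffer's byte size instead of sizeof(pointer). Whether the elements are really floats is an assumption on my part; the afinfo output above reports 16-bit integer samples, in which case they would also need converting before being handed to goertzel_mag.
float *data = (float *)inCompleteAQBuffer->mAudioData;
int nn = (int)(inCompleteAQBuffer->mAudioDataByteSize / sizeof(float)); // element count, not pointer size
float gort = goertzel_mag(nn, 20, 44100, data);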

OpenCL: Access the proper index by using get_global_id()

Hi,
I am coding in OpenCL.
I am converting a C function that iterates over a 2D array starting from i=1 and j=1. Please find it below.
cv::Mat input; //Input :having some data in it ..
//Image input size is :input.rows=288 ,input.cols =640
cv::Mat output(input.rows-2,input.cols-2,CV_32F); //Output buffer
//Image output size is :output.rows=286 ,output.cols =638
This is the code which I want to port to OpenCL:
for (int i = 1; i < output.rows - 1; i++)
{
    for (int j = 1; j < output.cols - 1; j++)
    {
        float xVal = input.at<uchar>(i-1,j-1)-input.at<uchar>(i-1,j+1)+ 2*(input.at<uchar>(i,j-1)-input.at<uchar>(i,j+1))+input.at<uchar>(i+1,j-1) - input.at<uchar>(i+1,j+1);
        float yVal = input.at<uchar>(i-1,j-1) - input.at<uchar>(i+1,j-1)+ 2*(input.at<uchar>(i-1,j) - input.at<uchar>(i+1,j))+input.at<uchar>(i-1,j+1)-input.at<uchar>(i+1,j+1);
        output.at<float>(i-1,j-1) = xVal*xVal+yVal*yVal;
    }
}
...
Host code:
//Input Image size is :input.rows=288 ,input.cols =640
//Output Image size is :output.rows=286 ,output.cols =638
OclStr->global_work_size[0] = (input.cols);
OclStr->global_work_size[1] = (input.rows);
size_t outBufSize = (output.rows) * (output.cols) * 4; //4 as I am copying all 4 uchar values into one float variable space
cl_mem cl_input_buffer = clCreateBuffer(
    OclStr->context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
    (input.rows) * (input.cols),
    static_cast<void *>(input.data), &OclStr->returnstatus);
cl_mem cl_output_buffer = clCreateBuffer(
    OclStr->context, CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR,
    (output.rows) * (output.cols) * sizeof(float),
    static_cast<void *>(output.data), &OclStr->returnstatus);
OclStr->returnstatus = clSetKernelArg(OclStr->objkernel, 0, sizeof(cl_mem), (void *)&cl_input_buffer);
OclStr->returnstatus = clSetKernelArg(OclStr->objkernel, 1, sizeof(cl_mem), (void *)&cl_output_buffer);
OclStr->returnstatus = clEnqueueNDRangeKernel(
    OclStr->command_queue,
    OclStr->objkernel,
    2,
    NULL,
    OclStr->global_work_size,
    NULL,
    0,
    NULL,
    NULL
);
clEnqueueMapBuffer(OclStr->command_queue, cl_output_buffer, true, CL_MAP_READ, 0, outBufSize, 0, NULL, NULL, &OclStr->returnstatus);
Kernel code:
__kernel void Sobel_uchar (__global uchar *pSrc, __global float *pDstImage)
{
    const uint cols = get_global_id(0)+1;
    const uint rows = get_global_id(1)+1;
    const uint width = get_global_size(0);
    uchar Opsoble[8];
    Opsoble[0] = pSrc[(cols-1)+((rows-1)*width)];
    Opsoble[1] = pSrc[(cols+1)+((rows-1)*width)];
    Opsoble[2] = pSrc[(cols-1)+((rows+0)*width)];
    Opsoble[3] = pSrc[(cols+1)+((rows+0)*width)];
    Opsoble[4] = pSrc[(cols-1)+((rows+1)*width)];
    Opsoble[5] = pSrc[(cols+1)+((rows+1)*width)];
    Opsoble[6] = pSrc[(cols+0)+((rows-1)*width)];
    Opsoble[7] = pSrc[(cols+0)+((rows+1)*width)];
    float gx = Opsoble[0]-Opsoble[1]+2*(Opsoble[2]-Opsoble[3])+Opsoble[4]-Opsoble[5];
    float gy = Opsoble[0]-Opsoble[4]+2*(Opsoble[6]-Opsoble[7])+Opsoble[1]-Opsoble[5];
    pDstImage[(cols-1)+(rows-1)*width] = gx*gx + gy*gy;
}
Here I am not able to get the expected output.
I have a few questions:
My for loop starts from i=1 instead of zero. How can I get the proper index using get_global_id() in the x and y directions?
What is going wrong in my kernel code above? :(
I suspect there is a problem with the buffer stride, but I haven't been able to pin it down despite spending a whole day on it. :(
I have observed that with the logic below, the output skips one or two frames after a sequence of about 7-8 frames.
I have added a screenshot of my output compared with the reference output.
My logic above is only doing a partial Sobel on my input. I changed the width as:
const uint width = get_global_size(0)+1;
Your suggestions are most welcome!
It looks like you may be fetching values in (y,x) format in your OpenCL version. Also, you need to add 1 to the global id to replicate your for loops starting from 1 rather than 0.
I don't know why there is an unused iOffset variable. Maybe your bug is related to this? I removed it in my version.
Does this kernel work better for you?
__kernel void simple(__global uchar *pSrc, __global float *pDstImage)
{
    const uint i = get_global_id(0) + 1;
    const uint j = get_global_id(1) + 1;
    const uint width = get_global_size(0) + 2;
    uchar Opsoble[8];
    Opsoble[0] = pSrc[(i-1) + (j - 1)*width];
    Opsoble[1] = pSrc[(i-1) + (j + 1)*width];
    Opsoble[2] = pSrc[i + (j-1)*width];
    Opsoble[3] = pSrc[i + (j+1)*width];
    Opsoble[4] = pSrc[(i+1) + (j - 1)*width];
    Opsoble[5] = pSrc[(i+1) + (j + 1)*width];
    Opsoble[6] = pSrc[(i-1) + (j)*width];
    Opsoble[7] = pSrc[(i+1) + (j)*width];
    float gx = Opsoble[0]-Opsoble[1]+2*(Opsoble[2]-Opsoble[3])+Opsoble[4]-Opsoble[5];
    float gy = Opsoble[0]-Opsoble[4]+2*(Opsoble[6]-Opsoble[7])+Opsoble[1]-Opsoble[5];
    pDstImage[(i-1) + (j-1)*width] = gx*gx + gy*gy;
}
I am a bit apprehensive about posting an answer suggesting optimizations to your kernel, seeing as the original output has not been reproduced exactly as of yet. There is a major improvement available to be made for problems related to image processing/filtering.
Using local memory will help you out by reducing the number of global reads by a factor of eight, as well as grouping the global writes together for potential gains with the single write-per-pixel output.
The kernel below reads a block of up to 34x34 from pSrc, and outputs a 32x32(max) area of the pDstImage. I hope the comments in the code are enough to guide you in using the kernel. I have not been able to give this a complete test, so there could be changes required. Any comments are appreciated as well.
__kernel void sobel_uchar_wlocal (__global uchar *pSrc, __global float *pDstImage, const uint2 dimDstImage)
{
    //call this kernel with a 1-dimensional work group size: 32x1
    //calculates a 32x32 region of output with 32 work items
    const uint wid = get_local_id(0);
    const uint wid_1 = wid + 1; // corrected for the calculation step
    const uint2 gid = (uint2)(get_group_id(0), get_group_id(1));
    const uint localDim = get_local_size(0);
    const uint2 globalTopLeft = (uint2)(localDim * gid.x, localDim * gid.y); //position in pSrc to copy from/to
    const uint srcWidth = dimDstImage.x + 2; //the input image is two pixels wider than the output

    //dimLocalBuff shrinks at the right and bottom edges of the image, where the work group may run over the border
    uint2 dimLocalBuff = (uint2)(localDim, localDim);
    if (dimDstImage.x - globalTopLeft.x < dimLocalBuff.x) {
        dimLocalBuff.x = dimDstImage.x - globalTopLeft.x;
    }
    if (dimDstImage.y - globalTopLeft.y < dimLocalBuff.y) {
        dimLocalBuff.y = dimDstImage.y - globalTopLeft.y;
    }

    int i, j;
    //save region of data into local memory
    __local uchar srcBuff[34][34]; //34^2 uchar = 1156 bytes
    for (j = -1; j < (int)dimLocalBuff.y + 1; j++) {
        for (i = (int)wid - 1; i < (int)dimLocalBuff.x + 1; i += (int)localDim) {
            srcBuff[i + 1][j + 1] = pSrc[(globalTopLeft.y + j) * srcWidth + (globalTopLeft.x + i)];
        }
    }
    barrier(CLK_LOCAL_MEM_FENCE);

    //compute output and store locally
    __local float dstBuff[32][32]; //32^2 float = 4096 bytes
    if (wid_1 < dimLocalBuff.x) {
        for (i = 0; i < (int)dimLocalBuff.y; i++) {
            const uint i_1 = i + 1; // same +1 correction as wid_1, to match the srcBuff storage offset
            float gx = srcBuff[wid_1-1][i_1-1] - srcBuff[wid_1-1][i_1+1] + 2*(srcBuff[wid_1][i_1-1] - srcBuff[wid_1][i_1+1]) + srcBuff[wid_1+1][i_1-1] - srcBuff[wid_1+1][i_1+1];
            float gy = srcBuff[wid_1-1][i_1-1] - srcBuff[wid_1+1][i_1-1] + 2*(srcBuff[wid_1-1][i_1] - srcBuff[wid_1+1][i_1]) + srcBuff[wid_1-1][i_1+1] - srcBuff[wid_1+1][i_1+1];
            dstBuff[wid][i] = gx*gx + gy*gy;
        }
    }
    barrier(CLK_LOCAL_MEM_FENCE);

    //copy results to output
    for (j = 0; j < (int)dimLocalBuff.y; j++) {
        for (i = (int)wid; i < (int)dimLocalBuff.x; i += (int)localDim) {
            pDstImage[(globalTopLeft.y + j) * dimDstImage.x + (globalTopLeft.x + i)] = dstBuff[i][j];
        }
    }
}
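For completeness, here is my own sketch of how the host side might launch this kernel, reusing the names from the question's host code (OclStr, cl_input_buffer, cl_output_buffer, output.cols/rows); it is an assumption about the intended launch geometry, not code from the answer. Each 32x1 work group produces one 32x32 output tile, so the global size is rounded up to whole tiles.
size_t local_work_size[2]  = { 32, 1 };
size_t tilesX = ((size_t)output.cols + 31) / 32;   // tiles across the output
size_t tilesY = ((size_t)output.rows + 31) / 32;   // tiles down the output
size_t global_work_size[2] = { tilesX * 32, tilesY };

cl_uint2 dimDstImage;
dimDstImage.s[0] = (cl_uint)output.cols;
dimDstImage.s[1] = (cl_uint)output.rows;

clSetKernelArg(OclStr->objkernel, 0, sizeof(cl_mem), &cl_input_buffer);
clSetKernelArg(OclStr->objkernel, 1, sizeof(cl_mem), &cl_output_buffer);
clSetKernelArg(OclStr->objkernel, 2, sizeof(cl_uint2), &dimDstImage);

clEnqueueNDRangeKernel(OclStr->command_queue, OclStr->objkernel, 2, NULL,
                       global_work_size, local_work_size, 0, NULL, NULL);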

Why are my frequency values for iPhone FFT incorrect?

I've been trying to get exact frequencies using the FFT in Apple's Accelerate framework, but I'm having trouble working out why my values are off the true frequency.
I have been using this article http://www.dspdimension.com/admin/pitch-shifting-using-the-ft/ as the basis for my implementation, and after really struggling to get to the point I'm at now, I am totally stumped.
So far I've got audio in -> Hanning window -> FFT -> phase calculation -> weird final output. I'd think there must be a problem with my maths somewhere, but I'm really out of ideas by now.
The outputs are a lot lower than they should be, e.g., I input 440Hz and it prints out 190Hz, or I input 880Hz and it prints out 400Hz. For the most part these results are consistent, but not always, and there doesn't seem to be any common factor between anything either...
Here is my code:
enum
{
sampleRate = 44100,
osamp = 4,
samples = 4096,
range = samples * 7 / 16,
step = samples / osamp
};
NSMutableArray *fftResults;
static COMPLEX_SPLIT A;
static FFTSetup setupReal;
static uint32_t log2n, n, nOver2;
static int32_t stride;
static float expct = 2*M_PI*((double)step/(double)samples);
static float phase1[range];
static float phase2[range];
static float dPhase[range];
- (void)fftSetup
{
// Declaring integers
log2n = 12;
n = 1 << log2n;
stride = 1;
nOver2 = n / 2;
// Allocating memory for complex vectors
A.realp = (float *) malloc(nOver2 * sizeof(float));
A.imagp = (float *) malloc(nOver2 * sizeof(float));
// Allocating memory for FFT
setupReal = vDSP_create_fftsetup(log2n, FFT_RADIX2);
// Setting phase
memset(phase2, 0, range * sizeof(float));
}
// For each sample in buffer...
for (int bufferCount = 0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++)
{
// Declaring samples from audio buffer list
SInt16 *samples = (SInt16*)audioBufferList.mBuffers[bufferCount].mData;
// Creating Hann window function
for (int i = 0; i < nOver2; i++)
{
double hannMultiplier = 0.5 * (1 - cos((2 * M_PI * i) / (nOver2 - 1)));
// Applying window to each sample
A.realp[i] = hannMultiplier * samples[i];
A.imagp[i] = 0;
}
// Applying FFT
vDSP_fft_zrip(setupReal, &A, stride, log2n, FFT_FORWARD);
// Detecting phase
vDSP_zvphas(&A, stride, phase1, stride, range);
// Calculating phase difference
vDSP_vsub(phase2, stride, phase1, stride, dPhase, stride, range);
// Saving phase
memcpy(phase2, phase1, range * sizeof(float));
// Extracting DSP outputs
for (size_t j = 0; j < nOver2; j++)
{
NSNumber *realNumbers = [NSNumber numberWithFloat:A.realp[j]];
NSNumber *imagNumbers = [NSNumber numberWithFloat:A.imagp[j]];
[real addObject:realNumbers];
[imag addObject:imagNumbers];
}
// Combining real and imaginary parts
[resultsCombined addObject:real];
[resultsCombined addObject:imag];
// Filling FFT output array
[fftResults addObject:resultsCombined];
}
}
int fftCount = [fftResults count];
NSLog(#"FFT Count: %d",fftCount);
// For each FFT...
for (int i = 0; i < fftCount; i++)
{
// Declaring integers for peak detection
float peak = 0;
float binNumber = 0;
// Declaring integers for phase detection
float deltaPhase;
static float trueFrequency[range];
for (size_t j = 1; j < range; j++)
{
// Calculating bin magnitiude
float realVal = [[[[fftResults objectAtIndex:i] objectAtIndex:0] objectAtIndex:j] floatValue];
float imagVal = [[[[fftResults objectAtIndex:i] objectAtIndex:1] objectAtIndex:j] floatValue];
float magnitude = sqrtf(realVal*realVal + imagVal*imagVal);
// Peak detection
if (magnitude > peak)
{
peak = magnitude;
binNumber = (float)j;
}
// Getting phase difference
deltaPhase = dPhase[j];
// Subtract expected difference
deltaPhase -= (float)j*expct;
// Map phase difference into +/- pi interval
int qpd = deltaPhase / M_PI;
if (qpd >= 0)
qpd += qpd&1;
else
qpd -= qpd&1;
deltaPhase -= M_PI * (float)qpd;
// Getting bin deviation from +/i interval
float deltaFrequency = osamp * deltaPhase / (2 * M_PI);
// Calculating true frequency at the j-th partial
trueFrequency[j] = (j * (sampleRate/samples)) + (deltaFrequency * (sampleRate/samples));
}
UInt32 mag;
mag = binNumber;
// Extracting frequency at bin peak
float f = trueFrequency[mag];
NSLog(#"True frequency = %fHz", f);
float b = roundf(binNumber*(sampleRate/nOver2));
NSLog(#" Bin frequency = %fHz", b);
}
Note that the expected phase difference (even for a bin-centered frequency) depends on both the window offset or overlap of the FFT pairs, and the bin number or frequency of the FFT result. e.g. If you offset the windows by very little (1 sample), then the unwrapped phase difference between 2 FFTs will be smaller than with a larger offset. At the same offset, if the frequency is higher, the expected phase difference between the same bin of two FFTs will be greater (or it will wrap more).
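To make that dependence explicit, here is a small sketch of my own (using the same conventions as the question, where the hop is step and the FFT length is samples): the expected per-bin phase advance grows with both the hop size and the bin number, and the frequency estimate follows from the wrapped residual.
#include <math.h>

// estimated frequency in Hz for bin k, given its phase in two FFTs spaced `hop` samples apart
float binFrequency(float phasePrev, float phaseCur, int k, int hop, int fftSize, float sampleRate)
{
    // expected phase advance of a bin-centred tone: 2*pi * hop * k / fftSize
    float expected = 2.0f * (float)M_PI * (float)hop * (float)k / (float)fftSize;
    // deviation from the bin-centred case, wrapped into [-pi, pi]
    float delta = (phaseCur - phasePrev) - expected;
    delta -= 2.0f * (float)M_PI * roundf(delta / (2.0f * (float)M_PI));
    // true frequency = bin centre plus the deviation converted back to Hz
    return ((float)k + delta * (float)fftSize / (2.0f * (float)M_PI * (float)hop)) * sampleRate / (float)fftSize;
}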

Cepstrum and Formant Tracking Using Apple Accelerate Framework

I've been using this web page as a guideline for formant tracking of speech...
http://iitg.vlab.co.in/?sub=59&brch=164&sim=615&cnt=1
It all seems to be going pretty well, except for the last step, which is converting the cepstrum into a smoothed representation for simple peak picking for the formant tracking. The spectrograph looks good, and the cepstrograph (can I say that? :P) also looks good (from what I can tell), but at the final stage the results (the smoothed formant representation) are not what I expected.
I uploaded a sample of each stage as visual images to...
http://imgur.com/a/62duS
This sample is for the speech of the sound 'i' as in 'beed'. According to this site...
http://home.cc.umanitoba.ca/~robh/howto.html#formants
the first formant should come in around 500 Hz, and the second and third around 2200 Hz and 2800 Hz respectively. The spectrograph shows something very similar, but at the last stage I am getting results similar to...
F1 - 891
F2 - 1550
F3 - 2329
Any insight would be greatly appreciated. I've been going round in circles on this for some time. My code looks as follows...
// set up fft parameters
UInt32 log2n = 9;
UInt32 n = 512;
UInt32 window = n;
UInt32 halfN = n/2;
UInt32 stride = 1;
FFTSetup setupReal = [appDelegate getFftSetup];
int stepSize = (hpBuffer.sampleCount-window) / quantizeCount;
// calculate volume from raw samples, because it seems more reliable that fft
UInt32 volumeWindow = 128;
volumeBuffer = malloc(sizeof(float)*quantizeCount);
int windowPos = 0;
for (int i=0; i < quantizeCount; i++) {
windowPos += stepSize;
float total = 0.0f;
float max = 0.0f;
for (int p=windowPos; p < windowPos+volumeWindow; p++) {
total += sampleBuffer.buffer[p];
if (sampleBuffer.buffer[p] > max)
max = sampleBuffer.buffer[p];
}
volumeBuffer[i] = max;
}
// normalize volumebuffer
[FloatAudioBuffer normalizePositiveBuffer:volumeBuffer ofSize:quantizeCount];
// allocate memory for complex array
COMPLEX_SPLIT complexArray;
complexArray.realp = (float*)malloc(4096*sizeof(float));
complexArray.imagp = (float*)malloc(4096*sizeof(float));
// allocate some space for temporary hamming buffer
float *hamBuffer = malloc(n*sizeof(float));
// create spectrum and feature buffer
spectrumBuffer = malloc(sizeof(float)*halfN*quantizeCount);
formantBuffer = malloc(sizeof(float)*4096*quantizeCount);
cepstrumBuffer = malloc(sizeof(float)*halfN*quantizeCount);
lowCepstrumBuffer = malloc(sizeof(float)*featureCount*quantizeCount);
featureBuffer = malloc(sizeof(float)*featureCount*quantizeCount);
// create data point for each quantize segment
float TWOPI = 2.0f * M_PI;
for (int s=0; s < quantizeCount; s++) {
// copy buffer data into a seperate array and apply hamming window
int offset = (int)(s * stepSize);
for (int i=0; i < n; i++)
hamBuffer[i] = hpBuffer.buffer[offset+i] * ((1.0f-0.46f) - 0.46f*cos(TWOPI*i/((float)n-1.0f)));
// configure float array into acceptable input array format (interleaved)
vDSP_ctoz((COMPLEX*)hamBuffer, 2, &complexArray, 1, halfN);
// run FFT
vDSP_fft_zrip(setupReal, &complexArray, stride, log2n, FFT_FORWARD);
// Absolute square (equivalent to mag^2)
complexArray.imagp[0] = 0.0f;
vDSP_zvmags(&complexArray, 1, complexArray.realp, 1, halfN);
bzero(complexArray.imagp, (halfN) * sizeof(float));
// scale
float scale = 1.0f / (2.0f*(float)n);
vDSP_vsmul(complexArray.realp, 1, &scale, complexArray.realp, 1, halfN);
// get log of absolute values for passing to inverse FFT for cepstrum
for (int i=0; i < halfN; i++)
complexArray.realp[i] = logf(sqrtf(complexArray.realp[i]));
// save this into spectrum buffer
memcpy(&spectrumBuffer[s*halfN], complexArray.realp, halfN*sizeof(float));
// convert spectrum to interleaved ready for inverse fft
vDSP_ctoz((COMPLEX*)&spectrumBuffer[s*halfN], 2, &complexArray, 1, halfN/2);
// create cepstrum
vDSP_fft_zrip(setupReal, &complexArray, stride, log2n-1, FFT_INVERSE);
//convert interleaved to real and straight into cepstrum buffer
vDSP_ztoc(&complexArray, 1, (COMPLEX*)&cepstrumBuffer[s*halfN], 2, halfN/2);
// copy first part of cepstrum into low cepstrum buffer
memcpy(&lowCepstrumBuffer[s*featureCount], &cepstrumBuffer[s*halfN], featureCount*sizeof(float));
// make 8000 point array based on the first 15 values
float *tempArray = malloc(8192*sizeof(float));
for (int i=0; i < 8192; i++) {
if (i < 15)
tempArray[i] = cepstrumBuffer[s*halfN+i];
else
tempArray[i] = 0.0f;
}
vDSP_ctoz((COMPLEX*)tempArray, 2, &complexArray, 1, 4096);
float newLog2n = log2f(8192.0f);
complexArray.imagp[0] = 0.0f;
vDSP_fft_zrip(setupReal, &complexArray, stride, newLog2n, FFT_FORWARD);
vDSP_zvmags(&complexArray, 1, complexArray.realp, 1, 4096);
bzero(complexArray.imagp, (4096) * sizeof(float));
// scale
scale = 1.0f / (2.0f*(float)8192);
vDSP_vsmul(complexArray.realp, 1, &scale, complexArray.realp, 1, 4096);
// get magnitude
for (int i=0; i < 4096; i++)
complexArray.realp[i] = sqrtf(complexArray.realp[i]);
// write to formant buffer
memcpy(&formantBuffer[s*4096], complexArray.realp, 4096*sizeof(float));
// complex array now contains formant spectrum
// it's large, so get features here!
// try simple peak picking algorithm for first 3 formants
int formantIndex = 0;
float *peaks = malloc(6*sizeof(float));
for (int i=0; i < 6; i++)
peaks[i] = 0.0f;
for (int i=1; i < 4096-1 && formantIndex < 6; i++) {
if (complexArray.realp[i-1] < complexArray.realp[i] &&
complexArray.realp[i+1] < complexArray.realp[i])
peaks[formantIndex++] = i;
}
