When calling the predefined inline C++ function, I am getting this error:
Implicit declaration of function 'FillOutASBDForLPCM' is invalid in C99
What mistake have I made, and do I need to import or add anything in order to access this C++ function? I added the function call below, and whenever I try to compile I get the error.
void Convert()
{
    //File URLs
    CFURLRef micUrl = CFURLCreateWithFileSystemPath(NULL, (CFStringRef)kMicFilePath, kCFURLPOSIXPathStyle, false);
    CFURLRef speakerUrl = CFURLCreateWithFileSystemPath(NULL, (CFStringRef)kSpeakerFilePath, kCFURLPOSIXPathStyle, false);
    CFURLRef mixUrl = CFURLCreateWithFileSystemPath(NULL, (CFStringRef)kResultFilePath, kCFURLPOSIXPathStyle, false);

    ExtAudioFileRef micFile = NULL;
    ExtAudioFileRef speakerFile = NULL;
    ExtAudioFileRef mixFile = NULL;

    //Opening input files (speaker and mic)
    ExtAudioFileOpenURL(micUrl, &micFile);
    ExtAudioFileOpenURL(speakerUrl, &speakerFile);

    //Reading input file audio format (mono LPCM)
    AudioStreamBasicDescription inputFormat, outputFormat;
    UInt32 descSize = sizeof(inputFormat);
    ExtAudioFileGetProperty(micFile, kExtAudioFileProperty_FileDataFormat, &descSize, &inputFormat);
    int sampleSize = inputFormat.mBytesPerFrame;

    //Filling input stream format for output file (stereo LPCM)
    FillOutASBDForLPCM(inputFormat, inputFormat.mSampleRate, 2, inputFormat.mBitsPerChannel, inputFormat.mBitsPerChannel, true, false, false);

    //Filling output file audio format (AAC)
    memset(&outputFormat, 0, sizeof(outputFormat));
    outputFormat.mFormatID = kAudioFormatMPEG4AAC;
    outputFormat.mSampleRate = 8000;
    outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
    outputFormat.mChannelsPerFrame = 2;

    //Opening output file
    ExtAudioFileCreateWithURL(mixUrl, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &mixFile);
    ExtAudioFileSetProperty(mixFile, kExtAudioFileProperty_ClientDataFormat, sizeof(inputFormat), &inputFormat);

    //Freeing URLs
    CFRelease(micUrl);
    CFRelease(speakerUrl);
    CFRelease(mixUrl);

    //Setting up audio buffers
    int bufferSizeInSamples = 64 * 1024;

    AudioBufferList micBuffer;
    micBuffer.mNumberBuffers = 1;
    micBuffer.mBuffers[0].mNumberChannels = 1;
    micBuffer.mBuffers[0].mDataByteSize = sampleSize * bufferSizeInSamples;
    micBuffer.mBuffers[0].mData = malloc(micBuffer.mBuffers[0].mDataByteSize);

    AudioBufferList speakerBuffer;
    speakerBuffer.mNumberBuffers = 1;
    speakerBuffer.mBuffers[0].mNumberChannels = 1;
    speakerBuffer.mBuffers[0].mDataByteSize = sampleSize * bufferSizeInSamples;
    speakerBuffer.mBuffers[0].mData = malloc(speakerBuffer.mBuffers[0].mDataByteSize);

    AudioBufferList mixBuffer;
    mixBuffer.mNumberBuffers = 1;
    mixBuffer.mBuffers[0].mNumberChannels = 2;
    mixBuffer.mBuffers[0].mDataByteSize = sampleSize * bufferSizeInSamples * 2;
    mixBuffer.mBuffers[0].mData = malloc(mixBuffer.mBuffers[0].mDataByteSize);

    //Converting
    while (true)
    {
        //Reading data from input files
        UInt32 framesToRead = bufferSizeInSamples;
        ExtAudioFileRead(micFile, &framesToRead, &micBuffer);
        ExtAudioFileRead(speakerFile, &framesToRead, &speakerBuffer);
        if (framesToRead == 0)
        {
            break;
        }

        //Building interleaved stereo buffer - left channel is mic, right - speaker
        for (int i = 0; i < framesToRead; i++)
        {
            memcpy((char*)mixBuffer.mBuffers[0].mData + i * sampleSize * 2, (char*)micBuffer.mBuffers[0].mData + i * sampleSize, sampleSize);
            memcpy((char*)mixBuffer.mBuffers[0].mData + i * sampleSize * 2 + sampleSize, (char*)speakerBuffer.mBuffers[0].mData + i * sampleSize, sampleSize);
        }

        //Writing to output file - LPCM will be converted to AAC
        ExtAudioFileWrite(mixFile, framesToRead, &mixBuffer);
    }

    //Closing files
    ExtAudioFileDispose(micFile);
    ExtAudioFileDispose(speakerFile);
    ExtAudioFileDispose(mixFile);

    //Freeing audio buffers
    free(micBuffer.mBuffers[0].mData);
    free(speakerBuffer.mBuffers[0].mData);
    free(mixBuffer.mBuffers[0].mData);
}
Provided you have this function available by including the CoreAudio framework header:
#import <CoreAudio/CoreAudio.h>
you merely need to inform the compiler that the code contains C++. Since FillOutASBDForLPCM is an inline function that is only defined when compiling as C++, and your file mixes in Objective-C, you want to give it the .mm extension OR specify the language explicitly by compiling the file with the -ObjC++ flag (in addition to the other parameters):
% clang -ObjC++ MyFile.m
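Alternatively, if you would rather keep the file as plain Objective-C, you can fill in the AudioStreamBasicDescription by hand instead of calling the C++-only helper. The sketch below is a hypothetical stand-in (my own helper name, not an Apple API), assuming packed, interleaved linear PCM where the valid and total bits per channel are equal:
// Hypothetical plain-C replacement for the C++-only FillOutASBDForLPCM helper.
// Assumes packed, interleaved linear PCM with validBits == totalBits.
static void MyFillASBDForLPCM(AudioStreamBasicDescription *asbd,
                              Float64 sampleRate,
                              UInt32 channelsPerFrame,
                              UInt32 bitsPerChannel,
                              Boolean isFloat,
                              Boolean isBigEndian)
{
    memset(asbd, 0, sizeof(*asbd));
    asbd->mFormatID         = kAudioFormatLinearPCM;
    asbd->mSampleRate       = sampleRate;
    asbd->mChannelsPerFrame = channelsPerFrame;
    asbd->mBitsPerChannel   = bitsPerChannel;
    asbd->mFramesPerPacket  = 1;
    asbd->mBytesPerFrame    = channelsPerFrame * (bitsPerChannel / 8);
    asbd->mBytesPerPacket   = asbd->mBytesPerFrame;
    asbd->mFormatFlags      = (isFloat ? kAudioFormatFlagIsFloat : kAudioFormatFlagIsSignedInteger)
                            | (isBigEndian ? kAudioFormatFlagIsBigEndian : 0)
                            | kAudioFormatFlagIsPacked;
}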
I'm writing a stereo wave file with AudioFileWriteBytes (CoreAudio / iOS) and the only way I can get it to work is by calling it for each sample on each channel.
The following code works:
// Prepare the format AudioStreamBasicDescription
AudioStreamBasicDescription asbd = {
    .mSampleRate = session.samplerate,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
    .mChannelsPerFrame = 2,
    .mBitsPerChannel = 16,
    .mFramesPerPacket = 1, // Always 1 for uncompressed formats
    .mBytesPerPacket = 4,  // 16 bits for 2 channels = 4 bytes
    .mBytesPerFrame = 4    // 16 bits for 2 channels = 4 bytes
};

// Set up the file
AudioFileID audioFile;
OSStatus audioError = noErr;
audioError = AudioFileCreateWithURL((__bridge CFURLRef)fileURL, kAudioFileAIFFType, &asbd, kAudioFileFlags_EraseFile, &audioFile);
if (audioError != noErr) {
    NSLog(@"Error creating file");
    return;
}

// Write samples
UInt64 currentFrame = 0;
while (currentFrame < totalLengthInFrames) {
    UInt64 numberOfFramesToWrite = totalLengthInFrames - currentFrame;
    if (numberOfFramesToWrite > 2048) {
        numberOfFramesToWrite = 2048;
    }
    UInt32 sampleByteCount = sizeof(int16_t);
    UInt32 bytesToWrite = (UInt32)numberOfFramesToWrite * sampleByteCount;
    int16_t *sampleBufferLeft = (int16_t *)malloc(bytesToWrite);
    int16_t *sampleBufferRight = (int16_t *)malloc(bytesToWrite);

    // Some magic to fill the buffers

    for (int j = 0; j < numberOfFramesToWrite; j++) {
        int16_t left = CFSwapInt16HostToBig(sampleBufferLeft[j]);
        int16_t right = CFSwapInt16HostToBig(sampleBufferRight[j]);
        audioError = AudioFileWriteBytes(audioFile, false, (currentFrame + j) * 4, &sampleByteCount, &left);
        assert(audioError == noErr);
        audioError = AudioFileWriteBytes(audioFile, false, (currentFrame + j) * 4 + 2, &sampleByteCount, &right);
        assert(audioError == noErr);
    }

    free(sampleBufferLeft);
    free(sampleBufferRight);
    currentFrame += numberOfFramesToWrite;
}
However, it is (obviously) very slow and inefficient.
I can't find anything on how to use it with a big buffer so that I can write more than a single sample while also writing 2 channels.
I tried making a buffer going LRLRLRLR (left / right), and then write that with just one AudioFileWriteBytes call. I expected that to work, but it produced a file filled with noise.
This is the code:
UInt64 currentFrame = 0;
UInt64 bytePos = 0;
while (currentFrame < totalLengthInFrames) {
    UInt64 numberOfFramesToWrite = totalLengthInFrames - currentFrame;
    if (numberOfFramesToWrite > 2048) {
        numberOfFramesToWrite = 2048;
    }
    UInt32 sampleByteCount = sizeof(int16_t);
    UInt32 bytesInBuffer = (UInt32)numberOfFramesToWrite * sampleByteCount;
    UInt32 bytesInOutputBuffer = (UInt32)numberOfFramesToWrite * sampleByteCount * 2;
    int16_t *sampleBufferLeft = (int16_t *)malloc(bytesInBuffer);
    int16_t *sampleBufferRight = (int16_t *)malloc(bytesInBuffer);
    int16_t *outputBuffer = (int16_t *)malloc(bytesInOutputBuffer);

    // Some magic to fill the buffers

    for (int j = 0; j < numberOfFramesToWrite; j++) {
        int16_t left = CFSwapInt16HostToBig(sampleBufferLeft[j]);
        int16_t right = CFSwapInt16HostToBig(sampleBufferRight[j]);
        outputBuffer[(j * 2)] = left;
        outputBuffer[(j * 2) + 1] = right;
    }

    audioError = AudioFileWriteBytes(audioFile, false, bytePos, &bytesInOutputBuffer, &outputBuffer);
    assert(audioError == noErr);

    free(sampleBufferLeft);
    free(sampleBufferRight);
    free(outputBuffer);

    bytePos += bytesInOutputBuffer;
    currentFrame += numberOfFramesToWrite;
}
I also tried to just write the buffers at once (2048*L, 2048*R, etc.) which I did not expect to work, and it didn't.
How do I speed this up AND get a working wave file?
I tried making a buffer going LRLRLRLR (left / right), and then write that with just one AudioFileWriteBytes call.
This is the correct approach if using (the rather difficult) Audio File Services.
If possible, instead of the very low-level Audio File Services, use Extended Audio File Services. It is a wrapper around Audio File Services that has built-in format converters. Or better yet, use AVAudioFile; it is a wrapper around Extended Audio File Services that covers most common use cases.
If you are set on using Audio File Services, you'll have to interleave the audio manually like you had tried. Maybe show the code where you attempted this.
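For reference, here is a minimal, untested sketch of the single-call approach: writing one block of interleaved 16-bit stereo frames with one AudioFileWriteBytes call, reusing the audioFile, bytePos, numberOfFramesToWrite and outputBuffer names from the question. Note that the buffer pointer itself is passed, not its address:
// Sketch: write one interleaved LRLR... block in a single call.
UInt32 bytesToWrite = (UInt32)(numberOfFramesToWrite * 2 * sizeof(int16_t)); // 2 channels
OSStatus writeStatus = AudioFileWriteBytes(audioFile,
                                           false,          // don't cache
                                           bytePos,        // byte offset into the audio data
                                           &bytesToWrite,  // in: bytes to write, out: bytes written
                                           outputBuffer);  // the interleaved buffer itself
assert(writeStatus == noErr);
bytePos += bytesToWrite;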
First, export an m4a file from the iPod Library.
Second, convert it to AAC using Extended Audio File Services.
Third, transport the music file to another iPhone.
Fourth, stream the audio using Audio File Stream Services.
This usually works, but when the audio file's play time is longer than about 5 minutes, it fails.
However, when I added 15-minute mp3 and aac audio files directly to my project and streamed them, it worked.
So I think the cause is the conversion step. Is something wrong with my code?
Could you give me some advice?
-(void)convertFrom:(NSURL*)fromURL
             toURL:(NSURL*)toURL{
    ExtAudioFileRef infile, outfile;
    OSStatus err;
    // Create the ExtAudioFile
    err = ExtAudioFileOpenURL((__bridge CFURLRef)fromURL, &infile);
    checkError(err, "ExtAudioFileOpenURL");
    AudioStreamBasicDescription inputFormat;
    AudioStreamBasicDescription outputFormat;
    AVAudioSession *audiosession = [AVAudioSession sharedInstance];
    [audiosession setActive:YES error:nil];
    [audiosession setCategory:AVAudioSessionCategoryAudioProcessing error:nil];
    // Target format for the conversion (AAC)
    memset(&outputFormat, 0, sizeof(AudioStreamBasicDescription));
    outputFormat.mSampleRate = 44100.0;
    outputFormat.mFormatID = kAudioFormatMPEG4AAC; // AAC
    outputFormat.mChannelsPerFrame = 1;
    UInt32 size = sizeof(AudioStreamBasicDescription);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                           0, NULL,
                           &size,
                           &outputFormat); // fill in the rest of the output format
    err = ExtAudioFileGetProperty(infile, // get the source file's format
                                  kExtAudioFileProperty_FileDataFormat,
                                  &size,
                                  &inputFormat);
    checkError(err, "ExtAudioFileGetProperty");
    // If the source is not linear PCM, read it as linear PCM
    if (inputFormat.mFormatID != kAudioFormatLinearPCM) {
        // Intermediate format (linear PCM, little endian)
        AudioStreamBasicDescription linearPCMFormat;
        linearPCMFormat.mSampleRate = outputFormat.mSampleRate;
        linearPCMFormat.mFormatID = kAudioFormatLinearPCM;
        linearPCMFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        linearPCMFormat.mFramesPerPacket = 1;
        linearPCMFormat.mChannelsPerFrame = outputFormat.mChannelsPerFrame;
        linearPCMFormat.mBitsPerChannel = 16;
        linearPCMFormat.mBytesPerPacket = 2 * outputFormat.mChannelsPerFrame;
        linearPCMFormat.mBytesPerFrame = 2 * outputFormat.mChannelsPerFrame;
        linearPCMFormat.mReserved = 0;
        // Use linear PCM as the read (intermediate) format
        inputFormat = linearPCMFormat;
    }
    // Set the client (read) format;
    // data is always read out as linear PCM
    err = ExtAudioFileSetProperty(infile,
                                  kExtAudioFileProperty_ClientDataFormat,
                                  sizeof(AudioStreamBasicDescription),
                                  &inputFormat);
    checkError(err, "ExtAudioFileSetProperty");
    err = ExtAudioFileCreateWithURL((__bridge CFURLRef)toURL,
                                    kAudioFileM4AType, // AAC
                                    &outputFormat,
                                    NULL,
                                    kAudioFileFlags_EraseFile,
                                    &outfile);
    checkError(err, "ExtAudioFileCreateWithURL");
    // Tell the output file that the data we write to it is linear PCM
    err = ExtAudioFileSetProperty(outfile,
                                  kExtAudioFileProperty_ClientDataFormat,
                                  sizeof(AudioStreamBasicDescription),
                                  &inputFormat);
    checkError(err, "kExtAudioFileProperty_ClientDataFormat");
    // Move the read position to 0
    err = ExtAudioFileSeek(infile, 0);
    checkError(err, "ExtAudioFileSeek");
    // Number of frames to read at a time
    UInt32 readFrameSize = 1024;
    // Allocate the read buffer
    UInt32 bufferByteSize = sizeof(char) * readFrameSize * inputFormat.mBytesPerPacket;
    char *buffer = malloc(bufferByteSize);
    // Create the AudioBufferList
    AudioBufferList audioBufferList;
    audioBufferList.mNumberBuffers = 1;
    audioBufferList.mBuffers[0].mNumberChannels = inputFormat.mChannelsPerFrame;
    audioBufferList.mBuffers[0].mDataByteSize = bufferByteSize;
    audioBufferList.mBuffers[0].mData = buffer;
    while (1) {
        UInt32 numPacketToRead = readFrameSize;
        err = ExtAudioFileRead(infile, &numPacketToRead, &audioBufferList);
        checkError(err, "ExtAudioFileRead");
        // Stop when there are no more frames to read
        if (numPacketToRead == 0) {
            NSLog(@"Conversion complete");
            break;
        }
        err = ExtAudioFileWrite(outfile,
                                numPacketToRead,
                                &audioBufferList);
        checkError(err, "ExtAudioFileWrite");
    }
    ExtAudioFileDispose(infile);
    ExtAudioFileDispose(outfile);
    free(buffer);
}
void propertyListenerProc(
                          void *inClientData,
                          AudioFileStreamID inAudioFileStream,
                          AudioFileStreamPropertyID inPropertyID,
                          UInt32 *ioFlags
                          ){
    StreamInfo *streamInfo = (StreamInfo *)inClientData;
    OSStatus err;
    NSLog(@"property %u", (unsigned int)inPropertyID);
I have successfully generated a tone using iOS with the following code. After that, I want to save the generated tone to an audio file. How can I do this?
- (void)createToneUnit
{
    // Configure the search parameters to find the default playback output unit
    // (called the kAudioUnitSubType_RemoteIO on iOS but
    // kAudioUnitSubType_DefaultOutput on Mac OS X)
    AudioComponentDescription defaultOutputDescription;
    defaultOutputDescription.componentType = kAudioUnitType_Output;
    defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    defaultOutputDescription.componentFlags = 0;
    defaultOutputDescription.componentFlagsMask = 0;

    // Get the default playback output unit
    AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
    NSAssert(defaultOutput, @"Can't find default output");

    // Create a new unit based on this that we'll use for output
    OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
    NSAssert1(toneUnit, @"Error creating unit: %ld", err);

    // Set our tone rendering function on the unit
    AURenderCallbackStruct input;
    input.inputProc = RenderTone;
    input.inputProcRefCon = (__bridge void *)(self);
    err = AudioUnitSetProperty(toneUnit,
                               kAudioUnitProperty_SetRenderCallback,
                               kAudioUnitScope_Input,
                               0,
                               &input,
                               sizeof(input));
    NSAssert1(err == noErr, @"Error setting callback: %ld", err);

    // Set the format to 32 bit, single channel, floating point, linear PCM
    const int four_bytes_per_float = 4;
    const int eight_bits_per_byte = 8;
    AudioStreamBasicDescription streamFormat;
    streamFormat.mSampleRate = sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = four_bytes_per_float;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = four_bytes_per_float;
    streamFormat.mChannelsPerFrame = 1;
    streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
    err = AudioUnitSetProperty(toneUnit,
                               kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input,
                               0,
                               &streamFormat,
                               sizeof(AudioStreamBasicDescription));
    NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
}
The render code:
OSStatus RenderTone(
    void *inRefCon,
    AudioUnitRenderActionFlags *ioActionFlags,
    const AudioTimeStamp *inTimeStamp,
    UInt32 inBusNumber,
    UInt32 inNumberFrames,
    AudioBufferList *ioData)
{
    // Fixed amplitude is good enough for our purposes
    const double amplitude = 0.25;

    // Get the tone parameters out of the view controller
    ViewController *viewController = (__bridge ViewController *)inRefCon;
    double theta = viewController->theta;
    double theta_increment = 2.0 * M_PI * viewController->frequency / viewController->sampleRate;

    // This is a mono tone generator so we only need the first buffer
    const int channel = 0;
    Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

    // Generate the samples
    for (UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        buffer[frame] = sin(theta) * amplitude;
        theta += theta_increment;
        if (theta > 2.0 * M_PI)
        {
            theta -= 2.0 * M_PI;
        }
    }

    // Store the theta back in the view controller
    viewController->theta = theta;

    return noErr;
}
And to play the generated tone, I just do:
OSErr err = AudioUnitInitialize(toneUnit);
err = AudioOutputUnitStart(toneUnit);
The Extended Audio File API provides an easy way to write audio files to disk.
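For example, a minimal, untested sketch that saves rendered Float32 mono samples to an .m4a/AAC file via ExtAudioFile could look like the following; fileURL, sampleRate, samples and frameCount are placeholders for your own data:
// Sketch: write rendered Float32 mono samples to an AAC (.m4a) file.
AudioStreamBasicDescription clientFormat = {0};
clientFormat.mSampleRate       = sampleRate;
clientFormat.mFormatID         = kAudioFormatLinearPCM;
clientFormat.mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
clientFormat.mChannelsPerFrame = 1;
clientFormat.mBitsPerChannel   = 32;
clientFormat.mFramesPerPacket  = 1;
clientFormat.mBytesPerFrame    = 4;
clientFormat.mBytesPerPacket   = 4;

AudioStreamBasicDescription fileFormat = {0};
fileFormat.mFormatID         = kAudioFormatMPEG4AAC;
fileFormat.mSampleRate       = sampleRate;
fileFormat.mChannelsPerFrame = 1;

ExtAudioFileRef outFile = NULL;
OSStatus status = ExtAudioFileCreateWithURL(fileURL, kAudioFileM4AType, &fileFormat,
                                            NULL, kAudioFileFlags_EraseFile, &outFile);
if (status == noErr) {
    // Hand the file LPCM; ExtAudioFile converts to AAC internally.
    status = ExtAudioFileSetProperty(outFile, kExtAudioFileProperty_ClientDataFormat,
                                     sizeof(clientFormat), &clientFormat);
    AudioBufferList abl;
    abl.mNumberBuffers = 1;
    abl.mBuffers[0].mNumberChannels = 1;
    abl.mBuffers[0].mDataByteSize   = (UInt32)(frameCount * sizeof(Float32));
    abl.mBuffers[0].mData           = samples; // Float32 samples produced by RenderTone
    status = ExtAudioFileWrite(outFile, frameCount, &abl);
    ExtAudioFileDispose(outFile);
}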
I have an app that selects a song from the iPod Library and then copies that song into the app's directory as a '.caf' file. I now need to play it and, at the same time, read that file into Apple's FFT from the Accelerate framework so I can visualize the data like a spectrogram. Here is the code for the FFT:
void FFTAccelerate::doFFTReal(float samples[], float amp[], int numSamples)
{
int i;
vDSP_Length log2n = log2f(numSamples);
//Convert float array of reals samples to COMPLEX_SPLIT array A
vDSP_ctoz((COMPLEX*)samples,2,&A,1,numSamples/2);
//Perform FFT using fftSetup and A
//Results are returned in A
vDSP_fft_zrip(fftSetup, &A, 1, log2n, FFT_FORWARD);
//Convert COMPLEX_SPLIT A result to float array to be returned
amp[0] = A.realp[0]/(numSamples*2);
for(i=1;i<numSamples;i++)
amp[i]=sqrt(A.realp[i]*A.realp[i]+A.imagp[i]*A.imagp[i])/numSamples;
}
//Constructor
FFTAccelerate::FFTAccelerate (int numSamples)
{
vDSP_Length log2n = log2f(numSamples);
fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);
int nOver2 = numSamples/2;
A.realp = (float *) malloc(nOver2*sizeof(float));
A.imagp = (float *) malloc(nOver2*sizeof(float));
}
My question is how do I loop through the '.caf' audio file to feed the FFT while at the same time playing the song? I only need one channel. I'm guessing I need to get 1024 samples of the song, process those in the FFT, and then move further down the file and grab another 1024 samples. But I don't understand how to read an audio file to do this. The file has a sample rate of 44100 Hz, is in linear PCM format, 16-bit, and I believe is also interleaved, if that helps...
Try the ExtendedAudioFile API (requires AudioToolbox.framework).
#include <AudioToolbox/ExtendedAudioFile.h>
NSURL *urlToCAF = ...;
ExtAudioFileRef caf;
OSStatus status;

status = ExtAudioFileOpenURL((__bridge CFURLRef)urlToCAF, &caf);
if(noErr == status) {
    const UInt32 NumFrames = 1024;
    const int ChannelsPerFrame = 1; // Mono, 2 for Stereo

    // request float format
    AudioStreamBasicDescription clientFormat;
    clientFormat.mChannelsPerFrame = ChannelsPerFrame;
    clientFormat.mSampleRate = 44100;
    clientFormat.mFormatID = kAudioFormatLinearPCM;
    clientFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved; // float
    int cmpSize = sizeof(float);
    int frameSize = cmpSize * ChannelsPerFrame;
    clientFormat.mBitsPerChannel = cmpSize * 8;
    clientFormat.mBytesPerPacket = frameSize;
    clientFormat.mFramesPerPacket = 1;
    clientFormat.mBytesPerFrame = frameSize;

    status = ExtAudioFileSetProperty(caf, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
    if(noErr != status) { /* handle it */ }

    while(1) {
        float buf[ChannelsPerFrame * NumFrames];
        AudioBuffer ab = { ChannelsPerFrame, sizeof(buf), buf };
        AudioBufferList abl;
        abl.mNumberBuffers = 1;
        abl.mBuffers[0] = ab;

        UInt32 ioNumFrames = NumFrames;
        status = ExtAudioFileRead(caf, &ioNumFrames, &abl);
        if(noErr == status) {
            // process ioNumFrames here in buf
            if(0 == ioNumFrames) {
                // EOF!
                break;
            } else if(ioNumFrames < NumFrames) {
                // TODO: pad buf with zeroes out to NumFrames
            } else {
                float amp[NumFrames]; // scratch space
                doFFTReal(buf, amp, NumFrames);
            }
        }
    }

    // later
    status = ExtAudioFileDispose(caf);
    if(noErr != status) { /* hmm */ }
}
I have a really short audio file, say a 10th of a second in (say) .PCM format
I want to use RemoteIO to loop through the file repeatedly to produce a continuous musical tone. So how do I read this into an array of floats?
EDIT: while I could probably dig out the file format, extract the file into an NSData and process it manually, I'm guessing there is a more sensible generic approach... (one that, e.g., copes with different formats)
You can use ExtAudioFile to read data from any supported data format in numerous client formats. Here is an example to read a file as 16-bit integers:
CFURLRef url = /* ... */;
ExtAudioFileRef eaf;
OSStatus err = ExtAudioFileOpenURL((CFURLRef)url, &eaf);
if(noErr != err) { /* handle error */ }
AudioStreamBasicDescription format;
format.mSampleRate = 44100;
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
format.mBitsPerChannel = 16;
format.mChannelsPerFrame = 2;
format.mBytesPerFrame = format.mChannelsPerFrame * 2;
format.mFramesPerPacket = 1;
format.mBytesPerPacket = format.mFramesPerPacket * format.mBytesPerFrame;
err = ExtAudioFileSetProperty(eaf, kExtAudioFileProperty_ClientDataFormat, sizeof(format), &format);
/* Read the file contents using ExtAudioFileRead */
If you wanted Float32 data, you would set up format like this:
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
format.mBitsPerChannel = 32;
This is the code I have used to convert my audio data (audio file) into a floating point representation and save it into an array.
-(void) PrintFloatDataFromAudioFile {
    NSString *name = @"Filename"; //YOUR FILE NAME
    NSString *source = [[NSBundle mainBundle] pathForResource:name ofType:@"m4a"]; // SPECIFY YOUR FILE FORMAT
    const char *cString = [source cStringUsingEncoding:NSASCIIStringEncoding];
    CFStringRef str = CFStringCreateWithCString(
                                                NULL,
                                                cString,
                                                kCFStringEncodingMacRoman
                                                );
    CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
                                                          kCFAllocatorDefault,
                                                          str,
                                                          kCFURLPOSIXPathStyle,
                                                          false
                                                          );
    ExtAudioFileRef fileRef;
    ExtAudioFileOpenURL(inputFileURL, &fileRef);

    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = 44100; // GIVE YOUR SAMPLING RATE
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
    audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
    audioFormat.mChannelsPerFrame = 1; // Mono
    audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)

    // 3) Apply audio format to the Extended Audio File
    ExtAudioFileSetProperty(
                            fileRef,
                            kExtAudioFileProperty_ClientDataFormat,
                            sizeof(AudioStreamBasicDescription), //= audioFormat
                            &audioFormat);

    int numSamples = 1024; //How many samples to read in at a time
    UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 4 bytes
    UInt32 packetsPerBuffer = numSamples;
    UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;

    // So the lvalue of outputBuffer is the memory location where we have reserved space
    UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);

    AudioBufferList convertedData; //= malloc(sizeof(convertedData));
    convertedData.mNumberBuffers = 1; // Set this to 1 for mono
    convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
    convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
    convertedData.mBuffers[0].mData = outputBuffer;

    UInt32 frameCount = numSamples;
    float *samplesAsCArray;
    int j = 0;
    double floatDataArray[882000]; // SPECIFY YOUR DATA LIMIT MINE WAS 882000, SHOULD BE EQUAL TO OR MORE THAN DATA LIMIT

    while (frameCount > 0) {
        ExtAudioFileRead(
                         fileRef,
                         &frameCount,
                         &convertedData
                         );
        if (frameCount > 0) {
            AudioBuffer audioBuffer = convertedData.mBuffers[0];
            samplesAsCArray = (float *)audioBuffer.mData; // CAST YOUR mData INTO FLOAT
            for (int i = 0; i < 1024 /*numSamples*/; i++) { //YOU CAN PUT numSamples INSTEAD OF 1024
                floatDataArray[j] = (double)samplesAsCArray[i]; //PUT YOUR DATA INTO FLOAT ARRAY
                printf("\n%f", floatDataArray[j]); //PRINT YOUR ARRAY'S DATA IN FLOAT FORM RANGING -1 TO +1
                j++;
            }
        }
    }
}
I'm not familiar with RemoteIO, but I am familiar with WAVs and thought I'd post some format information on them. If you need to, you should be able to easily parse out information such as duration, bit rate, etc...
First, here is an excellent website detailing the WAVE PCM soundfile format. This site also does an excellent job illustrating what the different byte addresses inside the "fmt" sub-chunk refer to.
WAVE File format
A WAVE is composed of a "RIFF" chunk and subsequent sub-chunks
Every chunk is at least 8 bytes
First 4 bytes is the Chunk ID
Next 4 bytes is the Chunk Size (The Chunk Size gives the size of the remainder of the chunk excluding the 8 bytes used for the Chunk ID and Chunk Size)
Every WAVE has the following chunks / sub chunks
"RIFF" (first and only chunk. All the rest are technically sub-chunks.)
"fmt " (usually the first sub-chunk after "RIFF" but can be anywhere between "RIFF" and "data". This chunk has information about the WAV such as number of channels, sample rate, and byte rate)
"data" (must be the last sub-chunk and contains all the sound data)
Common WAVE Audio Formats:
PCM
IEEE_Float
PCM_EXTENSIBLE (with a sub format of PCM or IEEE_FLOAT)
WAVE Duration and Size
A WAVE File's duration can be calculated as follows:
seconds = DataChunkSize / ByteRate
Where
ByteRate = SampleRate * NumChannels * BitsPerSample/8
and DataChunkSize does not include the 8 bytes reserved for the ID and Size of the "data" sub-chunk.
Knowing this, the DataChunkSize can be calculated if you know the duration of the WAV and the ByteRate.
DataChunkSize = seconds * ByteRate
This can be useful for calculating the size of the wav data when converting from formats like mp3 or wma. Note that a typical wav header is 44 bytes followed by DataChunkSize (this is always the case if the wav was converted using the Normalizer tool - at least as of this writing).
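As a small worked example of those formulas (plain C, with purely illustrative numbers for 16-bit stereo at 44100 Hz):
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    // Illustrative values: 16-bit stereo PCM at 44100 Hz
    uint32_t sampleRate    = 44100;
    uint16_t numChannels   = 2;
    uint16_t bitsPerSample = 16;
    uint32_t dataChunkSize = 10584000; // "data" payload size, excluding its 8-byte chunk header

    // ByteRate = SampleRate * NumChannels * BitsPerSample/8
    uint32_t byteRate = sampleRate * numChannels * (bitsPerSample / 8); // 176400 bytes per second

    // seconds = DataChunkSize / ByteRate
    double seconds = (double)dataChunkSize / byteRate; // 60.0 seconds for these numbers

    // Going the other way: DataChunkSize = seconds * ByteRate
    uint32_t expectedDataSize = (uint32_t)(seconds * byteRate);

    printf("ByteRate: %u, duration: %.2f s, data size: %u bytes\n",
           byteRate, seconds, expectedDataSize);
    return 0;
}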
Update for Swift 5
This is a simple function that helps get your audio file into an array of floats. It works for both mono and stereo audio; to get the second channel of stereo audio, just uncomment samples2.
import AVFoundation
//..
do {
    guard let url = Bundle.main.url(forResource: "audio_example", withExtension: "wav") else { return }
    let file = try AVAudioFile(forReading: url)
    if let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: file.fileFormat.sampleRate, channels: file.fileFormat.channelCount, interleaved: false),
       let buf = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(file.length)) {
        try file.read(into: buf)
        guard let floatChannelData = buf.floatChannelData else { return }
        let frameLength = Int(buf.frameLength)
        let samples = Array(UnsafeBufferPointer(start: floatChannelData[0], count: frameLength))
        // let samples2 = Array(UnsafeBufferPointer(start: floatChannelData[1], count: frameLength))
        print("samples")
        print(samples.count)
        print(samples.prefix(10))
        // print(samples2.prefix(10))
    }
} catch {
    print("Audio Error: \(error)")
}