Opus for iOS, crashing with 16000 sample rate

I am developing a VoIP application with Opus for iOS (Objective-C and C++).
It works fine with 8000, 12000, 24000 and 48000 sampling rates, but with 16000 the application crashes in the opus_encode method.
Here is what I am doing:
m_oAudioSession = [AVAudioSession sharedInstance];
[m_oAudioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:&m_oError];
[m_oAudioSession setMode:AVAudioSessionModeVoiceChat error:&m_oError];
[m_oAudioSession setPreferredSampleRate:VOIP_AUDIO_DRIVER_DEFAULT_SAMPLE_RATE error:&m_oError];
[m_oAudioSession setPreferredInputNumberOfChannels:VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS error:&m_oError];
[m_oAudioSession setPreferredOutputNumberOfChannels:VOIP_AUDIO_DRIVER_DEFAULT_OUTPUT_CHANNELS error:&m_oError];
[m_oAudioSession setPreferredIOBufferDuration:VOIP_AUDIO_DRIVER_DEFAULT_BUFFER_DURATION error:&m_oError];
[m_oAudioSession setActive:YES error:&m_oError];
Constants:
VOIP_AUDIO_DRIVER_DEFAULT_SAMPLE_RATE is 16000
VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS is 1
VOIP_AUDIO_DRIVER_DEFAULT_OUTPUT_CHANNELS is 1
VOIP_AUDIO_DRIVER_DEFAULT_BUFFER_DURATION is 0.02
VOIP_AUDIO_DRIVER_FRAMES_PER_PACKET is 1
After that I use the actual sampling rate and buffer duration reported by m_oAudioSession.sampleRate and m_oAudioSession.IOBufferDuration; they are stored in the m_fSampleRate and m_fBufferDuration variables.
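For clarity, that read-back step is essentially the following (the assignments are my sketch; the property names are from AVAudioSession):
//iOS may not honor the preferred values exactly, so use what the session actually reports:
m_fSampleRate     = m_oAudioSession.sampleRate;       //e.g. 16000.0
m_fBufferDuration = m_oAudioSession.IOBufferDuration; //e.g. 0.016 instead of the requested 0.02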
The configurations are:
//Describes audio component:
m_sAudioDescription.componentType = kAudioUnitType_Output;
m_sAudioDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO/*kAudioUnitSubType_RemoteIO*/;
m_sAudioDescription.componentFlags = 0;
m_sAudioDescription.componentFlagsMask = 0;
m_sAudioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
m_sAudioFormat.mSampleRate = m_fSampleRate;
m_sAudioFormat.mFormatID = kAudioFormatLinearPCM;
m_sAudioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
m_sAudioFormat.mFramesPerPacket = VOIP_AUDIO_DRIVER_FRAMES_PER_PACKET;
m_sAudioFormat.mChannelsPerFrame = VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS;
m_sAudioFormat.mBitsPerChannel = (UInt32)(8 * m_iBytesPerSample);
m_sAudioFormat.mBytesPerFrame = (UInt32)((m_sAudioFormat.mBitsPerChannel / 8) * m_sAudioFormat.mChannelsPerFrame);
m_sAudioFormat.mBytesPerPacket = m_sAudioFormat.mBytesPerFrame * m_sAudioFormat.mFramesPerPacket;
m_sAudioFormat.mReserved = 0;
The calculations I make are:
m_iBytesPerSample = sizeof(/*AudioSampleType*/SInt16);
//Calculating buffer size:
int samplesPerFrame = (int)(m_fBufferDuration * m_fSampleRate) + 1;
m_iBufferSizeBytes = samplesPerFrame * m_iBytesPerSample;
//Allocating input buffer:
UInt32 inputBufferListSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_sAudioFormat.mChannelsPerFrame);
m_sInputBuffer = (AudioBufferList *)VoipAlloc(inputBufferListSize);
m_sInputBuffer->mNumberBuffers = m_sAudioFormat.mChannelsPerFrame;
//Pre-mallocating buffers for AudioBufferLists
for(VoipUInt32 tmp_int1 = 0; tmp_int1 < m_sInputBuffer->mNumberBuffers; tmp_int1++)
{
m_sInputBuffer->mBuffers[tmp_int1].mNumberChannels = VOIP_AUDIO_DRIVER_DEFAULT_INPUT_CHANNELS;
m_sInputBuffer->mBuffers[tmp_int1].mDataByteSize = (UInt32)m_iBufferSizeBytes;
m_sInputBuffer->mBuffers[tmp_int1].mData = VoipAlloc(m_iBufferSizeBytes);
memset(m_sInputBuffer->mBuffers[tmp_int1].mData, 0, m_iBufferSizeBytes);
}
Reading from and writing to the audio unit are done using m_sInputBuffer.
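For context, such an input callback typically looks roughly like this (a sketch only; the VoipAudioDriver type, m_oAudioUnit member and the pushItem signature are assumed names, not code from the post above):
static OSStatus InputRenderCallback(void *inRefCon,
                                    AudioUnitRenderActionFlags *ioActionFlags,
                                    const AudioTimeStamp *inTimeStamp,
                                    UInt32 inBusNumber,
                                    UInt32 inNumberFrames,
                                    AudioBufferList *ioData) //ioData is NULL for an input callback
{
    //Hypothetical driver object carrying the audio unit, m_sInputBuffer and inputAudioQueue.
    VoipAudioDriver* driver = (VoipAudioDriver*)inRefCon;
    //Pull the recorded frames from the input element into m_sInputBuffer...
    OSStatus status = AudioUnitRender(driver->m_oAudioUnit, ioActionFlags, inTimeStamp,
                                      inBusNumber, inNumberFrames, driver->m_sInputBuffer);
    if (status == noErr)
    {
        //...and queue them for the encoder thread.
        driver->inputAudioQueue->pushItem(driver->m_sInputBuffer->mBuffers[0].mData,
                                          driver->m_sInputBuffer->mBuffers[0].mDataByteSize);
    }
    return status;
}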
Here is the Opus creation:
m_oEncoder = opus_encoder_create(m_iSampleRate, m_iNumberOfChannels, VOIP_AUDIO_CODECS_OPUS_APPLICATION_TYPE, &_error);
if (_error < 0)
{
fprintf(stderr, "VoipAudioCodecs error: failed to create an encoder: %s\n", opus_strerror(_error));
return;
}
_error = opus_encoder_ctl(m_oEncoder, OPUS_SET_BITRATE(VOIP_AUDIO_CODECS_OPUS_BITRATE));
if (_error < 0)
{
fprintf(stderr, "VoipAudioCodecs error: failed to set the bitrate: %s\n", opus_strerror(_error));
return;
}
m_oDecoder = opus_decoder_create(m_iSampleRate, m_iNumberOfChannels, &_error);
if (_error < 0)
{
fprintf(stderr, "VoipAudioCodecs error: failed to create a decoder: %s\n", opus_strerror(_error));
return;
}
The Opus configuration constants are:
VOIP_AUDIO_CODECS_OPUS_BITRATE is OPUS_BITRATE_MAX
//64000 //70400 //84800 //112000
VOIP_AUDIO_CODECS_OPUS_APPLICATION_TYPE is OPUS_APPLICATION_VOIP
//OPUS_APPLICATION_AUDIO
VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE is 5760
//Minimum: (120ms; 5760 for 48kHz)
VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE is 960
//120, 240, 480, 960, 1920, 2880
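For reference (not stated in the original post): opus_encode only accepts frame sizes that correspond to 2.5, 5, 10, 20, 40 or 60 ms of audio at the encoder's sample rate, so at 16 kHz the per-channel frame size must be 40, 80, 160, 320, 640 or 960 samples. A small hypothetical helper illustrating the check:
//Hypothetical helper, not part of the original code: returns true if frameSize
//(samples per channel) is one of the frame durations opus_encode accepts.
static bool IsValidOpusFrameSize(int frameSize, int sampleRate)
{
    const int base = sampleRate / 400;  //2.5 ms, e.g. 40 samples at 16 kHz
    return frameSize == base      || frameSize == base * 2  ||
           frameSize == base * 4  || frameSize == base * 8  ||
           frameSize == base * 16 || frameSize == base * 24;
}
//Example: 960 bytes of SInt16 samples is 480 samples, i.e. 30 ms at 16 kHz,
//and IsValidOpusFrameSize(480, 16000) is false; at 24 kHz the same 480 samples
//are 20 ms and the check passes.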
When I encode and decode I use these methods:
sVoipAudioCodecOpusEncoded* Encode_Opus(VoipInt16* rawSamples, int rawSamplesSize)
{
unsigned char encodedData[m_iMaxPacketSize];
VoipInt32 bytesEncoded;
int frameSize = rawSamplesSize / m_iBytesPerSample;
bytesEncoded = opus_encode(m_oEncoder, rawSamples, frameSize, encodedData, m_iMaxPacketSize);
if (bytesEncoded < 0)
{
fprintf(stderr, "VoipAudioCodecs error: encode failed: %s\n", opus_strerror(bytesEncoded));
return nullptr;
}
sVoipAudioCodecOpusEncoded* resultStruct = (sVoipAudioCodecOpusEncoded* )VoipAlloc(sizeof(sVoipAudioCodecOpusEncoded));
resultStruct->m_data = (unsigned char*)VoipAlloc(bytesEncoded);
memcpy(resultStruct->m_data, encodedData, bytesEncoded);
resultStruct->m_dataSize = bytesEncoded;
return resultStruct;
}
sVoipAudioCodecOpusDecoded* Decode_Opus(void* encodedSamples, VoipInt32 encodedSamplesSize)
{
VoipInt16 decodedPacket[VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE];
int _frameSize = opus_decode(m_oDecoder, (const unsigned char*)encodedSamples, encodedSamplesSize, decodedPacket, VOIP_AUDIO_CODECS_OPUS_MAX_FRAME_SIZE, 0);
if (_frameSize < 0)
{
fprintf(stderr, "VoipAudioCodecs error: decoder failed: %s\n", opus_strerror(_frameSize));
return nullptr;
}
size_t frameSize = (size_t)_frameSize;
sVoipAudioCodecOpusDecoded* resultStruct = (sVoipAudioCodecOpusDecoded* )VoipAlloc(sizeof(sVoipAudioCodecOpusDecoded));
resultStruct->m_data = (VoipInt16*)VoipAlloc(frameSize * m_iBytesPerSample);
memcpy(resultStruct->m_data, decodedPacket, (frameSize * m_iBytesPerSample));
resultStruct->m_dataSize = frameSize * m_iBytesPerSample;
return resultStruct;
}
When the app should send data:
VoipUInt32 itemsForProcess = inputAudioQueue->getItemCount();
for (int tmp_queueItems = 0; tmp_queueItems < itemsForProcess; tmp_queueItems++)
{
sVoipQueue* tmp_samples = inputAudioQueue->popItem();
m_oCircularTempInputBuffer->writeDataToBuffer(tmp_samples->m_pData, tmp_samples->m_iDataSize);
while (void* tmp_buffer = m_oCircularTempInputBuffer->readDataFromBuffer(VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE))
{
sVoipAudioCodecOpusEncoded* encodedSamples = Encode_Opus((VoipInt16*)tmp_buffer, VOIP_AUDIO_CODECS_OPUS_BYTES_SIZE);
//Then packetizing and the actual sending over a TCP socket…
}
//Rest of the code…
}
Here is the reading:
sVoipAudioCodecOpusDecoded* decodedSamples = Decode_Opus(inputPacket->m_pPacketData, (VoipInt32)inputPacket->m_iPacketSize);
if (decodedSamples != nullptr)
{
m_oCircularTempOutputBuffer->writeDataToBuffer(decodedSamples->m_data, decodedSamples->m_dataSize);
VoipFree((void**)&decodedSamples->m_data);
VoipFree((void**)&decodedSamples);
}
while (void* tmp_buffer = m_oCircularTempOutputBuffer->readDataFromBuffer(m_iBufferSizeBytes))
{
outputAudioQueue->pushItem(tmp_buffer, m_iBufferSizeBytes);
}
inputAudioQueue is a queue holding recorded data from my audio unit’s callback.
outputAudioQueue is a queue used by my audio unit’s callback to play the sound.
m_iMaxPacketSize is the same as m_iBufferSizeBytes.
My questions are:
Are my calculations correct, and if not, how can I improve them?
Do you see any mistake in the code?
Do you have a suggestion for fixing the crash in the opus_encode method when the sampling rate is set to 16000?
Thank you in advance.
PS. I made some tests with a sampling rate of 16000 and found the following. Using the formula frame_duration = frame_size / sample_rate and setting frame_duration as the preferredIOBufferDuration:
120 / 16000 = 0.0075 //AVAudioSession sets 0.008000 —— Crashes
240 / 16000 = 0.015 //AVAudioSession sets 0.016000 —— Crashes
480 / 16000 = 0.03 //AVAudioSession sets 0.032000 —— Crashes
960 / 16000 = 0.06 //AVAudioSession sets 0.064000 —— Crashes
1920 / 16000 = 0.12 //AVAudioSession sets 0.128000 —— Works
2880 / 16000 = 0.18 //AVAudioSession sets 0.128000 —— Crashes
So there is no encoder crash at a 16000 sampling rate only when preferredIOBufferDuration is 0.12 (frame size 1920), for which AVAudioSession sets 0.128000; it works only in that case.
Any ideas?

Related

How to encode and decode real-time audio using OpusCodec in iOS?

I am working on an app which has the following requirements:
Record real time audio from iOS device (iPhone)
Encode this audio data to Opus data and send it to server over WebSocket
Decode received data to pcm again
Play received audio from WebSocket server on iOS device(iPhone)
I've used AVAudioEngine for this.
var engine = AVAudioEngine()
var input: AVAudioInputNode = engine.inputNode
var format: AVAudioFormat = input.outputFormat(forBus: AVAudioNodeBus(0))
input.installTap(onBus: AVAudioNodeBus(0), bufferSize: AVAudioFrameCount(8192), format: format, block: { buf, when in
// 'buf' contains audio captured from the input node at time 'when'
})
// start engine
And I converted AVAudioPCMBuffer to Data using this function
func toData(PCMBuffer: AVAudioPCMBuffer) -> Data {
let channelCount = 1
let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: channelCount)
let ch0Data = NSData(bytes: channels[0], length:Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame))
return ch0Data as Data
}
I've found the Opus library from the CocoaPod libopus.
I have searched a lot for how to use the Opus codec in iOS but haven't found a solution.
How do I encode and decode this data using the Opus codec? Do I need a jitter buffer, and if so, how do I use it in iOS?
Here is my code for the Opus codec, but the voice is not clear:
#import "OpusManager.h"
#import <opus/opus.h>
#define SAMPLE_RATE 16000
#define CHANNELS 1
#define BITRATE SAMPLE_RATE * CHANNELS
/**
* Audio frame size
* It is divided by time. When calling, you must use the audio data of
* exactly one frame (a multiple of 2.5 ms: 2.5, 5, 10, 20, 40, 60 ms).
* Fs/ms 2.5 5 10 20 40 60
* 8kHz 20 40 80 160 320 480
* 16kHz 40 80 160 320 640 960
* 24KHz 60 120 240 480 960 1440
* 48kHz 120 240 480 960 1920 2880
*/
#define FRAME_SIZE 320
#define APPLICATION OPUS_APPLICATION_VOIP
#define MAX_PACKET_BYTES (FRAME_SIZE * CHANNELS * sizeof(float))
#define MAX_FRAME_SIZE (FRAME_SIZE * CHANNELS * sizeof(float))
typedef opus_int16 OPUS_DATA_SIZE_T;
@implementation OpusManager {
OpusEncoder *_encoder;
OpusDecoder *_decoder;
}
int size;
int error;
unsigned char encodedPacket[MAX_PACKET_BYTES];
- (instancetype)init {
self = [super init];
if (self) {
size = opus_encoder_get_size(CHANNELS);
_encoder = malloc(size);
error = opus_encoder_init(_encoder, SAMPLE_RATE, CHANNELS, APPLICATION);
_encoder = opus_encoder_create(SAMPLE_RATE, CHANNELS, APPLICATION, &error);
_decoder = opus_decoder_create(SAMPLE_RATE, CHANNELS, &error);
opus_encoder_ctl(_encoder, OPUS_SET_BITRATE(BITRATE));
opus_encoder_ctl(_encoder, OPUS_SET_COMPLEXITY(10));
opus_encoder_ctl(_encoder, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
opus_encoder_ctl(_encoder, OPUS_SET_VBR(0));
opus_encoder_ctl(_encoder, OPUS_SET_APPLICATION(APPLICATION));
opus_encoder_ctl(_encoder, OPUS_SET_DTX(1));
opus_encoder_ctl(_encoder, OPUS_SET_INBAND_FEC(0));
opus_encoder_ctl(_encoder, OPUS_SET_BANDWIDTH(12000));
opus_encoder_ctl(_encoder, OPUS_SET_PACKET_LOSS_PERC(1));
opus_encoder_ctl(_encoder, OPUS_SET_INBAND_FEC(1));
opus_encoder_ctl(_encoder, OPUS_SET_FORCE_CHANNELS(CHANNELS));
opus_encoder_ctl(_encoder, OPUS_SET_PACKET_LOSS_PERC(1));
}
return self;
}
- (NSData *)encode:(NSData *)PCM {
opus_int16 *PCMPtr = (opus_int16 *)PCM.bytes;
int PCMSize = (int)PCM.length / sizeof(opus_int16);
opus_int16 *PCMEnd = PCMPtr + PCMSize;
NSMutableData *mutData = [NSMutableData data];
unsigned char encodedPacket[MAX_PACKET_BYTES];
// Record opus block size
OPUS_DATA_SIZE_T encodedBytes = 0;
while (PCMPtr + FRAME_SIZE < PCMEnd) {
encodedBytes = opus_encode_float(_encoder, (const float *) PCMPtr, FRAME_SIZE, encodedPacket, MAX_PACKET_BYTES);
if (encodedBytes <= 0) {
NSLog(#"ERROR: encodedBytes<=0");
return nil;
}
NSLog(#"encodedBytes: %d", encodedBytes);
// Save the opus block size
[mutData appendBytes:&encodedBytes length:sizeof(encodedBytes)];
// Save opus data
[mutData appendBytes:encodedPacket length:encodedBytes];
PCMPtr += FRAME_SIZE;
}
NSLog(#"mutData: %lu", (unsigned long)mutData.length);
NSLog(#"encodedPacket: %s", encodedPacket);
return mutData.length > 0 ? mutData : nil;
}
- (NSData *)decode:(NSData *)opus {
unsigned char *opusPtr = (unsigned char *)opus.bytes;
int opusSize = (int)opus.length;
unsigned char *opusEnd = opusPtr + opusSize;
NSMutableData *mutData = [NSMutableData data];
float decodedPacket[MAX_FRAME_SIZE];
int decodedSamples = 0;
// Save data for opus block size
OPUS_DATA_SIZE_T nBytes = 0;
while (opusPtr < opusEnd) {
// Take out the opus block size data
nBytes = *(OPUS_DATA_SIZE_T *)opusPtr;
opusPtr += sizeof(nBytes);
decodedSamples = opus_decode_float(_decoder, opusPtr, nBytes, decodedPacket, MAX_FRAME_SIZE, 0);
if (decodedSamples <= 0) {
NSLog(#"ERROR: decodedSamples<=0");
return nil;
}
NSLog(#"decodedSamples:%d", decodedSamples);
[mutData appendBytes:decodedPacket length:decodedSamples *sizeof(opus_int16)];
opusPtr += nBytes;
}
NSLog(#"mutData: %lu", (unsigned long)mutData.length);
return mutData.length > 0 ? mutData : nil;
}
@end
Try lowering the bandwidth or setting a higher bitrate. I think 16 kbit/s for 12 kHz-bandwidth mono audio is probably too low. It is probably better to leave the bandwidth on auto with the application set to VOIP. There could be other problems as well, but "doesn't sound good" is not enough to analyze.
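A minimal sketch of that suggestion (the specific values are illustrative, not from this answer):
//Let the encoder choose the bandwidth itself and give speech a more comfortable bitrate.
opus_encoder_ctl(_encoder, OPUS_SET_BANDWIDTH(OPUS_AUTO));
opus_encoder_ctl(_encoder, OPUS_SET_BITRATE(24000)); //e.g. 24 kbit/s for 16 kHz mono speech
opus_encoder_ctl(_encoder, OPUS_SET_VBR(1));         //VBR usually sounds better than the hard CBR set above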
I would suggest playing around with bitrate and bandwidth.
I succeeded in making it work with the parameters described here:
https://ddanilov.me/how-to-enable-in-band-fec-for-opus-codec/.
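For reference, in-band FEC is controlled through encoder ctls along these lines (a sketch; the loss percentage is an assumed value, see the linked article for the exact parameters used there):
//Enable in-band forward error correction and tell the encoder how lossy the link is.
opus_encoder_ctl(_encoder, OPUS_SET_INBAND_FEC(1));
opus_encoder_ctl(_encoder, OPUS_SET_PACKET_LOSS_PERC(20)); //illustrative value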

AudioFileWriteBytes performance for stereo file

I'm writing a stereo wave file with AudioFileWriteBytes (CoreAudio / iOS) and the only way I can get it to work is by calling it for each sample on each channel.
The following code works:
// Prepare the format AudioStreamBasicDescription;
AudioStreamBasicDescription asbd = {
.mSampleRate = session.samplerate,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagIsBigEndian| kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
.mChannelsPerFrame = 2,
.mBitsPerChannel = 16,
.mFramesPerPacket = 1, // Always 1 for uncompressed formats
.mBytesPerPacket = 4, // 16 bits for 2 channels = 4 bytes
.mBytesPerFrame = 4 // 16 bits for 2 channels = 4 bytes
};
// Set up the file
AudioFileID audioFile;
OSStatus audioError = noErr;
audioError = AudioFileCreateWithURL((__bridge CFURLRef)fileURL, kAudioFileAIFFType, &asbd, kAudioFileFlags_EraseFile, &audioFile);
if (audioError != noErr) {
NSLog(#"Error creating file");
return;
}
// Write samples
UInt64 currentFrame = 0;
while (currentFrame < totalLengthInFrames) {
UInt64 numberOfFramesToWrite = totalLengthInFrames - currentFrame;
if (numberOfFramesToWrite > 2048) {
numberOfFramesToWrite = 2048;
}
UInt32 sampleByteCount = sizeof(int16_t);
UInt32 bytesToWrite = (UInt32)numberOfFramesToWrite * sampleByteCount;
int16_t *sampleBufferLeft = (int16_t *)malloc(bytesToWrite);
int16_t *sampleBufferRight = (int16_t *)malloc(bytesToWrite);
// Some magic to fill the buffers
for (int j = 0; j < numberOfFramesToWrite; j++) {
int16_t left = CFSwapInt16HostToBig(sampleBufferLeft[j]);
int16_t right = CFSwapInt16HostToBig(sampleBufferRight[j]);
audioError = AudioFileWriteBytes(audioFile, false, (currentFrame + j) * 4, &sampleByteCount, &left);
assert(audioError == noErr);
audioError = AudioFileWriteBytes(audioFile, false, (currentFrame + j) * 4 + 2, &sampleByteCount, &right);
assert(audioError == noErr);
}
free(sampleBufferLeft);
free(sampleBufferRight);
currentFrame += numberOfFramesToWrite;
}
However, it is (obviously) very slow and inefficient.
I can't find anything on how to use it with a big buffer so that I can write more than a single sample while also writing 2 channels.
I tried making a buffer going LRLRLRLR (left / right), and then write that with just one AudioFileWriteBytes call. I expected that to work, but it produced a file filled with noise.
This is the code:
UInt64 currentFrame = 0;
UInt64 bytePos = 0;
while (currentFrame < totalLengthInFrames) {
UInt64 numberOfFramesToWrite = totalLengthInFrames - currentFrame;
if (numberOfFramesToWrite > 2048) {
numberOfFramesToWrite = 2048;
}
UInt32 sampleByteCount = sizeof(int16_t);
UInt32 bytesInBuffer = (UInt32)numberOfFramesToWrite * sampleByteCount;
UInt32 bytesInOutputBuffer = (UInt32)numberOfFramesToWrite * sampleByteCount * 2;
int16_t *sampleBufferLeft = (int16_t *)malloc(bytesInBuffer);
int16_t *sampleBufferRight = (int16_t *)malloc(bytesInBuffer);
int16_t *outputBuffer = (int16_t *)malloc(bytesInOutputBuffer);
// Some magic to fill the buffers
for (int j = 0; j < numberOfFramesToWrite; j++) {
int16_t left = CFSwapInt16HostToBig(sampleBufferLeft[j]);
int16_t right = CFSwapInt16HostToBig(sampleBufferRight[j]);
outputBuffer[(j * 2)] = left;
outputBuffer[(j * 2) + 1] = right;
}
audioError = AudioFileWriteBytes(audioFile, false, bytePos, &bytesInOutputBuffer, &outputBuffer);
assert(audioError == noErr);
free(sampleBufferLeft);
free(sampleBufferRight);
free(outputBuffer);
bytePos += bytesInOutputBuffer;
currentFrame += numberOfFramesToWrite;
}
I also tried to just write the buffers at once (2048*L, 2048*R, etc.) which I did not expect to work, and it didn't.
How do I speed this up AND get a working wave file?
I tried making a buffer going LRLRLRLR (left / right), and then write that with just one AudioFileWriteBytes call.
This is the correct approach if using (the rather difficult) Audio File Services.
If possible, instead of the very low-level Audio File Services, use Extended Audio File Services. It is a wrapper around Audio File Services that has built-in format converters. Better yet, use AVAudioFile, which is a wrapper around Extended Audio File Services and covers most common use cases.
If you are set on using Audio File Services, you'll have to interleave the audio manually like you had tried. Maybe show the code where you attempted this.
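If it helps, here is a minimal sketch of the Extended Audio File Services route; the variable names reuse those from the question, everything else is an assumption rather than code from this answer:
#include <AudioToolbox/ExtendedAudioFile.h>
//Create the file with the on-disk asbd from the question, then describe the in-memory
//data as native-endian interleaved 16-bit stereo and let ExtAudioFile do the conversion
//(including the big-endian swap the AIFF file needs).
ExtAudioFileRef extFile = NULL;
OSStatus err = ExtAudioFileCreateWithURL((__bridge CFURLRef)fileURL, kAudioFileAIFFType,
                                         &asbd, NULL, kAudioFileFlags_EraseFile, &extFile);
AudioStreamBasicDescription clientFormat = {
    .mSampleRate       = asbd.mSampleRate,
    .mFormatID         = kAudioFormatLinearPCM,
    .mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
    .mChannelsPerFrame = 2,
    .mBitsPerChannel   = 16,
    .mFramesPerPacket  = 1,
    .mBytesPerFrame    = 4,
    .mBytesPerPacket   = 4
};
err = ExtAudioFileSetProperty(extFile, kExtAudioFileProperty_ClientDataFormat,
                              sizeof(clientFormat), &clientFormat);
//outputBuffer holds numberOfFramesToWrite interleaved LRLR int16 frames in host byte order.
AudioBufferList abl;
abl.mNumberBuffers = 1;
abl.mBuffers[0].mNumberChannels = 2;
abl.mBuffers[0].mDataByteSize   = (UInt32)(numberOfFramesToWrite * 4);
abl.mBuffers[0].mData           = outputBuffer;
err = ExtAudioFileWrite(extFile, (UInt32)numberOfFramesToWrite, &abl);
ExtAudioFileDispose(extFile);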

Using CMSampleTimingInfo, CMSampleBuffer and AudioBufferList from raw PCM 16000 sample rate stream

I receive audio data and its size from outside. The audio appears to be linear PCM, signed int16, but when I record it using an AVAssetWriter it is saved to the audio file highly distorted and at a higher pitch.
#define kSamplingRate 16000
#define kNumberChannels 1
UInt32 framesAlreadyWritten = 0;
-(AudioStreamBasicDescription) getAudioFormat {
AudioStreamBasicDescription format;
format.mSampleRate = kSamplingRate;
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
format.mChannelsPerFrame = 1; // mono
format.mBitsPerChannel = 16;
format.mBytesPerFrame = sizeof(SInt16);
format.mFramesPerPacket = 1;
format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
format.mReserved = 0;
return format;
}
- (CMSampleBufferRef)createAudioSample:(const void *)audioData frames: (UInt32)len {
AudioStreamBasicDescription asbd = [self getAudioFormat];
CMSampleBufferRef buff = NULL;
static CMFormatDescriptionRef format = NULL;
OSStatus error = 0;
if(format == NULL) {
AudioChannelLayout acl;
bzero(&acl, sizeof(acl));
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, sizeof(acl), &acl, 0, NULL, NULL, &format);
}
CMTime duration = CMTimeMake(1, kSamplingRate);
CMTime pts = CMTimeMake(framesAlreadyWritten, kSamplingRate);
NSLog(#"-----------pts");
CMTimeShow(pts);
CMSampleTimingInfo timing = {duration , pts, kCMTimeInvalid };
error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format, len, 1, &timing, 0, NULL, &buff);
framesAlreadyWritten += len;
if (error) {
NSLog(#"CMSampleBufferCreate returned error: %ld", (long)error);
return NULL;
}
AudioBufferList audioBufferList;
audioBufferList.mNumberBuffers = 1;
audioBufferList.mBuffers[0].mNumberChannels = asbd.mChannelsPerFrame;
audioBufferList.mBuffers[0].mDataByteSize = (UInt32)(len * asbd.mBytesPerFrame);
audioBufferList.mBuffers[0].mData = audioData;
error = CMSampleBufferSetDataBufferFromAudioBufferList(buff, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
if(error) {
NSLog(#"CMSampleBufferSetDataBufferFromAudioBufferList returned error: %ld", (long)error);
return NULL;
}
return buff;
}
Not sure why you're dividing len by two, but your time should progress instead of being constant, something like
CMTime time = CMTimeMake(framesAlreadyWritten , kSamplingRate);

How to play and read .caf PCM audio file

I have an app that selects a song from the iPod library and then copies that song into the app's directory as a '.caf' file. I now need to play that file and, at the same time, read it into Apple's FFT from the Accelerate framework so I can visualize the data like a spectrogram. Here is the code for the FFT:
void FFTAccelerate::doFFTReal(float samples[], float amp[], int numSamples)
{
int i;
vDSP_Length log2n = log2f(numSamples);
//Convert float array of reals samples to COMPLEX_SPLIT array A
vDSP_ctoz((COMPLEX*)samples,2,&A,1,numSamples/2);
//Perform FFT using fftSetup and A
//Results are returned in A
vDSP_fft_zrip(fftSetup, &A, 1, log2n, FFT_FORWARD);
//Convert COMPLEX_SPLIT A result to float array to be returned
amp[0] = A.realp[0]/(numSamples*2);
for(i=1;i<numSamples;i++)
amp[i]=sqrt(A.realp[i]*A.realp[i]+A.imagp[i]*A.imagp[i])/numSamples;
}
//Constructor
FFTAccelerate::FFTAccelerate (int numSamples)
{
vDSP_Length log2n = log2f(numSamples);
fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);
int nOver2 = numSamples/2;
A.realp = (float *) malloc(nOver2*sizeof(float));
A.imagp = (float *) malloc(nOver2*sizeof(float));
}
My question is: how do I loop through the '.caf' audio file to feed the FFT while at the same time playing the song? I only need one channel. I'm guessing I need to get 1024 samples of the song, process them in the FFT, then move further along the file and grab another 1024 samples. But I don't understand how to read an audio file to do this. The file has a sample rate of 44100.0 Hz, is in linear PCM format, 16-bit, and I believe it is also interleaved, if that helps...
Try the ExtendedAudioFile API (requires AudioToolbox.framework).
#include <AudioToolbox/ExtendedAudioFile.h>
NSURL *urlToCAF = ...;
ExtAudioFileRef caf;
OSStatus status;
status = ExtAudioFileOpenURL((__bridge CFURLRef)urlToCAF, &caf);
if(noErr == status) {
const UInt32 NumFrames = 1024;
const int ChannelsPerFrame = 1; // Mono, 2 for Stereo
// request float format
AudioStreamBasicDescription clientFormat;
clientFormat.mChannelsPerFrame = ChannelsPerFrame;
clientFormat.mSampleRate = 44100;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved; // float
int cmpSize = sizeof(float);
int frameSize = cmpSize*ChannelsPerFrame;
clientFormat.mBitsPerChannel = cmpSize*8;
clientFormat.mBytesPerPacket = frameSize;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerFrame = frameSize;
status = ExtAudioFileSetProperty(caf, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
if(noErr != status) { /* handle it */ }
while(1) {
float buf[ChannelsPerFrame*NumFrames];
AudioBuffer ab = { ChannelsPerFrame, sizeof(buf), buf };
AudioBufferList abl;
abl.mNumberBuffers = 1;
abl.mBuffers[0] = ab;
UInt32 ioNumFrames = NumFrames;
status = ExtAudioFileRead(caf, &ioNumFrames, &abl);
if(noErr == status) {
// process ioNumFrames here in buf
if(0 == ioNumFrames) {
// EOF!
break;
} else if(ioNumFrames < NumFrames) {
// TODO: pad buf with zeroes out to NumFrames
} else {
float amp[NumFrames]; // scratch space
doFFTReal(buf, amp, NumFrames);
}
}
}
// later
status = ExtAudioFileDispose(caf);
if(noErr != status) { /* hmm */ }
}

AudioConverter number of packets is wrong

I've set up a class to convert audio from one format to another given an input and output AudioStreamBasicDescription. When I convert Linear PCM from the mic to iLBC, it works and gives me 6 packets when I give it 1024 packets from the AudioUnitRender function. I then send those 226 bytes via UDP to the same app running on a different device. The problem is that when I use the same class to convert back into Linear PCM for giving to an audio unit input, the AudioConverterFillComplexBuffer function doesn't give 1024 packets, it gives 960... This means that the audio unit input is expecting 4096 bytes (2048 x 2 for stereo) but I can only give it 3190 or so, and so it sounds really crackly and distorted...
If I give AudioConverter 1024 packets of LinearPCM, convert to iLBC, convert back to LinearPCM, surely I should get 1024 packets again?
Audio converter function:
-(void) doConvert {
// Start converting
if (converting) return;
converting = YES;
while (true) {
// Get next buffer
id bfr = [buffers getNextBuffer];
if (!bfr) {
converting = NO;
return;
}
// Get info
NSArray* bfrs = ([bfr isKindOfClass:[NSArray class]] ? bfr : @[bfr]);
int bfrSize = 0;
for (NSData* dat in bfrs) bfrSize += dat.length;
int inputPackets = bfrSize / self.inputFormat.mBytesPerPacket;
int outputPackets = (inputPackets * self.inputFormat.mFramesPerPacket) / self.outputFormat.mFramesPerPacket;
// Create output buffer
AudioBufferList* bufferList = (AudioBufferList*) malloc(sizeof(AudioBufferList) * self.outputFormat.mChannelsPerFrame);
bufferList -> mNumberBuffers = self.outputFormat.mChannelsPerFrame;
for (int i = 0 ; i < self.outputFormat.mChannelsPerFrame ; i++) {
bufferList -> mBuffers[i].mNumberChannels = 1;
bufferList -> mBuffers[i].mDataByteSize = 4*1024;
bufferList -> mBuffers[i].mData = malloc(bufferList -> mBuffers[i].mDataByteSize);
}
// Create input buffer
AudioBufferList* inputBufferList = (AudioBufferList*) malloc(sizeof(AudioBufferList) * bfrs.count);
inputBufferList -> mNumberBuffers = bfrs.count;
for (int i = 0 ; i < bfrs.count ; i++) {
inputBufferList -> mBuffers[i].mNumberChannels = 1;
inputBufferList -> mBuffers[i].mDataByteSize = [[bfrs objectAtIndex:i] length];
inputBufferList -> mBuffers[i].mData = (void*) [[bfrs objectAtIndex:i] bytes];
}
// Create sound data payload
struct SoundDataPayload payload;
payload.data = inputBufferList;
payload.numPackets = inputPackets;
payload.packetDescriptions = NULL;
payload.used = NO;
// Convert data
UInt32 numPackets = outputPackets;
OSStatus err = AudioConverterFillComplexBuffer(converter, acvConverterComplexInput, &payload, &numPackets, bufferList, NULL);
if (err)
continue;
// Check how to output
if (bufferList -> mNumberBuffers > 1) {
// Output as array
NSMutableArray* array = [NSMutableArray arrayWithCapacity:bufferList -> mNumberBuffers];
for (int i = 0 ; i < bufferList -> mNumberBuffers ; i++)
[array addObject:[NSData dataWithBytes:bufferList -> mBuffers[i].mData length:bufferList -> mBuffers[i].mDataByteSize]];
// Save
[convertedBuffers addBuffer:array];
} else {
// Output as data
NSData* newData = [NSData dataWithBytes:bufferList -> mBuffers[0].mData length:bufferList -> mBuffers[0].mDataByteSize];
// Save
[convertedBuffers addBuffer:newData];
}
// Free memory
for (int i = 0 ; i < bufferList -> mNumberBuffers ; i++)
free(bufferList -> mBuffers[i].mData);
free(inputBufferList);
free(bufferList);
// Tell delegate
if (self.convertHandler)
//dispatch_async(dispatch_get_main_queue(), self.convertHandler);
self.convertHandler();
}
}
Formats when converting to iLBC:
// Get input format from mic
UInt32 size = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription inputDesc;
AudioUnitGetProperty(self.ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &inputDesc, &size);
// Set output stream description
size = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription outputDescription;
memset(&outputDescription, 0, size);
outputDescription.mFormatID = kAudioFormatiLBC;
OSStatus err = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputDescription);
Formats when converting from iLBC:
// Set input stream description
size = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription inputDescription;
memset(&inputDescription, 0, size);
inputDescription.mFormatID = kAudioFormatiLBC;
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &inputDescription);
// Set output stream description
UInt32 size = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription outputDesc;
AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outputDesc, &size);
You have to use an intermediate buffer to save up enough bytes from enough incoming packets to exactly match the number requested by the audio unit input. Relying on any single UDP packet of compressed data being exactly the right size won't work.
The AudioConverter may buffer samples and change the packet sizes depending on the compression format.
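A minimal sketch of that intermediate-buffer idea (the container and helper names are assumptions, not from this answer):
#import <Foundation/Foundation.h>
//Accumulate decoded PCM until at least as many bytes as the audio unit asked for
//are available, then hand over exactly that many bytes.
static void AppendDecodedPCM(NSMutableData *pcmFifo, NSData *decoded)
{
    [pcmFifo appendData:decoded];
}
static BOOL FillRenderBuffer(NSMutableData *pcmFifo, void *dst, UInt32 bytesRequested)
{
    if (pcmFifo.length < bytesRequested)
    {
        memset(dst, 0, bytesRequested); //not enough buffered yet: output silence
        return NO;
    }
    memcpy(dst, pcmFifo.bytes, bytesRequested);
    [pcmFifo replaceBytesInRange:NSMakeRange(0, bytesRequested) withBytes:NULL length:0];
    return YES;
}
In a real render callback this bookkeeping also has to be thread-safe (for example a lock-free ring buffer), because the I/O thread must not block.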
