How do I convert FLAC to WAV on iOS?

I have a file which is encoded with FLAC and I want to convert it to WAV.
I have added this FFmpeg library to my project and imported it.
I see some code from this answer, but I am unclear on how to use it:
#import "avformat.h"
// Some code goes here
/*
* avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
*/
int openInputValue = avformat_open_input(&pFormatCtx, utf8FilePath, inputFormat, nil);
NSLog(#"%s - %d # openInputValue = %d", __PRETTY_FUNCTION__, __LINE__, openInputValue);
I currently have this code in a function which takes NSData holding the FLAC file. If avformat_open_input is the correct call, how do I set the variable? If it is not the correct call, what is?
This question seems like a duplicate but it doesn't really have a good answer.
Also note that I don't want a player. This file contains MQA so I need to run it through my own custom decoder.
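For context, the variables in that snippet would be set up along these lines (a sketch of the usual FFmpeg pattern, not my final code; filePath here is a hypothetical NSString, and passing NULL as the input format lets FFmpeg probe it):
AVFormatContext *pFormatCtx = avformat_alloc_context();
const char *utf8FilePath = [filePath UTF8String]; // filePath: hypothetical NSString*
int openInputValue = avformat_open_input(&pFormatCtx, utf8FilePath, NULL, NULL);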

I was able to implement this using this code for decoding and this code to actually write the WAV header/body.
As an added bonus, this was very helpful in decoding NSData instead of a file.
Here is my finished decoder, though I wouldn't expect it to work in any case except mine.
//
// FlacToWavConverter.m
// SuperpoweredMQAExample
//
// Created by Tony Lawrence on 5/18/17.
// Copyright © 2017 imect. All rights reserved.
//
#import "FlacToWavConverter.h"
#import "avformat.h"
#import "avcodec.h"
#import "avutil.h"
#import "swresample.h"
#import "file.h"
@implementation FlacToWavConverter
+(NSURL*)convertFlacToWav:(NSData*)data {
//const char* input_filename = [filePath UTF8String];
int buffer_size = 16384;
// This call is necessarily done once in your app to initialize
// libavformat to register all the muxers, demuxers and protocols.
av_register_all();
// A media container
AVFormatContext* container = avformat_alloc_context();
//Make a custom IO context so that we can read from memory instead of a file...
unsigned char* iobuffer = av_malloc(buffer_size);
struct buffer_data bd = { 0 };
bd.ptr = (uint8_t*)data.bytes;
bd.size = data.length;
AVIOContext* ioContext = avio_alloc_context(iobuffer, buffer_size, 0, &bd, &read_packet, NULL, NULL);
container->pb = ioContext;
if (avformat_open_input(&container, "arbitrary", NULL, NULL) < 0) {
NSLog(#"Could not open file");
}
if (avformat_find_stream_info(container, NULL) < 0) {
NSLog(#"Could not find file info");
}
int stream_id = -1;
// To find the first audio stream. This process may not be necessary
// if you can guarantee that the container contains only the desired
// audio stream
int i;
for (i = 0; i < container->nb_streams; i++) {
if (container->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
stream_id = i;
break;
}
}
if (stream_id == -1) {
NSLog(#"Could not find an audio stream");
}
// Extract some metadata
AVDictionary* metadata = container->metadata;
// Find the appropriate codec and open it
AVCodecContext* codec_context = container->streams[stream_id]->codec;
AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
if (avcodec_open2(codec_context, codec, NULL) < 0) {
NSLog(#"Could not find open the needed codec");
}
NSMutableData *pcmFile = [NSMutableData new];
AVPacket packet;
int8_t buffer[buffer_size];
while (1) {
//buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
// Read one packet into `packet`
if (av_read_frame(container, &packet) < 0) {
break; // End of stream. Done decoding.
}
// Decodes from `packet` into the buffer
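// (Note: avcodec_decode_audio3 has long been deprecated; newer FFmpeg builds
// expect avcodec_send_packet/avcodec_receive_frame instead.)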
if (avcodec_decode_audio3(codec_context, (int16_t*)buffer, &buffer_size, &packet) < 1) {
break; // Error in decoding
}
// Send the buffer contents to the audio device
[pcmFile appendBytes:buffer length:buffer_size];
}
avformat_close_input(&container);
//fprintf(stdout, "Done playing. Exiting...");
NSURL *file = [FlacToWavConverter getAndCreatePlayableFileFromPcmData:pcmFile];
NSLog(#"Got a playable file maybe? %#", [file absoluteString]);
return file;
}
+(NSURL *) getAndCreatePlayableFileFromPcmData:(NSData *)data
{
NSArray *dirPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *docsDir = [dirPaths objectAtIndex:0];
NSString *wavFilePath = [docsDir stringByAppendingPathComponent:@"output.wav"];
//NSLog(@"PCM data : %@",data);
FILE *fout;
short NumChannels = 2;
short BitsPerSample = 16;
int SamplingRate = 44100;
int numOfSamples = [data length];
int ByteRate = NumChannels*BitsPerSample*SamplingRate/8;
short BlockAlign = NumChannels*BitsPerSample/8;
int DataSize = NumChannels*numOfSamples*BitsPerSample/8;
int chunkSize = 16;
int totalSize = 46 + DataSize;
short audioFormat = 1;
if((fout = fopen([wavFilePath cStringUsingEncoding:1], "w")) == NULL)
{
printf("Error opening out file ");
}
fwrite("RIFF", sizeof(char), 4,fout);
fwrite(&totalSize, sizeof(int), 1, fout);
fwrite("WAVE", sizeof(char), 4, fout);
fwrite("fmt ", sizeof(char), 4, fout);
fwrite(&chunkSize, sizeof(int),1,fout);
fwrite(&audioFormat, sizeof(short), 1, fout);
fwrite(&NumChannels, sizeof(short),1,fout);
fwrite(&SamplingRate, sizeof(int), 1, fout);
fwrite(&ByteRate, sizeof(int), 1, fout);
fwrite(&BlockAlign, sizeof(short), 1, fout);
fwrite(&BitsPerSample, sizeof(short), 1, fout);
fwrite("data", sizeof(char), 4, fout);
fwrite(&DataSize, sizeof(int), 1, fout);
fclose(fout);
NSMutableData *pamdata = [NSMutableData dataWithData:data];
NSFileHandle *handle;
handle = [NSFileHandle fileHandleForUpdatingAtPath:wavFilePath];
[handle seekToEndOfFile];
[handle writeData:pamdata];
[handle closeFile];
NSLog(#"Saved wav: %#", wavFilePath);
return [NSURL URLWithString:wavFilePath];
}
struct buffer_data {
uint8_t *ptr;
size_t size; ///< size left in the buffer
};
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
struct buffer_data *bd = (struct buffer_data *)opaque;
buf_size = FFMIN(buf_size, bd->size);
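// (Note: depending on your FFmpeg version, this callback may need to return
// AVERROR_EOF instead of 0 once bd->size reaches zero.)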
//printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
/* copy internal buffer data to buf */
memcpy(buf, bd->ptr, buf_size);
bd->ptr += buf_size;
bd->size -= buf_size;
return buf_size;
}
@end
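For completeness, this is roughly how I invoke it; a sketch that assumes the FLAC bytes are already in memory ("example.flac" is a hypothetical bundled resource):
NSString *flacPath = [[NSBundle mainBundle] pathForResource:@"example" ofType:@"flac"];
NSData *flacData = [NSData dataWithContentsOfFile:flacPath];
NSURL *wavURL = [FlacToWavConverter convertFlacToWav:flacData];
NSLog(@"Decoded WAV at: %@", wavURL);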

Related

How to encode and decode audio using opus

I am trying to integrate Opus into my application. The encode and decode functions return positive values, which indicates success, but the output audio can't be played. The raw audio data plays fine.
Here is how I encode the data. I use a 4-byte prefix to separate each packet.
self.encoder = opus_encoder_create(24000, 1, OPUS_APPLICATION_VOIP, &opusError);
opus_encoder_ctl(self.encoder, OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH_SUPERWIDEBAND));
- (void) encodeBufferList:(AudioBufferList *)bufferList {
BOOL success = TPCircularBufferProduceBytes(_circularBuffer, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
if (!success) {
NSLog(#"insufficient space in circular buffer!");
}
if (!_encoding) {
_encoding = YES;
dispatch_async(self.processingQueue, ^{
[self startEncodingLoop];
});
}
}
-(void)startEncodingLoop
{
int32_t availableBytes = 0;
opus_int16 *data = (opus_int16*)TPCircularBufferTail(_circularBuffer, &availableBytes);
int availableSamples = availableBytes / _inputASBD.mBytesPerFrame;
/*!
* Use dynamic duration
*/
// int validSamples[6] = {2.5, 5, 10, 20, 40, 60}; // in milliseconds
// int esample = validSamples[0] * self.sampleRate / 1000;
// for (int i = 0; i < 6; i++) {
// int32_t samp = validSamples[i] * self.sampleRate / 1000;
// if (availableSamples < samp) {
// break;
// }
// esample = samp;
// }
/*!
* Use 20ms
*/
int esample = 20 * self.sampleRate / 1000;
if (availableSamples < esample) {
/*!
* Out of data. Finish encoding
*/
self.encoding = NO;
[self.eDelegate didFinishEncode];
return;
}
// printf("raw input value for packet \n");
// for (int i = 0; i < esample * self.numberOfChannels; i++) {
// printf("%d :", data[i]);
// }
int returnValue = opus_encode(_encoder, data, esample, _encoderOutputBuffer, 1000);
TPCircularBufferConsume(_circularBuffer, esample * sizeof(opus_int16) * self.numberOfChannels);
// printf("output encode \n");
// for (int i = 0; i < returnValue; i++) {
// printf("%d :", _encoderOutputBuffer[i]);
// }
NSMutableData *outputData = [NSMutableData new];
NSError *error = nil;
if (returnValue <= 0) {
error = [OKUtilities errorForOpusErrorCode:returnValue];
}else {
[outputData appendBytes:_encoderOutputBuffer length:returnValue * sizeof(unsigned char)];
unsigned char int_field[4];
int_to_char(returnValue , int_field);
NSData *header = [NSData dataWithBytes:&int_field[0] length:4 * sizeof(unsigned char)];
if (self.eDelegate) {
[self.eDelegate didEncodeWithData:header];
}
}
if (self.eDelegate) {
[self.eDelegate didEncodeWithData:outputData];
}
[self startEncodingLoop];
}
And here is decode function:
self.decoder = opus_decoder_create(24000, 1, &opusError);
opus_decoder_ctl(self.decoder, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
opus_decoder_ctl(self.decoder, OPUS_SET_GAIN(10));
-(void)startParseData:(unsigned char*)data remainingLen:(int)len
{
if (len <= 0) {
[self.dDelegate didFinishDecode];
return;
}
int headLen = sizeof(unsigned char) * 4;
unsigned char h[4];
h[0] = data[0];
h[1] = data[1];
h[2] = data[2];
h[3] = data[3];
int packetLen = char_to_int(h);
data += headLen;
packetLen = packetLen * sizeof(unsigned char) * self.numberOfChannels;
[self decodePacket:data length:packetLen remainingLen:len - headLen];
}
-(void)decodePacket:(unsigned char*)inputData length:(int)len remainingLen:(int)rl
{
int bw = opus_packet_get_bandwidth(inputData); //TEST: return OPUS_BANDWIDTH_SUPERWIDEBAND here
int32_t decodedSamples = 0;
// int validSamples[6] = {2.5, 5, 10, 20, 40, 60}; // in milliseconds
/*!
* Use 60ms
*/
int esample = 60 * self.sampleRate / 1000;
// printf("input decode \n");
// for (int i = 0; i < len; i++) {
// printf("%d :", inputData[i]);
// }
_decoderBufferLength = esample * self.numberOfChannels * sizeof(opus_int16);
int returnValue = opus_decode(_decoder, inputData, len, _outputBuffer, esample, 1);
if (returnValue < 0) {
NSError *error = [OKUtilities errorForOpusErrorCode:returnValue];
NSLog(#"decode error %#", error);
inputData += len;
[self startParseData:inputData remainingLen:rl - len];
return;
}
decodedSamples = returnValue;
NSUInteger length = decodedSamples * self.numberOfChannels;
// printf("raw decoded data \n");
// for (int i = 0; i < length; i++) {
// printf("%d :", _outputBuffer[i]);
// }
NSData *audioData = [NSData dataWithBytes:_outputBuffer length:length * sizeof(opus_int16)];
if (self.dDelegate) {
[self.dDelegate didDecodeData:audioData];
}
inputData += len;
[self startParseData:inputData remainingLen:rl - len];
}
Please help me to point out what I am missing. An example would be great.
I think the problem is on the decode side:
You pass 1 as the fec argument to opus_decode(). This asks the decoder to generate the full packet duration's worth of data from error correction data in the current packet. I don't see any lost packet tracking in your code, so 0 should be passed instead. With that change your input and output duration should match.
You configure the decoder for mono output, but later use self.numberOfChannels in length calculations. Those should match or you may get unexpected behaviour.
OPUS_SET_SIGNAL doesn't do anything in opus_decoder_ctl(); it just returns OPUS_UNIMPLEMENTED without affecting behaviour.
Opus packets can be up to 120 ms in duration, so your limit of 60 ms could fail to decode some streams. If you're only talking to your own app that won't cause a problem the way you've configured it, since libopus defaults to 20ms frames.
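To make the first two points concrete, here is a minimal sketch of the decode call as I would write it, assuming a mono decoder, no packet-loss tracking, and an _outputBuffer sized for a worst-case 120 ms packet:
int channels = 1; // must match opus_decoder_create(24000, 1, &opusError)
int maxFrameSamples = 120 * self.sampleRate / 1000; // 120 ms is the Opus worst case
// fec = 0: we are not reconstructing a lost packet from in-band FEC,
// so the decoded duration will match what was encoded.
int decodedSamples = opus_decode(_decoder, inputData, len, _outputBuffer, maxFrameSamples, 0);
if (decodedSamples > 0) {
    NSData *audioData = [NSData dataWithBytes:_outputBuffer length:decodedSamples * channels * sizeof(opus_int16)];
    [self.dDelegate didDecodeData:audioData];
}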
I found what the problem was. I had set the audio format to float (kAudioFormatFlagIsPacked|kAudioFormatFlagIsFloat), so I should use opus_encode_float and opus_decode_float instead of opus_encode and opus_decode.
As @Ralph says, we should use fec = 0 in opus_decode. Thanks to @Ralph.
One thing I notice is that you're treating the return value of opus_encode() as a number of samples encoded, when it's the number of bytes in the compressed packet. That means you're writing 50% or 75% garbage data from the end of _encoderOutputBuffer into your encoded stream.
Also make sure _encoderOutputBuffer has room for the hard-coded 1000 byte packet-length limit you're passing in.
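Combining those encode-side points, the packet write I would expect looks roughly like this (a sketch; kMaxPacketBytes is a hypothetical constant that must not exceed sizeof(_encoderOutputBuffer)):
opus_int32 packetBytes = opus_encode(_encoder, data, esample, _encoderOutputBuffer, kMaxPacketBytes);
if (packetBytes > 0) {
    // packetBytes is a byte count, not a sample count: frame the packet with
    // the 4-byte length prefix and send exactly that many payload bytes.
    unsigned char lengthPrefix[4];
    int_to_char(packetBytes, lengthPrefix); // the question's existing helper
    NSMutableData *framed = [NSMutableData dataWithBytes:lengthPrefix length:4];
    [framed appendBytes:_encoderOutputBuffer length:packetBytes];
    [self.eDelegate didEncodeWithData:framed];
}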

Issue with digital signature generated using PoDoFo library

I'm using OpenSSL to generate a digital signature for a PDF via the PoDoFo library.
Here is the logic for signature handler
OpenSSLSignatureHandler.h
#import <Foundation/Foundation.h>
// OpenSSL includes
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/pkcs12.h>
#include <openssl/pkcs7.h>
#include <openssl/rsa.h>
#include <openssl/sha.h>
@interface OpenSSLSignatureHandler : NSObject
{
SHA_CTX m_sha_ctx;
EVP_PKEY* mp_pkey; // private key
X509* mp_x509; // signing certificate
STACK_OF(X509)* mp_ca; // certificate chain up to the CA
}
- (id) initWithCert:(NSString*) p12file password: (NSString*) password;
- (void) AppendData: (NSData*)data;
- (NSData*) getSignature;
@end
OpenSSLSignatureHandler.m
#import "OpenSSLSignatureHandler.h"
#include <string>
@implementation OpenSSLSignatureHandler
- (id) initWithCert:(NSString*) p12file password: (NSString*) password{
if (self = [super init]) {
// Initialize OpenSSL library
CRYPTO_malloc_init();
ERR_load_crypto_strings();
OpenSSL_add_all_algorithms();
FILE* fp = fopen([p12file cStringUsingEncoding: NSASCIIStringEncoding], "rb");
if (fp == NULL)
@throw ([NSException exceptionWithName: @"PDFNet Exception" reason: @"Cannot open private key." userInfo: nil]);
PKCS12* p12 = d2i_PKCS12_fp(fp, NULL);
fclose(fp);
if (p12 == NULL)
@throw ([NSException exceptionWithName: @"PDFNet Exception" reason: @"Cannot parse private key." userInfo: nil]);
mp_pkey = NULL;
mp_x509 = NULL;
mp_ca = NULL;
int parseResult = PKCS12_parse(p12, [password cStringUsingEncoding: NSASCIIStringEncoding], &mp_pkey, &mp_x509, &mp_ca);
PKCS12_free(p12);
if (parseResult == 0)
@throw ([NSException exceptionWithName: @"PDFNet Exception" reason: @"Cannot parse private key." userInfo: nil]);
//initialize sha context
SHA1_Init(&m_sha_ctx);
}
return self;
}
- (void) AppendData: (NSData*)data
{
SHA1_Update(&m_sha_ctx, [data bytes], [data length]);
return;
}
- (BOOL) Reset
{
SHA1_Init(&m_sha_ctx);
return (YES);
}
- (NSData*) getSignature
{
unsigned char sha_buffer[SHA_DIGEST_LENGTH];
memset((void*) sha_buffer, 0, SHA_DIGEST_LENGTH);
SHA1_Final(sha_buffer, &m_sha_ctx);
PKCS7* p7 = PKCS7_new();
PKCS7_set_type(p7, NID_pkcs7_signed);
PKCS7_SIGNER_INFO* p7Si = PKCS7_add_signature(p7, mp_x509, mp_pkey, EVP_sha1());
PKCS7_add_attrib_content_type(p7Si, OBJ_nid2obj(NID_pkcs7_data));
PKCS7_add0_attrib_signing_time(p7Si, NULL);
PKCS7_add1_attrib_digest(p7Si, (const unsigned char*) sha_buffer, SHA_DIGEST_LENGTH);
PKCS7_add_certificate(p7, mp_x509);
int c = 0;
for ( ; c < sk_X509_num(mp_ca); c++) {
X509* cert = sk_X509_value(mp_ca, c);
PKCS7_add_certificate(p7, cert);
}
PKCS7_set_detached(p7, 1);
PKCS7_content_new(p7, NID_pkcs7_data);
PKCS7_SIGNER_INFO_sign(p7Si);
int p7Len = i2d_PKCS7(p7, NULL);
NSMutableData* signature = [NSMutableData data];
unsigned char* p7Buf = (unsigned char*) malloc(p7Len);
if (p7Buf != NULL) {
unsigned char* pP7Buf = p7Buf;
i2d_PKCS7(p7, &pP7Buf);
[signature appendBytes: (const void*) p7Buf length: p7Len];
free(p7Buf);
}
PKCS7_free(p7);
return (signature);
}
- (void) dealloc
{
sk_X509_free(mp_ca);
X509_free(mp_x509);
EVP_PKEY_free(mp_pkey);
// Release OpenSSL resource usage
ERR_free_strings();
EVP_cleanup();
[super dealloc];
}
@end
Using podofo to embed signature
void CreateSimpleForm( PoDoFo::PdfPage* pPage, PoDoFo::PdfStreamedDocument* pDoc, const PoDoFo::PdfData &signatureData )
{
PoDoFo::PdfPainter painter;
PoDoFo::PdfFont* pFont = pDoc->CreateFont( "Courier" );
painter.SetPage( pPage );
painter.SetFont( pFont );
painter.DrawText( 10000 * CONVERSION_CONSTANT, 280000 * CONVERSION_CONSTANT, "PoDoFo Sign Test" );
painter.FinishPage();
PoDoFo::PdfSignatureField signField( pPage, PoDoFo::PdfRect( 0, 0, 0, 0 ), pDoc );
signField.SetFieldName("SignatureFieldName");
signField.SetSignature(signatureData);
signField.SetSignatureReason("Document verification");
// Set time of signing
signField.SetSignatureDate( PoDoFo::PdfDate() );
}
+(void)addDigitalSignatureOnPage:(NSInteger)pageIndex outpath:(NSString*)path/*doc:(PoDoFo::PdfMemDocument*)aDoc*/{
PoDoFo::PdfPage* pPage;
PoDoFo::PdfSignOutputDevice signer([path UTF8String]);
// Reserve space for signature
signer.SetSignatureSize(1024);
if([[NSFileManager defaultManager] fileExistsAtPath:path]){
PoDoFo::PdfStreamedDocument writer( &signer, PoDoFo::ePdfVersion_1_5 );
// Disable default appearance
writer.GetAcroForm(PoDoFo::ePdfCreateObject, PoDoFo::PdfAcroForm::ePdfAcroFormDefaultAppearance_None);
pPage = writer.CreatePage(PoDoFo::PdfPage::CreateStandardPageSize(PoDoFo::ePdfPageSize_A4 ) );
TEST_SAFE_OP( CreateSimpleForm( pPage, &writer, *signer.GetSignatureBeacon() ) );
TEST_SAFE_OP( writer.Close() );
}
// Check if position of signature was found
if(signer.HasSignaturePosition()) {
// Adjust ByteRange for signature
signer.AdjustByteRange();
// Read data for signature and count it
// We have to seek at the beginning of the file
signer.Seek(0);
//OpenSSLSignatureHandler
NSString * p12certpath = [[NSBundle mainBundle] pathForResource:@"iphone-cert" ofType:@"p12"];
OpenSSLSignatureHandler*signatureHandler = [[OpenSSLSignatureHandler alloc] initWithCert:p12certpath password:@"test123$"];
char buff[65536];
size_t len;
while( (len = signer.ReadForSignature(buff, 65536))>0 )
{
NSData* data = [NSData dataWithBytes:(const void *)buff length:len];
[signatureHandler AppendData:data];
}
const PoDoFo::PdfData *pSignature = NULL;
// NSString *pkcsMessage = [[signatureHandler getSignature] base64EncodedString];
// NSLog(#"OpenSSLSignatureHandler signature message = %#",pkcsMessage);
// const char * cstr = [pkcsMessage UTF8String];
// if(pSignature==NULL)pSignature = new PoDoFo::PdfData(cstr, sizeof(cstr));
unsigned char *bytePtr = (unsigned char *)[[signatureHandler getSignature] bytes];
std::string str;
str.append(reinterpret_cast<const char*>(bytePtr));
// Paste signature to the file
if(pSignature==NULL)pSignature = new PoDoFo::PdfData(str.c_str(), sizeof(str));
NSLog(#"str = %s",str.c_str());
NSLog(#"sizeof(str) = %lu",sizeof(str));
signer.SetSignature(*pSignature);
}
signer.Flush();
}
But the signature that's embedded in the PDF is always empty.
Can someone help with this issue?
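One thing worth checking in the code above: str.append(const char*) stops at the first NUL byte, and DER-encoded PKCS#7 data is full of them; sizeof(str) is also the size of the std::string object itself, not of the signature. A binary-safe sketch, assuming your PoDoFo build's PdfData takes a (data, length) pair as the snippet suggests:
NSData *sigData = [signatureHandler getSignature];
// Copy the DER blob by explicit length, never via C-string functions.
const PoDoFo::PdfData signatureData(reinterpret_cast<const char*>([sigData bytes]), [sigData length]);
signer.SetSignature(signatureData);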

Decode MP3 File from NSData

For my application, I need to decode an MP3 file which is stored in an NSData object.
For security reasons, it is undesirable to write the NSData object to disk and re-open it using a System URL reference, even if its only locally stored for a few moments.
I would like to take advantage of Extended Audio File Services (or Audio File Services) to do this, but I'm having trouble getting a representation of the NSData, which exists only in memory, that can be read by these Audio File Services.
Edit: I want to decode the MP3 data so I can get access to linear, PCM audio samples for manipulation. Playing back from NSData object is not a problem.
My code is as follows:
decryptedData; //an NSData object which has already been initialized
const void *dataBytes = decryptedData.bytes; //pointer to the bytes in my NSData object
//this creates a CFURLRef from the pointer to the byte data
//I have printed out the resulting CFURL and have confirmed that it is indeed reading the bytes correctly
CFURLRef audioFileURLFromBytes = CFURLCreateWithBytes (kCFAllocatorDefault,
dataBytes,
decryptedData.length,
kCFStringEncodingASCII,
NULL);
//attempt to open the the URL using Extended Audio File Services
ExtAudioFileRef outExtAudioFile;
OSStatus err = 0;
err = ExtAudioFileOpenURL(audioFileURLFromBytes, &outExtAudioFile);
if (err != noErr) {
NSLog(#"ExtAudioFileOpenURL failed with OSStatus Code %i \n", err);
}
//Attempt to open the URL using Audio File Services
AudioFileID audioFile;
OSStatus res = 0;
res = AudioFileOpenURL(audioFileURLFromBytes, kAudioFileReadPermission, kAudioFileMP3Type, &audioFile);
if (res != noErr) {
NSLog(#"AudioFileOpenURL failed with OSStatus Code %i \n", res);
}
Both attempts at opening the URL result in an OSStatus Code 43, which is "file not found".
I have verified that my pointer is pointing to the correct address in memory for the NSData and that the bytes can be read correctly.
Is there some limitation to the Extended Audio File Services that prohibit references to bytes stored in memory?
Thanks for any help you can provide.
Edit: I figured out how to do it using Sbooth's suggestion. Code below:
This function takes an NSData object containing an mp3 representation of an audio file. It decodes it as linear PCM so you can get the samples and then re-encodes it as AAC. I don't think MP3 encoding is available in CoreAudio across all platforms (mobile/desktop). This code was tested on my Mac and gets the job done.
-(void) audioFileReaderWithData: (NSData *) audioData {
AudioFileID refAudioFileID;
ExtAudioFileRef inputFileID;
ExtAudioFileRef outputFileID;
OSStatus result = AudioFileOpenWithCallbacks(audioData, readProc, 0, getSizeProc, 0, kAudioFileMP3Type, &refAudioFileID);
if(result != noErr){
NSLog(#"problem in theAudioFileReaderWithData function: result code %i \n", result);
}
result = ExtAudioFileWrapAudioFileID(refAudioFileID, false, &inputFileID);
if (result != noErr){
NSLog(#"problem in theAudioFileReaderWithData function Wraping the audio FileID: result code %i \n", result);
}
// Client Audio Format Description
AudioStreamBasicDescription clientFormat;
memset(&clientFormat, 0, sizeof(clientFormat));
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFramesPerPacket = 1;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mBitsPerChannel = 32;
clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame = 4 * clientFormat.mChannelsPerFrame;
clientFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
clientFormat.mSampleRate = 44100;
//Output Audio Format Description
AudioStreamBasicDescription outputFormat;
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mChannelsPerFrame = 2;
outputFormat.mSampleRate = 44100;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;
outputFormat.mFormatFlags = kMPEG4Object_AAC_Main;
outputFormat.mBitsPerChannel = 0;
outputFormat.mBytesPerFrame = 0;
outputFormat.mBytesPerPacket = 0;
outputFormat.mFramesPerPacket = 1024;
// create the outputFile that we're writing to here....
UInt32 outputFormatSize = sizeof(outputFormat);
result = 0;
result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &outputFormatSize, &outputFormat);
if(result != noErr)
NSLog(#"could not set the output format with status code %i \n",result);
NSMutableString *outputFilePath = [NSMutableString stringWithCapacity: 100];
[outputFilePath setString:@"/Users/You/Desktop/testAudio.m4a"];
NSURL *sourceURL = [NSURL fileURLWithPath:outputFilePath];
result = 0;
result = ExtAudioFileCreateWithURL((CFURLRef)sourceURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outputFileID);
if(result != noErr){
NSLog(#"ExtAudioFileCreateWithURL failed for outputFileID with status %i \n", result);
}
int size = sizeof(clientFormat);
result = 0;
result = ExtAudioFileSetProperty(inputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
if(result != noErr)
NSLog(#"error on ExtAudioFileSetProperty for input File with result code %i \n", result);
size = sizeof(clientFormat);
result = 0;
result = ExtAudioFileSetProperty(outputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
if(result != noErr)
NSLog(#"error on ExtAudioFileSetProperty for output File with result code %i \n", result);
int totalFrames = 0;
UInt32 outputFilePacketPosition = 0; //in bytes
UInt32 encodedBytes = 0;
while (1) {
UInt32 bufferByteSize = 22050 * 4 * 2;
char srcBuffer[bufferByteSize];
UInt32 numFrames = (bufferByteSize/clientFormat.mBytesPerFrame);
AudioBufferList fillBufList;
fillBufList.mNumberBuffers = 1;
fillBufList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
fillBufList.mBuffers[0].mData = srcBuffer;
result = 0;
result = ExtAudioFileRead(inputFileID, &numFrames, &fillBufList);
if (result != noErr) {
NSLog(#"Error on ExtAudioFileRead with result code %i \n", result);
totalFrames = 0;
break;
}
if (!numFrames)
break;
totalFrames = totalFrames + numFrames;
result = 0;
result = ExtAudioFileWrite(outputFileID,
numFrames,
&fillBufList);
if(result!= noErr){
NSLog(#"ExtAudioFileWrite failed with code %i \n", result);
}
encodedBytes += numFrames * clientFormat.mBytesPerFrame;
}
//Clean up
ExtAudioFileDispose(inputFileID);
ExtAudioFileDispose(outputFileID);
AudioFileClose(refAudioFileID);
}
And you'll need these functions as well...
static OSStatus readProc(void* clientData,
SInt64 position,
UInt32 requestCount,
void* buffer,
UInt32* actualCount)
{
NSData *inAudioData = (NSData *) clientData;
size_t dataSize = inAudioData.length;
size_t bytesToRead = 0;
if(position < dataSize) {
size_t bytesAvailable = dataSize - position;
bytesToRead = requestCount <= bytesAvailable ? requestCount : bytesAvailable;
[inAudioData getBytes: buffer range:NSMakeRange(position, bytesToRead)];
} else {
NSLog(#"data was not read \n");
bytesToRead = 0;
}
if(actualCount)
*actualCount = bytesToRead;
return noErr;
}
static SInt64 getSizeProc(void* clientData) {
NSData *inAudioData = (NSData *) clientData;
size_t dataSize = inAudioData.length;
return dataSize;
}
The problem is that you're trying to create a CFURLRef object from the audio bytes (MP3 frames) using the ASCII encoding. CFURLCreateWithBytes is meant to be used with byte strings, not binary data (e.g., "http://www.apple.com" as a char *). To accomplish what you want, use AudioFileOpenWithCallbacks, pass your NSData object as the refcon, and handle raw reading/seeking in your custom callbacks operating on the NSData that you passed in.
Use Audio Queue Services or AVPlayer for playing audio from a stream or memory.

clicking/tapping between buffers for AudioQueue

As you can see from the code, within my callback I extract the audio data, place it into NSData, and then send it off to another class to upload to the server. This all works, meaning the server receives and plays the audio data. HOWEVER, there is a clicking or tapping noise between the buffers. I am hoping someone can show me what is causing it and how it can be fixed.
I have read other related postings, but they all seem to refer to using only one buffer, with adding more being the fix. I am using 3 buffers and have tried adjusting that number, which did not fix it.
AQRecorder.mm
#include "AQRecorder.h"
RestClient * restClient;
NSData* data;
// ____________________________________________________________________________________
// Determine the size, in bytes, of a buffer necessary to represent the supplied number
// of seconds of audio data.
int AQRecorder::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds)
{
int packets, frames, bytes = 0;
try {
frames = (int)ceil(seconds * format->mSampleRate);
if (format->mBytesPerFrame > 0)
bytes = frames * format->mBytesPerFrame;
else {
UInt32 maxPacketSize;
if (format->mBytesPerPacket > 0)
maxPacketSize = format->mBytesPerPacket; // constant packet size
else {
UInt32 propertySize = sizeof(maxPacketSize);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
&propertySize), "couldn't get queue's maximum output packet size");
}
if (format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
packets = frames; // worst-case scenario: 1 frame in a packet
if (packets == 0) // sanity check
packets = 1;
bytes = packets * maxPacketSize;
}
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
return 0;
}
return bytes;
}
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
// write packets to file
// XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
// inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
// "AudioFileWritePackets failed");
aqr->mRecordPacket += inNumPackets;
// int numBytes = inBuffer->mAudioDataByteSize;
// SInt8 *testBuffer = (SInt8*)inBuffer->mAudioData;
//
// for (int i=0; i < numBytes; i++)
// {
// SInt8 currentData = testBuffer[i];
// printf("Current data in testbuffer is %d", currentData);
//
// NSData * temp = [NSData dataWithBytes:currentData length:sizeof(currentData)];
// }
data=[[NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize]retain];
[restClient uploadAudioData:data url:nil];
}
// if we're not stopping, re-enqueue the buffer so that it gets filled again
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
AQRecorder::AQRecorder()
{
mIsRunning = false;
mRecordPacket = 0;
data = [[NSData alloc]init];
restClient = [[RestClient sharedManager]retain];
}
AQRecorder::~AQRecorder()
{
AudioQueueDispose(mQueue, TRUE);
AudioFileClose(mRecordFile);
if (mFileName){
CFRelease(mFileName);
}
[restClient release];
[data release];
}
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
void AQRecorder::CopyEncoderCookieToFile()
{
UInt32 propertySize;
// get the magic cookie, if any, from the converter
OSStatus err = AudioQueueGetPropertySize(mQueue, kAudioQueueProperty_MagicCookie, &propertySize);
// we can get a noErr result and also a propertySize == 0
// -- if the file format does support magic cookies, but this file doesn't have one.
if (err == noErr && propertySize > 0) {
Byte *magicCookie = new Byte[propertySize];
UInt32 magicCookieSize;
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MagicCookie, magicCookie, &propertySize), "get audio converter's magic cookie");
magicCookieSize = propertySize; // the converter lies and tells us the wrong size
// now set the magic cookie on the output file
UInt32 willEatTheCookie = false;
// the converter wants to give us one; will the file take it?
err = AudioFileGetPropertyInfo(mRecordFile, kAudioFilePropertyMagicCookieData, NULL, &willEatTheCookie);
if (err == noErr && willEatTheCookie) {
err = AudioFileSetProperty(mRecordFile, kAudioFilePropertyMagicCookieData, magicCookieSize, magicCookie);
XThrowIfError(err, "set audio file's magic cookie");
}
delete[] magicCookie;
}
}
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));
UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mRecordFormat.mSampleRate), "couldn't get hardware sample rate");
//override samplearate to 8k from device sample rate
mRecordFormat.mSampleRate = 8000.0;
size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");
// mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;
}
if (inFormatID == kAudioFormatULaw) {
NSLog(#"is ulaw");
mRecordFormat.mSampleRate = 8000.0;
mRecordFormat.mFormatFlags = 0;
mRecordFormat.mFramesPerPacket = 1;
mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mBitsPerChannel = 8;
mRecordFormat.mBytesPerPacket = 1;
mRecordFormat.mBytesPerFrame = 1;
}
}
NSString * GetDocumentDirectory(void)
{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *basePath = ([paths count] > 0) ? [paths objectAtIndex:0] : nil;
return basePath;
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
CFURLRef url;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// specify the recording format
SetupAudioFormat(kAudioFormatULaw /*kAudioFormatLinearPCM*/);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *basePath = GetDocumentDirectory();
NSString *recordFile = [basePath /*NSTemporaryDirectory()*/ stringByAppendingPathComponent: (NSString*)inRecordFile];
url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);
// create the audio file
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile,
&mRecordFile), "AudioFileCreateWithURL failed");
CFRelease(url);
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException &e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
void AQRecorder::StopRecord()
{
// end recording
mIsRunning = false;
// XThrowIfError(AudioQueueReset(mQueue), "AudioQueueStop failed");
XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
CopyEncoderCookieToFile();
if (mFileName)
{
CFRelease(mFileName);
mFileName = NULL;
}
AudioQueueDispose(mQueue, true);
AudioFileClose(mRecordFile);
}
I changed my #define kBufferDurationSeconds from .5 to 5.0, and although the clicking is still there, it is a lot less noticeable.
Please post if you have suggestions or an answer, as this is not a fix, merely a workaround that's somewhat better than before.
I also tried appending data to data a number of times prior to sending it to the server. This also seems to have helped.
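For the record, the appending workaround amounts to something like this inside MyInputBufferHandler (a sketch, not a fix; kUploadThreshold is a hypothetical batch size I tuned by ear):
static const NSUInteger kUploadThreshold = 32 * 1024; // hypothetical batch size
static NSMutableData *pending = nil;
if (!pending) pending = [[NSMutableData alloc] init];
[pending appendBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
if ([pending length] >= kUploadThreshold) {
    // One larger POST instead of many small ones; fewer chunk boundaries
    // seemed to mean fewer audible clicks for me.
    [restClient uploadAudioData:pending url:nil];
    [pending setLength:0];
}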

stream media FROM iPhone

I need to stream audio from the mic to an HTTP server.
These recording settings are what I need:
NSDictionary *audioOutputSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt: kAudioFormatULaw],AVFormatIDKey,
[NSNumber numberWithFloat:8000.0],AVSampleRateKey,//was 44100.0
[NSData dataWithBytes: &acl length: sizeof( AudioChannelLayout ) ], AVChannelLayoutKey,
[NSNumber numberWithInt:1],AVNumberOfChannelsKey,
[NSNumber numberWithInt:64000],AVEncoderBitRateKey,
nil];
The API I'm coding to states:
Send a continuous stream of audio to the currently viewed camera.
Audio needs to be encoded at G711 mu-law at 64 kbit/s for transfer to
the Axis camera at the bedside. send (this should be a POST URL in SSL
to connected server): POST /transmitaudio?id=
Content-type: audio/basic Content-Length: 99999 (length is ignored)
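So the raw request on the wire should look roughly like this (a sketch pieced together from the spec above and the headers my code sends later; the id value is a placeholder):
POST /transmitaudio?id=<session-id> HTTP/1.0
Content-Type: audio/basic
Content-Length: 99999

<continuous stream of 8 kHz mono G.711 mu-law bytes>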
Below is a list of links I have tried to work with.
LINK - (SO) basic explanation that only audio units and audio queues will allow for NSData as output when recording via the mic | not an example but a good definition of what's needed (audio queues, or audio units)
LINK - (SO) audio callback example | only includes the callback
LINK - (SO) REMOTE IO example | doesn't have start/stop and is for saving to a file
LINK - (SO) REMOTE IO example | unanswered, not working
LINK - (SO) basic audio recording example | good example but records to a file
LINK - (SO) question that guided me to the InMemoryAudioFile class (couldn't get it working) | followed links to InMemoryAudioFile (or something like that) but couldn't get it working.
LINK - (SO) more audio unit and remote IO examples/problems | got this one working, but once again there isn't a stop function, and even when I figured out what the call is and made it stop, it still didn't seem to transmit the audio to the server.
LINK - decent remoteIO and audio queue example, but | another good example and I almost got it working, but I had some problems with the code (the compiler thinking it's not obj-c++), and once again I don't know how to get audio "data" from it instead of a file.
LINK - Apple docs for audio queues | had problems with frameworks; worked through it (see question below) but in the end couldn't get it working. However, I probably didn't give this one as much time as the others, and maybe I should have.
LINK - (SO) problems I have had when trying to implement audio queues/units | not an example
LINK - (SO) another remoteIO example | another good example, but I can't figure out how to get it to produce data instead of a file.
LINK - also looks interesting, circular buffers | couldn't figure out how to incorporate this with the audio callback
Here is my current class attempting to stream. This seems to work, although there is static coming out of the speakers at the receiver's end (connected to the server), which seems to indicate a problem with the audio data format.
IOS VERSION (minus delegate methods for GCD socket):
@implementation MicCommunicator {
AVAssetWriter * assetWriter;
AVAssetWriterInput * assetWriterInput;
}
@synthesize captureSession = _captureSession;
@synthesize output = _output;
@synthesize restClient = _restClient;
@synthesize uploadAudio = _uploadAudio;
@synthesize outputPath = _outputPath;
@synthesize sendStream = _sendStream;
@synthesize receiveStream = _receiveStream;
@synthesize socket = _socket;
@synthesize isSocketConnected = _isSocketConnected;
-(id)init {
if ((self = [super init])) {
_receiveStream = [[NSStream alloc]init];
_sendStream = [[NSStream alloc]init];
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
_isSocketConnected = FALSE;
_restClient = [RestClient sharedManager];
_uploadAudio = false;
NSArray *searchPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
_outputPath = [NSURL fileURLWithPath:[[searchPaths objectAtIndex:0] stringByAppendingPathComponent:@"micOutput.output"]];
NSError * assetError;
AudioChannelLayout acl;
bzero(&acl, sizeof(acl));
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono; //kAudioChannelLayoutTag_Stereo;
NSDictionary *audioOutputSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt: kAudioFormatULaw],AVFormatIDKey,
[NSNumber numberWithFloat:8000.0],AVSampleRateKey,//was 44100.0
[NSData dataWithBytes: &acl length: sizeof( AudioChannelLayout ) ], AVChannelLayoutKey,
[NSNumber numberWithInt:1],AVNumberOfChannelsKey,
[NSNumber numberWithInt:64000],AVEncoderBitRateKey,
nil];
assetWriterInput = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio outputSettings:audioOutputSettings]retain];
[assetWriterInput setExpectsMediaDataInRealTime:YES];
assetWriter = [[AVAssetWriter assetWriterWithURL:_outputPath fileType:AVFileTypeWAVE error:&assetError]retain]; //AVFileTypeAppleM4A
if (assetError) {
NSLog (#"error initing mic: %#", assetError);
return nil;
}
if ([assetWriter canAddInput:assetWriterInput]) {
[assetWriter addInput:assetWriterInput];
} else {
NSLog (#"can't add asset writer input...!");
return nil;
}
}
return self;
}
-(void)dealloc {
[_output release];
[_captureSession release];
[_captureSession release];
[assetWriter release];
[assetWriterInput release];
[super dealloc];
}
-(void)beginStreaming {
NSLog(#"avassetwrter class is %#",NSStringFromClass([assetWriter class]));
self.captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error = nil;
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];
if (audioInput)
[self.captureSession addInput:audioInput];
else {
NSLog(#"No audio input found.");
return;
}
self.output = [[AVCaptureAudioDataOutput alloc] init];
dispatch_queue_t outputQueue = dispatch_queue_create("micOutputDispatchQueue", NULL);
[self.output setSampleBufferDelegate:self queue:outputQueue];
dispatch_release(outputQueue);
self.uploadAudio = FALSE;
[self.captureSession addOutput:self.output];
[assetWriter startWriting];
[self.captureSession startRunning];
}
-(void)pauseStreaming
{
self.uploadAudio = FALSE;
}
-(void)resumeStreaming
{
self.uploadAudio = TRUE;
}
-(void)finishAudioWork
{
[self dealloc];
}
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
AudioBufferList audioBufferList;
NSMutableData *data= [[NSMutableData alloc] init];
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
for (int y = 0; y < audioBufferList.mNumberBuffers; y++) {
AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
Float32 *frame = (Float32*)audioBuffer.mData;
[data appendBytes:frame length:audioBuffer.mDataByteSize];
}
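// (Note: audioBuffer.mData here is the capture session's uncompressed LPCM,
// not the ulaw bytes described by the asset writer settings; worth checking
// when the receiver hears static.)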
// append [data bytes] to your NSOutputStream
// These two lines write to disk, you may not need this, just providing an example
[assetWriter startSessionAtSourceTime:CMSampleBufferGetPresentationTimeStamp(sampleBuffer)];
[assetWriterInput appendSampleBuffer:sampleBuffer];
//start upload audio data
if (self.uploadAudio) {
if (!self.isSocketConnected) {
[self connect];
}
NSString *requestStr = [NSString stringWithFormat:@"POST /transmitaudio?id=%@ HTTP/1.0\r\n\r\n",self.restClient.sessionId];
NSData *requestData = [requestStr dataUsingEncoding:NSUTF8StringEncoding];
[self.socket writeData:requestData withTimeout:5 tag:0];
[self.socket writeData:data withTimeout:5 tag:0];
}
//stop upload audio data
CFRelease(blockBuffer);
blockBuffer=NULL;
[data release];
}
And the JAVA version:
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder.AudioSource;
import android.util.Log;
public class AudioWorker extends Thread
{
private boolean stopped = false;
private String host;
private int port;
private long id=0;
boolean run=true;
AudioRecord recorder;
//ulaw encoder stuff
private final static String TAG = "UlawEncoderInputStream";
private final static int MAX_ULAW = 8192;
private final static int SCALE_BITS = 16;
private InputStream mIn;
private int mMax = 0;
private final byte[] mBuf = new byte[1024];
private int mBufCount = 0; // should be 0 or 1
private final byte[] mOneByte = new byte[1];
////
/**
* Give the thread high priority so that it's not canceled unexpectedly, and start it
*/
public AudioWorker(String host, int port, long id)
{
this.host = host;
this.port = port;
this.id = id;
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
// start();
}
@Override
public void run()
{
Log.i("AudioWorker", "Running AudioWorker Thread");
recorder = null;
AudioTrack track = null;
short[][] buffers = new short[256][160];
int ix = 0;
/*
* Initialize buffer to hold continuously recorded AudioWorker data, start recording, and start
* playback.
*/
try
{
int N = AudioRecord.getMinBufferSize(8000,AudioFormat.CHANNEL_IN_MONO,AudioFormat.ENCODING_PCM_16BIT);
recorder = new AudioRecord(AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, N*10);
track = new AudioTrack(AudioManager.STREAM_MUSIC, 8000, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, N*10, AudioTrack.MODE_STREAM);
recorder.startRecording();
// track.play();
/*
* Loops until something outside of this thread stops it.
* Reads the data from the recorder and writes it to the AudioWorker track for playback.
*/
SSLContext sc = SSLContext.getInstance("SSL");
sc.init(null, trustAllCerts, new java.security.SecureRandom());
SSLSocketFactory sslFact = sc.getSocketFactory();
SSLSocket socket = (SSLSocket)sslFact.createSocket(host, port);
socket.setSoTimeout(10000);
InputStream inputStream = socket.getInputStream();
DataInputStream in = new DataInputStream(new BufferedInputStream(inputStream));
OutputStream outputStream = socket.getOutputStream();
DataOutputStream os = new DataOutputStream(new BufferedOutputStream(outputStream));
PrintWriter socketPrinter = new PrintWriter(os);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
// socketPrinter.println("POST /transmitaudio?patient=1333369798370 HTTP/1.0");
socketPrinter.println("POST /transmitaudio?id="+id+" HTTP/1.0");
socketPrinter.println("Content-Type: audio/basic");
socketPrinter.println("Content-Length: 99999");
socketPrinter.println("Connection: Keep-Alive");
socketPrinter.println("Cache-Control: no-cache");
socketPrinter.println();
socketPrinter.flush();
while(!stopped)
{
Log.i("Map", "Writing new data to buffer");
short[] buffer = buffers[ix++ % buffers.length];
N = recorder.read(buffer,0,buffer.length);
track.write(buffer, 0, buffer.length);
byte[] bytes2 = new byte[buffer.length * 2];
ByteBuffer.wrap(bytes2).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(buffer);
read(bytes2, 0, bytes2.length);
os.write(bytes2,0,bytes2.length);
//
// ByteBuffer byteBuf = ByteBuffer.allocate(2*N);
// System.out.println("byteBuf length "+2*N);
// int i = 0;
// while (buffer.length > i) {
// byteBuf.putShort(buffer[i]);
// i++;
// }
// byte[] b = new byte[byteBuf.remaining()];
}
os.close();
}
catch(Throwable x)
{
Log.w("AudioWorker", "Error reading voice AudioWorker", x);
}
/*
* Frees the thread's resources after the loop completes so that it can be run again
*/
finally
{
recorder.stop();
recorder.release();
track.stop();
track.release();
}
}
/**
* Called from outside of the thread in order to stop the recording/playback loop
*/
public void close()
{
stopped = true;
}
public void resumeThread()
{
stopped = false;
run();
}
TrustManager[] trustAllCerts = new TrustManager[]{
new X509TrustManager() {
public java.security.cert.X509Certificate[] getAcceptedIssuers() {
return null;
}
public void checkClientTrusted(
java.security.cert.X509Certificate[] certs, String authType) {
}
public void checkServerTrusted(
java.security.cert.X509Certificate[] chain, String authType) {
for (int j=0; j<chain.length; j++)
{
System.out.println("Client certificate information:");
System.out.println(" Subject DN: " + chain[j].getSubjectDN());
System.out.println(" Issuer DN: " + chain[j].getIssuerDN());
System.out.println(" Serial number: " + chain[j].getSerialNumber());
System.out.println("");
}
}
}
};
public static void encode(byte[] pcmBuf, int pcmOffset,
byte[] ulawBuf, int ulawOffset, int length, int max) {
// from 'ulaw' in wikipedia
// +8191 to +8159 0x80
// +8158 to +4063 in 16 intervals of 256 0x80 + interval number
// +4062 to +2015 in 16 intervals of 128 0x90 + interval number
// +2014 to +991 in 16 intervals of 64 0xA0 + interval number
// +990 to +479 in 16 intervals of 32 0xB0 + interval number
// +478 to +223 in 16 intervals of 16 0xC0 + interval number
// +222 to +95 in 16 intervals of 8 0xD0 + interval number
// +94 to +31 in 16 intervals of 4 0xE0 + interval number
// +30 to +1 in 15 intervals of 2 0xF0 + interval number
// 0 0xFF
// -1 0x7F
// -31 to -2 in 15 intervals of 2 0x70 + interval number
// -95 to -32 in 16 intervals of 4 0x60 + interval number
// -223 to -96 in 16 intervals of 8 0x50 + interval number
// -479 to -224 in 16 intervals of 16 0x40 + interval number
// -991 to -480 in 16 intervals of 32 0x30 + interval number
// -2015 to -992 in 16 intervals of 64 0x20 + interval number
// -4063 to -2016 in 16 intervals of 128 0x10 + interval number
// -8159 to -4064 in 16 intervals of 256 0x00 + interval number
// -8192 to -8160 0x00
// set scale factors
if (max <= 0) max = MAX_ULAW;
int coef = MAX_ULAW * (1 << SCALE_BITS) / max;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[pcmOffset++]) + (pcmBuf[pcmOffset++] << 8);
pcm = (pcm * coef) >> SCALE_BITS;
int ulaw;
if (pcm >= 0) {
ulaw = pcm <= 0 ? 0xff :
pcm <= 30 ? 0xf0 + (( 30 - pcm) >> 1) :
pcm <= 94 ? 0xe0 + (( 94 - pcm) >> 2) :
pcm <= 222 ? 0xd0 + (( 222 - pcm) >> 3) :
pcm <= 478 ? 0xc0 + (( 478 - pcm) >> 4) :
pcm <= 990 ? 0xb0 + (( 990 - pcm) >> 5) :
pcm <= 2014 ? 0xa0 + ((2014 - pcm) >> 6) :
pcm <= 4062 ? 0x90 + ((4062 - pcm) >> 7) :
pcm <= 8158 ? 0x80 + ((8158 - pcm) >> 8) :
0x80;
} else {
ulaw = -1 <= pcm ? 0x7f :
-31 <= pcm ? 0x70 + ((pcm - -31) >> 1) :
-95 <= pcm ? 0x60 + ((pcm - -95) >> 2) :
-223 <= pcm ? 0x50 + ((pcm - -223) >> 3) :
-479 <= pcm ? 0x40 + ((pcm - -479) >> 4) :
-991 <= pcm ? 0x30 + ((pcm - -991) >> 5) :
-2015 <= pcm ? 0x20 + ((pcm - -2015) >> 6) :
-4063 <= pcm ? 0x10 + ((pcm - -4063) >> 7) :
-8159 <= pcm ? 0x00 + ((pcm - -8159) >> 8) :
0x00;
}
ulawBuf[ulawOffset++] = (byte)ulaw;
}
}
public static int maxAbsPcm(byte[] pcmBuf, int offset, int length) {
int max = 0;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[offset++]) + (pcmBuf[offset++] << 8);
if (pcm < 0) pcm = -pcm;
if (pcm > max) max = pcm;
}
return max;
}
public int read(byte[] buf, int offset, int length) throws IOException {
if (recorder == null) throw new IllegalStateException("not open");
// return at least one byte, but try to fill 'length'
while (mBufCount < 2) {
int n = recorder.read(mBuf, mBufCount, Math.min(length * 2, mBuf.length - mBufCount));
if (n == -1) return -1;
mBufCount += n;
}
// compand data
int n = Math.min(mBufCount / 2, length);
encode(mBuf, 0, buf, offset, n, mMax);
// move data to bottom of mBuf
mBufCount -= n * 2;
for (int i = 0; i < mBufCount; i++) mBuf[i] = mBuf[i + n * 2];
return n;
}
}
My work on this topic has been staggering and long. I have finally gotten this to work however hacked it may be. Because of that I will list some warnings prior to posting the answer:
There is still a clicking noise between buffers
I get warnings due to the way I use my obj-c classes in the obj-c++ class, so there is something wrong there (however, from my research, using a pool does the same as release, so I don't believe this matters too much):
Object 0x13cd20 of class __NSCFString autoreleased with no pool in
place - just leaking - break on objc_autoreleaseNoPool() to debug
In order to get this working I had to comment out all AQPlayer references from SpeakHereController (see below) due to errors I couldn't fix any other way. It didn't matter for me, however, since I am only recording.
So the main answer to the above is that there is a bug in AVAssetWriter that stopped it from appending the bytes and writing the audio data. I finally found this out after contacting Apple support, who notified me about it. As far as I know the bug is specific to ulaw and AVAssetWriter, though I haven't tried many other formats to verify.
In response to this, the only other option is/was to use AudioQueues, something I had tried before but which had brought a bunch of problems, the biggest being my lack of knowledge of obj-c++. The class below that got things working is from the SpeakHere example, with slight changes so that the audio is ulaw formatted. The other problems came about in trying to get all the files to play nicely together; this was easily remedied by changing all the filenames in the chain to .mm. The next problem was trying to use the classes in harmony. This is still a WIP, and ties into warning number 2, but my basic solution was to use the SpeakHereController (also included in the SpeakHere example) instead of directly accessing AQRecorder.
Anyways here is the code:
Using the SpeakHereController from an obj-c class
.h
@property(nonatomic,strong) SpeakHereController * recorder;
.mm
[init method]
//AQRecorder wrapper (SpeakHereController) allocation
_recorder = [[SpeakHereController alloc]init];
//AQRecorder wrapper (SpeakHereController) initialization
//technically this class is a controller and that's why its init method is awakeFromNib
[_recorder awakeFromNib];
[recording]
bool buttonState = self.audioRecord.isSelected;
[self.audioRecord setSelected:!buttonState];
if ([self.audioRecord isSelected]) {
[self.recorder startRecord];
}else {
[self.recorder stopRecord];
}
SpeakHereController
#import "SpeakHereController.h"
@implementation SpeakHereController
@synthesize player;
@synthesize recorder;
@synthesize btn_record;
@synthesize btn_play;
@synthesize fileDescription;
@synthesize lvlMeter_in;
@synthesize playbackWasInterrupted;
char *OSTypeToStr(char *buf, OSType t)
{
char *p = buf;
char str[4], *q = str;
*(UInt32 *)str = CFSwapInt32(t);
for (int i = 0; i < 4; ++i) {
if (isprint(*q) && *q != '\\')
*p++ = *q++;
else {
sprintf(p, "\\x%02x", *q++);
p += 4;
}
}
*p = '\0';
return buf;
}
-(void)setFileDescriptionForFormat: (CAStreamBasicDescription)format withName:(NSString*)name
{
char buf[5];
const char *dataFormat = OSTypeToStr(buf, format.mFormatID);
NSString* description = [[NSString alloc] initWithFormat:@"(%d ch. %s @ %g Hz)", format.NumberChannels(), dataFormat, format.mSampleRate, nil];
fileDescription.text = description;
[description release];
}
#pragma mark Playback routines
-(void)stopPlayQueue
{
// player->StopQueue();
[lvlMeter_in setAq: nil];
btn_record.enabled = YES;
}
-(void)pausePlayQueue
{
// player->PauseQueue();
playbackWasPaused = YES;
}
-(void)startRecord
{
// recorder = new AQRecorder();
if (recorder->IsRunning()) // If we are currently recording, stop and save the file.
{
[self stopRecord];
}
else // If we're not recording, start.
{
// btn_play.enabled = NO;
// Set the button's state to "stop"
// btn_record.title = @"Stop";
// Start the recorder
recorder->StartRecord(CFSTR("recordedFile.caf"));
[self setFileDescriptionForFormat:recorder->DataFormat() withName:@"Recorded File"];
// Hook the level meter up to the Audio Queue for the recorder
// [lvlMeter_in setAq: recorder->Queue()];
}
}
- (void)stopRecord
{
// Disconnect our level meter from the audio queue
// [lvlMeter_in setAq: nil];
recorder->StopRecord();
// dispose the previous playback queue
// player->DisposeQueue(true);
// now create a new queue for the recorded file
recordFilePath = (CFStringRef)[NSTemporaryDirectory() stringByAppendingPathComponent: @"recordedFile.caf"];
// player->CreateQueueForFile(recordFilePath);
// Set the button's state back to "record"
// btn_record.title = @"Record";
// btn_play.enabled = YES;
}
- (IBAction)play:(id)sender
{
if (player->IsRunning())
{
if (playbackWasPaused) {
// OSStatus result = player->StartQueue(true);
// if (result == noErr)
// [[NSNotificationCenter defaultCenter] postNotificationName:@"playbackQueueResumed" object:self];
}
else
// [self stopPlayQueue];
nil;
}
else
{
// OSStatus result = player->StartQueue(false);
// if (result == noErr)
// [[NSNotificationCenter defaultCenter] postNotificationName:@"playbackQueueResumed" object:self];
}
}
- (IBAction)record:(id)sender
{
if (recorder->IsRunning()) // If we are currently recording, stop and save the file.
{
[self stopRecord];
}
else // If we're not recording, start.
{
// btn_play.enabled = NO;
//
// // Set the button's state to "stop"
// btn_record.title = @"Stop";
// Start the recorder
recorder->StartRecord(CFSTR("recordedFile.caf"));
[self setFileDescriptionForFormat:recorder->DataFormat() withName:@"Recorded File"];
// Hook the level meter up to the Audio Queue for the recorder
[lvlMeter_in setAq: recorder->Queue()];
}
}
#pragma mark AudioSession listeners
void interruptionListener( void * inClientData,
UInt32 inInterruptionState)
{
SpeakHereController *THIS = (SpeakHereController*)inClientData;
if (inInterruptionState == kAudioSessionBeginInterruption)
{
if (THIS->recorder->IsRunning()) {
[THIS stopRecord];
}
else if (THIS->player->IsRunning()) {
//the queue will stop itself on an interruption, we just need to update the UI
[[NSNotificationCenter defaultCenter] postNotificationName:@"playbackQueueStopped" object:THIS];
THIS->playbackWasInterrupted = YES;
}
}
else if ((inInterruptionState == kAudioSessionEndInterruption) && THIS->playbackWasInterrupted)
{
// we were playing back when we were interrupted, so reset and resume now
// THIS->player->StartQueue(true);
[[NSNotificationCenter defaultCenter] postNotificationName:@"playbackQueueResumed" object:THIS];
THIS->playbackWasInterrupted = NO;
}
}
void propListener( void * inClientData,
AudioSessionPropertyID inID,
UInt32 inDataSize,
const void * inData)
{
SpeakHereController *THIS = (SpeakHereController*)inClientData;
if (inID == kAudioSessionProperty_AudioRouteChange)
{
CFDictionaryRef routeDictionary = (CFDictionaryRef)inData;
//CFShow(routeDictionary);
CFNumberRef reason = (CFNumberRef)CFDictionaryGetValue(routeDictionary, CFSTR(kAudioSession_AudioRouteChangeKey_Reason));
SInt32 reasonVal;
CFNumberGetValue(reason, kCFNumberSInt32Type, &reasonVal);
if (reasonVal != kAudioSessionRouteChangeReason_CategoryChange)
{
/*CFStringRef oldRoute = (CFStringRef)CFDictionaryGetValue(routeDictionary, CFSTR(kAudioSession_AudioRouteChangeKey_OldRoute));
if (oldRoute)
{
printf("old route:\n");
CFShow(oldRoute);
}
else
printf("ERROR GETTING OLD AUDIO ROUTE!\n");
CFStringRef newRoute;
UInt32 size; size = sizeof(CFStringRef);
OSStatus error = AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &size, &newRoute);
if (error) printf("ERROR GETTING NEW AUDIO ROUTE! %d\n", error);
else
{
printf("new route:\n");
CFShow(newRoute);
}*/
if (reasonVal == kAudioSessionRouteChangeReason_OldDeviceUnavailable)
{
if (THIS->player->IsRunning()) {
[THIS pausePlayQueue];
[[NSNotificationCenter defaultCenter] postNotificationName:@"playbackQueueStopped" object:THIS];
}
}
// stop the queue if we had a non-policy route change
if (THIS->recorder->IsRunning()) {
[THIS stopRecord];
}
}
}
else if (inID == kAudioSessionProperty_AudioInputAvailable)
{
if (inDataSize == sizeof(UInt32)) {
UInt32 isAvailable = *(UInt32*)inData;
// disable recording if input is not available
THIS->btn_record.enabled = (isAvailable > 0) ? YES : NO;
}
}
}
#pragma mark Initialization routines
- (void)awakeFromNib
{
// Allocate our singleton instance for the recorder & player object
recorder = new AQRecorder();
player = nil;//new AQPlayer();
OSStatus error = AudioSessionInitialize(NULL, NULL, interruptionListener, self);
if (error) printf("ERROR INITIALIZING AUDIO SESSION! %d\n", error);
else
{
UInt32 category = kAudioSessionCategory_PlayAndRecord;
error = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
if (error) printf("couldn't set audio category!");
error = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, propListener, self);
if (error) printf("ERROR ADDING AUDIO SESSION PROP LISTENER! %d\n", error);
UInt32 inputAvailable = 0;
UInt32 size = sizeof(inputAvailable);
// we do not want to allow recording if input is not available
error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable, &size, &inputAvailable);
if (error) printf("ERROR GETTING INPUT AVAILABILITY! %d\n", error);
// btn_record.enabled = (inputAvailable) ? YES : NO;
// we also need to listen to see if input availability changes
error = AudioSessionAddPropertyListener(kAudioSessionProperty_AudioInputAvailable, propListener, self);
if (error) printf("ERROR ADDING AUDIO SESSION PROP LISTENER! %d\n", error);
error = AudioSessionSetActive(true);
if (error) printf("AudioSessionSetActive (true) failed");
}
// [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(playbackQueueStopped:) name:@"playbackQueueStopped" object:nil];
// [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(playbackQueueResumed:) name:@"playbackQueueResumed" object:nil];
// UIColor *bgColor = [[UIColor alloc] initWithRed:.39 green:.44 blue:.57 alpha:.5];
// [lvlMeter_in setBackgroundColor:bgColor];
// [lvlMeter_in setBorderColor:bgColor];
// [bgColor release];
// disable the play button since we have no recording to play yet
// btn_play.enabled = NO;
// playbackWasInterrupted = NO;
// playbackWasPaused = NO;
}
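// Added note (not in the original answer): the AudioSession C API used in
// awakeFromNib above was deprecated in iOS 7; the AVAudioSession equivalent
// of the category/activation setup is roughly:
//
//   NSError *err = nil;
//   [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:&err];
//   [[AVAudioSession sharedInstance] setActive:YES error:&err];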
#pragma mark Notification routines
- (void)playbackQueueStopped:(NSNotification *)note
{
btn_play.title = @"Play";
[lvlMeter_in setAq: nil];
btn_record.enabled = YES;
}
- (void)playbackQueueResumed:(NSNotification *)note
{
btn_play.title = @"Stop";
btn_record.enabled = NO;
[lvlMeter_in setAq: player->Queue()];
}
#pragma mark Cleanup
- (void)dealloc
{
[btn_record release];
[btn_play release];
[fileDescription release];
[lvlMeter_in release];
// delete player;
delete recorder;
[super dealloc];
}
@end
AQRecorder
(the header has two defines of importance:
#define kNumberRecordBuffers 3
#define kBufferDurationSeconds 5.0
)
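As a quick sanity check (my addition, not part of the original answer), here is what those two constants imply for the µ-law format that SetupAudioFormat configures below: 8 kHz, mono, one byte per frame.
int frames         = (int)ceil(kBufferDurationSeconds * 8000.0); // 40000 frames
int bytesPerBuffer = frames * 1;                                 // 1 byte/frame -> 40000 bytes per buffer
int totalBytes     = kNumberRecordBuffers * bytesPerBuffer;      // 120000 bytes (~117 KB) held by the queue
So each buffer carries five seconds of audio, which is also roughly how often MyInputBufferHandler fires and uploads a chunk. With that in mind, here is the implementation: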
#include "AQRecorder.h"
//#include "UploadAudioWrapperInterface.h"
//#include "RestClient.h"
RestClient * restClient;
NSData* data;
// ____________________________________________________________________________________
// Determine the size, in bytes, of a buffer necessary to represent the supplied number
// of seconds of audio data.
int AQRecorder::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds)
{
int packets, frames, bytes = 0;
try {
frames = (int)ceil(seconds * format->mSampleRate);
if (format->mBytesPerFrame > 0)
bytes = frames * format->mBytesPerFrame;
else {
UInt32 maxPacketSize;
if (format->mBytesPerPacket > 0)
maxPacketSize = format->mBytesPerPacket; // constant packet size
else {
UInt32 propertySize = sizeof(maxPacketSize);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
&propertySize), "couldn't get queue's maximum output packet size");
}
if (format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
packets = frames; // worst-case scenario: 1 frame in a packet
if (packets == 0) // sanity check
packets = 1;
bytes = packets * maxPacketSize;
}
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
return 0;
}
return bytes;
}
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
// write packets to file
// XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
// inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
// "AudioFileWritePackets failed");
aqr->mRecordPacket += inNumPackets;
// int numBytes = inBuffer->mAudioDataByteSize;
// SInt8 *testBuffer = (SInt8*)inBuffer->mAudioData;
//
// for (int i=0; i < numBytes; i++)
// {
// SInt8 currentData = testBuffer[i];
// printf("Current data in testbuffer is %d", currentData);
//
// NSData * temp = [NSData dataWithBytes:currentData length:sizeof(currentData)];
// }
// hand this chunk to the uploader; release the previous chunk first so
// each callback doesn't leak under manual reference counting
[data release];
data = [[NSData alloc] initWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
[restClient uploadAudioData:data url:nil];
}
// if we're not stopping, re-enqueue the buffer so that it gets filled again
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
AQRecorder::AQRecorder()
{
mIsRunning = false;
mRecordPacket = 0;
data = [[NSData alloc] init];
restClient = [[RestClient sharedManager] retain];
}
AQRecorder::~AQRecorder()
{
AudioQueueDispose(mQueue, TRUE);
AudioFileClose(mRecordFile);
if (mFileName){
CFRelease(mFileName);
}
[restClient release];
[data release];
}
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
void AQRecorder::CopyEncoderCookieToFile()
{
UInt32 propertySize;
// get the magic cookie, if any, from the converter
OSStatus err = AudioQueueGetPropertySize(mQueue, kAudioQueueProperty_MagicCookie, &propertySize);
// we can get a noErr result and also a propertySize == 0
// -- if the file format does support magic cookies, but this file doesn't have one.
if (err == noErr && propertySize > 0) {
Byte *magicCookie = new Byte[propertySize];
UInt32 magicCookieSize;
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MagicCookie, magicCookie, &propertySize), "get audio converter's magic cookie");
magicCookieSize = propertySize; // the converter lies and tells us the wrong size
// now set the magic cookie on the output file
UInt32 willEatTheCookie = false;
// the converter wants to give us one; will the file take it?
err = AudioFileGetPropertyInfo(mRecordFile, kAudioFilePropertyMagicCookieData, NULL, &willEatTheCookie);
if (err == noErr && willEatTheCookie) {
err = AudioFileSetProperty(mRecordFile, kAudioFilePropertyMagicCookieData, magicCookieSize, magicCookie);
XThrowIfError(err, "set audio file's magic cookie");
}
delete[] magicCookie;
}
}
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));
UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mRecordFormat.mSampleRate), "couldn't get hardware sample rate");
//override samplearate to 8k from device sample rate
mRecordFormat.mSampleRate = 8000.0;
size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");
// mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;
}
if (inFormatID == kAudioFormatULaw) {
// NSLog(@"is ulaw");
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
mRecordFormat.mSampleRate = 8000.0;
// mRecordFormat.mFormatFlags = 0;
mRecordFormat.mFramesPerPacket = 1;
mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mBitsPerChannel = 16;//was 8
mRecordFormat.mBytesPerPacket = 1;
mRecordFormat.mBytesPerFrame = 1;
}
}
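// Added for reference (an assumption on my part, not what this answer
// shipped): Core Audio conventionally describes µ-law with 8 bits per
// channel, one byte per frame/packet, and no linear-PCM flags. A more
// typical µ-law ASBD would be:
static AudioStreamBasicDescription MakeConventionalULawFormat(void)
{
    AudioStreamBasicDescription fmt = {0};
    fmt.mSampleRate       = 8000.0;
    fmt.mFormatID         = kAudioFormatULaw;
    fmt.mFormatFlags      = 0;
    fmt.mFramesPerPacket  = 1;
    fmt.mChannelsPerFrame = 1;
    fmt.mBitsPerChannel   = 8;
    fmt.mBytesPerPacket   = 1;
    fmt.mBytesPerFrame    = 1;
    return fmt;
}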
NSString * GetDocumentDirectory(void)
{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *basePath = ([paths count] > 0) ? [paths objectAtIndex:0] : nil;
return basePath;
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
CFURLRef url;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// specify the recording format
SetupAudioFormat(kAudioFormatULaw /*kAudioFormatLinearPCM*/);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *basePath = GetDocumentDirectory();
NSString *recordFile = [basePath /*NSTemporaryDirectory()*/ stringByAppendingPathComponent: (NSString*)inRecordFile];
url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL); // note: CFURLCreateWithFileSystemPath is safer for paths that need escaping
// create the audio file
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile,
&mRecordFile), "AudioFileCreateWithURL failed");
CFRelease(url);
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for kBufferDurationSeconds of audio
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException &e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
void AQRecorder::StopRecord()
{
// end recording
mIsRunning = false;
// XThrowIfError(AudioQueueReset(mQueue), "AudioQueueStop failed");
XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
CopyEncoderCookieToFile();
if (mFileName)
{
CFRelease(mFileName);
mFileName = NULL;
}
AudioQueueDispose(mQueue, true);
AudioFileClose(mRecordFile);
}
Please feel free to comment on or refine my answer; I will accept it as the answer if it's a better solution. Please note this was my first attempt, and I'm sure it is not the most elegant or proper solution.
You could use the GameKit framework and send the audio over Bluetooth. There are examples in the iOS Developer Library.
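If you go that route, a minimal sketch with the (long-deprecated) GKSession API might look like this; the session setup and the audioChunk variable are assumptions for illustration, not tested code:
#import <GameKit/GameKit.h>
// Hypothetical: broadcast a recorded audio chunk (NSData) to connected peers.
GKSession *session = [[GKSession alloc] initWithSessionID:@"audio-share"
                                              displayName:nil
                                              sessionMode:GKSessionModePeer];
session.available = YES; // advertise to nearby peers
NSError *error = nil;
if (![session sendDataToAllPeers:audioChunk
                    withDataMode:GKSendDataModeReliable
                           error:&error]) {
    NSLog(@"GameKit send failed: %@", error);
}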
