Read segments from multiple audio files into one buffer with ExtAudioFile - iOS

I need to be able to assemble audio from several files into a single stereo buffer. My code works as expected if I load each file into its own buffer, but looping through several files and loading them into one larger buffer plays back only the segment from the last file.
It's possible that the header info is getting copied each time, or that the same area of the buffer is just being over-written with each new file.
Any suggestions would be appreciated.
Some code is listed below. I'm reading from encrypted files, so I'm using NSData and AudioFileOpenWithCallbacks.
// Assign the frame count to the soundStructArray instance variable.
UInt64 totalFrames = [[inputNotes.stopTimes lastObject] intValue];
self.soundStructArray[0]->frameCount = (UInt32)totalFrames;
self.soundStructArray[0]->audioDataLeft =
    (AudioUnitSampleType *) calloc (totalFrames, sizeof (AudioUnitSampleType));

AudioStreamBasicDescription importFormat = {0};

// if (2 == channelCount) {
self.soundStructArray[0]->isStereo = YES;
self.soundStructArray[0]->audioDataRight =
    (AudioUnitSampleType *) calloc (totalFrames, sizeof (AudioUnitSampleType));

// Allocate memory for the buffer list struct according to the number of
// channels it represents.
AudioBufferList *bufferList;
UInt32 channelCount = 2;
bufferList = (AudioBufferList *) malloc (
    sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1)
);
if (NULL == bufferList) {
    DLog (@"*** malloc failure for allocating bufferList memory");
    return;
}

// Initialize the mNumberBuffers member.
bufferList->mNumberBuffers = channelCount;

// Initialize the mBuffers member to 0.
AudioBuffer emptyBuffer = {0};
size_t arrayIndex;
for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
    bufferList->mBuffers[arrayIndex] = emptyBuffer;
}

// Set up the AudioBuffer structs in the buffer list.
bufferList->mBuffers[0].mNumberChannels = 1;
bufferList->mBuffers[0].mDataByteSize = (UInt32)totalFrames * sizeof (AudioUnitSampleType);
bufferList->mBuffers[0].mData = self.soundStructArray[0]->audioDataLeft;
if (2 == channelCount) {
    bufferList->mBuffers[1].mNumberChannels = 1;
    bufferList->mBuffers[1].mDataByteSize = (UInt32)totalFrames * sizeof (AudioUnitSampleType);
    bufferList->mBuffers[1].mData = self.soundStructArray[0]->audioDataRight;
}

NSString *fileType = @"m4a";

for (int audioFile = 0; audioFile < inputVoicesCount; ++audioFile) {
    @autoreleasepool {
        NSData *encData;
        NSData *audioData;
        AudioFileID refAudioFileID;

        DLog (@"readAudioFilesIntoMemory - file %i", audioFile);
        NSString *source = [[NSBundle mainBundle] pathForResource:[inputNotes.notes objectAtIndex:audioFile] ofType:fileType];
        // NSURL *url = [NSURL encryptedFileURLWithPath:source];

        if ([[NSFileManager defaultManager] fileExistsAtPath:source])
        {
            // File exists.
            encData = [[NSData alloc] initWithContentsOfFile:source];
            if (encData)
            {
                NSError *error;
                audioData = [RNDecryptor decryptData:encData
                                        withPassword:key
                                               error:&error];
            }
        }
        else
        {
            DLog(@"File does not exist");
        }

        OSStatus result = AudioFileOpenWithCallbacks((__bridge void *)(audioData), readProc, 0, getSizeProc, NULL, kAudioFileMPEG4Type, &refAudioFileID);
        if (result != noErr) {
            DLog(@"problem in theAudioFileReaderWithData function: result code %i \n", (int)result);
        }

        // Instantiate an extended audio file object.
        ExtAudioFileRef audioFileObject = 0;
        result = ExtAudioFileWrapAudioFileID(refAudioFileID, NO, &audioFileObject);
        if (result != noErr) {
            DLog(@"problem in theAudioFileReaderWithData function wrapping the audio FileID: result code %i \n", (int)result);
        }

        // Get the audio file's data format.
        AudioStreamBasicDescription fileAudioFormat = {0};
        UInt32 formatPropertySize = sizeof (fileAudioFormat);
        result = ExtAudioFileGetProperty (
                     audioFileObject,
                     kExtAudioFileProperty_FileDataFormat,
                     &formatPropertySize,
                     &fileAudioFormat
                 );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (file audio format)" withStatus: result]; return;}

        importFormat = stereoStreamFormat;
        result = ExtAudioFileSetProperty (
                     audioFileObject,
                     kExtAudioFileProperty_ClientDataFormat,
                     sizeof (importFormat),
                     &importFormat
                 );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileSetProperty (client data format)" withStatus: result]; return;}

        // Frame count for this segment.
        UInt64 desiredFrames = (UInt64) ([[inputNotes.stopTimes objectAtIndex:audioFile] intValue] - [[inputNotes.startTimes objectAtIndex:audioFile] intValue]);

        // Perform a synchronous, sequential read of the audio data out of the file and
        // into the soundStructArray[0].audioDataLeft and (if stereo) .audioDataRight members.
        UInt32 numberOfPacketsToRead = (UInt32) desiredFrames;
        result = ExtAudioFileRead (
                     audioFileObject,
                     &numberOfPacketsToRead,
                     bufferList
                 );
        if (noErr != result) {
            [self printErrorMessage: @"ExtAudioFileRead failure - " withStatus: result];
            // If reading from the file failed, then free the memory for the sound buffer.
            // free (soundStructArray[audioFile].audioDataLeft);
            // soundStructArray[audioFile].audioDataLeft = 0;
            free (self.soundStructArray[0]->audioDataLeft);
            self.soundStructArray[0]->audioDataLeft = 0;
            free (self.soundStructArray[0]->audioDataRight);
            self.soundStructArray[0]->audioDataRight = 0;
            ExtAudioFileDispose (audioFileObject);
            return;
        }

        ExtAudioFileDispose (audioFileObject);
        AudioFileClose(refAudioFileID);
    } // end of @autoreleasepool
}

free (bufferList);

// Set the sample index to zero, so that playback starts at the
// beginning of the sound.
self.soundStructArray[0]->sampleNumber = 0;
DLog (@"Finished reading all files into memory");
readingFiles = NO;
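For what it's worth, bufferList->mBuffers[n].mData is set once, to the very start of audioDataLeft/audioDataRight, so every ExtAudioFileRead in the loop writes at offset zero and the last file wins. One possible fix is to re-point the buffers before each read; a minimal sketch, assuming the surrounding code above (the RepointBuffers helper and the framesRead accumulator are hypothetical, untested):

#import <AudioToolbox/AudioToolbox.h>

// Hypothetical helper: aim a stereo AudioBufferList at frameOffset inside the
// big left/right sample buffers, so the next ExtAudioFileRead appends there
// instead of overwriting offset zero.
static void RepointBuffers(AudioBufferList *bufferList,
                           AudioUnitSampleType *left,
                           AudioUnitSampleType *right,
                           UInt32 frameOffset,
                           UInt32 totalFrames)
{
    UInt32 remainingBytes = (totalFrames - frameOffset) * sizeof (AudioUnitSampleType);
    bufferList->mBuffers[0].mData = left + frameOffset;
    bufferList->mBuffers[1].mData = right + frameOffset;
    // Also reset the capacities: ExtAudioFileRead shrinks mDataByteSize to the
    // amount actually filled, so stale values would truncate the next read.
    bufferList->mBuffers[0].mDataByteSize = remainingBytes;
    bufferList->mBuffers[1].mDataByteSize = remainingBytes;
}

Inside the loop, something like RepointBuffers(bufferList, self.soundStructArray[0]->audioDataLeft, self.soundStructArray[0]->audioDataRight, framesRead, (UInt32)totalFrames) would run before each ExtAudioFileRead, followed by framesRead += numberOfPacketsToRead afterwards (ExtAudioFileRead returns the frames actually read in that argument).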

Related

How do you set the duration on a new file created with AudioFileWritePackets?

I have concatenated two audio files, using AudioFileReadPacketData to read the existing files and AudioFileWritePackets to write the data to a third file. The source files seem to contain information about their duration. I can access it using the code below, passing it the file URL:
- (float)lengthForUrl:(NSURL *)url
{
    AVURLAsset *audioAsset = [AVURLAsset URLAssetWithURL:url options:nil];
    CMTime audioDuration = audioAsset.duration;
    float audioDurationSeconds = CMTimeGetSeconds(audioDuration);
    return audioDurationSeconds;
}
When I make the same call on my new (combined) file, I get a duration of 0.
How can I set the duration on my new file, or preserve it in the transfer process?
I did not figure out how to set the duration on my combined file, but I did figure out how to create it with the duration intact: I switched from using AVAssetReader/Writer to ExtAudioFile reading and writing. Working code is below.
- (BOOL)combineFilesFor:(NSURL *)url
{
    dispatch_async(dispatch_get_main_queue(), ^{
        activity.hidden = NO;
        [activity startAnimating];
    });

    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *soundOneNew = [documentsDirectory stringByAppendingPathComponent:Kcombined];
    NSLog(@"%@", masterUrl);
    NSLog(@"%@", url);

    // create the url for the combined file
    CFURLRef out_url = (__bridge CFURLRef)[NSURL fileURLWithPath:soundOneNew];
    NSLog(@"out file url : %@", out_url);

    ExtAudioFileRef af_in;
    ExtAudioFileRef af_out = NULL;

    Float64 existing_audioDurationSeconds = [self lengthForUrl:masterUrl];
    NSLog(@"duration of existing file : %f", existing_audioDurationSeconds);
    Float64 new_audioDurationSeconds = [self lengthForUrl:url];
    NSLog(@"duration of new file : %f", new_audioDurationSeconds);
    Float64 combined_audioDurationSeconds = existing_audioDurationSeconds + new_audioDurationSeconds;
    NSLog(@"duration of combined : %f", combined_audioDurationSeconds);

    // put the urls of the files to combine into an array
    NSArray *sourceURLs = [NSArray arrayWithObjects:masterUrl, url, nil];
    AudioStreamBasicDescription inputFileFormat;
    AudioStreamBasicDescription converterFormat;
    UInt32 thePropertySize = sizeof(inputFileFormat);
    AudioStreamBasicDescription outputFileFormat;

#define BUFFER_SIZE 4096
    UInt8 *buffer = NULL;

    for (id in_url in sourceURLs) {
        NSLog(@"url in = %@", in_url);
        OSStatus in_stat = ExtAudioFileOpenURL((__bridge CFURLRef)(in_url), &af_in);
        NSLog(@"in file open status : %d", (int)in_stat);

        bzero(&inputFileFormat, sizeof(inputFileFormat));
        in_stat = ExtAudioFileGetProperty(af_in, kExtAudioFileProperty_FileDataFormat,
                                          &thePropertySize, &inputFileFormat);
        NSLog(@"in file get property status : %d", (int)in_stat);

        memset(&converterFormat, 0, sizeof(converterFormat));
        converterFormat.mFormatID = kAudioFormatLinearPCM;
        converterFormat.mSampleRate = inputFileFormat.mSampleRate;
        converterFormat.mChannelsPerFrame = 1;
        converterFormat.mBytesPerPacket = 2;
        converterFormat.mFramesPerPacket = 1;
        converterFormat.mBytesPerFrame = 2;
        converterFormat.mBitsPerChannel = 16;
        converterFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

        in_stat = ExtAudioFileSetProperty(af_in, kExtAudioFileProperty_ClientDataFormat,
                                          sizeof(converterFormat), &converterFormat);
        NSLog(@"in file set property status : %d", (int)in_stat);

        if (af_out == NULL) {
            memset(&outputFileFormat, 0, sizeof(outputFileFormat));
            outputFileFormat.mFormatID = kAudioFormatMPEG4AAC;
            outputFileFormat.mFormatFlags = kMPEG4Object_AAC_Main;
            outputFileFormat.mSampleRate = inputFileFormat.mSampleRate;
            outputFileFormat.mChannelsPerFrame = inputFileFormat.mChannelsPerFrame;

            // create the new file to write the combined file to
            OSStatus out_stat = ExtAudioFileCreateWithURL(out_url, kAudioFileM4AType, &outputFileFormat,
                                                          NULL, kAudioFileFlags_EraseFile, &af_out);
            NSLog(@"out file create status : %d", (int)out_stat);

            out_stat = ExtAudioFileSetProperty(af_out, kExtAudioFileProperty_ClientDataFormat,
                                               sizeof(converterFormat), &converterFormat);
            NSLog(@"out file set property status : %d", (int)out_stat);
        }

        // Buffer to read from source file and write to dest file
        buffer = malloc(BUFFER_SIZE);
        assert(buffer);

        AudioBufferList conversionBuffer;
        conversionBuffer.mNumberBuffers = 1;
        conversionBuffer.mBuffers[0].mNumberChannels = inputFileFormat.mChannelsPerFrame;
        conversionBuffer.mBuffers[0].mData = buffer;
        conversionBuffer.mBuffers[0].mDataByteSize = BUFFER_SIZE;

        while (TRUE) {
            conversionBuffer.mBuffers[0].mDataByteSize = BUFFER_SIZE;
            UInt32 frameCount = INT_MAX;
            if (inputFileFormat.mBytesPerFrame > 0) {
                frameCount = (conversionBuffer.mBuffers[0].mDataByteSize / inputFileFormat.mBytesPerFrame);
            }

            // Read a chunk of input
            OSStatus err = ExtAudioFileRead(af_in, &frameCount, &conversionBuffer);
            if (err) {
                NSLog(@"error read from input file : %d", (int)err);
            } else {
                NSLog(@"read %d frames from input file", (int)frameCount);
            }

            // If no frames were returned, conversion is finished
            if (frameCount == 0)
                break;

            // Write pcm data to output file
            err = ExtAudioFileWrite(af_out, frameCount, &conversionBuffer);
            if (err) {
                NSLog(@"error write out file : %d", (int)err);
            } else {
                NSLog(@"wrote %d frames to out file", (int)frameCount);
            }
        }

        ExtAudioFileDispose(af_in);
    }
    ExtAudioFileDispose(af_out);
    NSLog(@"combined file duration = %f", [self lengthForUrl:[NSURL fileURLWithPath:soundOneNew]]);

    // move the new file into place
    [self deleteFileWithName:KUpdate forType:@""];
    [self deleteFileWithName:KMaster forType:@""];
    player = nil;
    [self replaceFileAtPath:KMaster withData:[NSData dataWithContentsOfFile:soundOneNew]];
    [self deleteFileWithName:Kcombined forType:@""];
    [self pauseRecording];

    dispatch_async(dispatch_get_main_queue(), ^{
        activity.hidden = YES;
        [activity stopAnimating];
        record.enabled = YES;
    });
    return YES;
}

AVAssetReader getting an incorrect number of audio samples

I am trying to grab the audio track from a video as raw PCM data. The plan is to pass this data into a float* table array in libpd. I have managed to get some sample data, but the number of samples reported is way too low. For example, for a 29-second clip I am getting a reported 3968 samples. What I am looking for is the amplitude at each sample. For a track of 29 seconds I would expect an array of around 1278900 samples (at 44.1 kHz).
Here is what I have put together based on other examples:
- (void)audioCapture {
    NSLog(@"capturing");

    NSError *error;
    AVAssetReader *reader = [[AVAssetReader alloc] initWithAsset:self.videoAsset error:&error];
    NSArray *audioTracks = [self.videoAsset tracksWithMediaType:AVMediaTypeAudio];
    AVAssetTrack *audioTrack = nil;
    if ([audioTracks count] > 0)
        audioTrack = [audioTracks objectAtIndex:0];

    // Decompress to Linear PCM with the asset reader
    NSDictionary *decompressionAudioSettings = [NSDictionary dictionaryWithObjectsAndKeys:
        [NSNumber numberWithUnsignedInt:kAudioFormatLinearPCM], AVFormatIDKey,
        nil];
    AVAssetReaderOutput *output = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack outputSettings:decompressionAudioSettings];
    [reader addOutput:output];
    [reader startReading];

    CMSampleBufferRef sample = [output copyNextSampleBuffer];
    // array for our sample amp values
    SInt16 *samples = NULL;
    // sample count
    CMItemCount numSamplesInBuffer = 0;
    // sample buffer
    CMBlockBufferRef buffer;

    while (sample != NULL)
    {
        sample = [output copyNextSampleBuffer];
        if (sample == NULL)
            continue;

        buffer = CMSampleBufferGetDataBuffer(sample);
        size_t lengthAtOffset;
        size_t totalLength;
        char *data;
        if (CMBlockBufferGetDataPointer(buffer, 0, &lengthAtOffset, &totalLength, &data) != noErr)
        {
            NSLog(@"error!");
            break;
        }

        numSamplesInBuffer = CMSampleBufferGetNumSamples(sample);

        AudioBufferList audioBufferList;
        CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
            sample,
            NULL,
            &audioBufferList,
            sizeof(audioBufferList),
            NULL,
            NULL,
            kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
            &buffer
        );

        for (int bufferCount = 0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++) {
            samples = (SInt16 *)audioBufferList.mBuffers[bufferCount].mData;
            NSLog(@"idx %f", (double)samples[bufferCount]);
        }
    }

    NSLog(@"num samps in buf %ld", numSamplesInBuffer);
    //[PdBase copyArray:(float *)samples
    //      toArrayNamed:@"DestTable" withOffset:0
    //             count:pdTableSize];
}
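For what it's worth, CMSampleBufferGetNumSamples returns the frame count of a single sample buffer, and the loop above logs only the last buffer's count after the loop ends; it also discards the buffer copied before the loop, because copyNextSampleBuffer is called again at the top before that first buffer is used. A minimal sketch, untested, that totals the frames across all buffers:

CMItemCount totalFrames = 0;
CMSampleBufferRef sampleBuf = NULL;
while ((sampleBuf = [output copyNextSampleBuffer]) != NULL)
{
    totalFrames += CMSampleBufferGetNumSamples(sampleBuf); // frames in this buffer
    CFRelease(sampleBuf); // copyNextSampleBuffer returns a +1 retained buffer
}
// A 29 s track at 44.1 kHz should total roughly 29 * 44100 = 1278900 frames.
NSLog(@"total frames: %ld", (long)totalFrames);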

Converting 1ch 16000Hz PCM to 2ch 44100Hz PCM

I need to convert PCM audio data with format:
Data format: 1 ch, 16000 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer
no channel layout.
estimated duration: 1.101063 sec
audio bytes: 35234
audio packets: 17617
bit rate: 256000 bits per second
packet size upper bound: 2
maximum packet size: 2
audio data file offset: 44
optimized
source bit depth: I16
to 16bit 2ch (stereo) 44100Hz PCM.
My input comes as NSData, and ideally I would end up with NSData as well instead of saving the output to a file. I've seen many tutorials and examples of converting between audio formats, but they seem very complicated, and I'm wondering if there is a simpler solution. This is the code I've tried so far:
- (void)convertAudioToRequiredFormat:(NSData *)data {
    AudioFileID refAudioFileID;
    ExtAudioFileRef inputFileID;
    ExtAudioFileRef outputFileID;

    OSStatus result = AudioFileOpenWithCallbacks((__bridge void *)(data), readProc, 0, getSizeProc, 0, kAudioFormatLinearPCM, &refAudioFileID);
    if (result != noErr) {
        DLog(@"error reading input audio file");
    }
    result = ExtAudioFileWrapAudioFileID(refAudioFileID, false, &inputFileID);
    if (result != noErr) {
        DLog(@"problem in theAudioFileReaderWithData function wrapping the audio FileID: result code %i \n", (int)result);
    }

    AudioStreamBasicDescription clientFormat;
    memset(&clientFormat, 0, sizeof(clientFormat));
    clientFormat.mFormatID = kAudioFormatLinearPCM;
    clientFormat.mSampleRate = 16000;
    clientFormat.mFramesPerPacket = 1;
    clientFormat.mBytesPerPacket = 2;   // 16 bits * 1 channel
    clientFormat.mBytesPerFrame = 2;
    clientFormat.mChannelsPerFrame = 1; // 1 channel
    clientFormat.mBitsPerChannel = 16;
    clientFormat.mFormatFlags = kCAFLinearPCMFormatFlagIsLittleEndian;
    clientFormat.mReserved = 0;

    AudioStreamBasicDescription outputFormat;
    memset(&outputFormat, 0, sizeof(outputFormat));
    outputFormat.mFormatID = kAudioFormatLinearPCM;
    outputFormat.mSampleRate = 44100;
    outputFormat.mFramesPerPacket = 1;  // it is always 1 for PCM
    outputFormat.mBytesPerPacket = 4;   // 4 bytes = 2 * 16 bits
    outputFormat.mBytesPerFrame = 4;
    outputFormat.mChannelsPerFrame = 2; // 2 channels = stereo
    outputFormat.mBitsPerChannel = 16;  // 16 bits per channel
    outputFormat.mFormatFlags = kCAFLinearPCMFormatFlagIsLittleEndian;
    clientFormat.mReserved = 0;

    UInt32 outputFormatSize = sizeof(outputFormat);
    result = 0;
    result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &outputFormatSize, &outputFormat);
    if (result != noErr)
        NSLog(@"could not set the output format with status code %i \n", (int)result);

    NSArray *docPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *docPath = [docPaths objectAtIndex:0];
    NSString *path = [docPath stringByAppendingPathComponent:@"newFormat.wav"];
    CFURLRef sourceURL = (__bridge CFURLRef)[[NSURL alloc] initFileURLWithPath:path];

    NSFileManager *fm = [NSFileManager defaultManager];
    if (![fm fileExistsAtPath:path]) {
        NSData *content = [NSData dataWithBytes:NULL length:0];
        [fm createFileAtPath:path contents:content attributes:nil];
    }

    result = 0;
    result = ExtAudioFileCreateWithURL(sourceURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outputFileID);
    if (result != noErr) {
        NSLog(@"ExtAudioFileCreateWithURL failed for outputFileID with status %i \n", (int)result);
    }

    int size = sizeof(clientFormat);
    result = 0;
    result = ExtAudioFileSetProperty(inputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
    if (result != noErr)
        NSLog(@"error on ExtAudioFileSetProperty for input File with result code %i \n", (int)result);

    size = sizeof(clientFormat);
    result = 0;
    result = ExtAudioFileSetProperty(outputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
    if (result != noErr)
        NSLog(@"error on ExtAudioFileSetProperty for output File with result code %i \n", (int)result);

    int totalFrames = 0;
    UInt32 outputFilePacketPosition = 0; // in bytes
    UInt32 encodedBytes = 0;

    while (1) {
        UInt32 bufferByteSize = 22050 * 4 * 2;
        char srcBuffer[bufferByteSize];
        UInt32 numFrames = (bufferByteSize / clientFormat.mBytesPerFrame);

        AudioBufferList fillBufList;
        fillBufList.mNumberBuffers = 1;
        fillBufList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
        fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
        fillBufList.mBuffers[0].mData = srcBuffer;

        result = 0;
        result = ExtAudioFileRead(inputFileID, &numFrames, &fillBufList);
        if (result != noErr) {
            NSLog(@"Error on ExtAudioFileRead with result code %i \n", (int)result);
            totalFrames = 0;
            break;
        }
        if (!numFrames)
            break;

        totalFrames = totalFrames + numFrames;

        result = 0;
        result = ExtAudioFileWrite(outputFileID,
                                   numFrames,
                                   &fillBufList);
        if (result != noErr) {
            NSLog(@"ExtAudioFileWrite failed with code %i \n", (int)result);
        }
        encodedBytes += numFrames * clientFormat.mBytesPerFrame;
    }

    // Clean up
    ExtAudioFileDispose(inputFileID);
    ExtAudioFileDispose(outputFileID);
    AudioFileClose(refAudioFileID);
}

static OSStatus readProc(void *clientData, SInt64 position, UInt32 requestCount, void *buffer, UInt32 *actualCount)
{
    NSData *inAudioData = (__bridge NSData *)clientData;
    size_t dataSize = inAudioData.length;
    size_t bytesToRead = 0;
    if (position < dataSize) {
        size_t bytesAvailable = dataSize - position;
        bytesToRead = requestCount <= bytesAvailable ? requestCount : bytesAvailable;
        [inAudioData getBytes:buffer range:NSMakeRange(position, bytesToRead)];
    } else {
        NSLog(@"data was not read \n");
        bytesToRead = 0;
    }
    if (actualCount)
        *actualCount = bytesToRead;
    return noErr;
}

static SInt64 getSizeProc(void *clientData) {
    NSData *inAudioData = (__bridge NSData *)clientData;
    size_t dataSize = inAudioData.length;
    return dataSize;
}
Unfortunately it doesn't work, and I end up with EXC_BAD_ACCESS on this line:
result = ExtAudioFileCreateWithURL(sourceURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outputFileID);
I have no idea what's causing that error (a wrong AudioStreamBasicDescription?). Can someone help me fix it? Or maybe there is an easier way to convert this audio data to the desired PCM format?
Some tips:
1.)
AudioFileID refAudioFileID;
ExtAudioFileRef inputFileID;
ExtAudioFileRef outputFileID;
^ initialize the pointers with NULL.
2.)
CFURLRef sourceURL = (__bridge CFURLRef)[[NSURL alloc] initFileURLWithPath:path];
^ do:
NSURL *sourceURL = [NSURL fileURLWithPath:path];
3.)
NSData *content = [NSData dataWithBytes:NULL length:0];
^ use:
NSData *content = [NSData data];
4.)
result = ExtAudioFileCreateWithURL(sourceURL, kAudioFileM4AType, &outputFormat, NULL, kAudioFileFlags_EraseFile, &outputFileID);
^ it looks as if you are using the wrong audio file type: try kAudioFileAIFFType or kAudioFileWAVEType.
My two cents on this. Good luck!
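Pulling those tips together, a minimal sketch of the file-creation path (untested; path and outputFormat are the variables from the question, and WAVE is chosen to match the little-endian PCM output format):

AudioFileID refAudioFileID = NULL;    // tip 1: start all handles at NULL so
ExtAudioFileRef inputFileID = NULL;   // a failed open is detectable and
ExtAudioFileRef outputFileID = NULL;  // cleanup never disposes garbage

// tip 2: keep a real NSURL alive instead of bridging a temporary to CFURLRef
NSURL *sourceURL = [NSURL fileURLWithPath:path];

// tip 4: the file type should match the data format; little-endian PCM output
// belongs in a WAVE (or AIFF) container, not an M4A one
OSStatus result = ExtAudioFileCreateWithURL((__bridge CFURLRef)sourceURL,
                                            kAudioFileWAVEType,
                                            &outputFormat,
                                            NULL,
                                            kAudioFileFlags_EraseFile,
                                            &outputFileID);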

AVAssetReader playing MPMediaItem in low quality?

I've managed to get the raw data from an MPMediaItem using an AVAssetReader, after combining the answers of a couple of SO questions like this one and this one and a nice blog post. I'm also able to play this raw data using FMOD, but then a problem arises.
The resulting audio appears to be of lower quality than the original track. Though the AVAssetTrack formatDescriptions tell me there are 2 channels in the data, the result sounds mono. It also sounds a bit dampened (less crisp), as if the bitrate has been lowered.
Am I doing something wrong, or is the quality of the MPMediaItem data lowered on purpose by AVAssetReader (as an anti-piracy measure)?
#define OUTPUTRATE 44100
Initializing the AVAssetReader and AVAssetReaderTrackOutput
// prepare AVAsset and AVAssetReaderOutput etc
MPMediaItem *mediaItem = ...;
NSURL *ipodAudioUrl = [mediaItem valueForProperty:MPMediaItemPropertyAssetURL];
AVURLAsset *asset = [[AVURLAsset alloc] initWithURL:ipodAudioUrl options:nil];

NSError *error = nil;
assetReader = [[AVAssetReader alloc] initWithAsset:asset error:&error];
if (error)
    NSLog(@"error creating reader: %@", [error debugDescription]);

AVAssetTrack *songTrack = [asset.tracks objectAtIndex:0];
NSArray *trackDescriptions = songTrack.formatDescriptions;

numChannels = 2;
for (unsigned int i = 0; i < [trackDescriptions count]; ++i)
{
    CMAudioFormatDescriptionRef item = (CMAudioFormatDescriptionRef)[trackDescriptions objectAtIndex:i];
    const AudioStreamBasicDescription *bobTheDesc = CMAudioFormatDescriptionGetStreamBasicDescription(item);
    if (bobTheDesc && bobTheDesc->mChannelsPerFrame == 1) {
        numChannels = 1;
    }
}

NSDictionary *outputSettingsDict = [[[NSDictionary alloc] initWithObjectsAndKeys:
    [NSNumber numberWithInt:kAudioFormatLinearPCM], AVFormatIDKey,
    [NSNumber numberWithInt:OUTPUTRATE], AVSampleRateKey,
    [NSNumber numberWithInt:16], AVLinearPCMBitDepthKey,
    [NSNumber numberWithBool:NO], AVLinearPCMIsBigEndianKey,
    [NSNumber numberWithBool:NO], AVLinearPCMIsFloatKey,
    [NSNumber numberWithBool:NO], AVLinearPCMIsNonInterleaved,
    nil] autorelease];

AVAssetReaderTrackOutput *output = [[[AVAssetReaderTrackOutput alloc] initWithTrack:songTrack outputSettings:outputSettingsDict] autorelease];
[assetReader addOutput:output];
[assetReader startReading];
Initializing FMOD and the FMOD sound
// Init FMOD
FMOD_RESULT result = FMOD_OK;
unsigned int version = 0;
/*
Create a System object and initialize
*/
result = FMOD::System_Create(&system);
ERRCHECK(result);
result = system->getVersion(&version);
ERRCHECK(result);
if (version < FMOD_VERSION)
{
fprintf(stderr, "You are using an old version of FMOD %08x. This program requires %08x\n", version, FMOD_VERSION);
exit(-1);
}
result = system->setSoftwareFormat(OUTPUTRATE, FMOD_SOUND_FORMAT_PCM16, 1, 0, FMOD_DSP_RESAMPLER_LINEAR);
ERRCHECK(result);
result = system->init(32, FMOD_INIT_NORMAL | FMOD_INIT_ENABLE_PROFILE, NULL);
ERRCHECK(result);
// Init FMOD sound stream
CMTimeRange timeRange = [songTrack timeRange];
float durationInSeconds = timeRange.duration.value / timeRange.duration.timescale;
FMOD_CREATESOUNDEXINFO exinfo = {0};
memset(&exinfo, 0, sizeof(FMOD_CREATESOUNDEXINFO));
exinfo.cbsize = sizeof(FMOD_CREATESOUNDEXINFO); /* required. */
exinfo.decodebuffersize = OUTPUTRATE; /* Chunk size of stream update in samples. This will be the amount of data passed to the user callback. */
exinfo.length = OUTPUTRATE * numChannels * sizeof(signed short) * durationInSeconds; /* Length of PCM data in bytes of whole song (for Sound::getLength) */
exinfo.numchannels = numChannels; /* Number of channels in the sound. */
exinfo.defaultfrequency = OUTPUTRATE; /* Default playback rate of sound. */
exinfo.format = FMOD_SOUND_FORMAT_PCM16; /* Data format of sound. */
exinfo.pcmreadcallback = pcmreadcallback; /* User callback for reading. */
exinfo.pcmsetposcallback = pcmsetposcallback; /* User callback for seeking. */
result = system->createStream(NULL, FMOD_OPENUSER, &exinfo, &sound);
ERRCHECK(result);
result = system->playSound(FMOD_CHANNEL_FREE, sound, false, &channel);
ERRCHECK(result);
Reading from the AVAssetReaderTrackOutput into a ring buffer
AVAssetReaderTrackOutput *trackOutput = (AVAssetReaderTrackOutput *)[assetReader.outputs objectAtIndex:0];
CMSampleBufferRef sampleBufferRef = [trackOutput copyNextSampleBuffer];
if (sampleBufferRef)
{
    AudioBufferList audioBufferList;
    CMBlockBufferRef blockBuffer;
    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBufferRef, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);

    if (blockBuffer == NULL)
    {
        stopLoading = YES;
        continue;
    }
    if (&audioBufferList == NULL)
    {
        stopLoading = YES;
        continue;
    }
    if (audioBufferList.mNumberBuffers != 1)
        NSLog(@"numBuffers = %lu", audioBufferList.mNumberBuffers);

    for (int y = 0; y < audioBufferList.mNumberBuffers; y++)
    {
        AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
        SInt8 *frame = (SInt8 *)audioBuffer.mData;
        for (int i = 0; i < audioBufferList.mBuffers[y].mDataByteSize; i++)
        {
            ringBuffer->push_back(frame[i]);
        }
    }

    CMSampleBufferInvalidate(sampleBufferRef);
    CFRelease(sampleBufferRef);
}
I'm not familiar with FMOD, so I can't comment there. AVAssetReader doesn't do any "copy protection" stuff, so that's not a worry. (If you can get the AVAssetURL, the track is DRM-free.)
Since you are requesting interleaved buffers (AVLinearPCMIsNonInterleaved is NO), there will only be one buffer, so I guess your last bit of code might be wrong.
Here's an example of some code that's working well for me. By the way, your byte-by-byte for loop is probably not going to be very performant; consider using memcpy or something similar.
If you are not restricted to your existing ring buffer, try TPCircularBuffer (https://github.com/michaeltyson/TPCircularBuffer); it is amazing.
CMSampleBufferRef nextBuffer = NULL;
if (_reader.status == AVAssetReaderStatusReading)
{
    nextBuffer = [_readerOutput copyNextSampleBuffer];
}
if (nextBuffer)
{
    AudioBufferList abl;
    CMBlockBufferRef blockBuffer;
    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        nextBuffer,
        NULL,
        &abl,
        sizeof(abl),
        NULL,
        NULL,
        kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
        &blockBuffer);

    // the correct way to get the number of bytes in the buffer
    size_t size = CMSampleBufferGetTotalSampleSize(nextBuffer);
    memcpy(ringBufferTail, abl.mBuffers[0].mData, size);

    CFRelease(nextBuffer);
    CFRelease(blockBuffer);
}
Hope this helps
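If TPCircularBuffer fits your project, a minimal sketch of its produce/consume pattern (my reading of that library's API; ring is a hypothetical instance, nextBuffer and abl are from the snippet above, untested):

#import "TPCircularBuffer.h"

TPCircularBuffer ring;                     // hypothetical ring buffer instance
TPCircularBufferInit(&ring, 1024 * 1024);  // capacity in bytes, chosen arbitrarily

// Producer: copy each decoded chunk in one call instead of byte-by-byte.
size_t size = CMSampleBufferGetTotalSampleSize(nextBuffer);
TPCircularBufferProduceBytes(&ring, abl.mBuffers[0].mData, (int32_t)size);

// Consumer (e.g. the FMOD pcmreadcallback): take whatever is available.
int32_t availableBytes = 0;
void *tail = TPCircularBufferTail(&ring, &availableBytes);
// ... memcpy up to availableBytes out of tail ...
TPCircularBufferConsume(&ring, availableBytes);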
You're initializing FMOD to output mono audio (the third argument to setSoftwareFormat is the number of output channels). Try
result = system->setSoftwareFormat(OUTPUTRATE, FMOD_SOUND_FORMAT_PCM16, 2, 0, FMOD_DSP_RESAMPLER_LINEAR);

How to trim audio data?

I want to remove the last 5 seconds of audio data and save the result to a different location as a new audio file. I'm trying to do this with the ExtAudioFile API using the code below, but my output size increases from 2.5 MB to 26.5 MB. Where am I wrong?
UInt32 size;
NSString *docsDir;
NSArray *dirPaths;
dirPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
docsDir = [dirPaths objectAtIndex:0];
NSString *destinationURL = [docsDir stringByAppendingPathComponent:@"Audio3.m4a"];
NSURL *soundFilePath = [NSURL fileURLWithPath:[[NSBundle mainBundle]
                                               pathForResource:@"Audio1"
                                                        ofType:@"m4a"]];

ExtAudioFileRef inputFile = NULL;
ExtAudioFileRef outputFile = NULL;
ExtAudioFileOpenURL((CFURLRef)soundFilePath, &inputFile);

AudioStreamBasicDescription destFormat;
destFormat.mFormatID = kAudioFormatMPEG4AAC;
destFormat.mFormatFlags = kAudioFormatFlagsCanonical;
destFormat.mSampleRate = 441000;
destFormat.mBytesPerPacket = 2;
destFormat.mFramesPerPacket = 1;
destFormat.mBytesPerFrame = 2;
destFormat.mChannelsPerFrame = 2;
destFormat.mBitsPerChannel = 16;
destFormat.mReserved = 0;

OSStatus createStatus = ExtAudioFileCreateWithURL((CFURLRef)[NSURL fileURLWithPath:destinationURL], kAudioFileM4AType, &destFormat, NULL, kAudioFileFlags_EraseFile, &outputFile);
//ExtAudioFileDispose(outputFile);
NSLog(@"createStatus: %i", (int)createStatus);

// this is not needed as the file url is already opened.
ExtAudioFileOpenURL((CFURLRef)soundFilePath, &inputFile);
//ExtAudioFileOpenURL((CFURLRef)[NSURL fileURLWithPath:destinationURL], &outputFile);

// find out how many frames long this file is
SInt64 length = 0;
UInt32 dataSize2 = (UInt32)sizeof(length);
ExtAudioFileGetProperty(inputFile, kExtAudioFileProperty_FileLengthFrames, &dataSize2, &length);

AudioStreamBasicDescription clientFormat;
clientFormat.mFormatID = kAudioFormatMPEG4AAC;
clientFormat.mSampleRate = 441000;
clientFormat.mFormatFlags = kAudioFormatFlagsCanonical;
clientFormat.mBitsPerChannel = 16;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerPacket = 2;
clientFormat.mBytesPerFrame = 2;
destFormat.mReserved = 0;

size = sizeof(clientFormat);
// set the intermediate format to canonical on the source file for conversion (?)
ExtAudioFileSetProperty(inputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
ExtAudioFileSetProperty(outputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);

OSStatus seekStatus = ExtAudioFileSeek(outputFile, 0);
NSLog(@"seekstatus %i", (int)seekStatus);

SInt64 newLength = length - (5); // NOTE: this subtracts 5 frames, not 5 seconds' worth of frames (seconds * sample rate)
NSLog(@"length: %lld frames", length);

UInt8 *buffer = malloc(64*1024); // 64K
UInt32 totalFramecount = 0;

while (true) {
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 2;
    bufferList.mBuffers[0].mData = buffer;          // pointer to buffer of audio data
    bufferList.mBuffers[0].mDataByteSize = 64*1024; // number of bytes in the buffer
    UInt32 frameCount = 64*1024 / 2;                // 2 bytes per frame

    // Read a chunk of input
    SInt64 outFrameOffset;
    ExtAudioFileTell(inputFile, &outFrameOffset);
    NSLog(@"head status %lld", outFrameOffset);

    OSStatus status = ExtAudioFileRead(inputFile, &frameCount, &bufferList);
    totalFramecount += frameCount;
    NSLog(@"read status %i", (int)status);
    NSLog(@"loaded %u frames and stopping at %lld", (unsigned int)totalFramecount, newLength);

    if (!frameCount || (totalFramecount >= newLength)) {
        // termination condition
        break;
    }
    OSStatus writeStatus = ExtAudioFileWrite(outputFile, frameCount, &bufferList);
    NSLog(@"ws: %i", (int)writeStatus);
}

free(buffer);
ExtAudioFileDispose(inputFile);
ExtAudioFileDispose(outputFile);
You are taking compressed audio (M4A) and uncompressing it, which is what you need to do to trim down the audio content. If you want to get back to the 2.5 MB range, you will need to recompress your audio when done.
Keep in mind that repeated lossy uncompress-edit-recompress cycles will degrade the quality of your audio. If you are going to be performing a lot of audio edit operations, you should transform your audio from compressed to uncompressed, then run your transforms, and finally recompress at the end.
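To make that concrete, a minimal sketch of a destination format that keeps the output compressed (untested; destinationURL is the variable from the question, and the sample rate and channel count are assumptions that should match the source):

// AAC destination: ExtAudioFile converts from the LPCM client format to this
// on each ExtAudioFileWrite, so the trimmed file stays small.
AudioStreamBasicDescription destFormat = {0};
destFormat.mFormatID = kAudioFormatMPEG4AAC;
destFormat.mSampleRate = 44100.0;  // assumed; match the source file
destFormat.mChannelsPerFrame = 2;  // assumed stereo
// Leave the remaining fields 0; the AAC encoder fills them in.

ExtAudioFileRef outputFile = NULL;
OSStatus status = ExtAudioFileCreateWithURL((CFURLRef)[NSURL fileURLWithPath:destinationURL],
                                            kAudioFileM4AType, &destFormat, NULL,
                                            kAudioFileFlags_EraseFile, &outputFile);
// Then set kExtAudioFileProperty_ClientDataFormat on BOTH files to the same
// LPCM description (not AAC, as in the question) and run the read/write loop.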
