When is a MIDINoteMessage played in a MusicTrack? (iOS)

I'm creating a MusicTrack of MIDI notes that play back successfully using
MusicPlayerStart(sequencePlayer)
but I would like to know when they are played, so I can update the UI for each MIDI note.
// Creating the MIDI track (the lowercase calls below are presumably the
// poster's own helpers wrapping the MusicSequence/MusicTrack C API)
MusicTrack track;
UInt32 trackIndex = 0;
musicTrackAtIndex(trackIndex, &track);
//Adding Tempo
addTempoEvent(0.0, 120);
// Adding MIDI notes
const int channel = 0;
MIDINoteMessage note;
note.channel = channel;
note.velocity = 100; // velocity is an integer (UInt8), not a float
note.duration = 0.3;
note.releaseVelocity = 0;
note.note = 68;
addMIDINoteToTrack(track, note, 1);
note.note = 78;
addMIDINoteToTrack(track, note, 2);
note.note = 88;
addMIDINoteToTrack(track, note, 3);
note.note = 98;
addMIDINoteToTrack(track, note, 4);
MusicTimeStamp trackEnd = 4;
addEndOfTrackAtTime(trackEnd);
setLoopTrack(true);
playSequence();
EDIT
Showing in more detail how I'm building the MIDI Track.
-(void) playing4NotesInLoop {
MusicTrack musicTrack;
MusicSequence musicSequence;
MusicPlayer musicPlayer;
const int loop_length = 4;
NewMusicSequence(&musicSequence);
// Create a client
MIDIClientRef virtualMidi;
MIDIClientCreate(CFSTR("Virtual Client"),
MyMIDINotifyProc,
NULL,
&virtualMidi);
// Create an endpoint
MIDIEndpointRef virtualEndpoint;
MIDIDestinationCreate(virtualMidi, CFSTR("Virtual Destination"),
                      MyMIDIReadProc, self.samplerUnit, &virtualEndpoint);
MusicSequenceNewTrack(musicSequence, &musicTrack);
MIDINoteMessage aMessage;
aMessage.channel = 1;
aMessage.duration = 0.5f;
aMessage.velocity = 100;
for(int i=0; i<4; ++i)
{
if (i==0) {
aMessage.note = 30;
} else {
aMessage.note = 60;
}
MusicTrackNewMIDINoteEvent(musicTrack, i, &aMessage);
}
MusicTrackLoopInfo loop_info;
loop_info.loopDuration = loop_length;
loop_info.numberOfLoops = 0;
MusicTrackSetProperty(musicTrack, kSequenceTrackProperty_LoopInfo, &loop_info, sizeof(MusicTrackLoopInfo));
MusicTimeStamp track_length;
track_length = loop_length;
MusicTrackSetProperty(musicTrack, kSequenceTrackProperty_TrackLength, &track_length, sizeof(MusicTimeStamp));
MusicSequenceSetMIDIEndpoint(musicSequence, virtualEndpoint);
NewMusicPlayer(&musicPlayer);
MusicPlayerSetSequence(musicPlayer, musicSequence);
MusicPlayerStart(musicPlayer);
}
With this code I'm successfully playing a loop of 4 notes, and MusicSequenceSetMIDIEndpoint(musicSequence, virtualEndpoint) has been set up specifying the read proc. When I use the same code but load a MIDI file instead, the read proc is successfully called for each note.

You create a virtual endpoint with MIDIDestinationCreate and then call MusicSequenceSetMIDIEndpoint to connect your sequence to it. The read proc that you specify will be called on each event.
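As a sketch (the packet parsing and the UI hook below are illustrative, not from the post), the read proc could walk the packet list, forward each event to the sampler passed as refCon, and hop to the main queue before touching the UI:
static void MyMIDIReadProc(const MIDIPacketList *pktlist,
                           void *refCon, void *connRefCon)
{
    // refCon is the value given to MIDIDestinationCreate
    // (self.samplerUnit in the edit above).
    AudioUnit sampler = (AudioUnit)refCon;
    const MIDIPacket *packet = &pktlist->packet[0];
    for (UInt32 i = 0; i < pktlist->numPackets; i++) {
        // Assumes 3-byte channel messages (status, note, velocity).
        UInt8 status = packet->data[0];
        UInt8 note = packet->data[1];
        UInt8 velocity = packet->data[2];
        // Keep the sound: forward the event to the sampler unit.
        MusicDeviceMIDIEvent(sampler, status, note, velocity, 0);
        if ((status & 0xF0) == 0x90 && velocity > 0) { // note-on
            // The read proc runs on a realtime MIDI thread; never touch
            // UIKit here directly. The handler below is hypothetical.
            dispatch_async(dispatch_get_main_queue(), ^{
                NSLog(@"note %d is playing -- update the UI here", note);
            });
        }
        packet = MIDIPacketNext(packet);
    }
}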

Related

How to decode AAC compressed frames to PCM using AudioConverterFillComplexBuffer iOS

I want to implement SIP calls in my application, and the first problem I need to solve is converting audio from compressed AAC format with ADTS headers to linear PCM.
My input data is an NSArray of ADTS frames of varying frame size. Each frame is an NSMutableData. Every frame has the same format and sample rate; only the frame size differs.
I tried to implement the sample code suggested by Igor Rotaru for this issue, but can't make it work.
Now my code looks like this. First of all, I configure the AudioConverter:
- (void)configureAudioConverter {
AudioStreamBasicDescription inputFormat;
memset(&inputFormat, 0, sizeof(inputFormat));
inputFormat.mBitsPerChannel = 0;
inputFormat.mBytesPerFrame = 0;
inputFormat.mBytesPerPacket = 0;
inputFormat.mChannelsPerFrame = 1;
inputFormat.mFormatFlags = kMPEG4Object_AAC_LC;
inputFormat.mFormatID = kAudioFormatMPEG4AAC;
inputFormat.mFramesPerPacket = 1024;
inputFormat.mReserved = 0;
inputFormat.mSampleRate = 22050;
AudioStreamBasicDescription outputFormat;
memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mSampleRate = inputFormat.mSampleRate;
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
outputFormat.mBytesPerPacket = 2;
outputFormat.mFramesPerPacket = 1;
outputFormat.mBytesPerFrame = 2;
outputFormat.mChannelsPerFrame = 1;
outputFormat.mBitsPerChannel = 16;
outputFormat.mReserved = 0;
AudioClassDescription *description = [self
getAudioClassDescriptionWithType:kAudioFormatMPEG4AAC
fromManufacturer:kAppleSoftwareAudioCodecManufacturer];
OSStatus status = AudioConverterNewSpecific(&inputFormat, &outputFormat, 1, description, &_audioConverter);
if (status != 0) {
printf("setup converter error, status: %i\n", (int)status);
}
}
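The getAudioClassDescriptionWithType:fromManufacturer: helper isn't shown in the post; a minimal sketch following Apple's usual pattern (querying kAudioFormatProperty_Decoders, since this converter decodes) might look like this:
- (AudioClassDescription *)getAudioClassDescriptionWithType:(UInt32)type
                                           fromManufacturer:(UInt32)manufacturer
{
    static AudioClassDescription desc;
    UInt32 decoderSpecifier = type;
    UInt32 size = 0;
    // Ask Core Audio how many decoder components can handle this format.
    if (AudioFormatGetPropertyInfo(kAudioFormatProperty_Decoders,
                                   sizeof(decoderSpecifier), &decoderSpecifier,
                                   &size) != noErr) {
        return nil;
    }
    unsigned int count = size / sizeof(AudioClassDescription);
    AudioClassDescription descriptions[count];
    if (AudioFormatGetProperty(kAudioFormatProperty_Decoders,
                               sizeof(decoderSpecifier), &decoderSpecifier,
                               &size, descriptions) != noErr) {
        return nil;
    }
    // Pick the decoder from the requested manufacturer,
    // e.g. kAppleSoftwareAudioCodecManufacturer.
    for (unsigned int i = 0; i < count; i++) {
        if (descriptions[i].mSubType == type &&
            descriptions[i].mManufacturer == manufacturer) {
            desc = descriptions[i];
            return &desc;
        }
    }
    return nil;
}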
After that I wrote the callback function:
struct MyUserData {
UInt32 mChannels;
UInt32 mDataSize;
const void* mData;
AudioStreamPacketDescription mPacket;
};
OSStatus inInputDataProc(AudioConverterRef inAudioConverter,
UInt32 *ioNumberDataPackets,
AudioBufferList *ioData,
AudioStreamPacketDescription **outDataPacketDescription,
void *inUserData)
{
struct MyUserData* userData = (struct MyUserData*)(inUserData);
if (!userData->mDataSize) {
*ioNumberDataPackets = 0;
return kNoMoreDataError;
}
if (outDataPacketDescription) {
userData->mPacket.mStartOffset = 0;
userData->mPacket.mVariableFramesInPacket = 0;
userData->mPacket.mDataByteSize = userData->mDataSize;
*outDataPacketDescription = &userData->mPacket;
}
ioData->mBuffers[0].mNumberChannels = userData->mChannels;
ioData->mBuffers[0].mDataByteSize = userData->mDataSize;
ioData->mBuffers[0].mData = (void *)userData->mData;
// No more data to provide following this run.
userData->mDataSize = 0;
return noErr;
}
And my function for decoding frames looks like this:
- (void)startDecodingAudio {
if (!_converterConfigured){
return;
}
while (true){
if ([self hasFramesToDecode]){
struct MyUserData userData = {1, (UInt32)_decoderBuffer[_currPosInDecoderBuf].length, _decoderBuffer[_currPosInDecoderBuf].bytes};
uint8_t *buffer = (uint8_t *)malloc(128 * sizeof(short int));
AudioBufferList decBuffer;
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = 1;
decBuffer.mBuffers[0].mDataByteSize = 128 * sizeof(short int);
decBuffer.mBuffers[0].mData = buffer;
UInt32 numFrames = 128;
AudioStreamPacketDescription outPacketDescription;
memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
outPacketDescription.mDataByteSize = 128;
outPacketDescription.mStartOffset = 0;
outPacketDescription.mVariableFramesInPacket = 0;
OSStatus status = AudioConverterFillComplexBuffer(_audioConverter,
inInputDataProc,
&userData,
&numFrames,
&decBuffer,
&outPacketDescription);
NSError *error = nil;
if (status == kNoMoreDataError) {
NSLog(@"%u bytes decoded", (unsigned int)decBuffer.mBuffers[0].mDataByteSize);
[_decodedData appendData:[NSData dataWithBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize]];
_currPosInDecoderBuf += 1;
} else {
error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
}
free(buffer); // free the per-iteration buffer (leaked in the original post)
} else {
break;
}
}
}
Each time, AudioConverterFillComplexBuffer returns status 1852797029 ('nope'), which according to the Apple API is kAudioCodecIllegalOperationError. If somebody has succeeded in converting between these formats, please share some examples or advice.
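One thing worth checking (an assumption, not a confirmed fix): kAudioFormatMPEG4AAC describes raw AAC packets, so if whole ADTS frames are handed to the converter, the 7- or 9-byte ADTS header should be stripped from each frame first. A minimal sketch:
// Strip the ADTS header from one frame before passing it to the converter.
// Returns nil if the data doesn't start with an ADTS syncword.
static NSData *rawAACFromADTSFrame(NSData *adtsFrame)
{
    const uint8_t *bytes = (const uint8_t *)adtsFrame.bytes;
    if (adtsFrame.length < 7 || bytes[0] != 0xFF || (bytes[1] & 0xF0) != 0xF0) {
        return nil; // no ADTS syncword
    }
    // protection_absent == 0 means a 2-byte CRC follows the 7-byte header.
    NSUInteger headerLength = (bytes[1] & 0x01) ? 7 : 9;
    if (adtsFrame.length <= headerLength) {
        return nil;
    }
    return [adtsFrame subdataWithRange:
               NSMakeRange(headerLength, adtsFrame.length - headerLength)];
}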
Finally, I decoded my bytes with the StreamingKit library (the original repository can be found here).

How to decode AAC audio buffer to PCM buffer in iOS? [closed]

I am trying to decode AAC audio to PCM audio in iOS. What is the best way to do this? Any sample code would be very helpful... Are there any simple APIs to do this?
I have sample code to do it.
First you should configure the in/out ASBDs (AudioStreamBasicDescription) and create the converter:
- (void)setupAudioConverter{
AudioStreamBasicDescription outFormat;
memset(&outFormat, 0, sizeof(outFormat));
outFormat.mSampleRate = 44100;
outFormat.mFormatID = kAudioFormatLinearPCM;
outFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
outFormat.mBytesPerPacket = 2;
outFormat.mFramesPerPacket = 1;
outFormat.mBytesPerFrame = 2;
outFormat.mChannelsPerFrame = 1;
outFormat.mBitsPerChannel = 16;
outFormat.mReserved = 0;
AudioStreamBasicDescription inFormat;
memset(&inFormat, 0, sizeof(inFormat));
inFormat.mSampleRate = 44100;
inFormat.mFormatID = kAudioFormatMPEG4AAC;
inFormat.mFormatFlags = kMPEG4Object_AAC_LC;
inFormat.mBytesPerPacket = 0;
inFormat.mFramesPerPacket = 1024;
inFormat.mBytesPerFrame = 0;
inFormat.mChannelsPerFrame = 1;
inFormat.mBitsPerChannel = 0;
inFormat.mReserved = 0;
OSStatus status = AudioConverterNew(&inFormat, &outFormat, &_audioConverter);
if (status != 0) {
printf("setup converter error, status: %i\n", (int)status);
}
}
After that you should write the callback function for the audio converter:
struct PassthroughUserData {
UInt32 mChannels;
UInt32 mDataSize;
const void* mData;
AudioStreamPacketDescription mPacket;
};
OSStatus inInputDataProc(AudioConverterRef aAudioConverter,
UInt32* aNumDataPackets /* in/out */,
AudioBufferList* aData /* in/out */,
AudioStreamPacketDescription** aPacketDesc,
void* aUserData)
{
PassthroughUserData* userData = (PassthroughUserData*)aUserData;
if (!userData->mDataSize) {
*aNumDataPackets = 0;
return kNoMoreDataErr;
}
if (aPacketDesc) {
userData->mPacket.mStartOffset = 0;
userData->mPacket.mVariableFramesInPacket = 0;
userData->mPacket.mDataByteSize = userData->mDataSize;
*aPacketDesc = &userData->mPacket;
}
aData->mBuffers[0].mNumberChannels = userData->mChannels;
aData->mBuffers[0].mDataByteSize = userData->mDataSize;
aData->mBuffers[0].mData = const_cast<void*>(userData->mData);
// No more data to provide following this run.
userData->mDataSize = 0;
return noErr;
}
And method for frame decoding:
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts{
if(!_audioConverter){
[self setupAudioConverter];
}
PassthroughUserData userData = { 1, (UInt32)frame.length, [frame bytes]};
NSMutableData *decodedData = [NSMutableData new];
const uint32_t MAX_AUDIO_FRAMES = 128;
const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;
do{
uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));
AudioBufferList decBuffer;
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = 1;
decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
decBuffer.mBuffers[0].mData = buffer;
UInt32 numFrames = MAX_AUDIO_FRAMES;
AudioStreamPacketDescription outPacketDescription;
memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
outPacketDescription.mStartOffset = 0;
outPacketDescription.mVariableFramesInPacket = 0;
OSStatus rv = AudioConverterFillComplexBuffer(_audioConverter,
inInputDataProc,
&userData,
&numFrames /* in/out */,
&decBuffer,
&outPacketDescription);
if (rv && rv != kNoMoreDataErr) {
NSLog(@"Error decoding audio stream: %d\n", rv);
break;
}
if (numFrames) {
[decodedData appendBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize];
}
free(buffer); // free the per-iteration buffer (leaked in the original answer)
if (rv == kNoMoreDataErr) {
break;
}
}while (true);
//void *pData = (void *)[decodedData bytes];
//audioRenderer->Render(&pData, decodedData.length, pts);
}
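A hypothetical driver for this method (aacFrames and pts are assumed to come from your demuxer; they are not part of the answer):
// Feed each demuxed AAC packet to the decoder as it arrives.
for (NSData *frame in aacFrames) {
    [self decodeAudioFrame:frame withPts:pts];
}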
You need to use Core Audio. Look for Core Audio Overview in the Apple documentation.

Blackberry Native SDK capture scrollview in invoke window

I am writing a simple application on the BB10 simulator to capture the contents of an invoke preview window (which contains a scrollable view for emails). I want to capture the entire scrollview of the invoked window/email, not just what's on screen. In the code below, I can get a handle to the Window for the entire application and screen_read_window its contents, but how do I iterate over the invoke window's controls and find the handle to the scrollview, and capture that?
InvokeRequest request;
// Set the target app
request.setTarget("sys.pim.uib.email.previewer");
// Set the action that the target app should execute
request.setAction("bb.action.VIEW");
// Set the MIME type of the data
request.setMimeType("message/rfc822");
// Specify the location of the data
request.setUri(QUrl("pim:message/rfc822:" + QString::number(accountId) + ":" + QString::number(messageId)));
//InvokeTargetReply *reply =
invokeManager->invoke(request);
sleep(2);
screen_context_t screenshot_ctx = 0;
if (screen_create_context(&screenshot_ctx,
SCREEN_APPLICATION_CONTEXT) != 0) {
return;
}
screen_pixmap_t screen_pix;
screen_buffer_t screenshot_buf;
char *screenshot_ptr = NULL;
int screenshot_stride = 0;
int usage, format;
int size[2];
screen_create_pixmap(&screen_pix, screenshot_ctx);
usage = SCREEN_USAGE_READ | SCREEN_USAGE_NATIVE;
screen_set_pixmap_property_iv(screen_pix, SCREEN_PROPERTY_USAGE, &usage);
format = SCREEN_FORMAT_RGBA8888;
screen_set_pixmap_property_iv(screen_pix, SCREEN_PROPERTY_FORMAT, &format);
size[0] = 768;
size[1] = 1280;
screen_set_pixmap_property_iv(screen_pix, SCREEN_PROPERTY_BUFFER_SIZE, size);
screen_create_pixmap_buffer(screen_pix);
screen_get_pixmap_property_pv(screen_pix, SCREEN_PROPERTY_RENDER_BUFFERS,
(void**)&screenshot_buf);
screen_get_buffer_property_pv(screenshot_buf, SCREEN_PROPERTY_POINTER,
(void**)&screenshot_ptr);
screen_get_buffer_property_iv(screenshot_buf, SCREEN_PROPERTY_STRIDE,
&screenshot_stride);
screen_read_window(Application::instance()->mainWindow()->handle(), screenshot_buf, 0, NULL ,0);
QByteArray array;
int nbytes = size[0] * size[1] * 4;
write_bitmap_header(nbytes, array, size);
for (int i = 0; i < size[1]; i++)
{
array.append(screenshot_ptr + i * screenshot_stride, size[0] * 4);
}
QImage image = QImage::fromData(array, "BMP");
QFile outFile("shared/photos/temp1.jpeg");
outFile.open(QIODevice::WriteOnly);
image.save(&outFile, "JPEG");
//Close Email
invokeManager->closeChildCard();

How to encode and decode audio using opus

I am trying to integrate Opus into my application. The encode and decode functions return positive values, which means success, but the output audio can't be played. The raw (unencoded) audio data plays fine.
Here is how I encode the data. I use a 4-byte prefix to separate packets.
self.encoder = opus_encoder_create(24000, 1, OPUS_APPLICATION_VOIP, &opusError);
opus_encoder_ctl(self.encoder, OPUS_SET_BANDWIDTH(OPUS_BANDWIDTH_SUPERWIDEBAND));
- (void) encodeBufferList:(AudioBufferList *)bufferList {
BOOL success = TPCircularBufferProduceBytes(_circularBuffer, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
if (!success) {
NSLog(@"insufficient space in circular buffer!");
}
if (!_encoding) {
_encoding = YES;
dispatch_async(self.processingQueue, ^{
[self startEncodingLoop];
});
}
}
-(void)startEncodingLoop
{
int32_t availableBytes = 0;
opus_int16 *data = (opus_int16*)TPCircularBufferTail(_circularBuffer, &availableBytes);
int availableSamples = availableBytes / _inputASBD.mBytesPerFrame;
/*!
* Use dynamic duration
*/
// int validSamples[6] = {2.5, 5, 10, 20, 40, 60}; // in milliseconds
// int esample = validSamples[0] * self.sampleRate / 1000;
// for (int i = 0; i < 6; i++) {
// int32_t samp = validSamples[i] * self.sampleRate / 1000;
// if (availableSamples < samp) {
// break;
// }
// esample = samp;
// }
/*!
* Use 20ms
*/
int esample = 20 * self.sampleRate / 1000;
if (availableSamples < esample) {
/*!
* Out of data. Finish encoding
*/
self.encoding = NO;
[self.eDelegate didFinishEncode];
return;
}
// printf("raw input value for packet \n");
// for (int i = 0; i < esample * self.numberOfChannels; i++) {
// printf("%d :", data[i]);
// }
int returnValue = opus_encode(_encoder, data, esample, _encoderOutputBuffer, 1000);
TPCircularBufferConsume(_circularBuffer, esample * sizeof(opus_int16) * self.numberOfChannels);
// printf("output encode \n");
// for (int i = 0; i < returnValue; i++) {
// printf("%d :", _encoderOutputBuffer[i]);
// }
NSMutableData *outputData = [NSMutableData new];
NSError *error = nil;
if (returnValue <= 0) {
error = [OKUtilities errorForOpusErrorCode:returnValue];
}else {
[outputData appendBytes:_encoderOutputBuffer length:returnValue * sizeof(unsigned char)];
unsigned char int_field[4];
int_to_char(returnValue , int_field);
NSData *header = [NSData dataWithBytes:&int_field[0] length:4 * sizeof(unsigned char)];
if (self.eDelegate) {
[self.eDelegate didEncodeWithData:header];
}
}
if (self.eDelegate) {
[self.eDelegate didEncodeWithData:outputData];
}
[self startEncodingLoop];
}
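The int_to_char/char_to_int helpers aren't defined in the post; assuming the big-endian framing used by opus_demo, they would look like this:
/* Big-endian 4-byte length-prefix helpers (a sketch; the post never
   shows its own definitions). */
static void int_to_char(opus_uint32 i, unsigned char ch[4])
{
    ch[0] = (unsigned char)(i >> 24);
    ch[1] = (unsigned char)((i >> 16) & 0xFF);
    ch[2] = (unsigned char)((i >> 8) & 0xFF);
    ch[3] = (unsigned char)(i & 0xFF);
}

static opus_uint32 char_to_int(unsigned char ch[4])
{
    return ((opus_uint32)ch[0] << 24) | ((opus_uint32)ch[1] << 16)
         | ((opus_uint32)ch[2] << 8)  |  (opus_uint32)ch[3];
}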
And here is the decode function:
self.decoder = opus_decoder_create(24000, 1, &opusError);
opus_decoder_ctl(self.decoder, OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
opus_decoder_ctl(self.decoder, OPUS_SET_GAIN(10));
-(void)startParseData:(unsigned char*)data remainingLen:(int)len
{
if (len <= 0) {
[self.dDelegate didFinishDecode];
return;
}
int headLen = sizeof(unsigned char) * 4;
unsigned char h[4];
h[0] = data[0];
h[1] = data[1];
h[2] = data[2];
h[3] = data[3];
int packetLen = char_to_int(h);
data += headLen;
packetLen = packetLen * sizeof(unsigned char) * self.numberOfChannels;
[self decodePacket:data length:packetLen remainingLen:len - headLen];
}
-(void)decodePacket:(unsigned char*)inputData length:(int)len remainingLen:(int)rl
{
int bw = opus_packet_get_bandwidth(inputData); //TEST: return OPUS_BANDWIDTH_SUPERWIDEBAND here
int32_t decodedSamples = 0;
// int validSamples[6] = {2.5, 5, 10, 20, 40, 60}; // in milliseconds
/*!
* Use 60ms
*/
int esample = 60 * self.sampleRate / 1000;
// printf("input decode \n");
// for (int i = 0; i < len; i++) {
// printf("%d :", inputData[i]);
// }
_decoderBufferLength = esample * self.numberOfChannels * sizeof(opus_int16);
int returnValue = opus_decode(_decoder, inputData, len, _outputBuffer, esample, 1);
if (returnValue < 0) {
NSError *error = [OKUtilities errorForOpusErrorCode:returnValue];
NSLog(@"decode error %@", error);
inputData += len;
[self startParseData:inputData remainingLen:rl - len];
return;
}
decodedSamples = returnValue;
NSUInteger length = decodedSamples * self.numberOfChannels;
// printf("raw decoded data \n");
// for (int i = 0; i < length; i++) {
// printf("%d :", _outputBuffer[i]);
// }
NSData *audioData = [NSData dataWithBytes:_outputBuffer length:length * sizeof(opus_int16)];
if (self.dDelegate) {
[self.dDelegate didDecodeData:audioData];
}
inputData += len;
[self startParseData:inputData remainingLen:rl - len];
}
Please help me to point out what I am missing. An example would be great.
I think the problem is on the decode side:
You pass 1 as the fec argument to opus_decode(). This asks the decoder to generate the full packet duration's worth of data from error correction data in the current packet. I don't see any lost packet tracking in your code, so 0 should be passed instead. With that change your input and output duration should match.
You configure the decoder for mono output, but later use self.numberOfChannels in length calculations. Those should match or you may get unexpected behaviour.
OPUS_SET_SIGNAL doesn't do anything in opus_decoder_ctl(); it will just return OPUS_UNIMPLEMENTED without affecting behaviour.
Opus packets can be up to 120 ms in duration, so your limit of 60 ms could fail to decode some streams. If you're only talking to your own app that won't cause a problem the way you've configured it, since libopus defaults to 20 ms frames.
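Concretely, the fec fix is a one-argument change to the opus_decode call in decodePacket:
/* fec = 0: decode this packet normally rather than asking for error
   correction data, so the output duration matches the input. */
int returnValue = opus_decode(_decoder, inputData, len, _outputBuffer, esample, 0);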
I found what the problem is. I had set the audio format to float (kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat), so I should use opus_encode_float and opus_decode_float instead of opus_encode and opus_decode.
As @Ralph says, we should use fec = 0 in opus_decode. Thanks to @Ralph.
One thing I notice is that you're treating the return value of opus_encode() as a number of samples encoded, when it's actually the number of bytes in the compressed packet. That means you're writing 50% or 75% garbage data from the end of _encoderOutputBuffer into your encoded stream.
Also make sure _encoderOutputBuffer has room for the hard-coded 1000-byte packet-length limit you're passing in.
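For example (the buffer's declaration never appears in the question, so this allocation is an assumption):
/* Room for the hard-coded 1000-byte limit passed to opus_encode(). */
#define MAX_PACKET_BYTES 1000
_encoderOutputBuffer = (unsigned char *)malloc(MAX_PACKET_BYTES);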

How to start AUAudioFilePlayer in the middle of a track but loop to the beginning?

In iOS I'm using the AUAudioFilePlayer and I'm trying to do a 10 second lead in before looping the entire audio track. I've set the ScheduledAudioFileRegion as below to start on a non-zero frame (details here: https://developer.apple.com/library/ios/qa/qa1786/_index.html)
ScheduledAudioFileRegion playRegion;
playRegion.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
playRegion.mTimeStamp.mSampleTime = 0;
playRegion.mCompletionProc = NULL;
playRegion.mCompletionProcUserData = NULL;
playRegion.mAudioFile = audioFileId;
playRegion.mLoopCount = UInt32(-1);
playRegion.mStartFrame = startFrame;
playRegion.mFramesToPlay = UInt32(-1); // i.e. all of it
_(AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_ScheduledFileRegion,
kAudioUnitScope_Global,
0,
&playRegion,
sizeof(playRegion)),
#"Error setting Audio File Region on the FilePlayer AU");
The problem is that when it loops, it starts again at startFrame. Any ideas how I get it to start at the beginning on subsequent passes?
The answer is to do two schedules. mTimeStamp.mSampleTime is how long, relative to the scheduled start time, to wait before playing:
/////////////////////////////////////////
// PRE-AMBLE
/////////////////////////////////////////
// Tell it to play the whole file with optional looping...
// more details: https://developer.apple.com/library/ios/qa/qa1786/_index.html
ScheduledAudioFileRegion playRegion;
playRegion.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
playRegion.mTimeStamp.mSampleTime = 0; // PLAY IMMEDIATELY...
playRegion.mCompletionProc = NULL;
playRegion.mCompletionProcUserData = NULL;
playRegion.mAudioFile = audioFileId;
playRegion.mLoopCount = 0;
playRegion.mStartFrame = startFrame; // ...FROM THE REQUESTED START FRAME
playRegion.mFramesToPlay = UInt32(-1); // i.e. all of it
_(AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_ScheduledFileRegion,
kAudioUnitScope_Global,
0,
&playRegion,
sizeof(playRegion)),
#"Error setting Audio File Region on the FilePlayer AU");
/////////////////////////////////////////
// LOOP
/////////////////////////////////////////
playRegion.mTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
playRegion.mTimeStamp.mSampleTime = lengthInFrames - startFrame; // WAIT UNTIL PREAMBLE IS DONE...
playRegion.mCompletionProc = NULL;
playRegion.mCompletionProcUserData = NULL;
playRegion.mAudioFile = audioFileId;
playRegion.mLoopCount = UInt32(-1);
playRegion.mStartFrame = 0; // ...THEN PLAY FROM THE BEGINNING
playRegion.mFramesToPlay = UInt32(-1); // i.e. all of it
_(AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_ScheduledFileRegion,
kAudioUnitScope_Global,
0,
&playRegion,
sizeof(playRegion)),
#"Error setting Audio File Region on the FilePlayer AU");
