Obtain an AudioTimeStamp for an Audio Queue Buffer - iOS

I'm attempting to create a continuous FIFO audio recorder in Swift. I'm running into an issue while trying to create the audioQueueCallback.
From the docs, AudioTimeStamp has this init method:
AudioTimeStamp(mSampleTime: Float64, mHostTime: UInt64, mRateScalar: Float64, mWordClockTime: UInt64, mSMPTETime: SMPTETime, mFlags: AudioTimeStampFlags, mReserved: UInt32)
And I have no idea how to use it.
It seems to me like the device should have a reliable internal clock to manage audio queues off of, but I haven't been able to find any documentation for it.
Here's my attempt at creating a BufferQueue:
typealias WYNDRInputQueueCallback = ((Data) -> Void)

class WYNDRInputQueue {

    class WYNDRInputQueueUserData {
        let callback: WYNDRInputQueueCallback
        let bufferStub: NSData

        init(callback: @escaping WYNDRInputQueueCallback, bufferStub: NSData) {
            self.callback = callback
            self.bufferStub = bufferStub
        }
    }

    private var audioQueueRef: AudioQueueRef?
    private let userData: WYNDRInputQueueUserData

    public init(asbd: inout AudioStreamBasicDescription, callback: @escaping WYNDRInputQueueCallback, buffersCount: UInt32 = 3, bufferSize: UInt32 = 9600) throws {
        self.userData = WYNDRInputQueueUserData(callback: callback, bufferStub: NSMutableData(length: Int(bufferSize))!)
        let userDataUnsafe = UnsafeMutableRawPointer(Unmanaged.passRetained(self.userData).toOpaque())
        let input = AudioQueueNewInput(&asbd,
                                       audioQueueInputCallback,
                                       userDataUnsafe,
                                       .none,
                                       .none,
                                       0,
                                       &audioQueueRef)
        if input != noErr {
            throw InputQueueError.genericError(input)
        }
        assert(audioQueueRef != nil)

        for _ in 0..<buffersCount {
            var bufferRef: AudioQueueBufferRef?
            let bufferInput = AudioQueueAllocateBuffer(audioQueueRef!, bufferSize, &bufferRef)
            if bufferInput != noErr {
                throw InputQueueError.genericError(bufferInput)
            }
            assert(bufferRef != nil)
Here's where I'm using the audioTimeStamp:
            audioQueueInputCallback(userDataUnsafe, audioQueueRef!, bufferRef!, <#T##UnsafePointer<AudioTimeStamp>#>, 0, nil)
        }
    }

    private let audioQueueInputCallback: AudioQueueInputCallback = { (inUserData, inAQ, inBuffer, inStartTime, inNumberPacketDescriptions, inPacketDescs) in
        let userData = Unmanaged<WYNDRInputQueueUserData>.fromOpaque(inUserData!).takeUnretainedValue()
        let dataSize = Int(inBuffer.pointee.mAudioDataByteSize)
        let inputData = Data(bytes: inBuffer.pointee.mAudioData, count: dataSize)
        userData.callback(inputData)
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, nil)
    }
}
Any advice here would be greatly appreciated!

I'm not sure how the timestamp is going to be used or who is going to use it, but if in doubt, why not use the number of samples you've recorded as the timestamp?
var timestamp = AudioTimeStamp()
timestamp.mSampleTime = numberOfSamplesRecorded
timestamp.mFlags = .sampleTimeValid // only mSampleTime is filled in, so only flag the sample time as valid
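If you go that route, something has to maintain the running sample count. A minimal sketch, assuming linear PCM; numberOfSamplesRecorded and bytesPerFrame are hypothetical names, and bytesPerFrame would come from your ASBD's mBytesPerFrame:
// Sketch only: build a timestamp for the next buffer, then advance the
// running count by the number of frames the buffer holds.
var numberOfSamplesRecorded: Float64 = 0

func timestampForNextBuffer(byteSize: UInt32, bytesPerFrame: UInt32) -> AudioTimeStamp {
    var timestamp = AudioTimeStamp()
    timestamp.mSampleTime = numberOfSamplesRecorded
    timestamp.mFlags = .sampleTimeValid
    numberOfSamplesRecorded += Float64(byteSize / bytesPerFrame)
    return timestamp
}
If you'd rather anchor to the device clock you were looking for, that clock is the host time base: fill mHostTime from mach_absolute_time() and set .hostTimeValid instead.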

Related

How to play raw audio data from socket in Swift

I need to play raw audio data coming over a socket in small chunks. I have read that I'm supposed to use a circular buffer, and I found a few solutions in Objective-C, but I couldn't make any of them work, especially in Swift 3.
Can anyone help me?
First, implement a ring buffer like so:
public struct RingBuffer<T> {
    private var array: [T?]
    private var readIndex = 0
    private var writeIndex = 0

    public init(count: Int) {
        array = [T?](repeating: nil, count: count)
    }

    /* Returns false if out of space. */
    @discardableResult public mutating func write(element: T) -> Bool {
        if !isFull {
            array[writeIndex % array.count] = element
            writeIndex += 1
            return true
        } else {
            return false
        }
    }

    /* Returns nil if the buffer is empty. */
    public mutating func read() -> T? {
        if !isEmpty {
            let element = array[readIndex % array.count]
            readIndex += 1
            return element
        } else {
            return nil
        }
    }

    fileprivate var availableSpaceForReading: Int {
        return writeIndex - readIndex
    }

    public var isEmpty: Bool {
        return availableSpaceForReading == 0
    }

    fileprivate var availableSpaceForWriting: Int {
        return array.count - availableSpaceForReading
    }

    public var isFull: Bool {
        return availableSpaceForWriting == 0
    }
}
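A quick usage example (the count and values here are arbitrary):
// The socket handler writes decoded samples in;
// the audio render callback reads them back out.
var samples = RingBuffer<Float32>(count: 4096)
samples.write(element: 0.25)   // returns false once the buffer is full
let next = samples.read()      // returns nil once the buffer is empty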
After that, implement the audio unit like so (modify as necessary):
class ToneGenerator {
    fileprivate var toneUnit: AudioUnit? = nil

    init() {
        setupAudioUnit()
    }

    deinit {
        stop()
    }

    func setupAudioUnit() {
        // Configure the description of the output audio component we want to find:
        let componentSubtype: OSType
        #if os(OSX)
            componentSubtype = kAudioUnitSubType_DefaultOutput
        #else
            componentSubtype = kAudioUnitSubType_RemoteIO
        #endif
        var defaultOutputDescription = AudioComponentDescription(componentType: kAudioUnitType_Output,
                                                                 componentSubType: componentSubtype,
                                                                 componentManufacturer: kAudioUnitManufacturer_Apple,
                                                                 componentFlags: 0,
                                                                 componentFlagsMask: 0)
        let defaultOutput = AudioComponentFindNext(nil, &defaultOutputDescription)
        var err: OSStatus
        // Create a new instance of it in the form of our audio unit:
        err = AudioComponentInstanceNew(defaultOutput!, &toneUnit)
        assert(err == noErr, "AudioComponentInstanceNew failed")
        // Set the render callback as the input for our audio unit.
        // The callback below matches AURenderCallback exactly, so it can be passed directly:
        var renderCallbackStruct = AURenderCallbackStruct(inputProc: renderCallback,
                                                          inputProcRefCon: nil)
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_SetRenderCallback,
                                   kAudioUnitScope_Input,
                                   0,
                                   &renderCallbackStruct,
                                   UInt32(MemoryLayout<AURenderCallbackStruct>.size))
        assert(err == noErr, "AudioUnitSetProperty SetRenderCallback failed")
        // Set the stream format for the audio unit. That is, the format of the data that our render callback will provide.
        var streamFormat = AudioStreamBasicDescription(mSampleRate: Float64(sampleRate),
                                                       mFormatID: kAudioFormatLinearPCM,
                                                       mFormatFlags: kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved,
                                                       mBytesPerPacket: 4 /* four bytes per float */,
                                                       mFramesPerPacket: 1,
                                                       mBytesPerFrame: 4,
                                                       mChannelsPerFrame: 1,
                                                       mBitsPerChannel: 4 * 8,
                                                       mReserved: 0)
        err = AudioUnitSetProperty(toneUnit!,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   &streamFormat,
                                   UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        assert(err == noErr, "AudioUnitSetProperty StreamFormat failed")
    }

    func start() {
        var status: OSStatus
        status = AudioUnitInitialize(toneUnit!)
        status = AudioOutputUnitStart(toneUnit!)
        assert(status == noErr)
    }

    func stop() {
        AudioOutputUnitStop(toneUnit!)
        AudioUnitUninitialize(toneUnit!)
    }
}
These are fixed values:
private let sampleRate = 16000
private let amplitude: Float = 1.0
private let frequency: Float = 440

/// Theta is changed over time as each sample is provided.
private var theta: Float = 0.0

private func renderCallback(_ inRefCon: UnsafeMutableRawPointer,
                            ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
                            inTimeStamp: UnsafePointer<AudioTimeStamp>,
                            inBusNumber: UInt32,
                            inNumberFrames: UInt32,
                            ioData: UnsafeMutablePointer<AudioBufferList>?) -> OSStatus {
    let abl = UnsafeMutableAudioBufferListPointer(ioData!)
    let buffer = abl[0]
    let pointer: UnsafeMutableBufferPointer<Float32> = UnsafeMutableBufferPointer(buffer)
    for frame in 0..<inNumberFrames {
        let pointerIndex = pointer.startIndex.advanced(by: Int(frame))
        pointer[pointerIndex] = sin(theta) * amplitude
        theta += 2.0 * Float.pi * frequency / Float(sampleRate)
    }
    return noErr
}
You need to put the incoming data into the circular buffer and then play the sound, as sketched below.
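A minimal sketch of that wiring, assuming a shared RingBuffer<Float32> named ringBuffer (a hypothetical instance the socket code writes decoded samples into): the loop inside renderCallback reads from the buffer instead of synthesizing a tone.
// Replacement for the sine-wave loop in renderCallback: play back whatever
// the socket delivered, emitting silence (0) when the buffer underruns.
for frame in 0..<Int(inNumberFrames) {
    let pointerIndex = pointer.startIndex.advanced(by: frame)
    pointer[pointerIndex] = ringBuffer.read() ?? 0
}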

Swift: Retrieve audio file marker list from url?

I just want to get a list of the markers in an audio file. I thought this would be an easy common task that wouldn't be too difficult. However, I can barely find any example code or documentation, so I ended up with this:
private func getMarkers(_ url: CFURL) -> AudioFileMarkerList {
    var file: AudioFileID?
    var size: UInt32 = 0
    var markers = AudioFileMarkerList()
    AudioFileOpenURL(url, .readPermission, kAudioFileWAVEType, &file)
    AudioFileGetPropertyInfo(file!, kAudioFilePropertyMarkerList, &size, nil)
    AudioFileGetProperty(file!, kAudioFilePropertyMarkerList, &size, &markers)
    return markers
}
Sadly, this doesn't work: error: memory read failed for 0x0.
I just can't figure out the problem. I checked the url and the size (which are both valid), but it always fails to retrieve the markers. Any help with this would be fantastic!
EDIT:
This sort of works, but all the data is completely wrong, and I can't understand how a single audio file can have multiple AudioFileMarkerLists of markers:
private func getMarkers(_ url: CFURL) -> [AudioFileMarkerList] {
    var file: AudioFileID?
    var size: UInt32 = 0
    AudioFileOpenURL(url, .readPermission, kAudioFileWAVEType, &file)
    AudioFileGetPropertyInfo(file!, kAudioFilePropertyMarkerList, &size, nil)
    let length = NumBytesToNumAudioFileMarkers(Int(size))
    var markers = [AudioFileMarkerList](repeating: AudioFileMarkerList(), count: length)
    AudioFileGetProperty(file!, kAudioFilePropertyMarkerList, &size, &markers)
    return markers
}
EDIT 2: According to most answers I've seen so far, this should work, but it returns an empty array:
private func getMarkers(_ url: CFURL) -> [AudioFileMarkerList] {
    var file: AudioFileID?
    var size: UInt32 = 0
    AudioFileOpenURL(url, .readPermission, kAudioFileWAVEType, &file)
    AudioFileGetPropertyInfo(file!, kAudioFilePropertyMarkerList, &size, nil)
    let length = NumBytesToNumAudioFileMarkers(Int(size))
    var markers = [AudioFileMarkerList]()
    markers.reserveCapacity(length)
    AudioFileGetProperty(file!, kAudioFilePropertyMarkerList, &size, &markers)
    return markers
}
EDIT 3:
I got rid of a bunch of error checking and useful stuff from Ryan's code for anyone wanting to quickly try and find the problem:
private func getMarkers(_ url: CFURL) -> [AudioFileMarker]? {
    var file: AudioFileID?
    var size: UInt32 = 0
    var markers: [AudioFileMarker] = []
    AudioFileOpenURL(url, .readPermission, kAudioFileWAVEType, &file)
    AudioFileGetPropertyInfo(file!, kAudioFilePropertyMarkerList, &size, nil)
    let length = NumBytesToNumAudioFileMarkers(Int(size))
    let data = UnsafeMutablePointer<AudioFileMarkerList>.allocate(capacity: length)
    AudioFileGetProperty(file!, kAudioFilePropertyMarkerList, &size, data)
    markers.append(data.pointee.mMarkers)
    data.deallocate(capacity: length)
    return markers
}
I just hope Apple actually tested AudioFileMarkerList in the first place.
EDIT 4:
SOLVED thanks to Rhythmic Fistman and Ryan Francesconi! Final result:
private func getMarkers(_ url: CFURL) -> [AudioFileMarker]? {
    var file: AudioFileID?
    var size: UInt32 = 0
    var markerList: [AudioFileMarker] = []
    AudioFileOpenURL(url, .readPermission, kAudioFileWAVEType, &file)
    AudioFileGetPropertyInfo(file!, kAudioFilePropertyMarkerList, &size, nil)
    let length = NumBytesToNumAudioFileMarkers(Int(size))
    let data = UnsafeMutablePointer<AudioFileMarkerList>.allocate(capacity: length)
    AudioFileGetProperty(file!, kAudioFilePropertyMarkerList, &size, data)
    let markers = UnsafeBufferPointer<AudioFileMarker>(start: &data.pointee.mMarkers, count: length)
    for marker in markers {
        markerList.append(marker)
    }
    data.deallocate(capacity: length)
    return markerList
}
Looks like you need to use UnsafeBufferPointer to access variable length arrays (like mMarkers). So instead of
out.append(markerList.mMarkers)
which only adds the first element, do this
let markersBuffer = UnsafeBufferPointer<AudioFileMarker>(start: &data.pointee.mMarkers,
                                                         count: Int(data.pointee.mNumberMarkers))
for marker in markersBuffer {
    markers.append(marker)
}
Modeled on this answer
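For the record, a usage sketch of the final getMarkers above (fileURL is a hypothetical file URL; on the Swift versions I've tried, mName imports as an Unmanaged CFString, hence the unwrapping):
// Print the name and frame position of every marker in a WAVE file.
if let markers = getMarkers(fileURL as CFURL) {
    for marker in markers {
        let name = (marker.mName?.takeUnretainedValue() as String?) ?? "(unnamed)"
        print("marker '\(name)' at frame \(marker.mFramePosition)")
    }
}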
EDIT: Simplest solution is to use AudioKit's version of EZAudioFile.markers. Note this is not the same as the original EZAudio framework as I had added this marker code to AudioKit's version only.
import AudioKit
...
if let file = EZAudioFile(url: url) {
    if let markers = file.markers as? [EZAudioFileMarker] {
        for m in markers {
            Swift.print("NAME: \(m.name) FRAME: \(m.framePosition)")
        }
    }
}
If you REALLY want to try it in Swift, it would look something like the code below. I'm not an expert in this, but as far as I can tell there is some issue translating the AudioFileMarkerList struct to Swift. It may be solvable, but it seems best to just use Objective-C for these calls. I recommend using AudioKit to accomplish what you need, as I have added the marker code to EZAudioFile there. Check: https://github.com/AudioKit/AudioKit/blob/master/AudioKit/Common/Internals/EZAudio/EZAudioFile.m
But for the record, here is the Swift code in progress! Note it's hard-coded to WAVE files for the moment... Perhaps someone else can finish it?
class func getAudioFileMarkers(_ url: URL) -> [AudioFileMarker]? {
    Swift.print("getAudioFileMarkers() \(url)")
    var err: OSStatus = noErr
    var audioFileID: AudioFileID?
    err = AudioFileOpenURL(url as CFURL,
                           .readPermission,
                           kAudioFileWAVEType,
                           &audioFileID)
    if err != noErr {
        Swift.print("AudioFileOpenURL FAILED, Error: \(err)")
        return nil
    }
    guard audioFileID != nil else {
        return nil
    }
    Swift.print("audioFileID: \(audioFileID)")
    var outSize: UInt32 = 0
    var writable: UInt32 = 0
    err = AudioFileGetPropertyInfo(audioFileID!, kAudioFilePropertyMarkerList, &outSize, &writable)
    if err != noErr {
        Swift.print("AudioFileGetPropertyInfo kAudioFilePropertyMarkerList FAILED, Error: \(err)")
        return nil
    }
    Swift.print("outSize: \(outSize), writable: \(writable)")
    guard outSize != 0 else { return nil }
    let length = NumBytesToNumAudioFileMarkers(Int(outSize))
    Swift.print("Found \(length) markers")
    let theData = UnsafeMutablePointer<AudioFileMarkerList>.allocate(capacity: length)
    if length == 0 {
        return nil
    }
    // pull marker list
    err = AudioFileGetProperty(audioFileID!, kAudioFilePropertyMarkerList, &outSize, theData)
    if err != noErr {
        Swift.print("AudioFileGetProperty kAudioFilePropertyMarkerList FAILED, Error: \(err)")
        return nil
    }
    let markerList: AudioFileMarkerList = theData.pointee
    Swift.print("markerList.mMarkers: \(markerList.mMarkers)")
    // this is only showing up as a single AudioFileMarker, not an array of them.
    // I DON'T KNOW WHY. It works in Obj-C. I'm obviously missing something, or there is a problem in translation
    var out = [AudioFileMarker]()
    let mirror = Mirror(reflecting: markerList.mMarkers)
    for m in mirror.children {
        Swift.print("label: \(m.label) value: \(m.value)")
    }
    // for now just append the first one.
    // :(
    out.append(markerList.mMarkers)
    // done with this now
    theData.deallocate(capacity: length)
    return out
}

Audio Queue Services Player in Swift isn't calling callback

I've been playing around with Audio Queue Services for about a week, and I've written a Swift version of the player from the Apple Audio Queue Services Guide.
I'm recording in Linear PCM and saving to disk with this method:
AudioFileCreateWithURL(url, kAudioFileWAVEType, &format,
                       AudioFileFlags.dontPageAlignAudioData.union(.eraseFile), &audioFileID)
My AudioQueueOutputCallback isn't being called, even though I can verify that my bufferSize is seemingly large enough and that it's getting passed actual data. I'm not getting any OSStatus errors, and it seems like everything should work. There's very little Swift-written Audio Queue Services code out there, and should I get this working I'd be happy to open up the rest of my code.
Any and all suggestions welcome!
class SVNPlayer: SVNPlayback {
    var state: PlayerState!

    private let callback: AudioQueueOutputCallback = { aqData, inAQ, inBuffer in
        guard let userData = aqData else { return }
        let audioPlayer = Unmanaged<SVNPlayer>.fromOpaque(userData).takeUnretainedValue()
        guard audioPlayer.state.isRunning,
            let queue = audioPlayer.state.mQueue else { return }
        var buffer = inBuffer.pointee // dereference pointers
        var numBytesReadFromFile: UInt32 = 0
        var numPackets = audioPlayer.state.mNumPacketsToRead
        var mPacketDescIsNil = audioPlayer.state.mPacketDesc == nil // determine if the packetDesc
        if mPacketDescIsNil {
            audioPlayer.state.mPacketDesc = AudioStreamPacketDescription(mStartOffset: 0, mVariableFramesInPacket: 0, mDataByteSize: 0)
        }
        AudioFileReadPacketData(audioPlayer.state.mAudioFile, false, &numBytesReadFromFile, // read the packet at the saved file
                                &audioPlayer.state.mPacketDesc!, audioPlayer.state.mCurrentPacket,
                                &numPackets, buffer.mAudioData)
        if numPackets > 0 {
            buffer.mAudioDataByteSize = numBytesReadFromFile
            AudioQueueEnqueueBuffer(queue, inBuffer, mPacketDescIsNil ? numPackets : 0,
                                    &audioPlayer.state.mPacketDesc!)
            audioPlayer.state.mCurrentPacket += Int64(numPackets)
        } else {
            AudioQueueStop(queue, false)
            audioPlayer.state.isRunning = false
        }
    }

    init(inputPath: String, audioFormat: AudioStreamBasicDescription, numberOfBuffers: Int) throws {
        super.init()
        var format = audioFormat
        let pointer = UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque()) // get an unmanaged reference to self
        guard let audioFileUrl = CFURLCreateFromFileSystemRepresentation(nil,
                                                                         inputPath,
                                                                         CFIndex(strlen(inputPath)), false) else {
            throw MixerError.playerInputPath }
        var audioFileID: AudioFileID?
        try osStatus { AudioFileOpenURL(audioFileUrl, AudioFilePermissions.readPermission, 0, &audioFileID) }
        guard audioFileID != nil else { throw MixerError.playerInputPath }
        state = PlayerState(mDataFormat: audioFormat, // setup the player state with mostly initial values
                            mQueue: nil,
                            mAudioFile: audioFileID!,
                            bufferByteSize: 0,
                            mCurrentPacket: 0,
                            mNumPacketsToRead: 0,
                            isRunning: false,
                            mPacketDesc: nil,
                            onError: nil)
        var dataFormatSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.stride)
        try osStatus { AudioFileGetProperty(audioFileID!, kAudioFilePropertyDataFormat, &dataFormatSize, &state.mDataFormat) }
        var queue: AudioQueueRef?
        try osStatus { AudioQueueNewOutput(&format, callback, pointer, CFRunLoopGetCurrent(), CFRunLoopMode.commonModes.rawValue, 0, &queue) } // setup output queue
        guard queue != nil else { throw MixerError.playerOutputQueue }
        state.mQueue = queue // add to playerState
        var maxPacketSize = UInt32()
        var propertySize = UInt32(MemoryLayout<UInt32>.stride)
        try osStatus { AudioFileGetProperty(state.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &propertySize, &maxPacketSize) }
        deriveBufferSize(maxPacketSize: maxPacketSize, seconds: 0.5, outBufferSize: &state.bufferByteSize, outNumPacketsToRead: &state.mNumPacketsToRead)
        let isFormatVBR = state.mDataFormat.mBytesPerPacket == 0 || state.mDataFormat.mFramesPerPacket == 0
        if isFormatVBR { // Allocating Memory for a Packet Descriptions Array
            let size = UInt32(MemoryLayout<AudioStreamPacketDescription>.stride)
            state.mPacketDesc = AudioStreamPacketDescription(mStartOffset: 0,
                                                             mVariableFramesInPacket: state.mNumPacketsToRead,
                                                             mDataByteSize: size)
        } // if CBR it stays set to null
        for _ in 0..<numberOfBuffers { // Allocate and Prime Audio Queue Buffers
            let bufferRef = UnsafeMutablePointer<AudioQueueBufferRef?>.allocate(capacity: 1)
            let foo = state.mDataFormat.mBytesPerPacket * 1024 / UInt32(numberOfBuffers)
            try osStatus { AudioQueueAllocateBuffer(state.mQueue!, foo, bufferRef) } // allocate the buffer
            if let buffer = bufferRef.pointee {
                AudioQueueEnqueueBuffer(state.mQueue!, buffer, 0, nil)
            }
        }
        let gain: Float32 = 1.0 // Set an Audio Queue's Playback Gain
        try osStatus { AudioQueueSetParameter(state.mQueue!, kAudioQueueParam_Volume, gain) }
    }

    func start() throws {
        state.isRunning = true // Start and Run an Audio Queue
        try osStatus { AudioQueueStart(state.mQueue!, nil) }
        while state.isRunning {
            CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 0.25, false)
        }
        CFRunLoopRunInMode(CFRunLoopMode.defaultMode, 1.0, false)
        state.isRunning = false
    }

    func stop() throws {
        guard state.isRunning,
            let queue = state.mQueue else { return }
        try osStatus { AudioQueueStop(queue, true) }
        try osStatus { AudioQueueDispose(queue, true) }
        try osStatus { AudioFileClose(state.mAudioFile) }
        state.isRunning = false
    }

    private func deriveBufferSize(maxPacketSize: UInt32, seconds: Float64, outBufferSize: inout UInt32, outNumPacketsToRead: inout UInt32) {
        let maxBufferSize = UInt32(0x50000)
        let minBufferSize = UInt32(0x4000)
        if state.mDataFormat.mFramesPerPacket != 0 {
            let numPacketsForTime: Float64 = state.mDataFormat.mSampleRate / Float64(state.mDataFormat.mFramesPerPacket) * seconds
            outBufferSize = UInt32(numPacketsForTime) * maxPacketSize
        } else {
            outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize
        }
        if outBufferSize > maxBufferSize && outBufferSize > maxPacketSize {
            outBufferSize = maxBufferSize
        } else if outBufferSize < minBufferSize {
            outBufferSize = minBufferSize
        }
        outNumPacketsToRead = outBufferSize / maxPacketSize
    }
}
My player state struct is:
struct PlayerState: PlaybackState {
    var mDataFormat: AudioStreamBasicDescription
    var mQueue: AudioQueueRef?
    var mAudioFile: AudioFileID
    var bufferByteSize: UInt32
    var mCurrentPacket: Int64
    var mNumPacketsToRead: UInt32
    var isRunning: Bool
    var mPacketDesc: AudioStreamPacketDescription?
    var onError: ((Error) -> Void)?
}
Instead of enqueuing an empty buffer, try calling your callback so it enqueues a (hopefully) full buffer. I'm unsure about the runloop stuff, but I'm sure you know what you're doing.
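As a sketch of what that looks like in your init's allocation loop (same names as your code; untested):
// Prime each buffer by invoking the output callback directly, so it reads
// packets from the file and enqueues a filled buffer instead of an empty one.
for _ in 0..<numberOfBuffers {
    var bufferRef: AudioQueueBufferRef?
    try osStatus { AudioQueueAllocateBuffer(state.mQueue!, state.bufferByteSize, &bufferRef) }
    if let buffer = bufferRef {
        callback(pointer, state.mQueue!, buffer)
    }
}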

Creating copy of CMSampleBuffer in Swift returns OSStatus -12743 (Invalid Media Format)

I am attempting to perform a deep clone of a CMSampleBuffer to store the output of an AVCaptureSession. I am receiving the error kCMSampleBufferError_InvalidMediaFormat (OSStatus -12743) when I run the function CMSampleBufferCreateForImageBuffer. I don't see how I've mismatched the CVImageBuffer and the CMSampleBuffer format description. Anyone know where I've gone wrong? Here is my test code.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    let allocator: CFAllocator = CFAllocatorGetDefault().takeRetainedValue()

    func cloneImageBuffer(imageBuffer: CVImageBuffer!) -> CVImageBuffer? {
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        let bytesPerRow: size_t = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width: size_t = CVPixelBufferGetWidth(imageBuffer)
        let height: size_t = CVPixelBufferGetHeight(imageBuffer)
        let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
        let pixelFormatType = CVPixelBufferGetPixelFormatType(imageBuffer)
        let data = NSMutableData(bytes: baseAddress, length: bytesPerRow * height)
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0)
        var clonedImageBuffer: CVPixelBuffer?
        let refCon = NSMutableData()
        if CVPixelBufferCreateWithBytes(allocator, width, height, pixelFormatType, data.mutableBytes, bytesPerRow, nil, refCon.mutableBytes, nil, &clonedImageBuffer) == noErr {
            return clonedImageBuffer
        } else {
            return nil
        }
    }

    if let oldImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
        if let newImageBuffer = cloneImageBuffer(oldImageBuffer) {
            if let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) {
                let dataIsReady = CMSampleBufferDataIsReady(sampleBuffer)
                let refCon = NSMutableData()
                var timingInfo: CMSampleTimingInfo = kCMTimingInfoInvalid
                let timingInfoSuccess = CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &timingInfo)
                if timingInfoSuccess == noErr {
                    var newSampleBuffer: CMSampleBuffer?
                    let success = CMSampleBufferCreateForImageBuffer(allocator, newImageBuffer, dataIsReady, nil, refCon.mutableBytes, formatDescription, &timingInfo, &newSampleBuffer)
                    if success == noErr {
                        bufferArray.append(newSampleBuffer!)
                    } else {
                        NSLog("Failed to create new image buffer. Error: \(success)")
                    }
                } else {
                    NSLog("Failed to get timing info. Error: \(timingInfoSuccess)")
                }
            }
        }
    }
}
I was able to fix the problem by creating a format description off the newly created image buffer and using it instead of the format description off the original sample buffer. Unfortunately, while that fixes the problem here, the format descriptions don't match and cause problems further down.
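That workaround is roughly the following sketch (newImageBuffer is the cloned buffer from the code above):
// Derive a fresh format description from the cloned image buffer and pass it
// to CMSampleBufferCreateForImageBuffer in place of the original description.
var newFormatDescription: CMVideoFormatDescription?
CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, newImageBuffer, &newFormatDescription)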
I recently came across the same issue. After a bit of investigation, the CMVideoFormatDescriptionMatchesImageBuffer() function documentation gave a bit of insight.
This function uses the keys returned by CMVideoFormatDescriptionGetExtensionKeysCommonWithImageBuffers to compare the extensions of the given format description to the attachments of the given image buffer (if an attachment is absent in either, it must be absent in both). It also checks kCMFormatDescriptionExtension_BytesPerRow against CVPixelBufferGetBytesPerRow, if applicable.
In my case, I didn't copy over some of the format description extensions as CVBuffer attachments of the copied pixel buffer. Running this bit of code after creating the new CVPixelBufferRef resolved the issue for me (Objective-C, but shouldn't be hard to convert to Swift):
NSSet *commonKeys = [NSSet setWithArray:(NSArray *)CMVideoFormatDescriptionGetExtensionKeysCommonWithImageBuffers()];
NSDictionary *attachments = (NSDictionary *)CVBufferGetAttachments(originalPixelBuffer, kCVAttachmentMode_ShouldPropagate);
[attachments enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop)
{
    if ([commonKeys containsObject:key])
    {
        CVBufferSetAttachment(pixelBufferCopy, (__bridge CFStringRef)(key), (__bridge CFTypeRef)(obj), kCVAttachmentMode_ShouldPropagate);
    }
}];
attachments = (NSDictionary *)CVBufferGetAttachments(originalPixelBuffer, kCVAttachmentMode_ShouldNotPropagate);
[attachments enumerateKeysAndObjectsUsingBlock:^(id key, id obj, BOOL *stop)
{
    if ([commonKeys containsObject:key])
    {
        CVBufferSetAttachment(pixelBufferCopy, (__bridge CFStringRef)(key), (__bridge CFTypeRef)(obj), kCVAttachmentMode_ShouldNotPropagate);
    }
}];
Here is the Swift version of Raymanman's answer:
let commonKeys = NSSet(array: CMVideoFormatDescriptionGetExtensionKeysCommonWithImageBuffers() as! [Any])
let propagatedAttachments = NSDictionary(dictionary: CVBufferGetAttachments(pixelBuffer, .shouldPropagate)!)
propagatedAttachments.enumerateKeysAndObjects { key, obj, stop in
    if commonKeys.contains(key) {
        CVBufferSetAttachment(outputPixelBuffer, key as! CFString, obj as AnyObject, .shouldPropagate)
    }
}
let nonPropagatedAttachments = NSDictionary(dictionary: CVBufferGetAttachments(pixelBuffer, .shouldNotPropagate)!) // note: fetched with .shouldNotPropagate
nonPropagatedAttachments.enumerateKeysAndObjects { key, obj, stop in
    if commonKeys.contains(key) {
        CVBufferSetAttachment(outputPixelBuffer, key as! CFString, obj as AnyObject, .shouldNotPropagate)
    }
}
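Afterwards you can sanity-check the copy with the same function the documentation quote above refers to (formatDescription here is the one from the question's code):
// If the attachments were copied correctly, the original format description
// should now match the copied pixel buffer.
let matches = CMVideoFormatDescriptionMatchesImageBuffer(formatDescription, outputPixelBuffer)
assert(matches, "format description still does not match the copied buffer")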

AudioFileReadBytes from a memory block, not a file

I'd like to cache CAF files before converting them to PCM whenever they play.
For example,
char *mybuffer = malloc(mysoundsize);
FILE *f = fopen("mysound.caf", "rb");
fread(mybuffer, mysoundsize, 1, f);
fclose(f);
char *pcmBuffer = malloc(pcmsoundsize);
// Convert to PCM for playing
AudioFileReadBytes(mybuffer, false, 0, mysoundsize, &numbytes, pcmBuffer);
This way, whenever the sound plays, the compressed CAF file is already loaded into memory, avoiding disk access. How can I open a block of memory with an 'AudioFileID' to make AudioFileReadBytes happy? Is there another method I can use?
I have not done it myself, but from the documentation I would think that you have to use AudioFileOpenWithCallbacks and implement callback functions that read from your memory buffer.
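A minimal sketch of that approach, assuming a read-only CAF blob already loaded into a Data value (MemoryFile and openAudioFile are hypothetical names, untested):
import AudioToolbox
import Foundation

// Wrapper so the C callbacks can reach the bytes through the client-data pointer.
final class MemoryFile {
    let data: Data
    init(data: Data) { self.data = data }
}

func openAudioFile(inMemory data: Data) -> AudioFileID? {
    // Retained for the life of the file; balance with a release when you close it.
    let context = Unmanaged.passRetained(MemoryFile(data: data)).toOpaque()
    var fileID: AudioFileID?
    let status = AudioFileOpenWithCallbacks(
        context,
        { clientData, position, requestCount, buffer, actualCount in
            // Read proc: copy the requested byte range out of the in-memory blob.
            let file = Unmanaged<MemoryFile>.fromOpaque(clientData).takeUnretainedValue()
            let available = max(0, file.data.count - Int(position))
            let count = min(Int(requestCount), available)
            file.data.withUnsafeBytes { (bytes: UnsafePointer<UInt8>) in
                memcpy(buffer, bytes + Int(position), count)
            }
            actualCount.pointee = UInt32(count)
            return noErr
        },
        nil, // write proc: not needed for read-only access
        { clientData in
            // Get-size proc: report the blob's total length.
            Int64(Unmanaged<MemoryFile>.fromOpaque(clientData).takeUnretainedValue().data.count)
        },
        nil, // set-size proc: not needed for read-only access
        kAudioFileCAFType,
        &fileID)
    return status == noErr ? fileID : nil
}
The returned AudioFileID can then be handed to AudioFileReadBytes just as if it had come from AudioFileOpenURL.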
You can also do it with AudioFileStreamOpen:
fileprivate var streamID: AudioFileStreamID?

public func parse(data: Data) throws {
    let streamID = self.streamID!
    let count = data.count
    _ = try data.withUnsafeBytes { (bytes: UnsafePointer<UInt8>) in
        let result = AudioFileStreamParseBytes(streamID, UInt32(count), bytes, [])
        guard result == noErr else {
            throw ParserError.failedToParseBytes(result)
        }
    }
}
You can store the data in memory within the packets callback:
func ParserPacketCallback(_ context: UnsafeMutableRawPointer, _ byteCount: UInt32, _ packetCount: UInt32, _ data: UnsafeRawPointer, _ packetDescriptions: Optional<UnsafeMutablePointer<AudioStreamPacketDescription>>) {
    let parser = Unmanaged<Parser>.fromOpaque(context).takeUnretainedValue()
    /// At this point we should definitely have a data format
    guard let dataFormat = parser.dataFormatD else {
        return
    }
    let format = dataFormat.streamDescription.pointee
    let bytesPerPacket = Int(format.mBytesPerPacket)
    for i in 0 ..< Int(packetCount) {
        let packetStart = i * bytesPerPacket
        let packetSize = bytesPerPacket
        let packetData = Data(bytes: data.advanced(by: packetStart), count: packetSize)
        parser.packetsX.append(packetData)
    }
}
Full code is in the GitHub repo.
