I'm trying to get live volume levels using AVCaptureDevice etc. It compiles and runs, but the values just seem to be random and I keep getting overflow errors as well.
EDIT:
Also, is it normal for the RMS range to be 0 to about 20,000?
if let audioCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio) {
    try audioCaptureDevice.lockForConfiguration()
    let audioInput = try AVCaptureDeviceInput(device: audioCaptureDevice)
    audioCaptureDevice.unlockForConfiguration()

    if captureSession.canAddInput(audioInput) {
        captureSession.addInput(audioInput)
        print("added input")
    }

    let audioOutput = AVCaptureAudioDataOutput()
    audioOutput.setSampleBufferDelegate(self, queue: GlobalUserInitiatedQueue)

    if captureSession.canAddOutput(audioOutput) {
        captureSession.addOutput(audioOutput)
        print("added output")
    }

    // Supposed to start the session off the UI queue because it takes a while
    dispatch_async(GlobalUserInitiatedQueue) {
        print("starting captureSession")
        self.captureSession.startRunning()
    }
}
...
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    // Needs to be initialized somehow, even if we take only the address
    var audioBufferList = AudioBufferList(mNumberBuffers: 1,
        mBuffers: AudioBuffer(mNumberChannels: 1, mDataByteSize: 0, mData: nil))
    // This needs to be in the method, otherwise it only runs 125 times?
    var blockBuffer: CMBlockBuffer?

    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        sampleBuffer,
        nil,
        &audioBufferList,
        sizeof(audioBufferList.dynamicType),
        nil,
        nil,
        UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
        &blockBuffer
    )

    let abl = UnsafeMutableAudioBufferListPointer(&audioBufferList)

    for buffer in abl {
        let samples = UnsafeMutableBufferPointer<Int16>(start: UnsafeMutablePointer(buffer.mData),
            count: Int(buffer.mDataByteSize) / sizeof(Int16))
        var sum: Int = 0
        for sample in samples {
            sum = sum + Int(sample * sample) // sample * sample is done in Int16 before the Int conversion, which overflows
        }
        let rms = sqrt(Double(sum) / Double(samples.count))
    }
}
Use AVCaptureAudioDataOutputSampleBufferDelegate's method
captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
and take the AVCaptureConnection from the last parameter.
Then get the AVCaptureAudioChannel objects from connection.audioChannels.
From each channel you can read the volume levels directly:
audioChannel.averagePowerLevel
audioChannel.peakHoldLevel
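A minimal sketch of that approach (using the current Swift delegate signature; these are standard AVFoundation properties and report levels in decibels, where 0 dB is full scale):

func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    // One AVCaptureAudioChannel per audio channel on this connection
    for channel in connection.audioChannels {
        print("average: \(channel.averagePowerLevel) dB, peak: \(channel.peakHoldLevel) dB")
    }
}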
Hey, I don't understand much of it, but here is a working Swift 5 version:
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    var buffer: CMBlockBuffer? = nil

    // Needs to be initialized somehow, even if we take only the address
    let convenienceBuffer = AudioBuffer(mNumberChannels: 1, mDataByteSize: 0, mData: nil)
    var audioBufferList = AudioBufferList(mNumberBuffers: 1,
                                          mBuffers: convenienceBuffer)

    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        sampleBuffer,
        bufferListSizeNeededOut: nil,
        bufferListOut: &audioBufferList,
        bufferListSize: MemoryLayout<AudioBufferList>.size(ofValue: audioBufferList),
        blockBufferAllocator: nil,
        blockBufferMemoryAllocator: nil,
        flags: UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
        blockBufferOut: &buffer
    )

    let abl = UnsafeMutableAudioBufferListPointer(&audioBufferList)

    for buffer in abl {
        let originRawPtr = buffer.mData
        let ptrDataSize = Int(buffer.mDataByteSize)

        // From raw pointer to typed Int16 pointer
        let buffPtrInt16 = originRawPtr?.bindMemory(to: Int16.self, capacity: ptrDataSize)

        // Wrap it in a buffer pointer so we can iterate over it simply.
        // Note: divide by the byte size of Int16 (2), not Int16.bitWidth (16).
        let sampleCount = ptrDataSize / MemoryLayout<Int16>.size
        let samples = UnsafeMutableBufferPointer<Int16>(start: buffPtrInt16,
                                                        count: sampleCount)

        // Root mean square: average of each sample squared, then square root
        let sumOfSquaredSamples = samples.map(Float.init).reduce(0) { $0 + $1 * $1 }
        let rms = sqrt(sumOfSquaredSamples / Float(samples.count))

        DispatchQueue.main.async {
            print("RMS: \(rms)")
        }
    }
}
It appears I have it working. I cast each sample to an Int64 before doing any manipulations.
for buffer in abl {
    let samples = UnsafeMutableBufferPointer<Int16>(start: UnsafeMutablePointer(buffer.mData),
        count: Int(buffer.mDataByteSize) / sizeof(Int16))
    var sum: Int64 = 0
    for sample in samples {
        let s = Int64(sample)
        sum += s * s
    }
    dispatch_async(dispatch_get_main_queue()) {
        self.volLevel.text = String(sqrt(Float(sum / Int64(samples.count))))
    }
}
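As for the range mentioned in the EDIT: with 16-bit samples the RMS can run from 0 up to 32767, so readings around 20,000 are plausible for loud input. If a more familiar scale is wanted, here is a small sketch (my own addition, not from the answers above) that maps the RMS onto decibels relative to full scale:

import Foundation

// Sketch only: map an Int16 RMS value (0...32767) onto dBFS (0 dB = full scale).
func decibels(fromRMS rms: Float) -> Float {
    guard rms > 0 else { return -160 }          // treat silence as a floor value
    return 20 * log10(rms / Float(Int16.max))   // e.g. an RMS of about 20,000 is roughly -4.3 dBFS
}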
I've played with your example. This is a full working Swift 2 code snippet:
// Also define a variable in class scope, otherwise captureOutput will not be called
var session: AVCaptureSession!

func startCapture() {
    if let device: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio) {
        do {
            self.session = AVCaptureSession()

            try device.lockForConfiguration()
            let audioInput = try AVCaptureDeviceInput(device: device)
            device.unlockForConfiguration()

            if self.session.canAddInput(audioInput) {
                self.session.addInput(audioInput)
                print("added input")
            }

            let audioOutput = AVCaptureAudioDataOutput()
            audioOutput.setSampleBufferDelegate(self, queue: dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))

            if self.session.canAddOutput(audioOutput) {
                self.session.addOutput(audioOutput)
                print("added output")
            }

            // Supposed to start the session off the UI queue because it takes a while
            dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)) {
                print("starting captureSession")
                self.session.startRunning()
            }
        } catch {
        }
    }
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    var buffer: CMBlockBuffer? = nil

    // Needs to be initialized somehow, even if we take only the address
    var audioBufferList = AudioBufferList(mNumberBuffers: 1,
        mBuffers: AudioBuffer(mNumberChannels: 1, mDataByteSize: 0, mData: nil))

    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        sampleBuffer,
        nil,
        &audioBufferList,
        sizeof(audioBufferList.dynamicType),
        nil,
        nil,
        UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
        &buffer
    )

    let abl = UnsafeMutableAudioBufferListPointer(&audioBufferList)

    for buffer in abl {
        let samples = UnsafeMutableBufferPointer<Int16>(start: UnsafeMutablePointer(buffer.mData),
            count: Int(buffer.mDataByteSize) / sizeof(Int16))
        var sum: Int64 = 0
        for sample in samples {
            let s = Int64(sample)
            sum = sum + s * s
        }
        dispatch_async(dispatch_get_main_queue()) {
            print(String(sqrt(Float(sum / Int64(samples.count)))))
        }
    }
}
I am trying to develop an image segmentation app and process the live camera view with my Core ML model. However, I see some slowness in the output: the camera view with the masked prediction lags. Below is my vision manager class, which runs the prediction on the pixel buffer, and the function that calls this class to convert the result to colors before proceeding to the camera output. Has anyone faced this issue before? Do you see an error in my code that would cause the slowness?
Vision Manager Class:
class VisionManager: NSObject {
    static let shared = VisionManager()
    static let MODEL = ba_224_segm().model

    private lazy var predictionRequest: VNCoreMLRequest = {
        do {
            let model = try VNCoreMLModel(for: VisionManager.MODEL)
            let request = VNCoreMLRequest(model: model)
            request.imageCropAndScaleOption = VNImageCropAndScaleOption.centerCrop
            return request
        } catch {
            fatalError("can't load Vision ML Model")
        }
    }()

    func predict(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onResult: ((_ observations: [VNCoreMLFeatureValueObservation]) -> Void)) {
        var requestOptions: [VNImageOption: Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil) {
            requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
        }

        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: requestOptions)
        do {
            try handler.perform([predictionRequest])
        } catch {
            print("error handler")
        }

        guard let observations = predictionRequest.results as? [VNCoreMLFeatureValueObservation] else {
            fatalError("unexpected result type from VNCoreMLRequest")
        }
        onResult(observations)
    }
}
Predicted Camera Output function:
func handleCameraOutput(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onFinish: @escaping ((_ image: UIImage?) -> Void)) {
    VisionManager.shared.predict(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer) { [weak self] (observations) in
        if let multiArray: MLMultiArray = observations[0].featureValue.multiArrayValue {
            mask = maskEdit.maskToRGBA(maskArray: MultiArray<Float32>(multiArray), rgba: (Float(r), Float(g), Float(b), Float(a)))!
            maskInverted = maskEdit.maskToRGBAInvert(maskArray: MultiArray<Float32>(multiArray), rgba: (r: 1.0, g: 1.0, b: 1.0, a: 0.4))!
            let image = maskEdit.mergeMaskAndBackground(invertedMask: maskInverted, mask: mask, background: pixelBuffer, size: Int(size))
            DispatchQueue.main.async {
                onFinish(image)
            }
        }
    }
}
I call these models under viewDidAppear as below:
CameraManager.shared.setDidOutputHandler { [weak self] (output, pixelBuffer, sampleBuffer, connection) in
    self!.maskColor.getRed(&self!.r, green: &self!.g, blue: &self!.b, alpha: &self!.a)
    self!.a = 0.5
    self?.handleCameraOutput(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer, onFinish: { (image) in
        self?.predictionView.image = image
    })
}
It takes time for your model to perform the segmentation, and then it takes time to convert the output into an image. There is not much you can do to make this delay shorter, except for making the model smaller and making sure the output -> image conversion code is as fast as possible.
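If it is unclear which of the two stages dominates, a rough timing sketch can show where the milliseconds go (the commented-out calls stand in for the question's own predict and mask-merge code):

// Rough sketch: time the two stages separately to see which one dominates.
let t0 = CFAbsoluteTimeGetCurrent()
// try handler.perform([predictionRequest])             // Vision / Core ML inference
let t1 = CFAbsoluteTimeGetCurrent()
// let image = maskEdit.mergeMaskAndBackground(...)     // mask -> UIImage conversion
let t2 = CFAbsoluteTimeGetCurrent()
print("inference: \((t1 - t0) * 1000) ms, conversion: \((t2 - t1) * 1000) ms")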
I found out my issue: I wasn't using a different thread. Since I am a new developer I didn't know such details and am still learning, thanks to the experts in the field and their shared knowledge. Please see my old and new captureOutput functions below; using a different thread solved my problem.
old status:
public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        else { return }
    self.handler?(output, pixelBuffer, sampleBuffer, connection)
    self.onCapture?(pixelBuffer, sampleBuffer)
    self.onCapture = nil
}
and new status:
public func captureOutput(_ output: AVCaptureOutput,
                          didOutput sampleBuffer: CMSampleBuffer,
                          from connection: AVCaptureConnection) {
    if currentBuffer == nil {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        currentBuffer = pixelBuffer
        DispatchQueue.global(qos: .userInitiated).async {
            self.handler?(output, self.currentBuffer!, sampleBuffer, connection)
            self.currentBuffer = nil
        }
    }
}
I am trying to read the frequencies values from a CMSampleBuffer returned by captureOutput of AVCaptureAudioDataOutputSampleBufferDelegate.
The idea is to create an AVAudioPCMBuffer so that I can then read its floatChannelData. But I am not sure how to pass the buffer to it.
I guess I could create it with:
public func captureOutput(_ output: AVCaptureOutput,
                          didOutput sampleBuffer: CMSampleBuffer,
                          from connection: AVCaptureConnection) {
    guard let blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else {
        return
    }
    let length = CMBlockBufferGetDataLength(blockBuffer)

    let audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
    let pcmBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: AVAudioFrameCount(length))
    pcmBuffer?.frameLength = pcmBuffer!.frameCapacity
}
But how could I fill its data?
Something along these lines should help:
var asbd = CMSampleBufferGetFormatDescription(sampleBuffer)!.audioStreamBasicDescription!
var audioBufferList = AudioBufferList()
var blockBuffer: CMBlockBuffer?

CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer,
    bufferListSizeNeededOut: nil,
    bufferListOut: &audioBufferList,
    bufferListSize: MemoryLayout<AudioBufferList>.size,
    blockBufferAllocator: nil,
    blockBufferMemoryAllocator: nil,
    flags: 0,
    blockBufferOut: &blockBuffer
)

let mBuffers = audioBufferList.mBuffers
let frameLength = AVAudioFrameCount(Int(mBuffers.mDataByteSize) / MemoryLayout<Float>.size)
let pcmBuffer = AVAudioPCMBuffer(pcmFormat: AVAudioFormat(streamDescription: &asbd)!, frameCapacity: frameLength)!
pcmBuffer.frameLength = frameLength
pcmBuffer.mutableAudioBufferList.pointee.mBuffers = mBuffers
pcmBuffer.mutableAudioBufferList.pointee.mNumberBuffers = 1
This seems to create a valid AVAudioPCMBuffer at the end of it inside a capture session, but it's at the wrong frame length for my use case right now, so I need to do some further buffering.
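From there, reading the samples depends on the underlying format: floatChannelData is only non-nil when the stream really is float PCM (the capture output may deliver 16-bit integer PCM instead), so a hedged sketch would check both it and int16ChannelData:

// Sketch: read samples out of the pcmBuffer built above.
if let floatData = pcmBuffer.floatChannelData {
    let samples = UnsafeMutableBufferPointer(start: floatData[0], count: Int(pcmBuffer.frameLength))
    print("first float samples:", Array(samples.prefix(4)))
} else if let intData = pcmBuffer.int16ChannelData {
    let samples = UnsafeMutableBufferPointer(start: intData[0], count: Int(pcmBuffer.frameLength))
    print("first Int16 samples:", Array(samples.prefix(4)))
}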
I am trying to convert the frames that I'm getting from the AVCaptureVideoDataOutput delegate (as CMSampleBuffer) to UIImage. However, I'm getting a fatal error: unexpectedly found nil while unwrapping an Optional value. Can someone tell me what is wrong with my code? I am assuming that there is something wrong with my sampleBufferToUIImage function.
Function to convert CMSampleBuffer to UIImage:
func sampleBufferToUIImage(sampleBuffer: CMSampleBuffer) -> UIImage {
    let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    CVPixelBufferLockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))

    let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer!)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
    let width = CVPixelBufferGetWidth(imageBuffer!)
    let height = CVPixelBufferGetHeight(imageBuffer!)

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
    let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)

    // *********Getting the error from this line***********
    let quartzImage = context!.makeImage()
    CVPixelBufferUnlockBaseAddress(imageBuffer!, CVPixelBufferLockFlags(rawValue: 0))

    let image = UIImage(cgImage: quartzImage!)
    return image
}
Delegate where I'm reading the frames:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    if count <= 0 {
        // Calling my function to convert to UIImage.
        let image = sampleBufferToUIImage(sampleBuffer: sampleBuffer)
        let imageData = UIImagePNGRepresentation(image)
        uploadImage(jpgData: imageData)
    }
    count = count + 1
}
Setting up AVSession:
func setupCameraSession() {
    captureSession.sessionPreset = AVCaptureSessionPresetHigh

    // Declare AVCaptureDevice to default (back camera). The "as" removes the optional.
    let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo) as AVCaptureDevice

    do {
        let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
        if captureSession.canAddInput(deviceInput) == true {
            captureSession.addInput(deviceInput)
        }

        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange as UInt32)]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) == true {
            captureSession.addOutput(dataOutput)
        }
    } catch {
    }
}
Try this; it works for me on Swift 3:
// Sample buffer handling delegate function
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    let myPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    myCIimage = CIImage(cvPixelBuffer: myPixelBuffer!)
    videoImage = UIImage(ciImage: myCIimage)
    uIimage.image = videoImage
}

// AV session
func startVideoDisplay() {
    do {
        let tryDeviceInput = try AVCaptureDeviceInput(device: cameraDevice)
        cameraCaptureSession.addInput(tryDeviceInput)
    } catch { print(error.localizedDescription) }

    caViewLayer = AVCaptureVideoPreviewLayer(session: cameraCaptureSession)
    view.layer.addSublayer(caViewLayer)

    cameraCaptureSession.startRunning()

    let myQueue = DispatchQueue(label: "se.paredes.FunAV", qos: .userInteractive, attributes: .concurrent)

    let theOutput = AVCaptureVideoDataOutput()
    theOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
    theOutput.alwaysDiscardsLateVideoFrames = true
    theOutput.setSampleBufferDelegate(self, queue: myQueue)

    if cameraCaptureSession.canAddOutput(theOutput) {
        cameraCaptureSession.addOutput(theOutput)
    }
    cameraCaptureSession.commitConfiguration()
}
AVCaptureVideoDataOutput's video setting was incorrect: change kCVPixelFormatType_420YpCbCr8BiPlanarFullRange to kCVPixelFormatType_32BGRA.
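In other words, the videoSettings line in setupCameraSession should look roughly like this (mirroring the working answer above):

dataOutput.videoSettings = [
    (kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)
]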
I have been googling and researching for days but I can't seem to get this to work and I can't find any solution to it on the internet.
I am trying to capture my voice using the microphone and then playing it through the speakers.
Here is my code:
class ViewController: UIViewController, AVAudioRecorderDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    var recordingSession: AVAudioSession!
    var audioRecorder: AVAudioRecorder!
    var captureSession: AVCaptureSession!
    var microphone: AVCaptureDevice!
    var inputDevice: AVCaptureDeviceInput!
    var outputDevice: AVCaptureAudioDataOutput!

    override func viewDidLoad() {
        super.viewDidLoad()

        recordingSession = AVAudioSession.sharedInstance()
        do {
            try recordingSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
            try recordingSession.setMode(AVAudioSessionModeVoiceChat)
            try recordingSession.setPreferredSampleRate(44000.00)
            try recordingSession.setPreferredIOBufferDuration(0.2)
            try recordingSession.setActive(true)

            recordingSession.requestRecordPermission() { [unowned self] (allowed: Bool) -> Void in
                DispatchQueue.main.async {
                    if allowed {
                        do {
                            self.microphone = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
                            try self.inputDevice = AVCaptureDeviceInput.init(device: self.microphone)

                            self.outputDevice = AVCaptureAudioDataOutput()
                            self.outputDevice.setSampleBufferDelegate(self, queue: DispatchQueue.main)

                            self.captureSession = AVCaptureSession()
                            self.captureSession.addInput(self.inputDevice)
                            self.captureSession.addOutput(self.outputDevice)
                            self.captureSession.startRunning()
                        } catch let error {
                            print(error.localizedDescription)
                        }
                    }
                }
            }
        } catch let error {
            print(error.localizedDescription)
        }
    }
And the callback function:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    var audioBufferList = AudioBufferList(
        mNumberBuffers: 1,
        mBuffers: AudioBuffer(mNumberChannels: 0,
                              mDataByteSize: 0,
                              mData: nil)
    )
    var blockBuffer: CMBlockBuffer?

    let osStatus = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        sampleBuffer,
        nil,
        &audioBufferList,
        MemoryLayout<AudioBufferList>.size,
        nil,
        nil,
        UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
        &blockBuffer
    )

    do {
        let data = NSMutableData()
        for i in 0..<audioBufferList.mNumberBuffers {
            let audioBuffer = AudioBuffer(
                mNumberChannels: audioBufferList.mBuffers.mNumberChannels,
                mDataByteSize: audioBufferList.mBuffers.mDataByteSize,
                mData: audioBufferList.mBuffers.mData
            )
            let frame = audioBuffer.mData?.load(as: Float32.self)
            data.append(audioBuffer.mData!, length: Int(audioBuffer.mDataByteSize))
        }

        let dataFromNsData = Data(referencing: data)
        let avAudioPlayer: AVAudioPlayer = try AVAudioPlayer(data: dataFromNsData)
        avAudioPlayer.prepareToPlay()
        avAudioPlayer.play()
    } catch let error {
        print(error.localizedDescription)
        // prints out: The operation couldn’t be completed. (OSStatus error 1954115647.)
    }
}
Any help with this would be amazing, and it would probably help a lot of other people as well, since lots of incomplete Swift versions of this are out there.
Thank you.
You were very close! You were capturing audio in the didOutputSampleBuffer callback, but that's a high-frequency callback, so you were creating a lot of AVAudioPlayers and passing them raw LPCM data, while they only know how to parse Core Audio file types, and then they were going out of scope anyway.
You can very easily play the buffers you're capturing with AVCaptureSession using AVAudioEngine's AVAudioPlayerNode, but at that point you may as well use AVAudioEngine to record from the microphone too:
import UIKit
import AVFoundation

class ViewController: UIViewController {
    var engine = AVAudioEngine()

    override func viewDidLoad() {
        super.viewDidLoad()

        let input = engine.inputNode!
        let player = AVAudioPlayerNode()
        engine.attach(player)

        let bus = 0
        let inputFormat = input.inputFormat(forBus: bus)
        engine.connect(player, to: engine.mainMixerNode, format: inputFormat)

        input.installTap(onBus: bus, bufferSize: 512, format: inputFormat) { (buffer, time) -> Void in
            player.scheduleBuffer(buffer)
        }

        try! engine.start()
        player.play()
    }
}
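One assumption worth stating: on iOS the engine's input node only delivers audio when the app has record permission and a record-capable audio session, so something like the session setup from the question is still needed before engine.start():

// Hedged reminder: keep (a subset of) the AVAudioSession setup from the question.
let session = AVAudioSession.sharedInstance()
try? session.setCategory(AVAudioSessionCategoryPlayAndRecord)
try? session.setActive(true)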
I was wondering how exactly I can implement the Core Media method CMSampleBufferGetAudioBufferList in swift.
I'm following this tutorial, which uses the method to get a list of AudioBuffers from a CMSampleBuffer.
I've tried over and over, but the compiler keeps giving me the generic
Cannot invoke CMSampleBuffer...Buffer with an argument list of type ...
error, which isn't very helpful.
I've already seen this Stack Overflow question, but the only answer there seems to throw the exact same error I've been getting.
Basically, I just want someone to show me how to get this method to compile without errors in Swift.
I assume you have access to a sampleBuffer in a delegate method such as captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
I've already struggled with the CMSampleBuffer methods, and I agree it is not obvious how to make them compile.
var sizeOut = UnsafeMutablePointer<Int>.alloc(1)
var listOut = UnsafeMutablePointer<AudioBufferList>.alloc(1)
let listSize: Int = 10
var blockBufferOut: Unmanaged<CMBlockBuffer>?

CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer,
    sizeOut,
    listOut,
    listSize,
    kCFAllocatorDefault,
    kCFAllocatorDefault,
    UInt32(2),
    &blockBufferOut
)
You will then need to call takeRetainedValue() on the block buffer out-parameter, and handle your pointers to release them manually.
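Roughly like this, using the same Swift 1.x/2-era API and the variable names from the snippet above (a sketch, not verified against every SDK version):

let retainedBlockBuffer = blockBufferOut?.takeRetainedValue()
let bufferList = listOut.memory        // the filled-in AudioBufferList
print(bufferList.mNumberBuffers)

// Clean up the manually allocated pointers when done with them.
listOut.dealloc(1)
sizeOut.dealloc(1)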
It works for me. Try it:
let musicUrl: NSURL = mediaItemCollection.items[0].valueForProperty(MPMediaItemPropertyAssetURL) as! NSURL
let asset: AVURLAsset = AVURLAsset(URL: musicUrl, options: nil)
let assetOutput = AVAssetReaderTrackOutput(track: asset.tracks[0] as! AVAssetTrack, outputSettings: nil)

var error: NSError?
let assetReader: AVAssetReader = AVAssetReader(asset: asset, error: &error)
if error != nil {
    print("Error asset Reader: \(error?.localizedDescription)")
}

assetReader.addOutput(assetOutput)
assetReader.startReading()

let sampleBuffer: CMSampleBufferRef = assetOutput.copyNextSampleBuffer()

var audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: AudioBuffer(mNumberChannels: 0, mDataByteSize: 0, mData: nil))
var blockBuffer: Unmanaged<CMBlockBuffer>? = nil

CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer,
    nil,
    &audioBufferList,
    sizeof(audioBufferList.dynamicType), // instead of UInt(sizeof(audioBufferList.dynamicType))
    nil,
    nil,
    UInt32(kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment),
    &blockBuffer
)