Having trouble with the input image for an iOS Swift TensorFlowLite image classification model?

I've been trying to add a plant recognition classifier to my app through a Firebase cloud-hosted ML model, and I've gotten close - problem is, I'm pretty sure I'm messing up the input image data somewhere along the way. The classifier is churning out nonsense probabilities/results, while the same model run through a Python script gives me accurate results.
The model requires a 224x224 image with 3 channels, scaled to [0, 1]. I've done all this, but I can't seem to get the CGImage conversion right coming from the Camera/ImagePicker. Here is the part of the code that processes the input image:
if let imageData = info[.originalImage] as? UIImage {
DispatchQueue.main.async {
let resizedImage = imageData.scaledImage(with: CGSize(width:224, height:224))
let ciImage = CIImage(image: resizedImage!)
let CGcontext = CIContext(options: nil)
let image : CGImage = CGcontext.createCGImage(ciImage!, from: ciImage!.extent)!
guard let context = CGContext(
data: nil,
width: image.width, height: image.height,
bitsPerComponent: 8, bytesPerRow: image.width * 4,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue
) else {
return
}
context.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
guard let imageData = context.data else { return }
print("Image data showing as: \(imageData)")
var inputData = Data()
do {
for row in 0 ..< 224 {
for col in 0 ..< 224 {
let offset = 4 * (row * context.width + col)
// (Ignore offset 0, the unused alpha channel)
let red = imageData.load(fromByteOffset: offset+1, as: UInt8.self)
let green = imageData.load(fromByteOffset: offset+2, as: UInt8.self)
let blue = imageData.load(fromByteOffset: offset+3, as: UInt8.self)
// Normalize channel values to [0.0, 1.0].
var normalizedRed = Float32(red) / 255.0
var normalizedGreen = Float32(green) / 255.0
var normalizedBlue = Float32(blue) / 255.0
// Append normalized values to Data object in RGB order.
let elementSize = MemoryLayout.size(ofValue: normalizedRed)
var bytes = [UInt8](repeating: 0, count: elementSize)
memcpy(&bytes, &normalizedRed, elementSize)
inputData.append(&bytes, count: elementSize)
memcpy(&bytes, &normalizedGreen, elementSize)
inputData.append(&bytes, count: elementSize)
memcpy(&bytes, &normalizedBlue, elementSize)
inputData.append(&bytes, count: elementSize)
}
}
print("Successfully added inputData")
self.parent.invokeInterpreter(inputData: inputData)
} catch let error {
print("Failed to add input: \(error)")
}
}
}
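(Side note on the byte packing above: a more compact equivalent is withUnsafeBytes(of:); this is just a sketch, assuming the interpreter expects raw little-endian Float32 values, which is the host byte order on iOS.)
// Sketch: append a Float32's raw bytes to Data without the memcpy/array dance.
func appendFloat(_ value: Float32, to data: inout Data) {
    withUnsafeBytes(of: value) { data.append(contentsOf: $0) }
}
// e.g. appendFloat(normalizedRed, to: &inputData), then green, then blue.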
Afterwards, I process the inputData with the following:
func invokeInterpreter(inputData: Data) {
do {
var interpreter = try Interpreter(modelPath: ProfileUserData.sharedUserData.modelPath)
var labels: [String] = []
try interpreter.allocateTensors()
try interpreter.copy(inputData, toInputAt: 0)
try interpreter.invoke()
let output = try interpreter.output(at: 0)
switch output.dataType {
case .uInt8:
guard let quantization = output.quantizationParameters else {
print("No results returned because the quantization values for the output tensor are nil.")
return
}
let quantizedResults = [UInt8](output.data)
let results = quantizedResults.map {
quantization.scale * Float(Int($0) - quantization.zeroPoint)
}
let sum = results.reduce(0, +)
print("Sum of all dequantized results is: \(sum)")
print("Count of dequantized results is: \(results.indices.count)")
let filename = "plantLabels"
let fileExtension = "csv"
guard let labelPath = Bundle.main.url(forResource: filename, withExtension: fileExtension) else {
print("Labels file not found in bundle. Please check labels file.")
return
}
do {
let contents = try String(contentsOf: labelPath, encoding: .utf8)
labels = contents.components(separatedBy: .newlines)
print("Count of label rows is: \(labels.indices.count)")
} catch {
fatalError("Labels file named \(filename).\(fileExtension) cannot be read. Please add a " +
"valid labels file and try again.")
}
let zippedResults = zip(labels.indices, results)
// Sort the zipped results by confidence value in descending order.
let sortedResults = zippedResults.sorted { $0.1 > $1.1 }.prefix(3)
print("Printing sortedResults: \(sortedResults)")
case .float32:
print("Output tensor data type [Float32] is unsupported for this model.")
default:
print("Output tensor data type \(output.dataType) is unsupported for this model.")
return
}
} catch {
//Error with interpreter
print("Error with running interpreter: \(error.localizedDescription)")
}
}
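One debugging step that often helps here (a sketch, not part of the original post): after allocateTensors(), print the input tensor's shape, data type, and byte count so you can confirm the interpreter really expects float 1x224x224x3 data rather than, say, a quantized UInt8 input.
// Sketch: inspect what the model actually expects before calling copy(_:toInputAt:).
// Assumes the same `interpreter` instance as in invokeInterpreter above,
// and that allocateTensors() has already been called.
let inputTensor = try interpreter.input(at: 0)
print("Input shape: \(inputTensor.shape), type: \(inputTensor.dataType), bytes: \(inputTensor.data.count)")
// For a float 1x224x224x3 model this should be 224 * 224 * 3 * 4 = 602112 bytes;
// if the byte count or dataType differs (e.g. .uInt8), the Data built above won't match.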

Related

Tensorflow Interpreter throwing error for "data count" iOS

I am using TensorFlowLiteSwift, and the model I'm working with is responsible for flattening an image that has been cropped to a trapezoidal shape.
Now, TensorFlow does not provide much documentation, so I have been trying to implement things from their example projects.
But here is the catch: it throws an error saying "Provided data count must match the required count", and the required count is 4. I backtracked the byteCount in Interpreter.swift but could not find the actual setter.
So, is the .tflite model responsible for the "required count"? And if not, how does it get set?
Here is a chunk of code I think would help understanding my problem:
/// Performs image preprocessing, invokes the `Interpreter`, and processes the inference results.
func runModel(on item: ImageProcessInfo) -> UIImage? {
let rgbData = item.resizedImage.scaledData(with: CGSize(width: 1000, height: 900),
byteCount: inputWidth * inputHeight
* batchSize,
isQuantized: false)
var corner = item.corners.map { $0.map { p -> (Float, Float) in
return (Float(p.x), Float(p.y))
} }
var item = item
guard let height = NSMutableData(capacity: 0) else { return nil }
height.append(&item.originalHeight, length: 4)
guard let width = NSMutableData(capacity: 0) else { return nil }
width.append(&item.originalWidth, length: 4)
guard let corners = NSMutableData(capacity: 0) else { return nil }
corners.append(&corner, length: 4)
do {
try interpreter.copy(rgbData!, toInputAt: 0)
try interpreter.copy(height as Data, toInputAt: 1)
try interpreter.copy(width as Data, toInputAt: 2)
try interpreter.copy(corners as Data, toInputAt: 3)
try interpreter.invoke()
let outputTensor1 = try self.interpreter.output(at: 0)
guard let cgImage = postprocessImageData(data: outputTensor1.data, size: CGSize(width: 1000, height: 900)) else {
return nil
}
let outputImage = UIImage(cgImage: cgImage)
return outputImage
} catch {
dump(error)
return nil
}
}
extension UIImage {
func scaledData(with size: CGSize, byteCount: Int, isQuantized: Bool) -> Data? {
guard let cgImage = self.cgImage, cgImage.width > 0, cgImage.height > 0 else { return nil }
guard let imageData = imageData(from: cgImage, with: size) else { return nil }
var scaledBytes = [UInt8](repeating: 0, count: byteCount)
var index = 0
for component in imageData.enumerated() {
let offset = component.offset
let isAlphaComponent = (offset % 4)
== 3
guard !isAlphaComponent else { continue }
scaledBytes[index] = component.element
index += 1
}
if isQuantized { return Data(scaledBytes) }
let scaledFloats = scaledBytes.map { (Float32($0) - 127.5) / 127.5 }
return Data(copyingBufferOf: scaledFloats)
}
private func imageData(from cgImage: CGImage, with size: CGSize) -> Data? {
let bitmapInfo = CGBitmapInfo(
rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue
)
let width = Int(size.width)
let scaledBytesPerRow = (cgImage.bytesPerRow / cgImage.width) * width
guard let context = CGContext(
data: nil,
width: width,
height: Int(size.height),
bitsPerComponent: cgImage.bitsPerComponent,
bytesPerRow: scaledBytesPerRow,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: bitmapInfo.rawValue)
else {
return nil
}
context.draw(cgImage, in: CGRect(origin: .zero, size: size))
return context.makeImage()?.dataProvider?.data as Data?
}
}
#discardableResult
public func copy(_ data: Data, toInputAt index: Int) throws -> Tensor {
let maxIndex = inputTensorCount - 1
guard case 0...maxIndex = index else {
throw InterpreterError.invalidTensorIndex(index: index, maxIndex: maxIndex)
}
guard let cTensor = TfLiteInterpreterGetInputTensor(cInterpreter, Int32(index)) else {
throw InterpreterError.allocateTensorsRequired
}
/* Error here */
let byteCount = TfLiteTensorByteSize(cTensor)
guard data.count == byteCount else {
throw InterpreterError.invalidTensorDataCount(provided: data.count, required: byteCount)
}
#if swift(>=5.0)
let status = data.withUnsafeBytes {
TfLiteTensorCopyFromBuffer(cTensor, $0.baseAddress, data.count)
}
#else
let status = data.withUnsafeBytes { TfLiteTensorCopyFromBuffer(cTensor, $0, data.count) }
#endif // swift(>=5.0)
guard status == kTfLiteOk else { throw InterpreterError.failedToCopyDataToInputTensor }
return try input(at: index)
}
What are the input shapes? Can you identify which one is complaining about the size?
At first glance, corners.append(&corner, length: 4) seems weird - does corners contain only 1 Float (byte size 4)?
The byteCount for a tensor is filled in by the underlying C API; it simply returns tensor->bytes for the underlying TfLiteTensor struct, which is populated when the model is loaded.
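To illustrate the byte-count point (a sketch only, assuming the height/width inputs are single Float32 scalars and the corners input is a flat list of Float32 coordinates - only the model's input shapes can confirm that; `item` and `corner` are the values from the question's code):
// Sketch: pack scalar and array inputs so data.count matches TfLiteTensorByteSize.
let heightValue = Float32(item.originalHeight)   // hypothetical conversion
let widthValue = Float32(item.originalWidth)
let flatCorners: [Float32] = corner.flatMap { $0 }.flatMap { [$0.0, $0.1] }

let heightData = withUnsafeBytes(of: heightValue) { Data($0) }                 // 4 bytes
let widthData = withUnsafeBytes(of: widthValue) { Data($0) }                   // 4 bytes
let cornersData = flatCorners.withUnsafeBufferPointer { Data(buffer: $0) }     // 4 bytes per coordinate

// Compare against what the interpreter reports before copying, e.g.:
// print(try interpreter.input(at: 3).data.count, cornersData.count)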

Issue applying a Shader after a MPSImageLanczosScale on Apple Metal

I'm getting a weird result when I apply a shader to an MTLTexture after applying an MPSImageLanczosScale, even when the transform has scale = 1, translationX = 0, and translationY = 0.
It works fine if I don't apply the MPSImageLanczosScale; below you can see the result without and with the MPSImageLanczosScale applied.
My render method looks like this:
func filter(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
guard let commandQueue = commandQueue, var commandBuffer = commandQueue.makeCommandBuffer() else {
print("Failed to create Metal command queue")
CVMetalTextureCacheFlush(textureCache!, 0)
return nil
}
var newPixelBuffer: CVPixelBuffer?
CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &newPixelBuffer)
guard var outputPixelBuffer = newPixelBuffer else {
print("Allocation failure: Could not get pixel buffer from pool (\(self.description))")
return nil
}
guard let inputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: pixelBuffer, textureFormat: .bgra8Unorm) else {
return nil
}
guard var intermediateTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer, textureFormat: .bgra8Unorm) else {
return nil
}
let imageLanczosScale = MPSImageLanczosScale(device: metalDevice)
let transform = MPSScaleTransform(scaleX: Double(scale), scaleY: Double(scale), translateX: Double(translationX), translateY: Double(translationY))
withUnsafePointer(to: &transform) { (transformPtr: UnsafePointer<MPSScaleTransform>) -> () in
imageLanczosScale.scaleTransform = transformPtr
}
imageLanczosScale.encode(commandBuffer: commandBuffer, sourceTexture: inputTexture, destinationTexture: outputTexture)
guard let commandEncoder = commandBuffer.makeComputeCommandEncoder(),
let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer, textureFormat: .bgra8Unorm) else { return nil }
commandEncoder.label = "Shader"
commandEncoder.setComputePipelineState(shaderPipline)
commandEncoder.setTexture(intermediateTexture, index: 1)
commandEncoder.setTexture(outputTexture, index: 0)
let w = shaderPipline.threadExecutionWidth
let h = shaderPipline.maxTotalThreadsPerThreadgroup / w
let threadsPerThreadgroup = MTLSizeMake(w, h, 1)
let threadgroupsPerGrid = MTLSize(width: (intermediateTexture.width + w - 1) / w, height: (intermediateTexture.height + h - 1) / h, depth: 1)
commandEncoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
commandEncoder.endEncoding()
commandBuffer.commit()
return outputPixelBuffer
}
No idea what I'm doing wrong. Any ideas?
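One thing that stands out in the snippet (an observation plus a sketch, not an answer from the thread): the Lanczos pass is encoded into outputTexture before that texture is created, and the shader then samples intermediateTexture, which nothing has written into. Assuming the intent is to scale first and then shade, the two passes would normally be chained roughly like this, with the intermediate texture backed by its own storage rather than the same output pixel buffer:
// Sketch only - ordering of the two passes, assuming `intermediateTexture`
// has its own backing storage (e.g. a plain MTLTexture, not outputPixelBuffer).
imageLanczosScale.encode(commandBuffer: commandBuffer,
                         sourceTexture: inputTexture,
                         destinationTexture: intermediateTexture)

guard let commandEncoder = commandBuffer.makeComputeCommandEncoder(),
      let outputTexture = makeTextureFromCVPixelBuffer(pixelBuffer: outputPixelBuffer,
                                                       textureFormat: .bgra8Unorm) else { return nil }
commandEncoder.setComputePipelineState(shaderPipline)
commandEncoder.setTexture(intermediateTexture, index: 1) // shader reads the scaled image
commandEncoder.setTexture(outputTexture, index: 0)       // shader writes the final image
// ... dispatch, endEncoding, and commit as in the original code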

Copy a CVPixelBuffer on any iOS device

I'm having a great deal of difficulty coming up with code that reliably copies a CVPixelBuffer on any iOS device. My first attempt worked fine until I tried it on an iPad Pro:
extension CVPixelBuffer {
func deepcopy() -> CVPixelBuffer? {
let width = CVPixelBufferGetWidth(self)
let height = CVPixelBufferGetHeight(self)
let format = CVPixelBufferGetPixelFormatType(self)
var pixelBufferCopyOptional:CVPixelBuffer?
CVPixelBufferCreate(nil, width, height, format, nil, &pixelBufferCopyOptional)
if let pixelBufferCopy = pixelBufferCopyOptional {
CVPixelBufferLockBaseAddress(self, kCVPixelBufferLock_ReadOnly)
CVPixelBufferLockBaseAddress(pixelBufferCopy, 0)
let baseAddress = CVPixelBufferGetBaseAddress(self)
let dataSize = CVPixelBufferGetDataSize(self)
let target = CVPixelBufferGetBaseAddress(pixelBufferCopy)
memcpy(target, baseAddress, dataSize)
CVPixelBufferUnlockBaseAddress(pixelBufferCopy, 0)
CVPixelBufferUnlockBaseAddress(self, kCVPixelBufferLock_ReadOnly)
}
return pixelBufferCopyOptional
}
}
The above crashes on an iPad Pro because CVPixelBufferGetDataSize(self) is slightly larger than CVPixelBufferGetDataSize(pixelBufferCopy), so the memcpy writes to unallocated memory.
So I gave up with that and tried this:
func copy() -> CVPixelBuffer?
{
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
var _copy: CVPixelBuffer?
CVPixelBufferCreate(
nil,
CVPixelBufferGetWidth(self),
CVPixelBufferGetHeight(self),
CVPixelBufferGetPixelFormatType(self),
CVBufferGetAttachments(self, .shouldPropagate),
&_copy)
guard let copy = _copy else { return nil }
CVPixelBufferLockBaseAddress(self, .readOnly)
CVPixelBufferLockBaseAddress(copy, [])
defer
{
CVPixelBufferUnlockBaseAddress(copy, [])
CVPixelBufferUnlockBaseAddress(self, .readOnly)
}
for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
{
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
memcpy(dest, source, height * bytesPerRow)
}
return copy
}
That works on both my test devices, but it's just reached actual customers and it turns out it crashes on the iPad 6 (and only that device so far). It's an EXC_BAD_ACCESS on the call to memcpy() again.
Seems crazy that there isn't a simple API call for this given how hard it seems to be to make it work reliably. Or am I making it harder than it needs to be? Thanks for any advice!
This question and answer combo is solid gold. Let me add value with a slight refactor and some control flow to account for CVPixelBuffers that do not have planes.
public extension CVPixelBuffer {
func copy() throws -> CVPixelBuffer {
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
var _copy: CVPixelBuffer?
let width = CVPixelBufferGetWidth(self)
let height = CVPixelBufferGetHeight(self)
let formatType = CVPixelBufferGetPixelFormatType(self)
let attachments = CVBufferGetAttachments(self, .shouldPropagate)
CVPixelBufferCreate(nil, width, height, formatType, attachments, &_copy)
guard let copy = _copy else {
throw PixelBufferCopyError.allocationFailed
}
CVPixelBufferLockBaseAddress(self, .readOnly)
CVPixelBufferLockBaseAddress(copy, [])
defer {
CVPixelBufferUnlockBaseAddress(copy, [])
CVPixelBufferUnlockBaseAddress(self, .readOnly)
}
let pixelBufferPlaneCount: Int = CVPixelBufferGetPlaneCount(self)
if pixelBufferPlaneCount == 0 {
let dest = CVPixelBufferGetBaseAddress(copy)
let source = CVPixelBufferGetBaseAddress(self)
let height = CVPixelBufferGetHeight(self)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRow(self)
let bytesPerRowDest = CVPixelBufferGetBytesPerRow(copy)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
}else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}else {
for plane in 0 ..< pixelBufferPlaneCount {
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
}else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}
}
return copy
}
}
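The snippet above throws a PixelBufferCopyError that isn't defined in the post; a minimal definition (an addition, not from the original answer) could be:
// Hypothetical error type so the extension above compiles as written.
public enum PixelBufferCopyError: Error {
    case allocationFailed
}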
Valid with Swift 5. To provide a little more background: the AVCaptureVideoDataOutput .videoSettings property can take many pixel formats, and not all of them are planar - especially the ones that ML models might need.
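For example (a sketch, not from the original answer), asking AVCaptureVideoDataOutput for non-planar 32BGRA frames up front means the planeCount == 0 branch above is the one that runs:
// Sketch: request non-planar 32BGRA frames so CVPixelBufferGetPlaneCount() == 0.
import AVFoundation

let videoOutput = AVCaptureVideoDataOutput()
videoOutput.videoSettings = [
    kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]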
The second implementation looks quite solid. The only problem I can imagine is that a plane in the new pixel buffer is allocated with a different stride length (bytes per row). The stride length is based on width × (bytes per pixel) and then rounded up in an unspecified way to achieve optimal memory access.
So check if:
CVPixelBufferGetBytesPerRowOfPlane(self, plane) == CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
If not, copy the pixel plane row by row:
for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
{
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
} else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}

Extract meter levels from audio file

I need to extract audio meter levels from a file so I can render the levels before playing the audio. I know AVAudioPlayer can get this information while playing the audio file through
func averagePower(forChannel channelNumber: Int) -> Float.
But in my case I would like to obtain a [Float] of meter levels beforehand.
Swift 4
On an iPhone, it takes:
0.538s to process an 8MB mp3 file with a duration of 4min 47s and a 44,100Hz sampling rate,
0.170s to process a 712KB mp3 file with a duration of 22s and a 44,100Hz sampling rate,
0.089s to process the caf file created by converting the file above using the command afconvert -f caff -d LEI16 audio.mp3 audio.caf in the terminal.
Let's begin:
A) Declare this class that is going to hold the necessary information about the audio asset:
/// Holds audio information used for building waveforms
final class AudioContext {
/// The audio asset URL used to load the context
public let audioURL: URL
/// Total number of samples in loaded asset
public let totalSamples: Int
/// Loaded asset
public let asset: AVAsset
// Loaded assetTrack
public let assetTrack: AVAssetTrack
private init(audioURL: URL, totalSamples: Int, asset: AVAsset, assetTrack: AVAssetTrack) {
self.audioURL = audioURL
self.totalSamples = totalSamples
self.asset = asset
self.assetTrack = assetTrack
}
public static func load(fromAudioURL audioURL: URL, completionHandler: @escaping (_ audioContext: AudioContext?) -> ()) {
let asset = AVURLAsset(url: audioURL, options: [AVURLAssetPreferPreciseDurationAndTimingKey: NSNumber(value: true as Bool)])
guard let assetTrack = asset.tracks(withMediaType: AVMediaType.audio).first else {
fatalError("Couldn't load AVAssetTrack")
}
asset.loadValuesAsynchronously(forKeys: ["duration"]) {
var error: NSError?
let status = asset.statusOfValue(forKey: "duration", error: &error)
switch status {
case .loaded:
guard
let formatDescriptions = assetTrack.formatDescriptions as? [CMAudioFormatDescription],
let audioFormatDesc = formatDescriptions.first,
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(audioFormatDesc)
else { break }
let totalSamples = Int((asbd.pointee.mSampleRate) * Float64(asset.duration.value) / Float64(asset.duration.timescale))
let audioContext = AudioContext(audioURL: audioURL, totalSamples: totalSamples, asset: asset, assetTrack: assetTrack)
completionHandler(audioContext)
return
case .failed, .cancelled, .loading, .unknown:
print("Couldn't load asset: \(error?.localizedDescription ?? "Unknown error")")
}
completionHandler(nil)
}
}
}
We are going to use its asynchronous load function and pass its result to a completion handler.
B) Import AVFoundation and Accelerate in your view controller:
import AVFoundation
import Accelerate
C) Declare the noise level in your view controller (in dB):
let noiseFloor: Float = -80
For example, anything less than -80dB will be considered as silence.
D) The following function takes an audio context and produces the desired dB powers. targetSamples defaults to 100; you can change that to suit your UI needs:
func render(audioContext: AudioContext?, targetSamples: Int = 100) -> [Float]{
guard let audioContext = audioContext else {
fatalError("Couldn't create the audioContext")
}
let sampleRange: CountableRange<Int> = 0..<audioContext.totalSamples
guard let reader = try? AVAssetReader(asset: audioContext.asset)
else {
fatalError("Couldn't initialize the AVAssetReader")
}
reader.timeRange = CMTimeRange(start: CMTime(value: Int64(sampleRange.lowerBound), timescale: audioContext.asset.duration.timescale),
duration: CMTime(value: Int64(sampleRange.count), timescale: audioContext.asset.duration.timescale))
let outputSettingsDict: [String : Any] = [
AVFormatIDKey: Int(kAudioFormatLinearPCM),
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsBigEndianKey: false,
AVLinearPCMIsFloatKey: false,
AVLinearPCMIsNonInterleaved: false
]
let readerOutput = AVAssetReaderTrackOutput(track: audioContext.assetTrack,
outputSettings: outputSettingsDict)
readerOutput.alwaysCopiesSampleData = false
reader.add(readerOutput)
var channelCount = 1
let formatDescriptions = audioContext.assetTrack.formatDescriptions as! [CMAudioFormatDescription]
for item in formatDescriptions {
guard let fmtDesc = CMAudioFormatDescriptionGetStreamBasicDescription(item) else {
fatalError("Couldn't get the format description")
}
channelCount = Int(fmtDesc.pointee.mChannelsPerFrame)
}
let samplesPerPixel = max(1, channelCount * sampleRange.count / targetSamples)
let filter = [Float](repeating: 1.0 / Float(samplesPerPixel), count: samplesPerPixel)
var outputSamples = [Float]()
var sampleBuffer = Data()
// 16-bit samples
reader.startReading()
defer { reader.cancelReading() }
while reader.status == .reading {
guard let readSampleBuffer = readerOutput.copyNextSampleBuffer(),
let readBuffer = CMSampleBufferGetDataBuffer(readSampleBuffer) else {
break
}
// Append audio sample buffer into our current sample buffer
var readBufferLength = 0
var readBufferPointer: UnsafeMutablePointer<Int8>?
CMBlockBufferGetDataPointer(readBuffer, 0, &readBufferLength, nil, &readBufferPointer)
sampleBuffer.append(UnsafeBufferPointer(start: readBufferPointer, count: readBufferLength))
CMSampleBufferInvalidate(readSampleBuffer)
let totalSamples = sampleBuffer.count / MemoryLayout<Int16>.size
let downSampledLength = totalSamples / samplesPerPixel
let samplesToProcess = downSampledLength * samplesPerPixel
guard samplesToProcess > 0 else { continue }
processSamples(fromData: &sampleBuffer,
outputSamples: &outputSamples,
samplesToProcess: samplesToProcess,
downSampledLength: downSampledLength,
samplesPerPixel: samplesPerPixel,
filter: filter)
//print("Status: \(reader.status)")
}
// Process the remaining samples at the end which didn't fit into samplesPerPixel
let samplesToProcess = sampleBuffer.count / MemoryLayout<Int16>.size
if samplesToProcess > 0 {
let downSampledLength = 1
let samplesPerPixel = samplesToProcess
let filter = [Float](repeating: 1.0 / Float(samplesPerPixel), count: samplesPerPixel)
processSamples(fromData: &sampleBuffer,
outputSamples: &outputSamples,
samplesToProcess: samplesToProcess,
downSampledLength: downSampledLength,
samplesPerPixel: samplesPerPixel,
filter: filter)
//print("Status: \(reader.status)")
}
// if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown)
guard reader.status == .completed else {
fatalError("Couldn't read the audio file")
}
return outputSamples
}
E) render uses this function to down-sample the data from the audio file, and convert to decibels:
func processSamples(fromData sampleBuffer: inout Data,
outputSamples: inout [Float],
samplesToProcess: Int,
downSampledLength: Int,
samplesPerPixel: Int,
filter: [Float]) {
sampleBuffer.withUnsafeBytes { (samples: UnsafePointer<Int16>) in
var processingBuffer = [Float](repeating: 0.0, count: samplesToProcess)
let sampleCount = vDSP_Length(samplesToProcess)
//Convert 16bit int samples to floats
vDSP_vflt16(samples, 1, &processingBuffer, 1, sampleCount)
//Take the absolute values to get amplitude
vDSP_vabs(processingBuffer, 1, &processingBuffer, 1, sampleCount)
//get the corresponding dB, and clip the results
getdB(from: &processingBuffer)
//Downsample and average
var downSampledData = [Float](repeating: 0.0, count: downSampledLength)
vDSP_desamp(processingBuffer,
vDSP_Stride(samplesPerPixel),
filter, &downSampledData,
vDSP_Length(downSampledLength),
vDSP_Length(samplesPerPixel))
//Remove processed samples
sampleBuffer.removeFirst(samplesToProcess * MemoryLayout<Int16>.size)
outputSamples += downSampledData
}
}
F) Which in turn calls this function that gets the corresponding dB, and clips the results to [noiseFloor, 0]:
func getdB(from normalizedSamples: inout [Float]) {
// Convert samples to a log scale
var zero: Float = 32768.0
vDSP_vdbcon(normalizedSamples, 1, &zero, &normalizedSamples, 1, vDSP_Length(normalizedSamples.count), 1)
//Clip to [noiseFloor, 0]
var ceil: Float = 0.0
var noiseFloorMutable = noiseFloor
vDSP_vclip(normalizedSamples, 1, &noiseFloorMutable, &ceil, &normalizedSamples, 1, vDSP_Length(normalizedSamples.count))
}
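For reference, here is a scalar sketch of what the vDSP_vdbcon/vDSP_vclip pair in getdB computes per sample (my own illustration, not part of the original answer): amplitude in dB relative to full-scale Int16, clipped to [noiseFloor, 0].
import Foundation

// Scalar equivalent of getdB for a single 16-bit sample.
func decibels(forSample sample: Int16, noiseFloor: Float = -80) -> Float {
    let amplitude = abs(Float(sample))
    guard amplitude > 0 else { return noiseFloor }     // treat silence as the noise floor
    let dB = 20 * log10(amplitude / 32768.0)           // 0 dBFS == full-scale Int16
    return min(max(dB, noiseFloor), 0)                 // clip to [noiseFloor, 0]
}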
G) Finally you can get the waveform of the audio like so:
guard let path = Bundle.main.path(forResource: "audio", ofType:"mp3") else {
fatalError("Couldn't find the file path")
}
let url = URL(fileURLWithPath: path)
var outputArray : [Float] = []
AudioContext.load(fromAudioURL: url, completionHandler: { audioContext in
guard let audioContext = audioContext else {
fatalError("Couldn't create the audioContext")
}
outputArray = self.render(audioContext: audioContext, targetSamples: 300)
})
Don't forget that AudioContext.load(fromAudioURL:) is asynchronous.
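Because loadValuesAsynchronously may call the completion handler on a background queue, hop back to the main queue before updating any UI with the rendered samples (a sketch; the waveform view is just a placeholder):
// Sketch: render off the main thread, then update UI on the main queue.
AudioContext.load(fromAudioURL: url) { audioContext in
    guard let audioContext = audioContext else { return }
    let samples = self.render(audioContext: audioContext, targetSamples: 300)
    DispatchQueue.main.async {
        // self.waveformView.update(with: samples)  // hypothetical view/method
        print("Rendered \(samples.count) bars")
    }
}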
This solution is synthesized from this repo by William Entriken. All credit goes to him.
Swift 5
Here is the same code updated to Swift 5 syntax:
import AVFoundation
import Accelerate
/// Holds audio information used for building waveforms
final class AudioContext {
/// The audio asset URL used to load the context
public let audioURL: URL
/// Total number of samples in loaded asset
public let totalSamples: Int
/// Loaded asset
public let asset: AVAsset
// Loaded assetTrack
public let assetTrack: AVAssetTrack
private init(audioURL: URL, totalSamples: Int, asset: AVAsset, assetTrack: AVAssetTrack) {
self.audioURL = audioURL
self.totalSamples = totalSamples
self.asset = asset
self.assetTrack = assetTrack
}
public static func load(fromAudioURL audioURL: URL, completionHandler: @escaping (_ audioContext: AudioContext?) -> ()) {
let asset = AVURLAsset(url: audioURL, options: [AVURLAssetPreferPreciseDurationAndTimingKey: NSNumber(value: true as Bool)])
guard let assetTrack = asset.tracks(withMediaType: AVMediaType.audio).first else {
fatalError("Couldn't load AVAssetTrack")
}
asset.loadValuesAsynchronously(forKeys: ["duration"]) {
var error: NSError?
let status = asset.statusOfValue(forKey: "duration", error: &error)
switch status {
case .loaded:
guard
let formatDescriptions = assetTrack.formatDescriptions as? [CMAudioFormatDescription],
let audioFormatDesc = formatDescriptions.first,
let asbd = CMAudioFormatDescriptionGetStreamBasicDescription(audioFormatDesc)
else { break }
let totalSamples = Int((asbd.pointee.mSampleRate) * Float64(asset.duration.value) / Float64(asset.duration.timescale))
let audioContext = AudioContext(audioURL: audioURL, totalSamples: totalSamples, asset: asset, assetTrack: assetTrack)
completionHandler(audioContext)
return
case .failed, .cancelled, .loading, .unknown:
print("Couldn't load asset: \(error?.localizedDescription ?? "Unknown error")")
}
completionHandler(nil)
}
}
}
let noiseFloor: Float = -80
func render(audioContext: AudioContext?, targetSamples: Int = 100) -> [Float]{
guard let audioContext = audioContext else {
fatalError("Couldn't create the audioContext")
}
let sampleRange: CountableRange<Int> = 0..<audioContext.totalSamples
guard let reader = try? AVAssetReader(asset: audioContext.asset)
else {
fatalError("Couldn't initialize the AVAssetReader")
}
reader.timeRange = CMTimeRange(start: CMTime(value: Int64(sampleRange.lowerBound), timescale: audioContext.asset.duration.timescale),
duration: CMTime(value: Int64(sampleRange.count), timescale: audioContext.asset.duration.timescale))
let outputSettingsDict: [String : Any] = [
AVFormatIDKey: Int(kAudioFormatLinearPCM),
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsBigEndianKey: false,
AVLinearPCMIsFloatKey: false,
AVLinearPCMIsNonInterleaved: false
]
let readerOutput = AVAssetReaderTrackOutput(track: audioContext.assetTrack,
outputSettings: outputSettingsDict)
readerOutput.alwaysCopiesSampleData = false
reader.add(readerOutput)
var channelCount = 1
let formatDescriptions = audioContext.assetTrack.formatDescriptions as! [CMAudioFormatDescription]
for item in formatDescriptions {
guard let fmtDesc = CMAudioFormatDescriptionGetStreamBasicDescription(item) else {
fatalError("Couldn't get the format description")
}
channelCount = Int(fmtDesc.pointee.mChannelsPerFrame)
}
let samplesPerPixel = max(1, channelCount * sampleRange.count / targetSamples)
let filter = [Float](repeating: 1.0 / Float(samplesPerPixel), count: samplesPerPixel)
var outputSamples = [Float]()
var sampleBuffer = Data()
// 16-bit samples
reader.startReading()
defer { reader.cancelReading() }
while reader.status == .reading {
guard let readSampleBuffer = readerOutput.copyNextSampleBuffer(),
let readBuffer = CMSampleBufferGetDataBuffer(readSampleBuffer) else {
break
}
// Append audio sample buffer into our current sample buffer
var readBufferLength = 0
var readBufferPointer: UnsafeMutablePointer<Int8>?
CMBlockBufferGetDataPointer(readBuffer,
atOffset: 0,
lengthAtOffsetOut: &readBufferLength,
totalLengthOut: nil,
dataPointerOut: &readBufferPointer)
sampleBuffer.append(UnsafeBufferPointer(start: readBufferPointer, count: readBufferLength))
CMSampleBufferInvalidate(readSampleBuffer)
let totalSamples = sampleBuffer.count / MemoryLayout<Int16>.size
let downSampledLength = totalSamples / samplesPerPixel
let samplesToProcess = downSampledLength * samplesPerPixel
guard samplesToProcess > 0 else { continue }
processSamples(fromData: &sampleBuffer,
outputSamples: &outputSamples,
samplesToProcess: samplesToProcess,
downSampledLength: downSampledLength,
samplesPerPixel: samplesPerPixel,
filter: filter)
//print("Status: \(reader.status)")
}
// Process the remaining samples at the end which didn't fit into samplesPerPixel
let samplesToProcess = sampleBuffer.count / MemoryLayout<Int16>.size
if samplesToProcess > 0 {
let downSampledLength = 1
let samplesPerPixel = samplesToProcess
let filter = [Float](repeating: 1.0 / Float(samplesPerPixel), count: samplesPerPixel)
processSamples(fromData: &sampleBuffer,
outputSamples: &outputSamples,
samplesToProcess: samplesToProcess,
downSampledLength: downSampledLength,
samplesPerPixel: samplesPerPixel,
filter: filter)
//print("Status: \(reader.status)")
}
// if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown)
guard reader.status == .completed else {
fatalError("Couldn't read the audio file")
}
return outputSamples
}
func processSamples(fromData sampleBuffer: inout Data,
outputSamples: inout [Float],
samplesToProcess: Int,
downSampledLength: Int,
samplesPerPixel: Int,
filter: [Float]) {
sampleBuffer.withUnsafeBytes { (samples: UnsafeRawBufferPointer) in
var processingBuffer = [Float](repeating: 0.0, count: samplesToProcess)
let sampleCount = vDSP_Length(samplesToProcess)
//Create an UnsafePointer<Int16> from samples
let unsafeBufferPointer = samples.bindMemory(to: Int16.self)
let unsafePointer = unsafeBufferPointer.baseAddress!
//Convert 16bit int samples to floats
vDSP_vflt16(unsafePointer, 1, &processingBuffer, 1, sampleCount)
//Take the absolute values to get amplitude
vDSP_vabs(processingBuffer, 1, &processingBuffer, 1, sampleCount)
//get the corresponding dB, and clip the results
getdB(from: &processingBuffer)
//Downsample and average
var downSampledData = [Float](repeating: 0.0, count: downSampledLength)
vDSP_desamp(processingBuffer,
vDSP_Stride(samplesPerPixel),
filter, &downSampledData,
vDSP_Length(downSampledLength),
vDSP_Length(samplesPerPixel))
//Remove processed samples
sampleBuffer.removeFirst(samplesToProcess * MemoryLayout<Int16>.size)
outputSamples += downSampledData
}
}
func getdB(from normalizedSamples: inout [Float]) {
// Convert samples to a log scale
var zero: Float = 32768.0
vDSP_vdbcon(normalizedSamples, 1, &zero, &normalizedSamples, 1, vDSP_Length(normalizedSamples.count), 1)
//Clip to [noiseFloor, 0]
var ceil: Float = 0.0
var noiseFloorMutable = noiseFloor
vDSP_vclip(normalizedSamples, 1, &noiseFloorMutable, &ceil, &normalizedSamples, 1, vDSP_Length(normalizedSamples.count))
}
Old solution
Here is a function you could use to pre-render the meter levels of an audio file without playing it:
func averagePowers(audioFileURL: URL, forChannel channelNumber: Int, completionHandler: @escaping (_ success: [Float]) -> ()) {
let audioFile = try! AVAudioFile(forReading: audioFileURL)
let audioFilePFormat = audioFile.processingFormat
let audioFileLength = audioFile.length
//Set the size of frames to read from the audio file, you can adjust this to your liking
let frameSizeToRead = Int(audioFilePFormat.sampleRate/20)
//This is to how many frames/portions we're going to divide the audio file
let numberOfFrames = Int(audioFileLength)/frameSizeToRead
//Create a pcm buffer the size of a frame
guard let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFilePFormat, frameCapacity: AVAudioFrameCount(frameSizeToRead)) else {
fatalError("Couldn't create the audio buffer")
}
//Do the calculations in a background thread, if you don't want to block the main thread for larger audio files
DispatchQueue.global(qos: .userInitiated).async {
//This is the array to be returned
var returnArray : [Float] = [Float]()
//We're going to read the audio file, frame by frame
for i in 0..<numberOfFrames {
//Change the position from which we are reading the audio file, since each frame starts from a different position in the audio file
audioFile.framePosition = AVAudioFramePosition(i * frameSizeToRead)
//Read the frame from the audio file
try! audioFile.read(into: audioBuffer, frameCount: AVAudioFrameCount(frameSizeToRead))
//Get the data from the chosen channel
let channelData = audioBuffer.floatChannelData![channelNumber]
//This is the array of floats
let arr = Array(UnsafeBufferPointer(start:channelData, count: frameSizeToRead))
//Calculate the mean value of the absolute values
let meanValue = arr.reduce(0, {$0 + abs($1)})/Float(arr.count)
//Calculate the dB power (You can adjust this), if average is less than 0.000_000_01 we limit it to -160.0
let dbPower: Float = meanValue > 0.000_000_01 ? 20 * log10(meanValue) : -160.0
//append the db power in the current frame to the returnArray
returnArray.append(dbPower)
}
//Return the dBPowers
completionHandler(returnArray)
}
}
And you can call it like so:
let path = Bundle.main.path(forResource: "audio.mp3", ofType:nil)!
let url = URL(fileURLWithPath: path)
averagePowers(audioFileURL: url, forChannel: 0, completionHandler: { array in
//Use the array
})
Using Instruments, this solution shows high CPU usage for about 1.2 seconds, takes about 5 seconds to return to the main thread with the returnArray, and up to 10 seconds when in Low Power Mode.
First of all, this is a heavy operation, so it will take some OS time and resources to accomplish. In the example below I will use standard frame rates and sampling, but you should really sample far less if, for example, you only want to display bars as an indication.
OK, so you don't need to play a sound to analyze it, so I will not use AVAudioPlayer at all. I'll assume I take the track as a URL:
let path = Bundle.main.path(forResource: "example3.mp3", ofType:nil)!
let url = URL(fileURLWithPath: path)
Then I will use AVAudioFile to read the track into an AVAudioPCMBuffer. Once you have it in a buffer, you have all the information about your track:
func buffer(url: URL) {
do {
let track = try AVAudioFile(forReading: url)
let format = AVAudioFormat(commonFormat:.pcmFormatFloat32, sampleRate:track.fileFormat.sampleRate, channels: track.fileFormat.channelCount, interleaved: false)
let buffer = AVAudioPCMBuffer(pcmFormat: format!, frameCapacity: UInt32(track.length))!
try track.read(into : buffer, frameCount:UInt32(track.length))
self.analyze(buffer: buffer)
} catch {
print(error)
}
}
As you may notice, there is an analyze method for it. Your buffer exposes a floatChannelData property; it's plain data, so you'll need to parse it. I will post the method and explain it below:
func analyze(buffer: AVAudioPCMBuffer) {
let channelCount = Int(buffer.format.channelCount)
let frameLength = Int(buffer.frameLength)
var result = Array(repeating: [Float](repeatElement(0, count: frameLength)), count: channelCount)
for channel in 0..<channelCount {
for sampleIndex in 0..<frameLength {
let sqrtV = sqrt(buffer.floatChannelData![channel][sampleIndex*buffer.stride]/Float(buffer.frameLength))
let dbPower = 20 * log10(sqrtV)
result[channel][sampleIndex] = dbPower
}
}
}
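Note that as posted, analyze(buffer:) fills result but never returns or stores it. A variation that returns the per-channel values might look like this (my own sketch; it also wraps the sample in abs() so negative samples don't produce NaN under the sqrt):
// Sketch: same per-sample formula as above, but returning the result
// and guarding against taking sqrt of a negative sample.
func analyzeLevels(buffer: AVAudioPCMBuffer) -> [[Float]] {
    let channelCount = Int(buffer.format.channelCount)
    let frameLength = Int(buffer.frameLength)
    var result = Array(repeating: [Float](repeating: 0, count: frameLength), count: channelCount)
    for channel in 0..<channelCount {
        for sampleIndex in 0..<frameLength {
            let sample = abs(buffer.floatChannelData![channel][sampleIndex * buffer.stride])
            let sqrtV = sqrt(sample / Float(frameLength))
            result[channel][sampleIndex] = 20 * log10(sqrtV)  // -inf for silent samples, as in the original
        }
    }
    return result
}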
There are some calculations (heavy ones) involved in it. When I was working on a similar solution a couple of months ago I came across this tutorial: https://www.raywenderlich.com/5154-avaudioengine-tutorial-for-ios-getting-started - there is an excellent explanation of this calculation there, as well as parts of the code that I pasted above and also use in my project, so I want to credit the author here: Scott McAlister 👏
Based on @Jakub's answer above, here's an Objective-C version.
If you want to increase the accuracy, change the deciblesCount variable, but beware of the performance hit. If you want to return more bars, you can increase the divisions variable when you call the function (with no additional performance hit). You should probably put it on a background thread in any case.
A 3:36 minute / 5.2MB song takes about 1.2s. The above images are of a shotgun firing with 30 and 100 divisions respectively.
-(NSArray *)returnWaveArrayForFile:(NSString *)filepath numberOfDivisions:(int)divisions{
//pull file
NSError * error;
NSURL * url = [NSURL URLWithString:filepath];
AVAudioFile * file = [[AVAudioFile alloc] initForReading:url error:&error];
//create av stuff
AVAudioFormat * format = [[AVAudioFormat alloc] initWithCommonFormat:AVAudioPCMFormatFloat32 sampleRate:file.fileFormat.sampleRate channels:file.fileFormat.channelCount interleaved:false];
AVAudioPCMBuffer * buffer = [[AVAudioPCMBuffer alloc] initWithPCMFormat:format frameCapacity:(int)file.length];
[file readIntoBuffer:buffer frameCount:(int)file.length error:&error];
//grab total number of decibles, 1000 seems to work
int deciblesCount = MIN(1000,buffer.frameLength);
NSMutableArray * channels = [NSMutableArray new];
float frameIncrement = buffer.frameLength / (float)deciblesCount;
//needed later
float maxDecible = 0;
float minDecible = HUGE_VALF;
NSMutableArray * sd = [NSMutableArray new]; //used for standard deviation
for (int n = 0; n < MIN(buffer.format.channelCount, 2); n++){ //go through channels
NSMutableArray * decibles = [NSMutableArray new]; //holds actual decible values
//go through pulling the decibles
for (int i = 0; i < deciblesCount; i++){
int offset = frameIncrement * i; //grab offset
//equation from stack, no idea the maths
float sqr = sqrtf(buffer.floatChannelData[n][offset * buffer.stride]/(float)buffer.frameLength);
float decible = 20 * log10f(sqr);
decible += 160; //make positive
decible = (isnan(decible) || decible < 0) ? 0 : decible; //if it's not a number or silent, make it zero
if (decible > 0){ //if it has volume
[sd addObject:@(decible)];
}
[decibles addObject:@(decible)];//add to decibles array
maxDecible = MAX(maxDecible, decible); //grab biggest
minDecible = MIN(minDecible, decible); //grab smallest
}
[channels addObject:decibles]; //add to channels array
}
//find standard deviation and then deducted the bottom slag
NSExpression * expression = [NSExpression expressionForFunction:@"stddev:" arguments:@[[NSExpression expressionForConstantValue:sd]]];
float standardDeviation = [[expression expressionValueWithObject:nil context:nil] floatValue];
float deviationDeduct = standardDeviation / (standardDeviation + (maxDecible - minDecible));
//go through calculating deviation percentage
NSMutableArray * deviations = [NSMutableArray new];
NSMutableArray * returning = [NSMutableArray new];
for (int c = 0; c < (int)channels.count; c++){
NSArray * channel = channels[c];
for (int n = 0; n < (int)channel.count; n++){
float decible = [channel[n] floatValue];
float remainder = (maxDecible - decible);
float deviation = standardDeviation / (standardDeviation + remainder) - deviationDeduct;
[deviations addObject:#(deviation)];
}
//go through creating percentage
float maxTotal = 0;
int catchCount = floorf(deciblesCount / divisions); //total decible values within a segment or division
NSMutableArray * totals = [NSMutableArray new];
for (int n = 0; n < divisions; n++){
float total = 0.0f;
for (int k = 0; k < catchCount; k++){ //go through each segment
int index = n * catchCount + k; //create the index
float deviation = [deviations[index] floatValue]; //grab value
total += deviation; //add to total
}
//max out maxTotal var -> used later to calc percentages
maxTotal = MAX(maxTotal, total);
[totals addObject:#(total)]; //add to totals array
}
//normalise percentages and return
NSMutableArray * percentages = [NSMutableArray new];
for (int n = 0; n < divisions; n++){
float total = [totals[n] floatValue]; //grab the total value for that segment
float percentage = total / maxTotal; //divide by the biggest value -> making it a percentage
[percentages addObject:#(percentage)]; //add to the array
}
//add to the returning array
[returning addObject:percentages];
}
//return channel data -> array of two arrays of percentages
return (NSArray *)returning;
}
Call like this:
int divisions = 30; //number of segments you want for your display
NSString * path = [[NSBundle mainBundle] pathForResource:@"satie" ofType:@"mp3"];
NSArray * channels = [_audioReader returnWaveArrayForFile:path numberOfDivisions:divisions];
You get the two channels back in that array, which you can use to update your UI. Values in each array are between 0 and 1 which you can use to build your bars.

How to convert YUV frames (from OTVideoFrame) to CVPixelBuffer

I need to convert the YUV frames I get from the OTVideoFrame class into a CVPixelBuffer.
This class provides an array of planes for the video frame, containing three elements (the Y, U, and V planes) at indices 0, 1, and 2:
@property (nonatomic, retain) NSPointerArray *planes
and the format of the video frame:
@property (nonatomic, retain) OTVideoFormat *format
which contains properties like the width, height, bytesPerRow, etc. of the frame.
I need to add a filter to the image I receive in the form of an OTVideoFrame. I have already tried these answers:
How to convert from YUV to CIImage for iOS
Create CVPixelBuffer from YUV with IOSurface backed
These two links have solutions in Objective-C, but I want to do it in Swift. One of the answers in the second link is in Swift, but it lacks some information about the YUVFrame struct that the answer refers to.
The Format that I receive is NV12
Here is what I have tried so far, but I don't know how to proceed next:
/**
* Calculate the size of each plane from OTVideoFrame.
*
* @param frame The frame to render.
* @return A tuple containing three elements, one for the size of each plane.
*/
fileprivate func calculatePlaneSize(forFrame frame: OTVideoFrame)
-> (ySize: Int, uSize: Int, vSize: Int){
guard let frameFormat = frame.format
else {
return (0, 0 ,0)
}
let baseSize = Int(frameFormat.imageWidth * frameFormat.imageHeight) * MemoryLayout<GLubyte>.size
return (baseSize, baseSize / 4, baseSize / 4)
}
/**
* Renders a frame to the video renderer.
*
* @param frame The frame to render.
*/
func renderVideoFrame(_ frame: OTVideoFrame) {
let planeSize = calculatePlaneSize(forFrame: frame)
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let yStride = frame.format!.bytesPerRow.object(at: 0) as! Int
// multiply chroma strides by 2 as bytesPerRow represents 2x2 subsample
let uStride = frame.format!.bytesPerRow.object(at: 1) as! Int
let vStride = frame.format!.bytesPerRow.object(at: 2) as! Int
let width = frame.format!.imageWidth
let height = frame.format!.imageHeight
var pixelBuffer: CVPixelBuffer? = nil
var err: CVReturn;
err = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, nil, &pixelBuffer)
if (err != 0) {
NSLog("Error at CVPixelBufferCreate %d", err)
fatalError()
}
}
Taking guidance from those two links, I tried to create the pixel buffer, but I got stuck at this point every time because the Objective-C code that follows doesn't translate directly to Swift 3.
For those who are looking for a fast solution, I did it with Swift and Accelerate,
using the vImageConvert_420Yp8_Cb8_Cr8ToARGB8888 function.
import Foundation
import Accelerate
import UIKit
import OpenTok
class Accelerater{
var infoYpCbCrToARGB = vImage_YpCbCrToARGB()
init() {
_ = configureYpCbCrToARGBInfo()
}
func configureYpCbCrToARGBInfo() -> vImage_Error {
print("Configuring")
var pixelRange = vImage_YpCbCrPixelRange(Yp_bias: 0,
CbCr_bias: 128,
YpRangeMax: 255,
CbCrRangeMax: 255,
YpMax: 255,
YpMin: 1,
CbCrMax: 255,
CbCrMin: 0)
let error = vImageConvert_YpCbCrToARGB_GenerateConversion(
kvImage_YpCbCrToARGBMatrix_ITU_R_601_4!,
&pixelRange,
&infoYpCbCrToARGB,
kvImage420Yp8_Cb8_Cr8,
kvImageARGB8888,
vImage_Flags(kvImagePrintDiagnosticsToConsole))
print("Configration done \(error)")
return error
}
public func convertFrameVImageYUV(toUIImage frame: OTVideoFrame, flag: Bool) -> UIImage {
var result: UIImage? = nil
let width = frame.format?.imageWidth ?? 0
let height = frame.format?.imageHeight ?? 0
var pixelBuffer: CVPixelBuffer? = nil
_ = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
_ = convertFrameVImageYUV(frame, to: pixelBuffer)
var ciImage: CIImage? = nil
if let pixelBuffer = pixelBuffer {
ciImage = CIImage(cvPixelBuffer: pixelBuffer)
}
let temporaryContext = CIContext(options: nil)
var uiImage: CGImage? = nil
if let ciImage = ciImage {
uiImage = temporaryContext.createCGImage(ciImage, from: CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer!), height: CVPixelBufferGetHeight(pixelBuffer!)))
}
if let uiImage = uiImage {
result = UIImage(cgImage: uiImage)
}
CVPixelBufferUnlockBaseAddress(pixelBuffer!, [])
return result!
}
func convertFrameVImageYUV(_ frame: OTVideoFrame, to pixelBufferRef: CVPixelBuffer?) -> vImage_Error{
let start = CFAbsoluteTimeGetCurrent()
if pixelBufferRef == nil {
print("No PixelBuffer refrance found")
return vImage_Error(kvImageInvalidParameter)
}
let width = frame.format?.imageWidth ?? 0
let height = frame.format?.imageHeight ?? 0
let subsampledWidth = frame.format!.imageWidth/2
let subsampledHeight = frame.format!.imageHeight/2
print("subsample height \(subsampledHeight) \(subsampledWidth)")
let planeSize = calculatePlaneSize(forFrame: frame)
print("ysize : \(planeSize.ySize) \(planeSize.uSize) \(planeSize.vSize)")
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let yStride = frame.format!.bytesPerRow.object(at: 0) as! Int
// multiply chroma strides by 2 as bytesPerRow represents 2x2 subsample
let uStride = frame.format!.bytesPerRow.object(at: 1) as! Int
let vStride = frame.format!.bytesPerRow.object(at: 2) as! Int
var yPlaneBuffer = vImage_Buffer(data: yPlane, height: vImagePixelCount(height), width: vImagePixelCount(width), rowBytes: yStride)
var uPlaneBuffer = vImage_Buffer(data: uPlane, height: vImagePixelCount(subsampledHeight), width: vImagePixelCount(subsampledWidth), rowBytes: uStride)
var vPlaneBuffer = vImage_Buffer(data: vPlane, height: vImagePixelCount(subsampledHeight), width: vImagePixelCount(subsampledWidth), rowBytes: vStride)
CVPixelBufferLockBaseAddress(pixelBufferRef!, .readOnly)
let pixelBufferData = CVPixelBufferGetBaseAddress(pixelBufferRef!)
let rowBytes = CVPixelBufferGetBytesPerRow(pixelBufferRef!)
var destinationImageBuffer = vImage_Buffer()
destinationImageBuffer.data = pixelBufferData
destinationImageBuffer.height = vImagePixelCount(height)
destinationImageBuffer.width = vImagePixelCount(width)
destinationImageBuffer.rowBytes = rowBytes
var permuteMap: [UInt8] = [3, 2, 1, 0] // BGRA
let convertError = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&yPlaneBuffer, &uPlaneBuffer, &vPlaneBuffer, &destinationImageBuffer, &infoYpCbCrToARGB, &permuteMap, 255, vImage_Flags(kvImagePrintDiagnosticsToConsole))
CVPixelBufferUnlockBaseAddress(pixelBufferRef!, [])
yPlane.deallocate()
uPlane.deallocate()
vPlane.deallocate()
let end = CFAbsoluteTimeGetCurrent()
print("Decoding time \((end-start)*1000)")
return convertError
}
fileprivate func calculatePlaneSize(forFrame frame: OTVideoFrame)
-> (ySize: Int, uSize: Int, vSize: Int)
{
guard let frameFormat = frame.format
else {
return (0, 0 ,0)
}
let baseSize = Int(frameFormat.imageWidth * frameFormat.imageHeight) * MemoryLayout<GLubyte>.size
return (baseSize, baseSize / 4, baseSize / 4)
}
}
Performance tested on iPhone7, one frame conversion is less than a millisecond.
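Usage would then look something like this (a sketch, assuming you receive frames via OpenTok's OTVideoRender callback):
// Sketch: convert each incoming frame with the Accelerater class above.
let converter = Accelerater()

func renderVideoFrame(_ frame: OTVideoFrame) {
    let image = converter.convertFrameVImageYUV(toUIImage: frame, flag: false)
    // hand `image` to your filter / preview layer on the main queue
}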
Here's what worked for me (I've taken your function and changed it a bit):
func createPixelBufferWithVideoFrame(_ frame: OTVideoFrame) -> CVPixelBuffer? {
if let fLock = frameLock {
fLock.lock()
let planeSize = calculatePlaneSize(forFrame: frame)
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let width = frame.format!.imageWidth
let height = frame.format!.imageHeight
var pixelBuffer: CVPixelBuffer? = nil
var err: CVReturn;
err = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, nil, &pixelBuffer)
if (err != 0) {
NSLog("Error at CVPixelBufferCreate %d", err)
return nil
}
if let pixelBuffer = pixelBuffer {
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let yPlaneTo = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
memcpy(yPlaneTo, yPlane, planeSize.ySize)
let uvRow: Int = planeSize.uSize*2/Int(width)
let halfWidth: Int = Int(width)/2
if let uPlaneTo = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1) {
let uvPlaneTo = uPlaneTo.bindMemory(to: GLubyte.self, capacity: Int(uvRow*halfWidth*2))
for i in 0..<uvRow {
for j in 0..<halfWidth {
let dataIndex: Int = Int(i) * Int(halfWidth) + Int(j)
let uIndex: Int = (i * Int(width)) + Int(j) * 2
let vIndex: Int = uIndex + 1
uvPlaneTo[uIndex] = uPlane[dataIndex]
uvPlaneTo[vIndex] = vPlane[dataIndex]
}
}
}
}
fLock.unlock()
return pixelBuffer
}
return nil
}
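And since the original goal was to filter the frame, the resulting buffer can go straight into Core Image (a sketch; the filter name is just a placeholder):
import CoreImage

// Sketch: wrap the produced CVPixelBuffer in a CIImage and apply a filter.
if let pixelBuffer = createPixelBufferWithVideoFrame(frame) {
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let filtered = ciImage.applyingFilter("CISepiaTone", parameters: [kCIInputIntensityKey: 0.8])
    // render `filtered` with a CIContext, or display it via an image view
}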
