CVPixelBufferRef outputPixelBuffer = NULL;
CMBlockBufferRef blockBuffer = NULL;
void* buffer = (void*)[videoUnit bufferWithH265LengthHeader];
OSStatus status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,
buffer,
videoUnit.length,
kCFAllocatorNull,
NULL, 0, videoUnit.length,
0, &blockBuffer);
if(status == kCMBlockBufferNoErr) {
CMSampleBufferRef sampleBuffer = NULL;
const size_t sampleSizeArray[] = {videoUnit.length};
status = CMSampleBufferCreateReady(kCFAllocatorDefault,
blockBuffer,
_decoderFormatDescription,
1, 0, NULL, 1, sampleSizeArray,
&sampleBuffer);
if (status == kCMBlockBufferNoErr && sampleBuffer && _deocderSession) {
VTDecodeFrameFlags flags = 0;
VTDecodeInfoFlags flagOut = 0;
OSStatus decodeStatus = VTDecompressionSessionDecodeFrame(_deocderSession,
sampleBuffer,
flags,
&outputPixelBuffer,
&flagOut);
if(decodeStatus == kVTInvalidSessionErr) {
NSLog(#"IOS8VT: Invalid session, reset decoder session");
} else if(decodeStatus == kVTVideoDecoderBadDataErr) {
NSLog(#"IOS8VT: decode failed status=%d(Bad data)", decodeStatus);
} else if(decodeStatus != noErr) {
NSLog(#"IOS8VT: decode failed status=%d", decodeStatus);
}
CFRelease(sampleBuffer);
}
CFRelease(blockBuffer);
}
return outputPixelBuffer;
This is my code to decode the stream data. It was working fine on an iPhone 6s, but when the code runs on an iPhone X or iPhone 11, outputPixelBuffer comes back nil. Can anyone help?
Without seeing the code for your decompression session creation, it is hard to say. It could be that your session is delivering the output buffer to the callback function you provided at creation time, so I highly recommend adding that part of your code too (a minimal creation sketch is included at the end of this answer for reference).
By providing &outputPixelBuffer in:
OSStatus decodeStatus = VTDecompressionSessionDecodeFrame(_deocderSession,
sampleBuffer,
flags,
&outputPixelBuffer,
&flagOut);
only means that you have passed in a reference; it does not guarantee that the buffer will be filled synchronously.
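One thing you can check is the flagOut that the decode call hands back; if the frame went down the asynchronous path, the pixel buffer will only show up in the session's output callback. A small sketch using your variable names:
// With flags == 0 the decode is normally synchronous, but if the decoder did
// go asynchronous, outputPixelBuffer is still NULL at this point and the frame
// is delivered to the output callback instead.
if (flagOut & kVTDecodeInfo_Asynchronous) {
    // Optionally block until every pending frame has been emitted:
    VTDecompressionSessionWaitForAsynchronousFrames(_deocderSession);
}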
I also recommend that you print out the OSStatus for:
CMBlockBufferCreateWithMemoryBlock
and
CMSampleBufferCreateReady
That way, if something goes wrong at either of those steps, you will know about it.
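For reference, a minimal creation sketch could look roughly like the following. The pixel format, the callback body, and the method name createDecoderSession are my assumptions, since that part of your code isn't shown; the point is that the fourth argument you pass to VTDecompressionSessionDecodeFrame (&outputPixelBuffer) arrives in the callback as sourceFrameRefCon, and it is the callback that has to fill it:
// Hypothetical output callback: copies the decoded frame into the
// CVPixelBufferRef* that was passed as sourceFrameRefCon (i.e. &outputPixelBuffer).
static void didDecompress(void *decompressionOutputRefCon, void *sourceFrameRefCon,
                          OSStatus status, VTDecodeInfoFlags infoFlags,
                          CVImageBufferRef imageBuffer,
                          CMTime presentationTimeStamp, CMTime presentationDuration) {
    CVPixelBufferRef *outputPixelBuffer = (CVPixelBufferRef *)sourceFrameRefCon;
    if (outputPixelBuffer != NULL && imageBuffer != NULL) {
        *outputPixelBuffer = CVPixelBufferRetain(imageBuffer);
    }
}

// Hypothetical session setup; the pixel format is just an example.
- (BOOL)createDecoderSession {
    uint32_t pixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    CFNumberRef pixelFormatNumber = CFNumberCreate(NULL, kCFNumberSInt32Type, &pixelFormat);
    const void *keys[] = { kCVPixelBufferPixelFormatTypeKey };
    const void *values[] = { pixelFormatNumber };
    CFDictionaryRef destinationAttributes = CFDictionaryCreate(NULL, keys, values, 1,
                                                               &kCFTypeDictionaryKeyCallBacks,
                                                               &kCFTypeDictionaryValueCallBacks);
    VTDecompressionOutputCallbackRecord callbackRecord = { didDecompress, NULL };
    OSStatus status = VTDecompressionSessionCreate(kCFAllocatorDefault,
                                                   _decoderFormatDescription,
                                                   NULL,
                                                   destinationAttributes,
                                                   &callbackRecord,
                                                   &_deocderSession);
    CFRelease(destinationAttributes);
    CFRelease(pixelFormatNumber);
    return status == noErr;
}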
I am trying to create a copy of a CMSampleBuffer as returned by captureOutput in an AVCaptureAudioDataOutputSampleBufferDelegate.
The problem I am having is that frames coming from the delegate method captureOutput:didOutputSampleBuffer:fromConnection: get dropped after I retain them in a CFArray for a long time.
Obviously, I need to create deep copies of incoming buffers for further processing. I also know that CMSampleBufferCreateCopy only creates shallow copies.
A few related questions have already been asked on SO:
Pulling data from a CMSampleBuffer in order to create a deep copy
Creating copy of CMSampleBuffer in Swift returns OSStatus -12743 (Invalid Media Format)
Deep Copy of CMImageBuffer or CVImageBuffer
But none of them helps me use the 12-parameter CMSampleBufferCreate function correctly:
CMSampleBufferRef copyBuffer;
CMBlockBufferRef data = CMSampleBufferGetDataBuffer(sampleBuffer);
CMFormatDescriptionRef formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer);
CMItemCount itemCount = CMSampleBufferGetNumSamples(sampleBuffer);
CMTime duration = CMSampleBufferGetDuration(sampleBuffer);
CMTime presentationStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMSampleTimingInfo timingInfo;
timingInfo.duration = duration;
timingInfo.presentationTimeStamp = presentationStamp;
timingInfo.decodeTimeStamp = CMSampleBufferGetDecodeTimeStamp(sampleBuffer);
size_t sampleSize = CMBlockBufferGetDataLength(data);
CMBlockBufferRef sampleData;
if (CMBlockBufferCopyDataBytes(data, 0, sampleSize, &sampleData) != kCMBlockBufferNoErr) {
VLog(#"error during copying sample buffer");
}
// Here I tried data and sampleData CMBlockBuffer instance, but no success
OSStatus status = CMSampleBufferCreate(kCFAllocatorDefault, data, isDataReady, nil, nil, formatDescription, itemCount, 1, &timingInfo, 1, &sampleSize, &copyBuffer);
if (!self.sampleBufferArray) {
self.sampleBufferArray = CFArrayCreateMutable(NULL, 0, &kCFTypeArrayCallBacks);
//EXC_BAD_ACCESS crash when trying to add sampleBuffer to the array
CFArrayAppendValue(self.sampleBufferArray, copyBuffer);
} else {
CFArrayAppendValue(self.sampleBufferArray, copyBuffer);
}
How do you deep copy an audio CMSampleBuffer? Feel free to use any language (Swift/Objective-C) in your answers.
Here is the working solution I finally implemented. I sent this snippet to Apple Developer Technical Support and asked them to check whether it is a correct way to copy an incoming sample buffer. The basic idea is to copy the AudioBufferList, then create a CMSampleBuffer and set that AudioBufferList on the new sample.
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer;
//Create an AudioBufferList containing the data from the CMSampleBuffer,
//and a CMBlockBuffer which references the data in that AudioBufferList.
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
NSUInteger size = sizeof(audioBufferList);
char buffer[size];
memcpy(buffer, &audioBufferList, size);
//This is the Audio data.
NSData *bufferData = [NSData dataWithBytes:buffer length:size];
const void *copyBufferData = [bufferData bytes];
copyBufferData = (char *)copyBufferData;
CMSampleBufferRef copyBuffer = NULL;
OSStatus status = -1;
/* Format Description */
AudioStreamBasicDescription audioFormat = *CMAudioFormatDescriptionGetStreamBasicDescription((CMAudioFormatDescriptionRef) CMSampleBufferGetFormatDescription(sampleBuffer));
CMFormatDescriptionRef format = NULL;
status = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &audioFormat, 0, nil, 0, nil, nil, &format);
CMFormatDescriptionRef formatdes = NULL;
status = CMFormatDescriptionCreate(NULL, kCMMediaType_Audio, 'lpcm', NULL, &formatdes);
if (status != noErr)
{
NSLog(#"Error in CMAudioFormatDescriptionCreator");
CFRelease(blockBuffer);
return;
}
/* Create sample Buffer */
CMItemCount framesCount = CMSampleBufferGetNumSamples(sampleBuffer);
CMSampleTimingInfo timing = {.duration= CMTimeMake(1, 44100), .presentationTimeStamp= CMSampleBufferGetPresentationTimeStamp(sampleBuffer), .decodeTimeStamp= CMSampleBufferGetDecodeTimeStamp(sampleBuffer)};
status = CMSampleBufferCreate(kCFAllocatorDefault, nil, NO, nil, nil, format, framesCount, 1, &timing, 0, nil, &copyBuffer);
if (status != noErr) {
NSLog(@"Error in CMSampleBufferCreate");
CFRelease(blockBuffer);
return;
}
/* Copy BufferList to Sample Buffer */
AudioBufferList receivedAudioBufferList;
memcpy(&receivedAudioBufferList, copyBufferData, sizeof(receivedAudioBufferList));
//Creates a CMBlockBuffer containing a copy of the data from the
//AudioBufferList.
status = CMSampleBufferSetDataBufferFromAudioBufferList(copyBuffer, kCFAllocatorDefault , kCFAllocatorDefault, 0, &receivedAudioBufferList);
if (status != noErr) {
NSLog(#"Error in CMSampleBufferSetDataBufferFromAudioBufferList");
CFRelease(blockBuffer);
return;
}
Code-Level Support answer:
This code looks ok (though you’ll want to add some additional error
checking). I've successfully tested it in an app that implements the
AVCaptureAudioDataOutput delegate
captureOutput:didOutputSampleBuffer:fromConnection: method to
capture and record audio. The captured audio I'm getting when using
this deep copy code appears to be the same as what I get when directly
using the provided sample buffer (without the deep copy).
Apple Developer Technical Support
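For completeness, here is roughly how the snippet above can be used from the capture delegate to retain copies in the CFArray from the question. The helper deepCopyAudioSampleBuffer: is hypothetical; it is assumed to wrap the deep-copy code above and return a +1 retained CMSampleBufferRef (or NULL on failure):
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    // Hypothetical helper wrapping the deep-copy code above.
    CMSampleBufferRef copyBuffer = [self deepCopyAudioSampleBuffer:sampleBuffer];
    if (!copyBuffer) {
        return;
    }
    if (!self.sampleBufferArray) {
        self.sampleBufferArray = CFArrayCreateMutable(NULL, 0, &kCFTypeArrayCallBacks);
    }
    // kCFTypeArrayCallBacks retains the buffer on append, so release our reference.
    CFArrayAppendValue(self.sampleBufferArray, copyBuffer);
    CFRelease(copyBuffer);
}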
Couldn't find a decent answer for doing this in Swift. Here's an extension:
extension CMSampleBuffer {
func deepCopy() -> CMSampleBuffer? {
guard let formatDesc = CMSampleBufferGetFormatDescription(self),
let data = self.data else {
return nil
}
let nFrames = CMSampleBufferGetNumSamples(self)
let pts = CMSampleBufferGetPresentationTimeStamp(self)
let dataBuffer = data.withUnsafeBytes { (buffer) -> CMBlockBuffer? in
var blockBuffer: CMBlockBuffer?
let length: Int = data.count
guard CMBlockBufferCreateWithMemoryBlock(
allocator: kCFAllocatorDefault,
memoryBlock: nil,
blockLength: length,
blockAllocator: nil,
customBlockSource: nil,
offsetToData: 0,
dataLength: length,
flags: 0,
blockBufferOut: &blockBuffer) == noErr else {
print("Failed to create block")
return nil
}
guard CMBlockBufferReplaceDataBytes(
with: buffer.baseAddress!,
blockBuffer: blockBuffer!,
offsetIntoDestination: 0,
dataLength: length) == noErr else {
print("Failed to move bytes for block")
return nil
}
return blockBuffer
}
guard let dataBuffer = dataBuffer else {
return nil
}
var newSampleBuffer: CMSampleBuffer?
CMAudioSampleBufferCreateReadyWithPacketDescriptions(
allocator: kCFAllocatorDefault,
dataBuffer: dataBuffer,
formatDescription: formatDesc,
sampleCount: nFrames,
presentationTimeStamp: pts,
packetDescriptions: nil,
sampleBufferOut: &newSampleBuffer
)
return newSampleBuffer
}
}
LLooggaann's solution is simpler and works well; however, in case anyone is interested, I migrated the original solution to Swift 5.6:
extension CMSampleBuffer {
func deepCopy() -> CMSampleBuffer? {
var audioBufferList : AudioBufferList = AudioBufferList()
var blockBuffer : CMBlockBuffer?
let sizeOfAudioBufferList = MemoryLayout<AudioBufferList>.size
//Create an AudioBufferList containing the data from the CMSampleBuffer.
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(self,
bufferListSizeNeededOut: nil,
bufferListOut: &audioBufferList,
bufferListSize: sizeOfAudioBufferList,
blockBufferAllocator: nil,
blockBufferMemoryAllocator: nil,
flags: 0,
blockBufferOut: &blockBuffer)
guard audioBufferList.mNumberBuffers == 1 else { return nil } //TODO: Make this generic for any number of buffers
/* Deep copy the audio buffer */
let audioBufferDataSize = Int(audioBufferList.mBuffers.mDataByteSize)
let audioBuffer = audioBufferList.mBuffers
let audioBufferDataCopyPointer = UnsafeMutableRawPointer.allocate(byteCount: audioBufferDataSize, alignment: 1)
defer {
audioBufferDataCopyPointer.deallocate()
}
memcpy(audioBufferDataCopyPointer, audioBufferList.mBuffers.mData, audioBufferDataSize)
let copiedAudioBuffer = AudioBuffer(mNumberChannels: audioBuffer.mNumberChannels,
mDataByteSize: audioBufferList.mBuffers.mDataByteSize,
mData: audioBufferDataCopyPointer)
/* Create a new audio buffer list with the deep copied audio buffer */
var copiedAudioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: copiedAudioBuffer)
/* Copy audio format description, to be used in the new sample buffer */
guard let sampleBufferFormatDescription = CMSampleBufferGetFormatDescription(self) else { return nil }
/* Create copy of timing for new sample buffer */
var duration = CMSampleBufferGetDuration(self)
duration.value /= Int64(numSamples)
var timing = CMSampleTimingInfo(duration: duration,
presentationTimeStamp: CMSampleBufferGetPresentationTimeStamp(self),
decodeTimeStamp: CMSampleBufferGetDecodeTimeStamp(self))
/* New sample buffer preparation, using the audio format description, and the timing information. */
let sampleCount = CMSampleBufferGetNumSamples(self)
var newSampleBuffer : CMSampleBuffer?
guard CMSampleBufferCreate(allocator: kCFAllocatorDefault,
dataBuffer: nil,
dataReady: false,
makeDataReadyCallback: nil,
refcon: nil,
formatDescription: sampleBufferFormatDescription,
sampleCount: sampleCount,
sampleTimingEntryCount: 1,
sampleTimingArray: &timing,
sampleSizeEntryCount: 0,
sampleSizeArray: nil,
sampleBufferOut: &newSampleBuffer) == noErr else { return nil }
//Create a CMBlockBuffer containing a copy of the data from the AudioBufferList, add to new sample buffer.
let status = CMSampleBufferSetDataBufferFromAudioBufferList(newSampleBuffer!,
blockBufferAllocator: kCFAllocatorDefault,
blockBufferMemoryAllocator: kCFAllocatorDefault,
flags: 0,
bufferList: &copiedAudioBufferList)
guard status == noErr else { return nil }
return newSampleBuffer
}
}
I have been trying to decode H264 using VTDecompressionSessionDecodeFrame, but I am getting errors. The parameter sets have been created previously and look fine, and nothing errors out up to this point, so it may have something to do with my understanding of the timing information in the CMSampleBufferRef. Any input would be much appreciated.
void didDecompress( void *decompressionOutputRefCon, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef imageBuffer, CMTime presentationTimeStamp, CMTime presentationDuration ){
NSLog(#"In decompression callback routine");
}
void decodeH264() {
VTDecodeInfoFlags infoFlags;
[NALPacket appendBytes: NalPacketSize length:4];
[NALPacket appendBytes: &NALCODE length:1];
[NALPacket appendBytes: startPointer length:buflen];
void *samples = (void *)[NALTestPacket bytes];
blockBuffer = NULL;
// add the nal raw data to the CMBlockBuffer
status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault,
samples,
[NALPacket length],
kCFAllocatorDefault,
NULL,
0,
[NALPacket length],
0,
&blockBuffer);
const size_t * samplesizeArrayPointer;
size_t sampleSizeArray= buflen;
samplesizeArrayPointer = &sampleSizeArray;
int32_t timeSpan = 1000000;
CMTime PTime = CMTimeMake(presentationTime, timeSpan);
CMSampleTimingInfo timingInfo;
timingInfo.presentationTimeStamp = PTime;
timingInfo.duration = kCMTimeZero;
timingInfo.decodeTimeStamp = kCMTimeInvalid;
status = CMSampleBufferCreate(kCFAllocatorDefault, blockBuffer, YES, NULL, NULL, formatDescription, 1, 1, &timingInfo, 0, samplesizeArrayPointer, &sampleBuffer);
CFArrayRef attachmentsArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
for (CFIndex i = 0; i < CFArrayGetCount(attachmentsArray); ++i) {
CFMutableDictionaryRef attachments = (CFMutableDictionaryRef)CFArrayGetValueAtIndex(attachmentsArray, i);
CFDictionarySetValue(attachments, kCMSampleAttachmentKey_DoNotDisplay, kCFBooleanFalse);
CFDictionarySetValue(attachments, kCMSampleAttachmentKey_DisplayImmediately, kCFBooleanTrue);
}
// I Frame
status = VTDecompressionSessionDecodeFrame(decoder, sampleBuffer, kVTDecodeFrame_1xRealTimePlayback, (void*)CFBridgingRetain(currentTime), &infoFlags);
if (status != noErr) {
NSLog(#"Decode error");
}
I discovered why this wasn't working: I had forgotten to set the CMSampleBufferRef to NULL each time a new sample was captured.
samples = NULL;
status = CMSampleBufferCreate(kCFAllocatorDefault, blockBuffer, YES, NULL, NULL, formatDescription, 1, 1, &timingInfo, 0, samplesizeArrayPointer, &sampleBuffer);
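For what it's worth, a fuller per-frame cleanup would also release the CF objects created for the previous NALU before clearing the locals. A sketch, assuming sampleBuffer and blockBuffer persist between calls:
// After VTDecompressionSessionDecodeFrame returns, drop this frame's objects
// so the next captured sample starts from a clean state.
if (sampleBuffer) {
    CFRelease(sampleBuffer);
    sampleBuffer = NULL;
}
if (blockBuffer) {
    CFRelease(blockBuffer);
    blockBuffer = NULL;
}
samples = NULL;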
I'm trying to get the channel order from 6-channel aac files. What I have now is this:
status = AudioFileOpenURL((__bridge CFURLRef)inFileURL, kAudioFileReadPermission, 0, &inputFile);
NSAssert(status == noErr, #"Audio file open error. status:%d", (int)status);
AudioChannelLayout *fileChannelLayout;
UInt32 channelLayoutSize = sizeof(AudioChannelLayout);
fileChannelLayout = (AudioChannelLayout *) calloc(1, channelLayoutSize);
status = AudioFileGetProperty(inputFile, kAudioFilePropertyChannelLayout, &channelLayoutSize, &fileChannelLayout);
NSAssert(status == noErr, #"Get channel layout error. status:%d", (int)status);
But I'm not getting anything into the AudioChannelLayout struct. What am I doing wrong? Is there any other way of figuring out what position each channel belongs to (e.g.: L,R,C,SL,SR,LFE)?
Thanks.
Here is one example:
AudioFileID audioFile; // should already be opened
UInt32 dataSize;
OSStatus result = AudioFileGetPropertyInfo(audioFile, kAudioFilePropertyChannelLayout, &dataSize, nullptr);
if(noErr == result) {
    auto channelLayout = (AudioChannelLayout *)malloc(dataSize);
    result = AudioFileGetProperty(audioFile, kAudioFilePropertyChannelLayout, &dataSize, channelLayout);
    if(noErr != result) {
        // Handle error
    }
    free(channelLayout);
}
else {
    // Handle error
}
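As for mapping each channel to a position (L, R, C, LFE, Ls, Rs): once AudioFileGetProperty succeeds, the layout either carries explicit AudioChannelDescriptions or just a layout tag that you can expand via AudioFormat. A sketch (bitmap-style layouts and error handling omitted):
// Requires <AudioToolbox/AudioToolbox.h>
static void PrintChannelLabels(const AudioChannelLayout *layout) {
    if (layout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
        // Positions are listed explicitly, one AudioChannelDescription per channel.
        for (UInt32 i = 0; i < layout->mNumberChannelDescriptions; ++i) {
            printf("channel %u: label %u\n", (unsigned)i,
                   (unsigned)layout->mChannelDescriptions[i].mChannelLabel);
        }
    } else if (layout->mChannelLayoutTag != kAudioChannelLayoutTag_UseChannelBitmap) {
        // The layout is a tag (e.g. kAudioChannelLayoutTag_MPEG_5_1_A); ask
        // AudioFormat to expand it into per-channel descriptions.
        AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
        UInt32 size = 0;
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag,
                                   sizeof(tag), &tag, &size);
        AudioChannelLayout *expanded = (AudioChannelLayout *)malloc(size);
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag,
                               sizeof(tag), &tag, &size, expanded);
        for (UInt32 i = 0; i < expanded->mNumberChannelDescriptions; ++i) {
            printf("channel %u: label %u\n", (unsigned)i,
                   (unsigned)expanded->mChannelDescriptions[i].mChannelLabel);
        }
        free(expanded);
    }
}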
I'm trying to decode a raw stream of H.264 video data, but I can't find a way to create a proper CMSampleBuffer to feed my decompression session:
- (void)decodeFrameWithNSData:(NSData*)data presentationTime:
(CMTime)presentationTime
{
#autoreleasepool {
CMSampleBufferRef sampleBuffer = NULL;
CMBlockBufferRef blockBuffer = NULL;
VTDecodeInfoFlags infoFlags;
int sourceFrame;
if( dSessionRef == NULL )
[self createDecompressionSession];
CMSampleTimingInfo timingInfo ;
timingInfo.presentationTimeStamp = presentationTime;
timingInfo.duration = CMTimeMake(1,100000000);
timingInfo.decodeTimeStamp = kCMTimeInvalid;
//Creates block buffer from NSData
OSStatus status = CMBlockBufferCreateWithMemoryBlock(CFAllocatorGetDefault(), (void*)data.bytes,data.length*sizeof(char), CFAllocatorGetDefault(), NULL, 0, data.length*sizeof(char), 0, &blockBuffer);
//Creates CMSampleBuffer to feed decompression session
status = CMSampleBufferCreateReady(CFAllocatorGetDefault(), blockBuffer,self.encoderVideoFormat,1,1,&timingInfo, 0, 0, &sampleBuffer);
status = VTDecompressionSessionDecodeFrame(dSessionRef,sampleBuffer, kVTDecodeFrame_1xRealTimePlayback, &sourceFrame,&infoFlags);
if(status != noErr) {
NSLog(#"Decode with data error %d",status);
}
}
}
At the end of the call I'm getting a -12911 error from VTDecompressionSessionDecodeFrame, which translates to kVTVideoDecoderMalfunctionErr. After reading this [post], it seems I should create a VideoFormatDescription using CMVideoFormatDescriptionCreateFromH264ParameterSets. But how can I create a new VideoFormatDescription if I don't have the currentSps or currentPps data? How can I get that information from my raw H.264 stream?
CMFormatDescriptionRef decoderFormatDescription;
const uint8_t* const parameterSetPointers[2] =
{ (const uint8_t*)[currentSps bytes], (const uint8_t*)[currentPps bytes] };
const size_t parameterSetSizes[2] =
{ [currentSps length], [currentPps length] };
status = CMVideoFormatDescriptionCreateFromH264ParameterSets(NULL,
2,
parameterSetPointers,
parameterSetSizes,
4,
&decoderFormatDescription);
Thanks in advance,
Marcos
[post] : Decoding H264 VideoToolkit API fails with Error -8971 in VTDecompressionSessionCreate
You MUST call CMVideoFormatDescriptionCreateFromH264ParameterSets first. The SPS/PPS may be stored/transmitted separately from the video stream, or it may come inline.
Note that for VTDecompressionSessionDecodeFrame your NALUs must be preceded with a size, and not a start code.
You can read more here:
Possible Locations for Sequence/Picture Parameter Set(s) for H.264 Stream
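To illustrate both points, here is a rough sketch of how inline SPS/PPS can be picked out of an Annex B stream and how a VCL NALU gets its 4-byte length header before being handed to the decoder. The start-code scanning is left out, the helper NALUnitType and the handleNALU:length: method are made up for the example, and it reuses currentSps/currentPps and decodeFrameWithNSData:presentationTime: from the question:
// The NAL unit type lives in the low 5 bits of the first byte after the start code.
static int NALUnitType(const uint8_t *nalu) {
    return nalu[0] & 0x1F;
}

// naluStart/naluLength describe one NALU with its start code already stripped.
- (void)handleNALU:(const uint8_t *)naluStart length:(size_t)naluLength
{
    int type = NALUnitType(naluStart);
    if (type == 7) {
        // SPS: keep it; once both SPS and PPS are known, build the format
        // description with CMVideoFormatDescriptionCreateFromH264ParameterSets.
        currentSps = [NSData dataWithBytes:naluStart length:naluLength];
    } else if (type == 8) {
        // PPS
        currentPps = [NSData dataWithBytes:naluStart length:naluLength];
    } else {
        // VCL NALU: replace the start code with a 4-byte big-endian length,
        // matching the NALUnitHeaderLength of 4 used for the format description.
        uint32_t lengthHeader = CFSwapInt32HostToBig((uint32_t)naluLength);
        NSMutableData *avccData = [NSMutableData dataWithCapacity:naluLength + 4];
        [avccData appendBytes:&lengthHeader length:4];
        [avccData appendBytes:naluStart length:naluLength];
        [self decodeFrameWithNSData:avccData presentationTime:kCMTimeZero]; // real PTS goes here
    }
}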
I am developing an app in which I need to pass captured audio through to the output audio jack while at the same time recording and saving video.
I have looked into Apple's aurioTouch sample code and implemented the audio passthrough.
I have also implemented video recording through AVCaptureSession.
Both features work perfectly on their own.
But when I merge them, the audio passthrough stops working because of the AVCaptureSession's audio session.
I have also tried to pass through the audio data that I get from the AVCaptureSession delegate methods. Below is my code:
OSStatus err = noErr;
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
CMItemCount numberOfFrames = CMSampleBufferGetNumSamples(sampleBuffer); // corresponds to the number of CoreAudio audio frames
currentSampleTime += (double)numberOfFrames;
AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mSampleTime = currentSampleTime;
timeStamp.mFlags |= kAudioTimeStampSampleTimeValid;
AudioUnitRenderActionFlags flags = 0;
aurioTouchAppDelegate *THIS = (aurioTouchAppDelegate *)[[UIApplication sharedApplication]delegate];
err = AudioUnitRender(self.rioUnit, &flags, &timeStamp, 1, numberOfFrames, &audioBufferList);
if (err) { printf("PerformThru: error %d\n", (int)err); }
But it is giving an error. Please advise what else can be done; I have looked through many docs and code samples but couldn't find a solution.
Here's some better error handling code. What error does it return? You can look up the error description by searching for it in the documentation.
static void CheckError (OSStatus error, const char *operation) {
if (error == noErr) return;
char str[20] = {};
// see if it appears to be a 4 char code
*(UInt32*)(str + 1) = CFSwapInt32HostToBig(error);
if (isprint(str[1]) && isprint(str[2]) && isprint(str[3]) && isprint(str[4])) {
str[0] = str[5] = '\'';
str[6] = '\0';
} else {
sprintf(str, "%d", (int)error);
}
fprintf(stderr, "Error: %s(%s)\n", operation, str);
exit(1);
}
- (void)yourFunction
{
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
CMItemCount numberOfFrames = CMSampleBufferGetNumSamples(sampleBuffer); // corresponds to the number of CoreAudio audio frames
currentSampleTime += (double)numberOfFrames;
AudioTimeStamp timeStamp;
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mSampleTime = currentSampleTime;
timeStamp.mFlags |= kAudioTimeStampSampleTimeValid;
AudioUnitRenderActionFlags flags = 0;
aurioTouchAppDelegate *THIS = (aurioTouchAppDelegate *)[[UIApplication sharedApplication]delegate];
CheckError(AudioUnitRender(self.rioUnit, &flags, &timeStamp, 1, numberOfFrames, &audioBufferList),
"Error with AudioUnitRender");
}