I have an image and I need to send that image to the server along with other parameters.
I convert the image to a base64 string:
[UIImagePNGRepresentation(image)
base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength];
In the header parameters I send this:
"Content-Type" = "application/x-www-form-urlencoded";
NSURLSessionDataTask * task = [session dataTaskWithRequest:self
completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable
response, NSError * _Nullable error) { .... }
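For context, the request setup being described looks roughly like this (the URL is a placeholder and session is an NSURLSession created elsewhere):
NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:@"https://example.com/api"]];
request.HTTPMethod = @"POST";
[request setValue:@"application/x-www-form-urlencoded" forHTTPHeaderField:@"Content-Type"];
NSURLSessionDataTask *task = [session dataTaskWithRequest:request
completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable error) {
// inspect the HTTP status code and response body here
}];
[task resume];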
This is my params format:
data={"img":"","imgName":"imgName"}
When I send "img":"iVBORw0KGgoAAAANSUhEUgAAAZUA.." I get a 401 error.
Please help me if anyone can find the problem, or knows how to send a base64 string with other parameters. Thank you!
Try this.
UIImage *image = yourImage;
NSData *imageData = UIImagePNGRepresentation(image); // assumed: imageData was not defined in the original snippet
NSString *imageBase64Str = [NSString stringWithFormat:@"%@;base64,%@",
[self mimeTypeByGuessingFromData:imageData], [self encodeToBase64String:image]];
data={"doc":"true","img":"imageBase64Str","imgName":"imgName"}
- (NSString *)mimeTypeByGuessingFromData:(NSData *)data {
char bytes[12] = {0};
[data getBytes:&bytes length:12];
const char bmp[2] = {'B', 'M'};
const char gif[3] = {'G', 'I', 'F'};
const char swf[3] = {'F', 'W', 'S'};
const char swc[3] = {'C', 'W', 'S'};
const char jpg[3] = {0xff, 0xd8, 0xff};
const char psd[4] = {'8', 'B', 'P', 'S'};
const char iff[4] = {'F', 'O', 'R', 'M'};
const char webp[4] = {'R', 'I', 'F', 'F'};
const char ico[4] = {0x00, 0x00, 0x01, 0x00};
const char tif_ii[4] = {'I','I', 0x2A, 0x00};
const char tif_mm[4] = {'M','M', 0x00, 0x2A};
const char png[8] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a};
const char jp2[12] = {0x00, 0x00, 0x00, 0x0c, 0x6a, 0x50, 0x20, 0x20, 0x0d, 0x0a, 0x87, 0x0a};
if (!memcmp(bytes, bmp, 2)) {
return @"image/x-ms-bmp";
} else if (!memcmp(bytes, gif, 3)) {
return @"image/gif";
} else if (!memcmp(bytes, jpg, 3)) {
return @"image/jpeg";
} else if (!memcmp(bytes, psd, 4)) {
return @"image/psd";
} else if (!memcmp(bytes, iff, 4)) {
return @"image/iff";
} else if (!memcmp(bytes, webp, 4)) {
return @"image/webp";
} else if (!memcmp(bytes, ico, 4)) {
return @"image/vnd.microsoft.icon";
} else if (!memcmp(bytes, tif_ii, 4) || !memcmp(bytes, tif_mm, 4)) {
return @"image/tiff";
} else if (!memcmp(bytes, png, 8)) {
return @"image/png";
} else if (!memcmp(bytes, jp2, 12)) {
return @"image/jp2";
}
return @"application/octet-stream"; // default type
}
- (NSString *)encodeToBase64String:(UIImage *)image {
return [UIImagePNGRepresentation(image) base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength];
}
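A sketch of how these helpers could be wired into the request body (the endpoint is a placeholder, and the percent-encoding step is an assumption about why the raw base64 may be rejected: '+' and '/' are not safe inside an x-www-form-urlencoded body):
NSDictionary *payload = @{ @"doc": @"true", @"img": imageBase64Str, @"imgName": @"imgName" };
NSData *jsonData = [NSJSONSerialization dataWithJSONObject:payload options:0 error:nil];
NSString *jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
// Percent-encode everything that is not alphanumeric so the base64 value survives form encoding.
NSString *encodedJSON = [jsonString stringByAddingPercentEncodingWithAllowedCharacters:[NSCharacterSet alphanumericCharacterSet]];
NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:@"https://example.com/upload"]]; // placeholder URL
request.HTTPMethod = @"POST";
[request setValue:@"application/x-www-form-urlencoded" forHTTPHeaderField:@"Content-Type"];
request.HTTPBody = [[NSString stringWithFormat:@"data=%@", encodedJSON] dataUsingEncoding:NSUTF8StringEncoding];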
------------------------------------------------------------
------------------------------------------------------------
I have C# code for AES decryption, and I want to produce the same encryption result in Objective-C, but I have failed. Help me.
I can change the Objective-C code; what should I do?
C# code for decryption:
private static readonly string AES_KEY = "asdfasdfasdfasdf";
private static readonly int BUFFER_SIZE = 1024 * 4;
private static readonly int KEY_SIZE = 128;
private static readonly int BLOCK_SIZE = 128;
static public string Composite(string value)
{
using (AesManaged aes = new AesManaged())
using (MemoryStream ims = new MemoryStream(Convert.FromBase64String(value), false))
{
aes.KeySize = KEY_SIZE;
aes.BlockSize = BLOCK_SIZE;
aes.Mode = CipherMode.CBC;
aes.Key = Encoding.UTF8.GetBytes(AES_KEY);
byte[] iv = new byte[aes.IV.Length];
ims.Read(iv, 0, iv.Length);
aes.IV = iv;
using (ICryptoTransform ce = aes.CreateDecryptor(aes.Key, aes.IV))
using (CryptoStream cs = new CryptoStream(ims, ce, CryptoStreamMode.Read))
using (DeflateStream ds = new DeflateStream(cs, CompressionMode.Decompress))
using (MemoryStream oms = new MemoryStream())
{
byte[] buf = new byte[BUFFER_SIZE];
for (int size = ds.Read(buf, 0, buf.Length); size > 0; size = ds.Read(buf, 0, buf.Length))
{
oms.Write(buf, 0, size);
}
return Encoding.UTF8.GetString(oms.ToArray());
}
}
}
Objective-C code for encryption:
- (NSString *)AES128EncryptWithKey:(NSString *)key
{
NSData *plainData = [self dataUsingEncoding:NSUTF8StringEncoding];
NSData *encryptedData = [plainData AES128EncryptWithKey:key];
NSString *encryptedString = [encryptedData stringUsingEncodingBase64];
return encryptedString;
}
#import "NSData+AESCrypt.h"
#import <CommonCrypto/CommonCryptor.h>
static char encodingTable[64] =
{
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P',
'Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f',
'g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v',
'w','x','y','z','0','1','2','3','4','5','6','7','8','9','+','/'
};
@implementation NSData (AESCrypt)
- (NSData *)AES128EncryptWithKey:(NSString *)key
{
// 'key' should be 16 bytes for AES128
char keyPtr[kCCKeySizeAES128 + 1]; // room for terminator (unused)
bzero( keyPtr, sizeof( keyPtr ) ); // fill with zeroes (for padding)
// fetch key data
[key getCString:keyPtr maxLength:sizeof( keyPtr ) encoding:NSUTF8StringEncoding];
NSUInteger dataLength = [self length];
//See the doc: For block ciphers, the output size will always be less than or
//equal to the input size plus the size of one block.
//That's why we need to add the size of one block here
size_t bufferSize = dataLength + kCCBlockSizeAES128;
void *buffer = malloc( bufferSize );
size_t numBytesEncrypted = 0;
CCCryptorStatus cryptStatus = CCCrypt( kCCEncrypt, kCCAlgorithmAES128, kCCModeCBC | kCCOptionPKCS7Padding,
keyPtr, kCCKeySizeAES128,
NULL /* initialization vector (optional) */,
[self bytes], dataLength, /* input */
buffer, bufferSize, /* output */
&numBytesEncrypted );
if( cryptStatus == kCCSuccess )
{
//the returned NSData takes ownership of the buffer and will free it on deallocation
return [NSData dataWithBytesNoCopy:buffer length:numBytesEncrypted];
}
free( buffer ); //free the buffer
return nil;
}
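For comparison, the C# decryptor above expects the Base64 blob to start with a 16-byte IV, followed by the CBC ciphertext of a deflate-compressed UTF-8 string. A minimal sketch of the IV-handling part only (a hypothetical method name; the deflate compression the C# side also expects is not covered here) could look like this:
// Hypothetical variant: encrypt with an explicit random IV and prepend that IV to the
// ciphertext, so a decryptor that reads the first 16 bytes as the IV (like the C# code
// above) can recover it. Requires Security.framework for SecRandomCopyBytes.
- (NSData *)AES128EncryptWithIVPrefixUsingKey:(NSString *)key
{
char keyPtr[kCCKeySizeAES128 + 1];
bzero( keyPtr, sizeof( keyPtr ) );
[key getCString:keyPtr maxLength:sizeof( keyPtr ) encoding:NSUTF8StringEncoding];
uint8_t iv[kCCBlockSizeAES128];
(void)SecRandomCopyBytes( kSecRandomDefault, sizeof( iv ), iv );
size_t bufferSize = [self length] + kCCBlockSizeAES128;
NSMutableData *cipher = [NSMutableData dataWithLength:bufferSize];
size_t numBytesEncrypted = 0;
CCCryptorStatus cryptStatus = CCCrypt( kCCEncrypt, kCCAlgorithmAES128, kCCOptionPKCS7Padding, // CBC is the default mode
keyPtr, kCCKeySizeAES128,
iv, /* explicit initialization vector */
[self bytes], [self length],
[cipher mutableBytes], bufferSize,
&numBytesEncrypted );
if( cryptStatus != kCCSuccess ) return nil;
[cipher setLength:numBytesEncrypted];
NSMutableData *result = [NSMutableData dataWithBytes:iv length:sizeof( iv )];
[result appendData:cipher];
return result;
}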
- (NSString *)base64Encoding
{
const unsigned char *bytes = [self bytes];
NSMutableString *result = [NSMutableString stringWithCapacity:self.length];
unsigned long ixtext = 0;
unsigned long lentext = self.length;
long ctremaining = 0;
unsigned char inbuf[3], outbuf[4];
unsigned short i = 0;
unsigned short charsonline = 0, ctcopy = 0;
unsigned long ix = 0;
while( YES )
{
ctremaining = lentext - ixtext;
if( ctremaining <= 0 ) break;
for( i = 0; i < 3; i++ )
{
ix = ixtext + i;
if( ix < lentext ) inbuf[i] = bytes[ix];
else inbuf [i] = 0;
}
outbuf [0] = (inbuf [0] & 0xFC) >> 2;
outbuf [1] = ((inbuf [0] & 0x03) << 4) | ((inbuf [1] & 0xF0) >> 4);
outbuf [2] = ((inbuf [1] & 0x0F) << 2) | ((inbuf [2] & 0xC0) >> 6);
outbuf [3] = inbuf [2] & 0x3F;
ctcopy = 4;
switch( ctremaining )
{
case 1:
ctcopy = 2;
break;
case 2:
ctcopy = 3;
break;
}
for( i = 0; i < ctcopy; i++ )
[result appendFormat:@"%c", encodingTable[outbuf[i]]];
for( i = ctcopy; i < 4; i++ )
[result appendString:@"="];
ixtext += 3;
charsonline += 4;
}
return [NSString stringWithString:result];
}
@end
------------------------------------------------------------
------------------------------------------------------------
I am using the H264 algorithm; below is the link which I am referring to in order to decompress video using the VideoToolbox framework:
https://stackoverflow.com/a/29525001/1679255
Not all, but a few videos get stuck at a specific position.
Below are the error logs which I am receiving while decompressing the frame.
NALU Raw: 00, 00, 00, 01, 41, 9a, 00, 18
~~~~~~~ Received NALU Type "1: Coded slice of a non-IDR picture (VCL)" ~~~~~~~~
Decompressed error: Error Domain=NSOSStatusErrorDomain Code=-12909 "(null)"
Xcode version is 11.2.1
Development target is 10.0
If you need more info feel free to ask me.
My VideoDecoder class code is below:
@interface VideoDecoder () {
AVSampleBufferDisplayLayer *mVideoLayer;
long videoTimestamp;
}
@property (nonatomic, assign) CMVideoFormatDescriptionRef formatDesc;
@property (nonatomic, assign) VTDecompressionSessionRef decompressionSession;
@property (nonatomic, assign) int spsSize;
@property (nonatomic, assign) int ppsSize;
@property (nonatomic, retain) NSMutableData* streamVideoData;
@end
@implementation VideoDecoder
NSString * const naluTypesStrings[] = {
#"0: Unspecified (non-VCL)",
#"1: Coded slice of a non-IDR picture (VCL)", // P frame
#"2: Coded slice data partition A (VCL)",
#"3: Coded slice data partition B (VCL)",
#"4: Coded slice data partition C (VCL)",
#"5: Coded slice of an IDR picture (VCL)", // I frame
#"6: Supplemental enhancement information (SEI) (non-VCL)",
#"7: Sequence parameter set (non-VCL)", // SPS parameter
#"8: Picture parameter set (non-VCL)", // PPS parameter
#"9: Access unit delimiter (non-VCL)",
#"10: End of sequence (non-VCL)",
#"11: End of stream (non-VCL)",
#"12: Filler data (non-VCL)",
#"13: Sequence parameter set extension (non-VCL)",
#"14: Prefix NAL unit (non-VCL)",
#"15: Subset sequence parameter set (non-VCL)",
#"16: Reserved (non-VCL)",
#"17: Reserved (non-VCL)",
#"18: Reserved (non-VCL)",
#"19: Coded slice of an auxiliary coded picture without partitioning (non-VCL)",
#"20: Coded slice extension (non-VCL)",
#"21: Coded slice extension for depth view components (non-VCL)",
#"22: Reserved (non-VCL)",
#"23: Reserved (non-VCL)",
#"24: STAP-A Single-time aggregation packet (non-VCL)",
#"25: STAP-B Single-time aggregation packet (non-VCL)",
#"26: MTAP16 Multi-time aggregation packet (non-VCL)",
#"27: MTAP24 Multi-time aggregation packet (non-VCL)",
#"28: FU-A Fragmentation unit (non-VCL)",
#"29: FU-B Fragmentation unit (non-VCL)",
#"30: Unspecified (non-VCL)",
#"31: Unspecified (non-VCL)",
};
- (instancetype)init {
self = [super init];
if (self) {
// _videoLayer = [[AVSampleBufferDisplayLayer alloc] init];
//// _videoLayer.frame = self.view.frame;
//// _videoLayer.bounds = self.view.bounds;
// _videoLayer.frame = CGRectMake(0, 0, 1280, 720);
// _videoLayer.bounds = CGRectMake(0, 0, 1280, 720);
// _videoLayer.videoGravity = AVLayerVideoGravityResizeAspect;
//
// // set Timebase, you may need this if you need to display frames at specific times
// // I didn't need it so I haven't verified that the timebase is working
// CMTimebaseRef controlTimebase;
// CMTimebaseCreateWithMasterClock(CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase);
//
// //videoLayer.controlTimebase = controlTimebase;
// CMTimebaseSetTime(_videoLayer.controlTimebase, kCMTimeZero);
// CMTimebaseSetRate(_videoLayer.controlTimebase, 1.0);
self.streamVideoData = nil;
self.startDumpData = NO;
videoTimestamp = 0;
}
return self;
}
- (void)initDebugStreamingVideo
{
self.streamVideoData = [[NSMutableData alloc] init];
}
- (void)releaseVideoDecorder
{
if (_decompressionSession != NULL)
{
VTDecompressionSessionInvalidate(_decompressionSession);
_decompressionSession = NULL;
}
videoTimestamp = 0;
}
- (long)getVideoTimeStamp
{
return videoTimestamp;
}
- (void)setVideoTimestamp:(long)timestamp
{
videoTimestamp = timestamp;
#ifdef DEBUG
NSLog(#"(sync)video: %2.1f", (float)videoTimestamp/1000000.);
#endif
}
- (NSString*)getLiveStreamingMP4Path
{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
NSUserDomainMask, YES);
NSString *rootOfCachepath = [paths objectAtIndex:0];
NSString* nalFilePath = [self getUniqueFilePath:rootOfCachepath FileNamePrefix:#"liveRecord" FileNameSubfix:#"MP4"];
return nalFilePath;
}
- (NSString*)closeAndSaveDebugStreamingVideo
{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
NSUserDomainMask, YES);
NSString *rootOfCachepath = [paths objectAtIndex:0];
NSString* nalFilePath = [self getUniqueFilePath:rootOfCachepath FileNamePrefix:#"liveRecord" FileNameSubfix:#"264"];
if (self.streamVideoData.length > 0)
[self.streamVideoData writeToFile:nalFilePath atomically:YES];
self.streamVideoData = [[NSMutableData alloc] init];
return nalFilePath;
}
- (NSString*)getUniqueFilePath:(NSString*)parentFolder FileNamePrefix:(NSString*)fileName FileNameSubfix:(NSString*)subFix
{
NSString* fullFilePath = nil;
NSString* memoFileName = #"";
NSDate *currentDate = [NSDate date];
NSDateFormatter *currentDateFormat = [[NSDateFormatter alloc] init];
[currentDateFormat setDateFormat:#"yyyyMMddHHmmss"];
NSString *currentDateString = [currentDateFormat stringFromDate:currentDate];
memoFileName = [NSString stringWithFormat:#"%#_%#.%#", fileName, currentDateString, subFix];
fullFilePath = [parentFolder stringByAppendingPathComponent:memoFileName];
return fullFilePath;
}
//- (void)setVideoLayer:(AVSampleBufferDisplayLayer *)layer {
// mVideoLayer = layer;
// CMTimebaseRef controlTimebase;
// CMTimebaseCreateWithMasterClock(CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase);
//
// mVideoLayer.controlTimebase = controlTimebase;
// CMTimebaseSetTime(mVideoLayer.controlTimebase, kCMTimeZero);
// CMTimebaseSetRate(mVideoLayer.controlTimebase, 1.0);
//}
- (BOOL)checkIfThisIsIDRFrame:(uint8_t *)frame withSize:(uint32_t)frameSize {
BOOL isIDRFrame = NO;
int startCodeIndex = 0;
int pi = 0;
if (frame[pi] == 0x00 && frame[pi+1] == 0x00 && frame[pi+2] == 0x00 && frame[pi+3] == 0x01 && frame[pi+4] == 0x09 && frame[pi+5] == 0x50)
startCodeIndex = 6;
// NSLog(#"NALU Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x", frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],frame[6],frame[7]);
int nalu_type = (frame[startCodeIndex + 4] & 0x1F);
// NSLog(#"~~~~~~~ Received NALU Type \"%#\" ~~~~~~~~", naluTypesStrings[nalu_type]);
// if we havent already set up our format description with our SPS PPS parameters, we
// can't process any frames except type 7 that has our parameters
if (nalu_type != 7 && _formatDesc == NULL) {
NSLog(#"Video error: Frame is not an I Frame and format description is null");
return isIDRFrame;
}
// NALU type 7 is the SPS parameter NALU
if (nalu_type == 7) {
isIDRFrame = YES;
}
return isIDRFrame;
}
- (void)receivedRawVideoFrame:(uint8_t *)frame withSize:(uint32_t)frameSize {
OSStatus status = 0;
uint8_t *data = NULL;
uint8_t *pps = NULL;
uint8_t *sps = NULL;
#if defined(DEBUG) || defined(_RECORD_USE_LIVE_PACKAGE)
if (self.startDumpData && self.streamVideoData && frameSize <= 512*1024)
[self.streamVideoData appendBytes:(const void*)frame length:frameSize];
//if (self.streamVideoData && frameSize <= 512*1024)
// [self.streamVideoData appendBytes:(const void*)frame length:frameSize];
#endif
// I know how my H.264 data source's NALUs looks like so I know start code index is always 0.
// if you don't know where it starts, you can use a for loop similar to how I find the 2nd and 3rd start codes
int startCodeIndex = 0;
int secondStartCodeIndex = 0;
int thirdStartCodeIndex = 0;
//#ifdef DEBUG
int pi = 0;
if (frame[pi] == 0x00 && frame[pi+1] == 0x00 && frame[pi+2] == 0x00 && frame[pi+3] == 0x01 && frame[pi+4] == 0x09 && frame[pi+5] == 0x50)
startCodeIndex = 6;
//#endif
long blockLength = 0;
BOOL withSPSPPS = NO;
CMSampleBufferRef sampleBuffer = NULL;
CMBlockBufferRef blockBuffer = NULL;
NSLog(#"NALU Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x", frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],frame[6],frame[7]);
int nalu_type = (frame[startCodeIndex + 4] & 0x1F);
NSLog(#"~~~~~~~ Received NALU Type \"%#\" ~~~~~~~~", naluTypesStrings[nalu_type]);
// if we havent already set up our format description with our SPS PPS parameters, we
// can't process any frames except type 7 that has our parameters
if (nalu_type != 7 && _formatDesc == NULL) {
NSLog(#"Video error: Frame is not an I Frame and format description is null");
return;
}
// NALU type 7 is the SPS parameter NALU
if (nalu_type == 7) {
// find where the second PPS start code begins, (the 0x00 00 00 01 code)
// from which we also get the length of the first SPS code
for (int i = startCodeIndex + 4; i < startCodeIndex + 40; i++) {
if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01) {
secondStartCodeIndex = i;
//_spsSize = secondStartCodeIndex; // includes the header in the size
//#ifdef DEBUG
_spsSize = secondStartCodeIndex - startCodeIndex; // includes the header in the size
//#endif
break;
}
}
// find what the second NALU type is
nalu_type = (frame[secondStartCodeIndex + 4] & 0x1F);
// NSLog(#"~~~n7~~ Received NALU Type \"%#\" ~~~~~~~~", naluTypesStrings[nalu_type]);
}
// type 8 is the PPS parameter NALU
if(nalu_type == 8) {
// find where the NALU after this one starts so we know how long the PPS parameter is
//#ifdef DEBUG
for (int i = _spsSize + 4 + startCodeIndex; i < _spsSize + 30; i++) {
//#endif
//for (int i = _spsSize + 4; i < _spsSize + 30; i++) {
if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01) {
thirdStartCodeIndex = i;
//_ppsSize = thirdStartCodeIndex - _spsSize;
//#ifdef DEBUG
_ppsSize = thirdStartCodeIndex - _spsSize - startCodeIndex;
//#endif
break;
}
}
// allocate enough data to fit the SPS and PPS parameters into our data objects.
// VTD doesn't want you to include the start code header (4 bytes long) so we add the - 4 here
sps = malloc(_spsSize - 4);
pps = malloc(_ppsSize - 4);
// copy in the actual sps and pps values, again ignoring the 4 byte header
//#ifdef DEBUG
memcpy (sps, &frame[4+startCodeIndex], _spsSize-4);
memcpy (pps, &frame[_spsSize+4+startCodeIndex], _ppsSize-4);
NSLog(#"SPS Raw: %02X, %02x, %02x, %02x, %02X, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x, %02x", sps[0],sps[1],sps[2],sps[3],sps[4],sps[5],sps[6],sps[7],sps[8],sps[9],sps[10],sps[11],sps[12],sps[13],sps[14],sps[15]);
NSLog(#"PPS Raw: %02X, %02x, %02x, %02x", pps[0],pps[1],pps[2],pps[3]);
//#endif
// memcpy (sps, &frame[4], _spsSize-4);
// memcpy (pps, &frame[_spsSize+4], _ppsSize-4);
// now we set our H264 parameters
uint8_t* parameterSetPointers[2] = {sps, pps};
size_t parameterSetSizes[2] = {_spsSize-4, _ppsSize-4};
status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2,
(const uint8_t *const*)parameterSetPointers,
parameterSetSizes, 4,
&_formatDesc);
// NSLog(#"\t\t Creation of CMVideoFormatDescription: %#", (status == noErr) ? #"successful!" : #"failed...");
if(status != noErr)
NSLog(#"\t\t Format Description ERROR type: %d", (int)status);
// See if decomp session can convert from previous format description
// to the new one, if not we need to remake the decomp session.
// This snippet was not necessary for my applications but it could be for yours
/*BOOL needNewDecompSession = (VTDecompressionSessionCanAcceptFormatDescription(_decompressionSession, _formatDesc) == NO);
if(needNewDecompSession)
{
[self createDecompSession];
}*/
// now lets handle the IDR frame that (should) come after the parameter sets
// I say "should" because that's how I expect my H264 stream to work, YMMV
nalu_type = (frame[thirdStartCodeIndex + 4] & 0x1F);
// NSLog(#"~~~n8~~ Received NALU Type \"%#\" ~~~~~~~~", naluTypesStrings[nalu_type]);
withSPSPPS = YES;
}
// create our VTDecompressionSession. This isnt neccessary if you choose to use AVSampleBufferDisplayLayer
//
if (videoTimestamp == 0 && _decompressionSession != NULL)
{
if (_decompressionSession != NULL)
{
VTDecompressionSessionInvalidate(_decompressionSession);
_decompressionSession = NULL;
}
}
if ((status == noErr) && (_decompressionSession == NULL)) {
[self createDecompSession];
}
// type 5 is an IDR frame NALU. The SPS and PPS NALUs should always be followed by an IDR (or IFrame) NALU, as far as I know
if(nalu_type == 5) {
// find the offset, or where the SPS and PPS NALUs end and the IDR frame NALU begins
//#ifdef DEBUG
int offset = _spsSize + _ppsSize + startCodeIndex;
NSLog(#"Start IDR at %d", offset);
//#endif
// int offset = _spsSize + _ppsSize;
blockLength = frameSize - offset;
// NSLog(#"Block Length : %ld", blockLength);
data = malloc(blockLength);
data = memcpy(data, &frame[offset], blockLength);
// replace the start code header on this NALU with its size.
// AVCC format requires that you do this.
// htonl converts the unsigned int from host to network byte order
uint32_t dataLength32 = htonl (blockLength - 4);
memcpy (data, &dataLength32, sizeof (uint32_t));
// create a block buffer from the IDR NALU
status = CMBlockBufferCreateWithMemoryBlock(NULL, data, // memoryBlock to hold buffered data
blockLength, // block length of the mem block in bytes.
kCFAllocatorNull, NULL,
0, // offsetToData
blockLength, // dataLength of relevant bytes, starting at offsetToData
0, &blockBuffer);
// NSLog(#"\t\t BlockBufferCreation: \t %#", (status == kCMBlockBufferNoErr) ? #"successful!" : #"failed...");
}
// NALU type 1 is non-IDR (or PFrame) picture
if (nalu_type == 1) {
// non-IDR frames do not have an offset due to SPS and PSS, so the approach
// is similar to the IDR frames just without the offset
//#ifdef DEBUG
if (withSPSPPS)
{
blockLength = frameSize-(_spsSize + _ppsSize + startCodeIndex);
data = malloc(blockLength);
data = memcpy(data, &frame[0+startCodeIndex+ _spsSize + _ppsSize], blockLength);
}
else
{
blockLength = frameSize-startCodeIndex;
data = malloc(blockLength);
data = memcpy(data, &frame[0+startCodeIndex], blockLength);
}
//#endif
// blockLength = frameSize;
// data = malloc(blockLength);
// data = memcpy(data, &frame[0], blockLength);
// again, replace the start header with the size of the NALU
uint32_t dataLength32 = htonl (blockLength - 4);
memcpy (data, &dataLength32, sizeof (uint32_t));
status = CMBlockBufferCreateWithMemoryBlock(NULL, data, // memoryBlock to hold data. If NULL, block will be alloc when needed
blockLength, // overall length of the mem block in bytes
kCFAllocatorNull, NULL,
0, // offsetToData
blockLength, // dataLength of relevant data bytes, starting at offsetToData
0, &blockBuffer);
// NSLog(#"\t\t BlockBufferCreation: \t %#", (status == kCMBlockBufferNoErr) ? #"successful!" : #"failed...");
}
// now create our sample buffer from the block buffer,
if(status == noErr) {
// here I'm not bothering with any timing specifics since in my case we displayed all frames immediately
const size_t sampleSize = blockLength;
status = CMSampleBufferCreate(kCFAllocatorDefault,
blockBuffer, true, NULL, NULL,
_formatDesc, 1, 0, NULL, 1,
&sampleSize, &sampleBuffer);
// NSLog(#"\t\t SampleBufferCreate: \t %#", (status == noErr) ? #"successful!" : #"failed...");
}
if(status == noErr) {
// set some values of the sample buffer's attachments
CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, YES);
CFMutableDictionaryRef dict = (CFMutableDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
CFDictionarySetValue(dict, kCMSampleAttachmentKey_DisplayImmediately, kCFBooleanTrue);
// either send the samplebuffer to a VTDecompressionSession or to an AVSampleBufferDisplayLayer
[self render:sampleBuffer];
}
// free memory to avoid a memory leak, do the same for sps, pps and blockbuffer
if (NULL != data) {
free (data);
data = NULL;
}
return;
}
- (void) createDecompSession {
// make sure to destroy the old VTD session
_decompressionSession = NULL;
VTDecompressionOutputCallbackRecord callBackRecord;
callBackRecord.decompressionOutputCallback = decompressionSessionDecodeFrameCallback;
// this is necessary if you need to make calls to Objective C "self" from within in the callback method.
callBackRecord.decompressionOutputRefCon = (__bridge void *)self;
// you can set some desired attributes for the destination pixel buffer. I didn't use this but you may
// if you need to set some attributes, be sure to uncomment the dictionary in VTDecompressionSessionCreate
/*NSDictionary *destinationImageBufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES],
(id)kCVPixelBufferOpenGLESCompatibilityKey,
nil];*/
OSStatus status = VTDecompressionSessionCreate(NULL, _formatDesc, NULL,
NULL, // (__bridge CFDictionaryRef)(destinationImageBufferAttributes)
&callBackRecord, &_decompressionSession);
NSLog(#"Video Decompression Session Create: \t %#", (status == noErr) ? #"successful!" : #"failed...");
if(status != noErr)
NSLog(#"\t\t VTD ERROR type: %d", (int)status);
}
void decompressionSessionDecodeFrameCallback(void *decompressionOutputRefCon,
void *sourceFrameRefCon,
OSStatus status,
VTDecodeInfoFlags infoFlags,
CVImageBufferRef imageBuffer,
CMTime presentationTimeStamp,
CMTime presentationDuration) {
if (status != noErr) {
NSError *error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
NSLog(#"Decompressed error: %#", error);
}
else {
//NSLog(#"Decompressed sucessfully: pts: %f", CMTimeGetSeconds(presentationTimeStamp));
// NSLog(#"Decompressed sucessfully");
CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
CIContext *temporaryContext = [CIContext contextWithOptions:nil];
CGImageRef videoImage = [temporaryContext
createCGImage:ciImage
fromRect:CGRectMake(0, 0,
CVPixelBufferGetWidth(imageBuffer),
CVPixelBufferGetHeight(imageBuffer))];
UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
CGImageRelease(videoImage);
VideoDecoder *decoder = (__bridge VideoDecoder *)decompressionOutputRefCon;
[decoder.delegate videoDecoderImage:image];
// [decoder renderImage:image];
}
}
- (void)renderImage:(UIImage *)img {
dispatch_async(dispatch_get_main_queue(), ^{
// [self->mVideoImageView setImage:img];
[self->_delegate videoDecoderImage:img];
});
}
- (void)render:(CMSampleBufferRef)sampleBuffer {
VTDecodeFrameFlags flags = kVTDecodeFrame_EnableAsynchronousDecompression;
VTDecodeInfoFlags flagOut;
NSDate* currentTime = [NSDate date];
OSStatus status = VTDecompressionSessionDecodeFrame(_decompressionSession, sampleBuffer, flags,
(void*)CFBridgingRetain(currentTime), &flagOut);
if (noErr != status)
NSLog(#"video decode error: %d", status);
CFRelease(sampleBuffer);
// if you're using AVSampleBufferDisplayLayer, you only need to use this line of code
// if (mVideoLayer) {
// [mVideoLayer enqueueSampleBuffer:sampleBuffer];
// }
}
@end
Thanks in advance
In the cases where you are receiving the -12909 error, please make sure that you are successfully able to create the block buffer with CMBlockBufferCreateWithMemoryBlock, using the SPS & PPS values.
In some NALUs (containing an IDR frame) you will not get the SPS + PPS + IDR sequence; rather, you might receive an SPS + PPS + SEI + IDR sequence.
Hence, if you are following the reference link mentioned above, it might not work in the case of an SPS+PPS+SEI+IDR NALU, and due to the unsuccessful (or missing) creation of the block buffer via CMBlockBufferCreateWithMemoryBlock, the later non-IDR frames may fail to decompress.
In the case of SEI, as it is non-VCL, simply iterate over it to find the next start code; you don't necessarily need to do anything else with the SEI for successful decompression.
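A minimal sketch of that scan (the function name and bounds are illustrative): walk forward from the end of the PPS until the next 4-byte Annex B start code, and treat whatever begins there as the next NALU, whether it is SEI or the IDR slice.
// Illustrative helper: returns the index of the next 0x00 00 00 01 start code at or
// after `from`, or -1 if none is found before `size`.
static int nextStartCodeIndex(const uint8_t *frame, int from, int size) {
    for (int i = from; i + 3 < size; i++) {
        if (frame[i] == 0x00 && frame[i+1] == 0x00 && frame[i+2] == 0x00 && frame[i+3] == 0x01) {
            return i;
        }
    }
    return -1;
}
// Usage idea: after the PPS, if the NALU type at the new start code is 6 (SEI), call
// nextStartCodeIndex() again from just past it and re-read the type until a VCL NALU (1 or 5) is found.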
I'm trying to decode AAC data with AudioToolbox in an iOS environment. I consulted this thread.
The 'AudioConverterNew' function call succeeds, but AudioConverterFillComplexBuffer returns error code 1852797029, kAudioCodecIllegalOperationError.
I'm trying to find my mistakes. Thank you for reading.
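One generic debugging aid that may help when hunting such errors (a sketch, not specific to this code): many Core Audio statuses are four-character codes, so printing the status as characters makes it easier to look up; 1852797029 renders as 'nope'.
// Sketch: format an OSStatus as both a decimal value and a four-character code.
static NSString *StatusToString(OSStatus status)
{
    uint32_t bigEndian = CFSwapInt32HostToBig((uint32_t)status);
    char code[5] = {0};
    memcpy(code, &bigEndian, 4);
    return [NSString stringWithFormat:@"%d ('%s')", (int)status, code];
}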
- (void)initAudioToolBox {
HCAudioAsset* asset = [self.provider getAudioAsset];
AudioStreamBasicDescription outFormat;
memset(&outFormat, 0, sizeof(outFormat));
outFormat.mSampleRate = 44100;
outFormat.mFormatID = kAudioFormatLinearPCM;
outFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
outFormat.mBytesPerPacket = 2;
outFormat.mFramesPerPacket = 1;
outFormat.mBytesPerFrame = 2;
outFormat.mChannelsPerFrame = 1;
outFormat.mBitsPerChannel = 16;
outFormat.mReserved = 0;
AudioStreamBasicDescription inFormat;
memset(&inFormat, 0, sizeof(inFormat));
inFormat.mSampleRate = [asset sampleRate];
inFormat.mFormatID = kAudioFormatMPEG4AAC;
inFormat.mFormatFlags = kMPEG4Object_AAC_LC;
inFormat.mBytesPerPacket = 0;
inFormat.mFramesPerPacket = (UInt32)[asset framePerPacket];
inFormat.mBytesPerFrame = 0;
inFormat.mChannelsPerFrame = (UInt32)[asset channelCount];
inFormat.mBitsPerChannel = 0;
inFormat.mReserved = 0;
OSStatus status = AudioConverterNew(&inFormat, &outFormat, &audioConverter);
if (status != noErr) {
NSLog(#"setup converter error, status: %i\n", (int)status);
} else {
NSLog(#"Audio Converter is initialized successfully.");
}
}
typedef struct _PassthroughUserData PassthroughUserData;
struct _PassthroughUserData {
UInt32 mChannels;
UInt32 mDataSize;
const void* mData;
AudioStreamPacketDescription mPacket;
};
int inInputDataProc(AudioConverterRef aAudioConverter,
UInt32* aNumDataPackets,
AudioBufferList* aData,
AudioStreamPacketDescription** aPacketDesc,
void* aUserData)
{
PassthroughUserData* userData = (PassthroughUserData*)aUserData;
if (!userData->mDataSize) {
*aNumDataPackets = 0;
NSLog(#"inInputDataProc returns -1");
return -1;
}
if (aPacketDesc) {
userData->mPacket.mStartOffset = 0;
userData->mPacket.mVariableFramesInPacket = 0;
userData->mPacket.mDataByteSize = userData->mDataSize;
NSLog(#"mDataSize:%d", userData->mDataSize);
*aPacketDesc = &userData->mPacket;
}
aData->mBuffers[0].mNumberChannels = userData->mChannels;
aData->mBuffers[0].mDataByteSize = userData->mDataSize;
aData->mBuffers[0].mData = (void*)(userData->mData);
NSLog(#"buffer[0] - channel:%d, byte size:%u, data:%p",
aData->mBuffers[0].mNumberChannels,
(unsigned int)aData->mBuffers[0].mDataByteSize,
aData->mBuffers[0].mData);
// No more data to provide following this run.
userData->mDataSize = 0;
NSLog(#"inInputDataProc returns 0");
return 0;
}
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts{
if(!audioConverter){
[self initAudioToolBox];
}
HCAudioAsset* asset = [self.provider getAudioAsset];
PassthroughUserData userData = { (UInt32)[asset channelCount], (UInt32)frame.length, [frame bytes]};
NSMutableData *decodedData = [NSMutableData new];
const uint32_t MAX_AUDIO_FRAMES = 128;
const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;
do {
uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));
AudioBufferList decBuffer;
memset(&decBuffer, 0, sizeof(AudioBufferList));
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = 2;
decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
decBuffer.mBuffers[0].mData = buffer;
UInt32 numFrames = MAX_AUDIO_FRAMES;
AudioStreamPacketDescription outPacketDescription;
memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
outPacketDescription.mStartOffset = 0;
outPacketDescription.mVariableFramesInPacket = 0;
NSLog(#"frame - size:%lu, buffer:%p", [frame length], [frame bytes]);
OSStatus rv = AudioConverterFillComplexBuffer(audioConverter,
inInputDataProc,
&userData,
&numFrames,
&decBuffer,
&outPacketDescription);
NSLog(#"num frames:%d, dec buffer [0] channels:%d, dec buffer [0] data byte size:%d, rv:%d",
numFrames, decBuffer.mBuffers[0].mNumberChannels,
decBuffer.mBuffers[0].mDataByteSize, (int)rv);
if (rv && rv != noErr) {
NSLog(#"Error decoding audio stream: %d\n", rv);
break;
}
if (numFrames) {
[decodedData appendBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize];
}
} while (true);
//void *pData = (void *)[decodedData bytes];
//audioRenderer->Render(&pData, decodedData.length, pts);
}
I am writing an iOS app which will decode an H.264 frame and render it using AVSampleBufferDisplayLayer. I have already modified the frame so that it does not have the NAL start code but a 4-byte NAL size instead. This has been verified.
But all I see is a white frame in my iOS simulator, and no error. Is there a possibility to dump the decoded frame and verify it? Any other debug points will really help.
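One way to double-check the start-code-to-size rewrite before enqueueing anything is to walk the buffer and confirm every 4-byte big-endian length prefix lands exactly on the next NALU or on the end of the data. A sketch (assuming the whole buffer is in AVCC layout):
// Returns YES if the buffer parses cleanly as [4-byte big-endian length][NALU] ... with no
// leftover bytes; a failure here usually means a start code was missed during the rewrite.
static BOOL verifyAVCCLayout(const uint8_t *buf, size_t size) {
    size_t offset = 0;
    while (offset + 4 <= size) {
        uint32_t naluLength = ((uint32_t)buf[offset] << 24) |
                              ((uint32_t)buf[offset + 1] << 16) |
                              ((uint32_t)buf[offset + 2] << 8) |
                               (uint32_t)buf[offset + 3];
        if (naluLength > size - offset - 4) {
            return NO; // length prefix points past the end of the buffer
        }
        offset += 4 + naluLength;
    }
    return offset == size;
}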
@implementation DecodeClass
- (void) viewDidLoad {
}
/* method to decode and render a frame */
- (void)decodeFrame{
NSLog(#"Decode Start");
/* local variable declaration */
//OSStatus status;
size_t spsSize, ppsSize,dataLen;
//_frameSize = 320*240*1.5;
uint8_t sps[] = {0x67, 0x42, 0xC0, 0x0D, 0x96, 0x64, 0x0A, 0x0F, 0xDF, 0xF8, 0x00, 0x20, 0x00, 0x18, 0x80, 0x00,
0x00, 0x7D, 0x00, 0x00, 0x0B, 0xB5, 0x47, 0x8A, 0x15, 0x50};
uint8_t pps[] = {0x68, 0xCE, 0x32, 0xC8};
const uint8_t* props[] = {sps, pps};
spsSize = (sizeof(sps)/sizeof(uint8_t));
ppsSize = (sizeof(pps)/sizeof(uint8_t));
const size_t sizes[] = {spsSize,ppsSize};
FILE* pFile;
int result;
pFile = fopen("/Documents/input_mod1.264","r");
fseeko(pFile, 0, SEEK_END);
unsigned long fileSize = ftello(pFile);
fseek(pFile, 0, SEEK_SET);
_dataBuf = (uint8_t*)malloc(sizeof(uint8_t) * (fileSize));
memset(_dataBuf,0,sizeof(uint8_t) * (fileSize));
if (pFile ){
result = fread(_dataBuf,sizeof(uint8_t),fileSize,pFile);
fclose(pFile);
}
else
NSLog(#"Can't open file");
[self MUX_Modify_AVC_Start_Code:_dataBuf size:&fileSize Header:false];
dataLen = fileSize;
//construct h.264 parameter set
CMVideoFormatDescriptionRef formatDesc;
OSStatus formatCreateResult = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2, props, sizes, 4, &formatDesc);
if (formatCreateResult)
{
NSLog(#"construct CMVideoFormatDescriptionCreateFromH264ParameterSets Failed :%ld",(long)formatCreateResult);
}
//construct cmBlockbuffer .
CMBlockBufferRef blockBufferOut = nil;
CMBlockBufferCreateEmpty (0,0,kCMBlockBufferAlwaysCopyDataFlag, &blockBufferOut);
CMBlockBufferAppendMemoryBlock(blockBufferOut,
_dataBuf,
dataLen,
NULL,
NULL,
0,
dataLen,
kCMBlockBufferAlwaysCopyDataFlag);
//construct cmsamplebuffer ok
size_t sampleSizeArray[1] = {0};
sampleSizeArray[0] = CMBlockBufferGetDataLength(blockBufferOut);
CMSampleTimingInfo tmInfos[1] = {
{CMTimeMake(5,1), CMTimeMake(5,1), CMTimeMake(5,1)}
};
CMSampleBufferRef sampBuf = nil;
formatCreateResult = CMSampleBufferCreate(kCFAllocatorDefault,
blockBufferOut,
YES,
NULL,
NULL,
formatDesc,
1,
1,
tmInfos,
1,
sampleSizeArray,
&sampBuf);
NSLog(#"Decode End :: Construct CMSampleBufferRef value of formatCreateResult is %d", formatCreateResult);
if(!_dspLayer)
{
_dspLayer = [[AVSampleBufferDisplayLayer alloc]init];
[_dspLayer setFrame:CGRectMake(0,0,320,240)];
_dspLayer.bounds = CGRectMake(0, 0, 300, 300);
_dspLayer.videoGravity = AVLayerVideoGravityResizeAspect;
_dspLayer.position = CGPointMake(500, 500);
_dspLayer.backgroundColor = [UIColor blueColor].CGColor;
CMTimebaseRef tmBase = nil;
CMTimebaseCreateWithMasterClock(NULL,CMClockGetHostTimeClock(),&tmBase);
_dspLayer.controlTimebase = tmBase;
CMTimebaseSetTime(_dspLayer.controlTimebase, kCMTimeZero);
CMTimebaseSetRate(_dspLayer.controlTimebase, 1.0);
[self.layerView.layer addSublayer:_dspLayer];
}
//put to AVSampleBufferdisplayLayer,just one frame.
if([self.dspLayer isReadyForMoreMediaData])
{
[self.dspLayer enqueueSampleBuffer:sampBuf];
}
[self.dspLayer setNeedsDisplay];
}
-(void)MUX_Modify_AVC_Start_Code:(uint8_t*)pData size:(uint32_t *)nSize Header:(bool)bHeader{
....
}
-(uint32_t)MUX_FindNextPattern:(uint8_t*)streamBuf buffSize:(uint32_t)bufSize startCode:(uint32_t)startcode{
....
}
- (void)dealloc{
//free(_dataBuf);
}
@end
int main(int argc, char * argv[]) {
//[decodeClass release];
@autoreleasepool {
DecodeClass *decodeClass = [[DecodeClass alloc]init];
[decodeClass decodeFrame];
decodeClass = nil;
return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class]));
}
}
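Regarding extra debug points: AVSampleBufferDisplayLayer exposes status and error properties, so one cheap check after enqueueing the sample buffer (a sketch, placed wherever the layer is reachable) is:
if (_dspLayer.status == AVQueuedSampleBufferRenderingStatusFailed) {
    NSLog(@"Display layer failed: %@", _dspLayer.error);
}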
In my app I have to implement a form which works offline as well.
There are some images to be uploaded in the form. The way I currently implement this is by first saving the image in the application's Documents directory, saving its file location in the database, and then deleting the file once it has been uploaded. This process is too lengthy and mind-boggling. What do you guys suggest? Should I save the image as Data in SQLite instead, or is the approach I took better?
This is how I implemented it:
ALAssetsLibrary *lib=[ALAssetsLibrary new];
for (int i=0; i<assets.count; i++) {
ALAsset *asset=assets[i];
NSString *baseDir=[fileMgr GetDocumentDirectory];
//STORING FILE INTO LOCAL
[lib assetForURL:asset.defaultRepresentation.url
resultBlock:^(ALAsset *asset){
ALAssetRepresentation *repr = [asset defaultRepresentation];
CGImageRef cgImg = [repr fullResolutionImage];
NSString *fname = repr.filename;
UIImage *img = [UIImage imageWithCGImage:cgImg];
NSData *data = UIImagePNGRepresentation(img);
[data writeToFile:[baseDir stringByAppendingPathComponent:fname]
atomically:YES];
//FOR LOCAL URL OF THE IMAGE
NSString *imageURL = [baseDir stringByAppendingPathComponent:fname];
NSLog(#"%# URL OF IMAGE ",imageURL);
}
failureBlock:^(NSError *error){
}];
}
NSLog(#"COPIED %lu FILE INTO LOCAL MEMORY",(unsigned long)assets.count);
Yes, you can save the image in the database as a string. Create a category NSString+NSStringAdditions like this:
//
// NSString+NSStringAdditions.h
// RemoteControl
//
// Created by Bhaskar on 09/05/14.
// Copyright (c) 2014 Bhaskar. All rights reserved.
//
#import <Foundation/Foundation.h>
@interface NSString (NSStringAdditions)
+ (NSString *) base64StringFromData:(NSData *)data length:(int)length;
+ (NSData *) base64DataFromString:(NSString *)string;
@end
//
// NSString+NSStringAdditions.m
// RemoteControl
//
// Created by Bhaskar on 09/05/14.
// Copyright (c) 2014 Bhaskar. All rights reserved.
//
#import "NSString+NSStringAdditions.h"
static char base64EncodingTable[64] = {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'
};
@implementation NSString (NSStringAdditions)
+ (NSString *) base64StringFromData: (NSData *)data length: (int)length {
unsigned long ixtext, lentext;
long ctremaining;
unsigned char input[3], output[4];
short i, charsonline = 0, ctcopy;
const unsigned char *raw;
NSMutableString *result;
lentext = [data length];
if (lentext < 1)
return #"";
result = [NSMutableString stringWithCapacity: lentext];
raw = [data bytes];
ixtext = 0;
while (true) {
ctremaining = lentext - ixtext;
if (ctremaining <= 0)
break;
for (i = 0; i < 3; i++) {
unsigned long ix = ixtext + i;
if (ix < lentext)
input[i] = raw[ix];
else
input[i] = 0;
}
output[0] = (input[0] & 0xFC) >> 2;
output[1] = ((input[0] & 0x03) << 4) | ((input[1] & 0xF0) >> 4);
output[2] = ((input[1] & 0x0F) << 2) | ((input[2] & 0xC0) >> 6);
output[3] = input[2] & 0x3F;
ctcopy = 4;
switch (ctremaining) {
case 1:
ctcopy = 2;
break;
case 2:
ctcopy = 3;
break;
}
for (i = 0; i < ctcopy; i++)
[result appendString: [NSString stringWithFormat: @"%c", base64EncodingTable[output[i]]]];
for (i = ctcopy; i < 4; i++)
[result appendString: @"="];
ixtext += 3;
charsonline += 4;
if ((length > 0) && (charsonline >= length))
charsonline = 0;
}
return result;
}
+ (NSData *)base64DataFromString: (NSString *)string
{
unsigned long ixtext, lentext;
unsigned char ch, inbuf[4], outbuf[3];
short i, ixinbuf;
Boolean flignore, flendtext = false;
const unsigned char *tempcstring;
NSMutableData *theData;
if (string == nil)
{
return [NSData data];
}
ixtext = 0;
tempcstring = (const unsigned char *)[string UTF8String];
lentext = [string length];
theData = [NSMutableData dataWithCapacity: lentext];
ixinbuf = 0;
while (true)
{
if (ixtext >= lentext)
{
break;
}
ch = tempcstring [ixtext++];
flignore = false;
if ((ch >= 'A') && (ch <= 'Z'))
{
ch = ch - 'A';
}
else if ((ch >= 'a') && (ch <= 'z'))
{
ch = ch - 'a' + 26;
}
else if ((ch >= '0') && (ch <= '9'))
{
ch = ch - '0' + 52;
}
else if (ch == '+')
{
ch = 62;
}
else if (ch == '=')
{
flendtext = true;
}
else if (ch == '/')
{
ch = 63;
}
else
{
flignore = true;
}
if (!flignore)
{
short ctcharsinbuf = 3;
Boolean flbreak = false;
if (flendtext)
{
if (ixinbuf == 0)
{
break;
}
if ((ixinbuf == 1) || (ixinbuf == 2))
{
ctcharsinbuf = 1;
}
else
{
ctcharsinbuf = 2;
}
ixinbuf = 3;
flbreak = true;
}
inbuf [ixinbuf++] = ch;
if (ixinbuf == 4)
{
ixinbuf = 0;
outbuf[0] = (inbuf[0] << 2) | ((inbuf[1] & 0x30) >> 4);
outbuf[1] = ((inbuf[1] & 0x0F) << 4) | ((inbuf[2] & 0x3C) >> 2);
outbuf[2] = ((inbuf[2] & 0x03) << 6) | (inbuf[3] & 0x3F);
for (i = 0; i < ctcharsinbuf; i++)
{
[theData appendBytes: &outbuf[i] length: 1];
}
}
if (flbreak)
{
break;
}
}
}
return theData;
}
@end
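For completeness, a small usage sketch (the image variable and the idea of storing the string in a TEXT column are assumptions about your setup):
NSData *imageData = UIImagePNGRepresentation(image);
NSString *imageString = [NSString base64StringFromData:imageData length:0]; // 0 = no line wrapping
// save imageString into a TEXT column of your SQLite table ...
// later, when reading it back:
NSData *restoredData = [NSString base64DataFromString:imageString];
UIImage *restoredImage = [UIImage imageWithData:restoredData];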