This is my RTSP streaming iOS application with an FFmpeg decoder, and it streams fine, but memory keeps increasing while it runs. Is this a memory leak, and how can I track it down?
Here is my video streaming class, RTSPPlayer.m:
#import "RTSPPlayer.h"
#import "Utilities.h"
#import "AudioStreamer.h"
@interface RTSPPlayer ()
@property (nonatomic, retain) AudioStreamer *audioController;
@end
@interface RTSPPlayer (private)
-(void)convertFrameToRGB;
-(UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height;
-(void)setupScaler;
@end
@implementation RTSPPlayer
@synthesize audioController = _audioController;
@synthesize audioPacketQueue,audioPacketQueueSize;
@synthesize _audioStream,_audioCodecContext;
@synthesize emptyAudioBuffer;
@synthesize outputWidth, outputHeight;
- (void)setOutputWidth:(int)newValue
{
if (outputWidth != newValue) {
outputWidth = newValue;
[self setupScaler];
}
}
- (void)setOutputHeight:(int)newValue
{
if (outputHeight != newValue) {
outputHeight = newValue;
[self setupScaler];
}
}
- (UIImage *)currentImage
{
if (!pFrame->data[0]) return nil;
[self convertFrameToRGB];
return [self imageFromAVPicture:picture width:outputWidth height:outputHeight];
}
- (double)duration
{
return (double)pFormatCtx->duration / AV_TIME_BASE;
}
- (double)currentTime
{
AVRational timeBase = pFormatCtx->streams[videoStream]->time_base;
return packet.pts * (double)timeBase.num / timeBase.den;
}
- (int)sourceWidth
{
return pCodecCtx->width;
}
- (int)sourceHeight
{
return pCodecCtx->height;
}
- (id)initWithVideo:(NSString *)moviePath usesTcp:(BOOL)usesTcp
{
if (!(self=[super init])) return nil;
AVCodec *pCodec;
// Register all formats and codecs
avcodec_register_all();
av_register_all();
avformat_network_init();
// Set the RTSP Options
AVDictionary *opts = 0;
if (usesTcp)
av_dict_set(&opts, "rtsp_transport", "tcp", 0);
if (avformat_open_input(&pFormatCtx, [moviePath UTF8String], NULL, &opts) !=0 ) {
av_log(NULL, AV_LOG_ERROR, "Couldn't open file\n");
goto initError;
}
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx,NULL) < 0) {
av_log(NULL, AV_LOG_ERROR, "Couldn't find stream information\n");
goto initError;
}
// Find the first video stream
videoStream=-1;
audioStream=-1;
for (int i=0; i<pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
NSLog(#"found video stream");
videoStream=i;
}
if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
audioStream=i;
NSLog(#"found audio stream");
}
}
if (videoStream==-1 && audioStream==-1) {
goto initError;
}
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
av_log(NULL, AV_LOG_ERROR, "Unsupported codec!\n");
goto initError;
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
goto initError;
}
if (audioStream > -1 ) {
NSLog(#"set up audiodecoder");
[self setupAudioDecoder];
}
// Allocate video frame
pFrame = avcodec_alloc_frame();
outputWidth = pCodecCtx->width;
self.outputHeight = pCodecCtx->height;
return self;
initError:
// [self release];
return nil;
}
- (void)setupScaler
{
// Release old picture and scaler
avpicture_free(&picture);
sws_freeContext(img_convert_ctx);
// Allocate RGB picture
avpicture_alloc(&picture, PIX_FMT_RGB24, outputWidth, outputHeight);
// Setup scaler
static int sws_flags = SWS_FAST_BILINEAR;
img_convert_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
outputWidth,
outputHeight,
PIX_FMT_RGB24,
sws_flags, NULL, NULL, NULL);
}
- (void)seekTime:(double)seconds
{
AVRational timeBase = pFormatCtx->streams[videoStream]->time_base;
int64_t targetFrame = (int64_t)((double)timeBase.den / timeBase.num * seconds);
avformat_seek_file(pFormatCtx, videoStream, targetFrame, targetFrame, targetFrame, AVSEEK_FLAG_FRAME);
avcodec_flush_buffers(pCodecCtx);
}
- (void)dealloc
{
// Free scaler
sws_freeContext(img_convert_ctx);
// Free RGB picture
avpicture_free(&picture);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the YUV frame
av_free(pFrame);
// Close the codec
if (pCodecCtx) avcodec_close(pCodecCtx);
// Close the video file
if (pFormatCtx) avformat_close_input(&pFormatCtx);
[_audioController _stopAudio];
// [_audioController release];
_audioController = nil;
// [audioPacketQueue release];
audioPacketQueue = nil;
// [audioPacketQueueLock release];
audioPacketQueueLock = nil;
// [super dealloc];
}
- (BOOL)stepFrame
{
// AVPacket packet;
int frameFinished=0;
while (!frameFinished && av_read_frame(pFormatCtx, &packet) >=0 ) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
if (packet.stream_index==audioStream) {
// NSLog(#"audio stream");
[audioPacketQueueLock lock];
audioPacketQueueSize += packet.size;
[audioPacketQueue addObject:[NSMutableData dataWithBytes:&packet length:sizeof(packet)]];
[audioPacketQueueLock unlock];
if (!primed) {
primed=YES;
[_audioController _startAudio];
}
if (emptyAudioBuffer) {
[_audioController enqueueBuffer:emptyAudioBuffer];
}
}
}
return frameFinished!=0;
}
- (void)convertFrameToRGB
{
sws_scale(img_convert_ctx,
pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
picture.data,
picture.linesize);
}
- (UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height
{
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, pict.data[0], pict.linesize[0]*height,kCFAllocatorNull);
CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGImageRef cgImage = CGImageCreate(width,
height,
8,
24,
pict.linesize[0],
colorSpace,
bitmapInfo,
provider,
NULL,
NO,
kCGRenderingIntentDefault);
CGColorSpaceRelease(colorSpace);
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CGDataProviderRelease(provider);
CFRelease(data);
return image;
}
- (void)setupAudioDecoder
{
if (audioStream >= 0) {
_audioBufferSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
_audioBuffer = av_malloc(_audioBufferSize);
_inBuffer = NO;
_audioCodecContext = pFormatCtx->streams[audioStream]->codec;
_audioStream = pFormatCtx->streams[audioStream];
AVCodec *codec = avcodec_find_decoder(_audioCodecContext->codec_id);
if (codec == NULL) {
NSLog(#"Not found audio codec.");
return;
}
if (avcodec_open2(_audioCodecContext, codec, NULL) < 0) {
NSLog(#"Could not open audio codec.");
return;
}
if (audioPacketQueue) {
// [audioPacketQueue release];
audioPacketQueue = nil;
}
audioPacketQueue = [[NSMutableArray alloc] init];
if (audioPacketQueueLock) {
// [audioPacketQueueLock release];
audioPacketQueueLock = nil;
}
audioPacketQueueLock = [[NSLock alloc] init];
if (_audioController) {
[_audioController _stopAudio];
// [_audioController release];
_audioController = nil;
}
_audioController = [[AudioStreamer alloc] initWithStreamer:self];
} else {
pFormatCtx->streams[audioStream]->discard = AVDISCARD_ALL;
audioStream = -1;
}
}
- (void)nextPacket
{
_inBuffer = NO;
}
- (AVPacket*)readPacket
{
if (_currentPacket.size > 0 || _inBuffer) return &_currentPacket;
NSMutableData *packetData = [audioPacketQueue objectAtIndex:0];
_packet = [packetData mutableBytes];
if (_packet) {
if (_packet->dts != AV_NOPTS_VALUE) {
_packet->dts += av_rescale_q(0, AV_TIME_BASE_Q, _audioStream->time_base);
}
if (_packet->pts != AV_NOPTS_VALUE) {
_packet->pts += av_rescale_q(0, AV_TIME_BASE_Q, _audioStream->time_base);
}
[audioPacketQueueLock lock];
audioPacketQueueSize -= _packet->size;
if ([audioPacketQueue count] > 0) {
[audioPacketQueue removeObjectAtIndex:0];
}
[audioPacketQueueLock unlock];
_currentPacket = *(_packet);
}
return &_currentPacket;
}
- (void)closeAudio
{
[_audioController _stopAudio];
primed=NO;
}
@end
Use Instruments to check for leaks, and also for memory loss due to memory that is retained but not leaked; the latter is unused memory that something still points to. Use Mark Generation (Heapshot) in the Allocations instrument in Instruments.
For how to use Heapshot to find memory creep, see bbum's blog.
Basically, the method is to run the Allocations instrument, take a heapshot, run an iteration of your code, and take another heapshot, repeating three or four times. This will show memory that is allocated but not released during the iterations.
To interpret the results, disclose each heapshot to see the individual allocations.
If you need to see where retains, releases, and autoreleases occur for an object, use Instruments:
Run in Instruments and, in Allocations, turn "Record reference counts" on (for Xcode 5 and lower you have to stop recording to set the option). Run the app, stop recording, and drill down; you will be able to see where all the retains, releases, and autoreleases occurred.
I'm trying to create a converter which will make a video out of a set of images. Everything is in place: AVFormatContext, AVCodecContext, AVCodec. I create a YUV AVFrame out of a UIImage and send it to the encoder with avcodec_send_frame(). Everything goes fine until I try to get an AVPacket with avcodec_receive_packet(). Every time it returns -35, which means "output is not available in the current state - user must try to send input". As I said, I send input before I try to get anything, and the send is successful.
Here's my code:
Initializing the FFmpeg entities:
- (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
{
if (!videoFile) {
[self.delegate convertationFailed:@"VideoFile is nil!"];
return NO;
}
currentVideoFile = videoFile;
outputPath = path;
BOOL success = NO;
success = [self initFormatCtxAndCodecs:path];
if (!success) {
return NO;
}
success = [self addCameraStreams:videoFile];
if (!success) {
return NO;
}
success = [self openIOContext:path];
if (!success) {
return NO;
}
return YES;
}
- (BOOL)initFormatCtxAndCodecs:(NSString *)path
{
//AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
if (ret < 0) {
NSLog(#"Couldn't create output context");
return NO;
}
//encoder codec init
pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!pCodec) {
NSLog(#"Couldn't find a encoder codec!");
return NO;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx) {
NSLog(#"Couldn't alloc encoder codec context!");
return NO;
}
pCodecCtx->codec_tag = AV_CODEC_ID_H264;
pCodecCtx->bit_rate = 400000;
pCodecCtx->width = currentVideoFile.size.width;
pCodecCtx->height = currentVideoFile.size.height;
pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
pCodecCtx->gop_size = 10;
pCodecCtx->max_b_frames = 1;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
NSLog(#"Couldn't open the encoder codec!");
return NO;
}
pPacket = av_packet_alloc();
return YES;
}
- (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
{
AVCodecParameters *params = avcodec_parameters_alloc();
if (!params) {
NSLog(#"Couldn't allocate codec parameters!");
return NO;
}
if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
NSLog(#"Couldn't copy parameters from context!");
return NO;
}
for (int i = 0; i < videoFile.idCameras.count - 1; i++)
{
NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
if (!stream) {
NSLog(#"Couldn't alloc stream!");
return NO;
}
if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
NSLog(#"Couldn't copy parameters into stream!");
return NO;
}
stream->avg_frame_rate.num = videoFile.framerate;
stream->avg_frame_rate.den = 1;
stream->codecpar->codec_tag = 0; //some silly workaround
stream->index = i;
streams[path] = [[VideoStream alloc] initWithStream:stream];
}
return YES;
}
- (BOOL)openIOContext:(NSString *)path
{
AVIOContext *ioCtx = nil;
if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
return NO;
}
pFormatCtx->pb = ioCtx;
return YES;
}
And here's the conversion process:
- (void)launchConvert:(DummyFVPVideoFile *)videoFile
{
BOOL convertInProgress = YES;
unsigned int frameCount = 1;
unsigned long pts = 0;
BOOL success = NO;
success = [self writeHeader];
if (!success) {
NSLog(#"Couldn't write header!");
return;
}
AVRational defaultTimeBase;
defaultTimeBase.num = 1;
defaultTimeBase.den = videoFile.framerate;
AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;
while (convertInProgress)
{
pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
for (NSString *path in streams.allKeys)
{
UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
AVPacket *pkt = [self getAVPacket:img withPts:pts];
if (!pkt->data) { continue; }
pkt->stream_index = streams[path].stream->index;
//check all settings of pkt
if (![self writePacket:pkt]) {
NSLog(#"Couldn't write packet!");
convertInProgress = NO;
break;
}
}
frameCount++;
}
success = [self writeTrailer];
if (!success) {
NSLog(#"Couldn't write trailer!");
return;
}
NSLog(#"Convertation finished!");
//delegate convertationFinished method
}
- (BOOL)writeHeader
{
if (avformat_write_header(pFormatCtx, NULL) < 0) {
return NO;
}
return YES;
}
- (BOOL)writePacket:(AVPacket *)pkt
{
if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
return NO;
}
return YES;
}
- (BOOL)writeTrailer
{
if (av_write_trailer(pFormatCtx) != 0) {
return NO;
}
return YES;
}
/**
This method will create AVPacket out of UIImage.
@return AVPacket
*/
- (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
{
if (!img) {
NSLog(#"imgData is nil!");
return nil;
}
uint8_t *imgData = [self getPixelDataFromImage:img];
AVFrame *frame_yuv = av_frame_alloc();
if (!frame_yuv) {
NSLog(#"frame_yuv is nil!");
return nil;
}
frame_yuv->format = AV_PIX_FMT_YUV420P;
frame_yuv->width = (int)img.size.width;
frame_yuv->height = (int)img.size.height;
int ret = av_image_alloc(frame_yuv->data,
frame_yuv->linesize,
frame_yuv->width,
frame_yuv->height,
frame_yuv->format,
32);
if (ret < 0) {
NSLog(#"Couldn't alloc yuv frame!");
return nil;
}
struct SwsContext *sws_ctx = nil;
sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
(int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
0, NULL, NULL, NULL);
const uint8_t *scaleData[1] = { imgData };
int inLineSize[1] = { 4 * img.size.width };
sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);
frame_yuv->pict_type = AV_PICTURE_TYPE_I;
frame_yuv->pts = pCodecCtx->frame_number;
ret = avcodec_send_frame(pCodecCtx, frame_yuv); //every time everything is fine
if (ret != 0) {
NSLog(#"Couldn't send yuv frame!");
return nil;
}
av_init_packet(pPacket);
pPacket->dts = pPacket->pts = pts;
do {
ret = avcodec_receive_packet(pCodecCtx, pPacket); //every time -35 error
NSLog(#"ret = %d", ret);
if (ret == AVERROR_EOF) {
NSLog(#"AVERROR_EOF!");
} else if (ret == AVERROR(EAGAIN)) {
NSLog(#"AVERROR(EAGAIN)");
} else if (ret == AVERROR(EINVAL)) {
NSLog(#"AVERROR(EINVAL)");
}
if (ret != 0) {
NSLog(#"Couldn't receive packet!");
//return nil;
}
} while ( ret == 0 );
free(imgData);
av_packet_unref(pPacket);
av_packet_free(&pPacket);
av_frame_unref(frame_yuv);
av_frame_free(&frame_yuv);
//perform other clean up and test dat shit
return pPacket;
}
Any insights would be helpful. Thanks!
There may be two reasons.
According to the FFmpeg documentation, you may need to feed more than one frame to avcodec_send_frame() before avcodec_receive_packet() returns a packet successfully.
I cannot confirm that you allocated a large enough buffer for pPacket. The functions av_packet_alloc() and av_init_packet() won't allocate any data buffer; the latter just sets it to NULL. So the allocation must be done after the init. Somewhere you should allocate the buffer, either manually or with av_new_packet(pPacket, SIZE).
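For reference, here is a minimal sketch of the send/receive loop the avcodec_send_frame()/avcodec_receive_packet() API expects (not the poster's exact code; it assumes an already opened encoder context enc, an allocated pkt, the output AVFormatContext fmt and its AVStream stream). The key point is that AVERROR(EAGAIN) from avcodec_receive_packet() is not a failure; it just means the encoder wants more input frames before it can emit a packet:
// Feed one frame, then drain whatever packets the encoder has ready.
// Passing frame == NULL flushes the encoder at the end of the stream.
static int encode_and_write(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt,
                            AVFormatContext *fmt, AVStream *stream)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            // encoder needs more input, or is fully drained
        else if (ret < 0)
            return ret;          // a real error
        av_packet_rescale_ts(pkt, enc->time_base, stream->time_base);
        pkt->stream_index = stream->index;
        ret = av_interleaved_write_frame(fmt, pkt);
        av_packet_unref(pkt);
    }
    return ret;
}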
Hope that helps.
I am building a transcription app for iOS, so I have to record audio into buffers and stream them to the server through a socket. I have used AudioQueue to record the audio into buffers.
The audio is recorded properly into a local file. For streaming, I convert the audio data to NSData and send it through the socket. But the audio quality on the server is not good; in particular, the voice is not clear at all and there is a lot of noise where the voice should be. The same logic works properly on Android, so the server-side code is working properly; the problem is in the iOS streaming conversion. I have used two different socket libraries (SocketRocket/PockSocket) and the problem remains the same with both.
I have attached my code here. Please let me know if you can help me.
ViewController.h
#import <UIKit/UIKit.h>
#import <AudioToolbox/AudioQueue.h>
#import <AudioToolbox/AudioFile.h>
#import <SocketRocket/SocketRocket.h>
#define NUM_BUFFERS 3
#define SAMPLERATE 16000
//Struct defining recording state
typedef struct {
AudioStreamBasicDescription dataFormat;
AudioQueueRef queue;
AudioQueueBufferRef buffers[NUM_BUFFERS];
AudioFileID audioFile;
SInt64 currentPacket;
bool recording;
} RecordState;
//Struct defining playback state
typedef struct {
AudioStreamBasicDescription dataFormat;
AudioQueueRef queue;
AudioQueueBufferRef buffers[NUM_BUFFERS];
AudioFileID audioFile;
SInt64 currentPacket;
bool playing;
} PlayState;
@interface ViewController : UIViewController <SRWebSocketDelegate> {
RecordState recordState;
PlayState playState;
CFURLRef fileURL;
}
@property (nonatomic, strong) SRWebSocket * webSocket;
@property (weak, nonatomic) IBOutlet UITextView *textView;
@end
ViewController.m
#import "ViewController.h"
id thisClass;
//Declare C callback functions
void AudioInputCallback(void * inUserData, // Custom audio metadata
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs);
void AudioOutputCallback(void * inUserData,
AudioQueueRef outAQ,
AudioQueueBufferRef outBuffer);
@interface ViewController ()
@end
@implementation ViewController
@synthesize webSocket;
@synthesize textView;
// Takes a filled buffer and writes it to disk, "emptying" the buffer
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
}
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->currentPacket += inNumberPacketDescriptions;
NSData * audioData = [NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize * NUM_BUFFERS];
[thisClass sendAudioToSocketAsData:audioData];
}
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
// Fills an empty buffer with data and sends it to the speaker
void AudioOutputCallback(void * inUserData,
AudioQueueRef outAQ,
AudioQueueBufferRef outBuffer) {
PlayState * playState = (PlayState *) inUserData;
if(!playState -> playing) {
printf("Not playing, returning\n");
return;
}
printf("Queuing buffer %lld for playback\n", playState -> currentPacket);
AudioStreamPacketDescription * packetDescs;
UInt32 bytesRead;
UInt32 numPackets = SAMPLERATE * NUM_BUFFERS;
OSStatus status;
status = AudioFileReadPackets(playState -> audioFile, false, &bytesRead, packetDescs, playState -> currentPacket, &numPackets, outBuffer -> mAudioData);
if (numPackets) {
outBuffer -> mAudioDataByteSize = bytesRead;
status = AudioQueueEnqueueBuffer(playState -> queue, outBuffer, 0, packetDescs);
playState -> currentPacket += numPackets;
}else {
if (playState -> playing) {
AudioQueueStop(playState -> queue, false);
AudioFileClose(playState -> audioFile);
playState -> playing = false;
}
AudioQueueFreeBuffer(playState -> queue, outBuffer);
}
}
- (void) setupAudioFormat:(AudioStreamBasicDescription *) format {
format -> mSampleRate = SAMPLERATE;
format -> mFormatID = kAudioFormatLinearPCM;
format -> mFramesPerPacket = 1;
format -> mChannelsPerFrame = 1;
format -> mBytesPerFrame = 2;
format -> mBytesPerPacket = 2;
format -> mBitsPerChannel = 16;
format -> mReserved = 0;
format -> mFormatFlags = kLinearPCMFormatFlagIsBigEndian |kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
}
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
char path[256];
[self getFilename:path maxLength:sizeof path];
fileURL = CFURLCreateFromFileSystemRepresentation(NULL, (UInt8*)path, strlen(path), false);
// Init state variables
recordState.recording = false;
thisClass = self;
}
- (void) startRecordingInQueue {
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat, AudioInputCallback, &recordState, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &recordState.queue);
if(status == 0) {
//Prime recording buffers with empty data
for (int i=0; i < NUM_BUFFERS; i++) {
AudioQueueAllocateBuffer(recordState.queue, SAMPLERATE, &recordState.buffers[i]);
AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL, kAudioFileAIFFType, &recordState.dataFormat, kAudioFileFlags_EraseFile, &recordState.audioFile);
if (status == 0) {
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if(status == 0) {
NSLog(#"-----------Recording--------------");
NSLog(#"File URL : %#", fileURL);
}
}
}
if (status != 0) {
[self stopRecordingInQueue];
}
}
- (void) stopRecordingInQueue {
recordState.recording = false;
AudioQueueStop(recordState.queue, true);
for (int i=0; i < NUM_BUFFERS; i++) {
AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}
AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
NSLog(#"---Idle------");
NSLog(#"File URL : %#", fileURL);
}
- (void) startPlaybackInQueue {
playState.currentPacket = 0;
[self setupAudioFormat:&playState.dataFormat];
OSStatus status;
status = AudioFileOpenURL(fileURL, kAudioFileReadPermission, kAudioFileAIFFType, &playState.audioFile);
if (status == 0) {
status = AudioQueueNewOutput(&playState.dataFormat, AudioOutputCallback, &playState, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &playState.queue);
if( status == 0) {
//Allocate and prime playback buffers
playState.playing = true;
for (int i=0; i < NUM_BUFFERS && playState.playing; i++) {
AudioQueueAllocateBuffer(playState.queue, SAMPLERATE, &playState.buffers[i]);
AudioOutputCallback(&playState, playState.queue, playState.buffers[i]);
}
status = AudioQueueStart(playState.queue, NULL);
if (status == 0) {
NSLog(#"-------Playing Audio---------");
}
}
}
if (status != 0) {
[self stopPlaybackInQueue];
NSLog(#"---Playing Audio Failed ------");
}
}
- (void) stopPlaybackInQueue {
playState.playing = false;
for (int i=0; i < NUM_BUFFERS; i++) {
AudioQueueFreeBuffer(playState.queue, playState.buffers[i]);
}
AudioQueueDispose(playState.queue, true);
AudioFileClose(playState.audioFile);
}
- (IBAction)startRecordingAudio:(id)sender {
NSLog(#"starting recording tapped");
[self startRecordingInQueue];
}
- (IBAction)stopRecordingAudio:(id)sender {
NSLog(#"stop recording tapped");
[self stopRecordingInQueue];
}
- (IBAction)startPlayingAudio:(id)sender {
NSLog(#"start playing audio tapped");
[self startPlaybackInQueue];
}
- (IBAction)stopPlayingAudio:(id)sender {
NSLog(#"stop playing audio tapped");
[self stopPlaybackInQueue];
}
- (BOOL) getFilename:(char *) buffer maxLength:(int) maxBufferLength {
NSArray * paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString * docDir = [paths objectAtIndex:0];
NSString * file = [docDir stringByAppendingString:@"recording.aif"];
return [file getCString:buffer maxLength:maxBufferLength encoding:NSUTF8StringEncoding];
}
- (void) sendAudioToSocketAsData:(NSData *) audioData {
[self.webSocket send:audioData];
}
- (IBAction)connectToSocketTapped:(id)sender {
[self startStreaming];
}
- (void) startStreaming {
[self connectToSocket];
}
- (void) connectToSocket {
//Socket Connection Intiliazation
// create the NSURLRequest that will be sent as the handshake
NSURLRequest *request = [NSURLRequest requestWithURL:[NSURL URLWithString:@"${url}"]];
// create the socket and assign delegate
self.webSocket = [[SRWebSocket alloc] initWithURLRequest:request];
self.webSocket.delegate = self;
// open socket
[self.webSocket open];
}
///--------------------------------------
#pragma mark - SRWebSocketDelegate
///--------------------------------------
- (void)webSocketDidOpen:(SRWebSocket *)webSocket;
{
NSLog(#"Websocket Connected");
}
- (void) webSocket:(SRWebSocket *)webSocket didFailWithError:(NSError *)error {
NSLog(#":( Websocket Failed With Error %#", error);
self.webSocket = nil;
}
- (void) webSocket:(SRWebSocket *)webSocket didReceiveMessage:(id)message {
NSLog(#"Received \"%#\"", message);
textView.text = message;
}
- (void)webSocket:(SRWebSocket *)webSocket didCloseWithCode:(NSInteger)code reason:(NSString *)reason wasClean:(BOOL)wasClean;
{
NSLog(#"WebSocket closed");
self.webSocket = nil;
}
- (void)webSocket:(SRWebSocket *)webSocket didReceivePong:(NSData *)pongPayload;
{
NSLog(#"WebSocket received pong");
}
- (void)didReceiveMemoryWarning {
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
Thanks in Advance
I made it work. It was the audio format setup that was causing the problem. I set the audio format up properly by checking the server-side documentation. The big-endian flag was causing the problem: if you specify it, the data is big-endian; if you do not specify it, it is little-endian. I needed little-endian.
- (void) setupAudioFormat:(AudioStreamBasicDescription *) format {
format -> mSampleRate = 16000.0; //
format -> mFormatID = kAudioFormatLinearPCM; //
format -> mFramesPerPacket = 1;
format -> mChannelsPerFrame = 1; //
format -> mBytesPerFrame = 2;
format -> mBytesPerPacket = 2;
format -> mBitsPerChannel = 16; //
// format -> mReserved = 0;
format -> mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
}
I used the following code to encode a video from a set of local pictures, but the problem is that with 30 pictures I only get a 1-second video. Is there any way to get a 30-second video at a 24 fps frame rate?
- (BOOL)encodeReadySamplesFromOutput:(AVAssetReaderOutput *)output toInput:(AVAssetWriterInput *)input
{
NSLog(#"Frame init m == %d",m);
while (input.isReadyForMoreMediaData)
{
CMSampleBufferRef sampleBuffer = [output copyNextSampleBuffer];
if (sampleBuffer)
{
BOOL handled = NO;
BOOL error = NO;
CMItemCount count;
CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, 0, nil, &count);
CMSampleTimingInfo *timingInfo = malloc(sizeof(CMSampleTimingInfo) * count);
CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, count, timingInfo, &count);
for (CMItemCount i = 0; i < count; i++)
{
timingInfo[i].decodeTimeStamp = kCMTimeInvalid;
timingInfo[i].presentationTimeStamp = CMTimeMake(m, 24);
// timingInfo[i].duration = CMTimeMake(1, 12);
}
CMSampleBufferRef completedSampleBuffer;
CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault, sampleBuffer, count, timingInfo, &completedSampleBuffer);
free(timingInfo);
if (self.reader.status != AVAssetReaderStatusReading || self.writer.status != AVAssetWriterStatusWriting)
{
handled = YES;
error = YES;
}
if (!handled && self.videoOutput == output)
{
// update the video progress
++m;
NSLog(#"Frame m == %d",m);
lastSamplePresentationTime = CMSampleBufferGetPresentationTimeStamp(completedSampleBuffer);
CMTimeValue value = lastSamplePresentationTime.value;
CMTimeScale scale = lastSamplePresentationTime.timescale;
NSLog(#"Frame value == %lld", value);
NSLog(#"Frame scale == %d",scale);
self.progress = duration == 0 ? 1 : CMTimeGetSeconds(lastSamplePresentationTime) / duration;
if ([self.delegate respondsToSelector:@selector(exportSession:renderFrame:withPresentationTime:toBuffer:)])
{
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(completedSampleBuffer);
CVPixelBufferRef renderBuffer = NULL;
CVPixelBufferPoolCreatePixelBuffer(NULL, self.videoPixelBufferAdaptor.pixelBufferPool, &renderBuffer);
[self.delegate exportSession:self renderFrame:pixelBuffer withPresentationTime:lastSamplePresentationTime toBuffer:renderBuffer];
if (![self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer withPresentationTime:lastSamplePresentationTime])
{
error = YES;
}
CVPixelBufferRelease(renderBuffer);
handled = YES;
}
}
if (!handled && ![input appendSampleBuffer:completedSampleBuffer])
{
error = YES;
}
CFRelease(sampleBuffer);
CFRelease(completedSampleBuffer);
if (error)
{
return NO;
}
}
else
{
[input markAsFinished];
return NO;
}
}
return YES;
}
Not unless you get a lot more pictures or repeat the ones you have.
In either case, you're going to have to calculate presentation time yourself, with something like CMTimeMake(m, 24), e.g.:
[self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer withPresentationTime:CMTimeMake(m, 24)];
If you dropped the 24 fps requirement (why do you need that?) you could get a 30-second video of 30 images at 1 fps by using CMTimeMake(m, 1) instead in appendPixelBuffer:withPresentationTime:.
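As a rough sketch of the two options (assuming m is a frame counter starting at 0 and renderBuffer is the pixel buffer being appended, as in the question's code):
// Option 1: keep 24 fps and append each of the 30 source images 24 times,
// so m runs from 0 to 719 and the movie lasts 30 seconds.
[self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer
                           withPresentationTime:CMTimeMake(m, 24)];

// Option 2: drop the 24 fps requirement and show each image for one second,
// so m runs from 0 to 29 at 1 fps, which also gives 30 seconds.
[self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer
                           withPresentationTime:CMTimeMake(m, 1)];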
I have an audio playback class that uses AudioToolbox.framework and AudioQueue.
I have encountered a problem: every time a piece of audio data is played back, memory increases, and after playback is complete the memory is not released. In batch tests this adds up to hundreds of megabytes. I want to know what causes the memory to keep growing, and whether the audio data objects passed in on each call are being released or something else is going on.
Here is my playThread class code:
@interface PlayThread ()
{
BOOL transferDataComplete; // if there is no more data to transfer to the play thread, set transferDataComplete = YES
NSMutableArray *receiveDataArray; // audio data array
BOOL isPlay; // if the audio queue has started, isPlay = YES
}
@end
#pragma mark class implementation
@implementation PlayThread
- (instancetype)init
{
if (self = [super init]) {
receiveDataArray = [[NSMutableArray alloc]init];
isPlay = NO;
transferDataComplete = false;
bufferOverCount = QUEUE_BUFFER_SIZE;
audioQueue = nil;
}
return self;
}
// audio queue callback function
static void BufferCallback(void *inUserData,AudioQueueRef inAQ,AudioQueueBufferRef buffer)
{
PlayThread* player=(__bridge PlayThread*)inUserData;
[player fillBuffer:inAQ queueBuffer:buffer];
}
// fill buffer
-(void)fillBuffer:(AudioQueueRef)queue queueBuffer:(AudioQueueBufferRef)buffer
{
while (true){
NSData *audioData = [self getAudioData];
if( transferDataComplete && audioData == nil) {
bufferOverCount --;
break;
}
else if(audioData != nil){
memcpy(buffer->mAudioData, [audioData bytes] , audioData.length);
buffer->mAudioDataByteSize = (UInt32)audioData.length;
AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
break;
}
else
break;
} // while
if(bufferOverCount == 0){
// stop audioqueue
[self stopAudioQueue];
dispatch_async(dispatch_get_main_queue(), ^{
if ([self.delegate respondsToSelector:@selector(playComplete)]) {
[self.delegate playComplete];
}
});
}
}
-(void)addPlayData:(NSData *)data
{
NSUInteger count = 0;
@synchronized(receiveDataArray){
[receiveDataArray addObject:data];
}
}
/**
* get data from receiveDataArray
*/
-(NSData*)getAudioData
{
NSData *headData = nil;
@synchronized(receiveDataArray){
if(receiveDataArray.count > 0){
headData = [receiveDataArray objectAtIndex:0];
[receiveDataArray removeObjectAtIndex:0];
}
}
return headData;
}
- (void)startPlay // start audioqueue to play audio data
{
[self reset];
[self open];
for(int i=0; i<QUEUE_BUFFER_SIZE; i++)
{
[self fillBuffer:audioQueue queueBuffer:audioQueueBuffers[i]];
}
// audioqueuestart
AudioQueueStart(audioQueue, NULL);
@synchronized(self){
isPlay = YES;
}
if ([self.delegate respondsToSelector:@selector(playBegin)]) {
[self.delegate playBegin];
}
}
-(void)createAudioQueue
{
if (audioQueue) {
return;
}
AudioQueueNewOutput(&audioDescription, BufferCallback, (__bridge void *)(self), nil, nil, 0, &audioQueue);
if(audioQueue){
for(int i=0;i<QUEUE_BUFFER_SIZE;i++){
AudioQueueAllocateBufferWithPacketDescriptions(audioQueue, EVERY_READ_LENGTH, 0, &audioQueueBuffers[i]);
}
}
}
-(void)stopAudioQueue
{
if(audioQueue == nil){
return;
}
@synchronized(self){
if(isPlay){
isPlay = NO;
}
}
AudioQueueStop(audioQueue, TRUE);
}
-(void)setAudioFormat
{
audioDescription.mSampleRate = 16000;
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBitsPerChannel = 16;
audioDescription.mBytesPerFrame = (audioDescription.mBitsPerChannel/8) * audioDescription.mChannelsPerFrame;
audioDescription.mBytesPerPacket = audioDescription.mBytesPerFrame ;
}
-(void)close
{
if (audioQueue) {
AudioQueueStop(audioQueue, true);
AudioQueueDispose(audioQueue, true);
audioQueue = nil;
isPlay = NO;
}
}
-(BOOL)open {
if([self isOpen]){
return YES;
}
[self close];
[self setAudioFormat];
[self createAudioQueue];
return YES;
}
-(BOOL)isOpen
{
return (audioQueue != nil);
}
- (void)reset
{
bufferOverCount = QUEUE_BUFFER_SIZE;
transferDataComplete = NO;
}
- (BOOL)isPlaying
{
return isPlay;
}
- (void)disposeQueue
{
if (audioQueue) {
AudioQueueDispose(audioQueue, YES);
}
audioQueue = nil;
}
- (void)dealloc
{
[self disposeQueue];
}
Here is ViewController.m:
- (void)viewDidLoad {
[super viewDidLoad];
PlayThread *playThread = [[PlayThread alloc]init];
playThread.delegate = self;
self.playThread = playThread;
for (int i = 0; i < 10; i++)
{ // create empty audio data to simulate
NSMutableData *data = [[NSMutableData alloc]initWithLength:10000];
[self.playThread addPlayData:data];
}
[self.playThread startPlay];
}
Here is PlayThread's delegate method:
// When playback completes, play once again; memory will continue to increase
- (void)playComplete
{
dispatch_async(dispatch_get_main_queue(), ^{
for (int i = 0; i < 10; i++)
{
NSMutableData *data = [[NSMutableData alloc]initWithLength:10000];
[self.playThread addPlayData:data];
}
[self.playThread startPlay];
});
}
Why does the memory keep increasing, and how can I release it promptly?
AudioQueueNewOutput(&audioDescription, BufferCallback, (__bridge void *)(self), nil, nil, 0, &audioQueue);
The parameters passed as nil here (the callback run loop and run loop mode) should not be nil.
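As a rough sketch of what that would look like (keeping the names from the question's code, and assuming the callbacks should run on the current run loop in the common modes):
// Pass an explicit run loop and run loop mode instead of nil.
OSStatus status = AudioQueueNewOutput(&audioDescription,
                                      BufferCallback,
                                      (__bridge void *)(self),
                                      CFRunLoopGetCurrent(),
                                      kCFRunLoopCommonModes,
                                      0,
                                      &audioQueue);
if (status != noErr) {
    NSLog(@"AudioQueueNewOutput failed: %d", (int)status);
}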
I am working on an app that does image processing and displays the resulting images. I'm using a UIScrollView to let the user scroll through all the images. Because the images are not standard JPG or PNG files, they take time to load, so I use GCD to load them asynchronously and dispatch to the main queue to display them when finished. The snippet is as follows:
- (void)loadImage:(NSString *)name
{
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
UIImage *image = [Reader loadImage:name];
dispatch_sync(dispatch_get_main_queue(), ^{
[self displayImage:image];
});
});
}
the loadImage method of Reader is like this:
+ (UIImage *)loadImage:(NSString *)name
{
UInt8 *data = NULL;
NSString *mfjPath = [TMP stringByAppendingPathComponent:name];
NSData *mfjData = [NSData dataWithContentsOfFile:mfjPath];
if(mfjData){
data = malloc(sizeof(UInt8)*mfjData.length);
[mfjData getBytes:data];
}
if(data){
ResultHolder *result = [sDecoder decodeData:data withOffset:0]; // static id<IDecoder> sDecoder; declared in Reader.m before @implementation Reader
return [result bitmap];
}
return nil;
}
IDecoder is a protocol:
@protocol IDecoder <NSObject>
- (ResultHolder *)decodeData:(UInt8 *)data withOffset:(int)offset;
@end
ResultHolder is a class that loads simple images and combines complicated ones. It is as follows:
ResultHolder.h
typedef struct color24{
UInt8 R;
UInt8 G;
UInt8 B;
} Color24;
@interface ResultHolder : NSObject
{
unsigned long mWidth;
unsigned long mHeight;
UInt8 *mData;
CGImageRef mBitmap;
BOOL isMonoColor;
Color24 mMonoColor;
}
+ (ResultHolder *)resultHolderWithCGImage:(CGImageRef)image;
+ (ResultHolder *)resultHolderWithData:(UInt8 *)data Width:(unsigned long)width andHeight:(unsigned long)height;
+ (ResultHolder *)resultHolderWithMonoColor:(Color24)monoColor withWidth:(unsigned long)width andHeight:(unsigned long)height;
- (ResultHolder *)initWithData:(UInt8 *)data Width:(unsigned long)width andHeight:(unsigned long) height;
- (ResultHolder *)initWithCGImage:(CGImageRef)image;
- (ResultHolder *)initWithMonoColor:(Color24)monoColor withWidth:(unsigned long)width andHeight:(unsigned long)height;
- (BOOL)isSuccess;
- (UIImage *)bitmap;
- (void)combineFixResultHolder:(ResultHolder *)child Rect:(CGRect)bounds Width:(unsigned long)width andHeight:(unsigned long)height;
- (void)combineResultHolder:(ResultHolder *)child Bounds:(CGRect)bounds Width:(unsigned long)width andHeight:(unsigned long)height;
@end
ResultHolder.m
@implementation ResultHolder
@synthesize width = mWidth;
@synthesize height = mHeight;
@synthesize isMonoColor;
@synthesize monoColor = mMonoColor;
- (ResultHolder *)initWithData:(UInt8 *)data Width:(unsigned long)width andHeight:(unsigned long)height
{
if (self = [super init]) {
mWidth = width;
mHeight = height;
mData = malloc(mWidth*mHeight*sizeof(Color24));
memcpy(mData, data, mWidth*mHeight*sizeof(Color24));
mBitmap = NULL;
}
return self;
}
- (ResultHolder *)initWithCGImage:(CGImageRef)image
{
if (self = [super init]) {
mBitmap = CGImageRetain(image);
mWidth = CGImageGetWidth(image);
mHeight = CGImageGetHeight(image);
}
return self;
}
- (ResultHolder *)initWithMonoColor:(Color24)monoColor withWidth:(unsigned long)width andHeight:(unsigned long)height
{
if (self = [super init]) {
mMonoColor = monoColor;
isMonoColor = YES;
mWidth = width;
mHeight = height;
mBitmap = NULL;
mData = NULL;
}
return self;
}
+ (ResultHolder *)resultHolderWithCGImage:(CGImageRef)image
{
ResultHolder *resultHolder = [[ResultHolder alloc] initWithCGImage:image];
return resultHolder;
}
+ (ResultHolder *)resultHolderWithData:(UInt8 *)data Width:(unsigned long)width andHeight:(unsigned long)height
{
ResultHolder *resultHolder = [[ResultHolder alloc] initWithData:data Width:width andHeight:height];
return resultHolder;
}
+ (ResultHolder *)resultHolderWithMonoColor:(Color24)monoColor withWidth:(unsigned long)width andHeight:(unsigned long)height
{
ResultHolder *resultHolder = [[ResultHolder alloc] initWithMonoColor:monoColor withWidth:width andHeight:height];
return resultHolder;
}
- (BOOL)isSuccess
{
if ([ReaderConfigures CodecDebug])
NSLog(#"ResultHolder isSuccess");
return (mData != NULL || isMonoColor || mBitmap != nil);
}
- (void)fillMonoColor
{
if (isMonoColor) {
if (mData) {
free(mData);
}
mData = (UInt8 *)malloc(mWidth*mHeight*sizeof(Color24));
for (int i = 0; i < mHeight; i++) {
for (int j = 0; j < mWidth; j++) {
memcpy(mData+(i*mWidth+j)*3, &mMonoColor, sizeof(Color24));
}
}
isMonoColor = NO;
}
}
- (void)extractBitmap
{
if (mBitmap) {
CGDataProviderRef dataProvider = CGImageGetDataProvider(mBitmap);
CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
UInt8 * dataSource = (UInt8 *)CFDataGetBytePtr(bitmapData);
size_t width = CGImageGetWidth(mBitmap);
size_t height = CGImageGetHeight(mBitmap);
if(mData)
free(mData);
mData = malloc(width*height*3);
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
memcpy(mData+(i*width+j)*3, dataSource+(i*width+j)*4, sizeof(Color24));
}
}
CFRelease(bitmapData);
CGImageRelease(mBitmap);
mBitmap = NULL;
}
}
- (UInt8 *)getRawData
{
if (mBitmap) {
[self extractBitmap];
}
if (isMonoColor) {
[self fillMonoColor];
}
return mData;
}
- (UIImage *)bitmap
{
if (mBitmap) {
UIImage *image = [[UIImage alloc] initWithCGImage:mBitmap];
CGImageRelease(mBitmap);
mBitmap = NULL;
return image;
}
if (isMonoColor) {
[self fillMonoColor];
}
if (mData) {
CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, mData, mWidth*mHeight*3, NULL);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGImageRef bitmap = CGImageCreate(mWidth, mHeight, 8, 24, mWidth*3, colorSpace, kCGBitmapByteOrderDefault, dataProvider, NULL, YES, kCGRenderingIntentDefault);
CGColorSpaceRelease(colorSpace);
CGDataProviderRelease(dataProvider);
UIImage *image = [[UIImage alloc] initWithCGImage:bitmap];
CGImageRelease(bitmap);
return image;
}
return nil;
}
- (void)combineResultHolder:(ResultHolder *) child Bounds:(CGRect) bounds Width:(unsigned long)width andHeight:(unsigned long)height
{
CGRect rect = CGRectMake(MAX(0, bounds.origin.x), MAX(0, bounds.origin.y),MIN(width - 1, bounds.origin.x + bounds.size.width), MIN(height - 1, bounds.origin.y + bounds.size.height));
int w = MIN(rect.size.width + 1, child.width);
int h = MIN(rect.size.height + 1, child.height);
int dstPos = (height - 1 - (rect.origin.y + h - 1))*width;
UInt8 *dataParent = [self getRawData];
if (child.isMonoColor) {
Color24 childMonoColor = child.monoColor;
for (int i = 0; i < h; i++) {
memcpy(dataParent+(dstPos+(int)rect.origin.x)*3, &childMonoColor, w*3);
dstPos += width;
}
} else {
UInt8 *dataChild = [child getRawData];
if (dataChild != nil) {
int srcPos = 0;
for (int i = 0; i < h; i++) {
memcpy(dataParent+dstPos*3+((int)rect.origin.x)*3, dataChild+srcPos*3, w*3);
srcPos += child.width;
dstPos += width;
}
}
}
}
- (void)combineFixResultHolder:(ResultHolder *)child Rect:(CGRect)bounds Width:(unsigned long)width andHeight:(unsigned long)height
{
CGRect rect = CGRectMake(bounds.origin.x, height-1-bounds.origin.y-bounds.size.height, bounds.origin.x+bounds.size.width, height-1-bounds.origin.y);
[self combineResultHolder:child Bounds:rect Width:width andHeight:height];
}
- (void)dealloc
{
if (mData) {
free(mData);
mData = NULL;
}
if (mBitmap) {
CGImageRelease(mBitmap);
mBitmap = NULL;
}
}
@end
For simple images, for example JPEG-only ones, the + (ResultHolder *)resultHolderWithCGImage:(CGImageRef)image; and - (UIImage *)bitmap; methods are called. For some complicated ones,
ResultHolder will extract mBitmap into mData and then combine it with the sub-ResultHolders' mData to get the final image. These methods work well if I load the images on the main thread, but if I use GCD or NSThread to load them in the background, the app crashes easily, especially when loading complicated images in the background. When the app crashes, the main thread reports an error in CGSConvertBGR888toRGBA8888, while one of the other threads is running [ResultHolder dealloc], specifically free(mData). It seems there is a memory conflict between the loading thread and the main thread.
I have struggled with this bug for days, but still cannot figure out how to fix it.
I do hope someone can help me.
Any suggestions are appreciated.
UPDATE:
I made a demo project, ReaderDemo, to simulate the situation. If you are interested, you can download it to see the error. There are 15 images in the project; images 5, 7, and 14 will cause the crash when scrolling, since they are a little more complicated than the others. But if you scroll through the thumbnail scroll view and then tap, they can all be displayed.
You have a number of problems, but let's start with the first one I found:
Improper test
if (index > [mPageNames count]) {
That needs to be >= or you crash.
You are calling dispatch_sync on the main queue; that does not seem to be a good decision (but maybe you have a really good reason). I changed it to dispatch_async and it seems to work OK.
If you enable exception breakpoints in this project it will really help you. Click the Breakpoints button in the Xcode toolbar, then select the Breakpoints option in the left pane (second from the right). Tap the bottom-left '+' icon and add an All Exceptions breakpoint. Now when you run, the debugger stops where the problem occurs.
I got a final crash that I'll let you fix:
2012-09-26 08:55:12.378 ReaderDemo[787:11303] MFJAtIndex index out of bounds,index:15,bounds:15
2012-09-26 08:55:12.379 ReaderDemo[787:11303] *** Assertion failure in -[ImageScrollView showLoadingForMFJ:], /Volumes/Data/Users/dhoerl/Downloads/ReaderDemo/ReaderDemo/ImageScrollView.m:247
This should get you on your way.
EDIT: Your problem relates to the management of the mData memory. You are trying to manage its lifecycle in your code, but this management is not synchronized with the CGDataProvider that is trying to use it. The crash is almost for sure (meaning I'm 99.99% convinced) a byproduct of the data provider created by CGDataProviderCreateWithData trying to access the data after your class has freed that memory in dealloc. I have had similar experiences with data providers.
The proper solution is to remove all the free(data) calls, or at least most of them. Given the current structure of your code you will need to think about this carefully; you may want to replace all the tests and malloc/frees with a flag. In the end, what you want to do is: once the memory pointer has been handed over to CGDataProviderCreateWithData, NULL out mData and let the data provider handle the removal.
The way to do this is to provide a function pointer to CGDataProviderCreateWithData as the last parameter:
CGDataProviderReleaseDataCallback
A callback function that releases data you supply to the function CGDataProviderCreateWithData.
typedef void (*CGDataProviderReleaseDataCallback) (
void *info,
const void *data,
size_t size
);
All that function needs to do is just call free(data);. So whenever the data provider is done with the allocated memory, it will free it (and you don't need to worry about it).
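As a rough sketch of that change in ResultHolder's -bitmap method (assuming ownership of mData is handed to the provider, and that the other paths that currently free mData are adjusted to match):
// Called by Core Graphics when the data provider no longer needs the buffer.
static void ReleaseResultHolderData(void *info, const void *data, size_t size)
{
    free((void *)data);
}

// In -bitmap: hand mData to the provider and stop tracking it ourselves,
// so -dealloc can no longer free memory that the image is still using.
CGDataProviderRef dataProvider =
    CGDataProviderCreateWithData(NULL, mData, mWidth * mHeight * 3,
                                 ReleaseResultHolderData);
mData = NULL; // ownership transferred to the data provider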
If you want to free() or release your resources in a class in an ARC-enabled environment, you have to set the proper compiler flag for that class in Build Phases. To do that, select your project file in Xcode, select your target, go to the Build Phases section, find your class, and add the -fno-objc-arc flag for it.
Or, maybe for another reason: you are calling, from a background thread, some Core Graphics function that must only be called from the main thread.