Xcode - How to pause video capture using OpenGL - iOS

I originally wrote some code based on the CapturePause example (base example code on GitHub - https://github.com/cokecoffe/ios-demo/tree/master/capturepause/CapturePause) to pause video capture. Obviously pausing the capture itself isn't the tricky part; the code below checks for the pause flag and then adjusts the timestamps on the samples so that there is no gap in the video. This worked very successfully, and here is the method that handles that aspect:
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
BOOL bVideo = YES;
@synchronized(self)
{
if (!self.isCapturing || self.isPaused)
{
return;
}
if (connection != _videoConnection)
{
bVideo = NO;
}
if ((_encoder == nil) && !bVideo)
{
CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
[self setAudioFormat:fmt];
NSString* filename = [NSString stringWithFormat:@"capture%d.mp4", _currentFile];
NSString* path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
//additional quality encoding strings
if (globalheightvalue == 0){
}
else {
_cy = globalheightvalue;
_cx = globalwidthvalue;
}
_encoder = [VideoEncoder encoderForPath:path Height:_cy width:_cx channels:_channels samples:_samplerate];
}
if (_discont)
{
if (bVideo)
{
return;
}
_discont = NO;
// calc adjustment
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime last = bVideo ? _lastVideo : _lastAudio;
if (last.flags & kCMTimeFlags_Valid)
{
if (_timeOffset.flags & kCMTimeFlags_Valid)
{
pts = CMTimeSubtract(pts, _timeOffset);
}
CMTime offset = CMTimeSubtract(pts, last);
NSLog(#"Setting offset from %s", bVideo?"video": "audio");
NSLog(#"Adding %f to %f (pts %f)", ((double)offset.value)/offset.timescale, ((double)_timeOffset.value)/_timeOffset.timescale, ((double)pts.value/pts.timescale));
// this stops us having to set a scale for _timeOffset before we see the first video time
if (_timeOffset.value == 0)
{
_timeOffset = offset;
}
else
{
_timeOffset = CMTimeAdd(_timeOffset, offset);
}
}
_lastVideo.flags = 0;
_lastAudio.flags = 0;
}
// retain so that we can release either this or modified one
CFRetain(sampleBuffer);
if (_timeOffset.value > 0)
{
CFRelease(sampleBuffer);
sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
}
// record most recent time so we know the length of the pause
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
if (dur.value > 0)
{
pts = CMTimeAdd(pts, dur);
}
if (bVideo)
{
_lastVideo = pts;
}
else
{
_lastAudio = pts;
}
}
// pass frame to encoder
[_encoder encodeFrame:sampleBuffer isVideo:bVideo];
CFRelease(sampleBuffer);
}
- (CMSampleBufferRef) adjustTime:(CMSampleBufferRef) sample by:(CMTime) offset
{
CMItemCount count;
CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
CMSampleTimingInfo* pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
for (CMItemCount i = 0; i < count; i++)
{
pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
}
CMSampleBufferRef sout;
CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
free(pInfo);
return sout;
}
Now I have switched to code based on OpenGL for various reasons (base code from here - https://github.com/BradLarson/GPUImage) and I am trying to get the same functionality. I think I need to put something similar in this area, but I am not entirely sure, as the complexity of the OpenGL code is way above my head. Here is the relevant code from the file GPUImageVideoCamera.m:
- (void)processVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer;
{
if (capturePaused)
{
return;
}
CFAbsoluteTime startTime = CFAbsoluteTimeGetCurrent();
CVImageBufferRef cameraFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
int bufferWidth = (int) CVPixelBufferGetWidth(cameraFrame);
int bufferHeight = (int) CVPixelBufferGetHeight(cameraFrame);
CFTypeRef colorAttachments = CVBufferGetAttachment(cameraFrame, kCVImageBufferYCbCrMatrixKey, NULL);
if (colorAttachments != NULL)
{
if(CFStringCompare(colorAttachments, kCVImageBufferYCbCrMatrix_ITU_R_601_4, 0) == kCFCompareEqualTo)
{
if (isFullYUVRange)
{
_preferredConversion = kColorConversion601FullRange;
}
else
{
_preferredConversion = kColorConversion601;
}
}
else
{
_preferredConversion = kColorConversion709;
}
}
else
{
if (isFullYUVRange)
{
_preferredConversion = kColorConversion601FullRange;
}
else
{
_preferredConversion = kColorConversion601;
}
}
CMTime currentTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
[GPUImageContext useImageProcessingContext];
if ([GPUImageContext supportsFastTextureUpload] && captureAsYUV)
{
CVOpenGLESTextureRef luminanceTextureRef = NULL;
CVOpenGLESTextureRef chrominanceTextureRef = NULL;
// if (captureAsYUV && [GPUImageContext deviceSupportsRedTextures])
if (CVPixelBufferGetPlaneCount(cameraFrame) > 0) // Check for YUV planar inputs to do RGB conversion
{
CVPixelBufferLockBaseAddress(cameraFrame, 0);
if ( (imageBufferWidth != bufferWidth) && (imageBufferHeight != bufferHeight) )
{
imageBufferWidth = bufferWidth;
imageBufferHeight = bufferHeight;
}
CVReturn err;
// Y-plane
glActiveTexture(GL_TEXTURE4);
if ([GPUImageContext deviceSupportsRedTextures])
{
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef);
}
else
{
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE, bufferWidth, bufferHeight, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0, &luminanceTextureRef);
}
if (err)
{
NSLog(#"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
luminanceTexture = CVOpenGLESTextureGetName(luminanceTextureRef);
glBindTexture(GL_TEXTURE_2D, luminanceTexture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// UV-plane
glActiveTexture(GL_TEXTURE5);
if ([GPUImageContext deviceSupportsRedTextures])
{
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef);
}
else
{
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault, [[GPUImageContext sharedImageProcessingContext] coreVideoTextureCache], cameraFrame, NULL, GL_TEXTURE_2D, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, 1, &chrominanceTextureRef);
}
if (err)
{
NSLog(#"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
chrominanceTexture = CVOpenGLESTextureGetName(chrominanceTextureRef);
glBindTexture(GL_TEXTURE_2D, chrominanceTexture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// if (!allTargetsWantMonochromeData)
// {
[self convertYUVToRGBOutput];
// }
int rotatedImageBufferWidth = bufferWidth, rotatedImageBufferHeight = bufferHeight;
if (GPUImageRotationSwapsWidthAndHeight(internalRotation))
{
rotatedImageBufferWidth = bufferHeight;
rotatedImageBufferHeight = bufferWidth;
}
[self updateTargetsForVideoCameraUsingCacheTextureAtWidth:rotatedImageBufferWidth height:rotatedImageBufferHeight time:currentTime];
CVPixelBufferUnlockBaseAddress(cameraFrame, 0);
CFRelease(luminanceTextureRef);
CFRelease(chrominanceTextureRef);
}
else
{
}
if (_runBenchmark)
{
numberOfFramesCaptured++;
if (numberOfFramesCaptured > INITIALFRAMESTOIGNOREFORBENCHMARK)
{
CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime);
totalFrameTimeDuringCapture += currentFrameTime;
NSLog(#"Average frame time : %f ms", [self averageFrameDurationDuringCapture]);
NSLog(#"Current frame time : %f ms", 1000.0 * currentFrameTime);
}
}
}
else
{
CVPixelBufferLockBaseAddress(cameraFrame, 0);
int bytesPerRow = (int) CVPixelBufferGetBytesPerRow(cameraFrame);
outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(bytesPerRow / 4, bufferHeight) onlyTexture:YES];
[outputFramebuffer activateFramebuffer];
glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bytesPerRow / 4, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame));
[self updateTargetsForVideoCameraUsingCacheTextureAtWidth:bytesPerRow / 4 height:bufferHeight time:currentTime];
CVPixelBufferUnlockBaseAddress(cameraFrame, 0);
if (_runBenchmark)
{
numberOfFramesCaptured++;
if (numberOfFramesCaptured > INITIALFRAMESTOIGNOREFORBENCHMARK)
{
CFAbsoluteTime currentFrameTime = (CFAbsoluteTimeGetCurrent() - startTime);
totalFrameTimeDuringCapture += currentFrameTime;
}
}
}
}
For reference, the example project in the OpenGL framework is SimpleVideoFilter. Any help on how to go about adding proper pausing functionality, from someone with experience of this OpenGL code, would be great. Thank you - Chaz

After much trial and error I worked out a way that seems pretty robust by merging the two code samples above. For anyone using the OpenGL framework from Brad Larson, you can put this property into GPUImageVideoCamera.h:
@property(readwrite, nonatomic) BOOL discont;
and this code in place of the captureOutput function in the GPUImageVideoCamera.m file:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
if (!self.captureSession.isRunning)
{
return;
}
if (capturePaused)
{
return;
}
else if (captureOutput == audioOutput)
{
[self processAudioSampleBuffer:sampleBuffer];
}
else
{
if (dispatch_semaphore_wait(frameRenderingSemaphore, DISPATCH_TIME_NOW) != 0)
{
return;
}
if (_discont)
{
_discont = NO;
// calc adjustment
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime last = _lastVideo;
if (last.flags & kCMTimeFlags_Valid)
{
if (_timeOffset.flags & kCMTimeFlags_Valid)
{
pts = CMTimeSubtract(pts, _timeOffset);
}
CMTime offset = CMTimeSubtract(pts, last);
NSLog(#"Adding %f to %f (pts %f)", ((double)offset.value)/offset.timescale, ((double)_timeOffset.value)/_timeOffset.timescale, ((double)pts.value/pts.timescale));
// this stops us having to set a scale for _timeOffset before we see the first video time
if (_timeOffset.value == 0)
{
_timeOffset = offset;
}
else
{
_timeOffset = CMTimeAdd(_timeOffset, offset);
}
}
_lastVideo.flags = 0;
_lastAudio.flags = 0;
}
// retain so that we can release either this or modified one
CFRetain(sampleBuffer);
if (_timeOffset.value > 0)
{
CFRelease(sampleBuffer);
sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
}
// record most recent time so we know the length of the pause
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
if (dur.value > 0)
{
pts = CMTimeAdd(pts, dur);
}
_lastVideo = pts;
runAsynchronouslyOnVideoProcessingQueue(^{
//Feature Detection Hook.
if (self.delegate)
{
[self.delegate willOutputSampleBuffer:sampleBuffer];
}
[self processVideoSampleBuffer:sampleBuffer];
CFRelease(sampleBuffer);
dispatch_semaphore_signal(frameRenderingSemaphore);
});
}
}
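Note that the code above also relies on the pause-offset ivars and the adjustTime:by: helper from the CapturePause sample, which GPUImageVideoCamera does not have by default. Here is a minimal sketch of the additions to GPUImageVideoCamera.m (names assumed to match the code above; you can equally add the ivars to the class's existing ivar block):
// Ivars carried over from the CapturePause sample, shown here as a class extension.
@interface GPUImageVideoCamera ()
{
    CMTime _timeOffset;   // accumulated length of all pauses so far
    CMTime _lastVideo;    // presentation time of the last video sample seen
    CMTime _lastAudio;    // presentation time of the last audio sample seen
}
@end
// Same timestamp-shifting helper as in the CapturePause code above,
// added inside the @implementation of GPUImageVideoCamera.
- (CMSampleBufferRef)adjustTime:(CMSampleBufferRef)sample by:(CMTime)offset
{
    CMItemCount count;
    CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
    CMSampleTimingInfo *pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
    CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
    for (CMItemCount i = 0; i < count; i++)
    {
        pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
        pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
    }
    CMSampleBufferRef sout;
    CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
    free(pInfo);
    return sout;
}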
From your main view controller the code is super simple (listed here as button actions, but it can be put most places); just make sure you set discont = NO on startup to keep it clean (a minimal startup sketch follows the button actions):
- (IBAction)PauseButton:(id)sender
{
[videoCamera pauseCameraCapture];
videoCamera.discont = YES;
}
- (IBAction)ResumeButton:(id)sender
{
[videoCamera resumeCameraCapture];
}
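For completeness, a minimal sketch of the startup side, assuming a GPUImageVideoCamera ivar named videoCamera and a GPUImageView outlet named filterView as in the SimpleVideoFilter example; the only extra step versus a normal GPUImage setup is clearing the discont flag:
- (void)viewDidLoad
{
    [super viewDidLoad];
    // Standard GPUImage camera setup (names here are assumptions based on SimpleVideoFilter).
    videoCamera = [[GPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset640x480
                                                      cameraPosition:AVCaptureDevicePositionBack];
    videoCamera.outputImageOrientation = UIInterfaceOrientationPortrait;
    videoCamera.discont = NO; // keep the pause-offset logic clean from the start
    [videoCamera addTarget:filterView];
    [videoCamera startCameraCapture];
}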
Hope this helps anyone facing the same challenge

Related

Changing the camera from back to front while capturing video causes a weird issue

I am creating a camera app which captures video using SCRecorder. I am trying to apply multiple filters to a video. I am changing the recorder's video configuration as below:
func swipeableFilterView(_ swipeableFilterView: SCSwipeableFilterView, didScrollTo filter: SCFilter?) {
selectedFilter = filter!
recorder.videoConfiguration.filter = filter!
}
I am capturing video while applying filters. When I change the camera from back to front, I get a black screen on the right side, as shown in the image below.
With the back camera it works perfectly.
Here is the code of appendVideoSampleBuffer:
- (void)appendVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer toRecordSession:(SCRecordSession *)recordSession duration:(CMTime)duration connection:(AVCaptureConnection *)connection completion:(void(^)(BOOL success))completion {
@autoreleasepool {
CVPixelBufferRef sampleBufferImage = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t bufferWidth = (CGFloat)CVPixelBufferGetWidth(sampleBufferImage);
size_t bufferHeight = (CGFloat)CVPixelBufferGetHeight(sampleBufferImage);
CMTime time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
SCFilter *filterGroup = _videoConfiguration.filter;
SCFilter *transformFilter = [self _transformFilterUsingBufferWidth:bufferWidth bufferHeight:bufferHeight mirrored:
_device == AVCaptureDevicePositionFront
];
if (filterGroup == nil && transformFilter == nil) {
[recordSession appendVideoPixelBuffer:sampleBufferImage atTime:time duration:duration completion:completion];
return;
}
CVPixelBufferRef pixelBuffer = [recordSession createPixelBuffer];
if (pixelBuffer == nil) {
completion(NO);
return;
}
CIImage *image = [CIImage imageWithCVPixelBuffer:sampleBufferImage];
CFTimeInterval seconds = CMTimeGetSeconds(time);
if (transformFilter != nil) {
image = [transformFilter imageByProcessingImage:image atTime:seconds];
}
if (filterGroup != nil) {
image = [filterGroup imageByProcessingImage:image atTime:seconds];
}
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
[_context render:image toCVPixelBuffer:pixelBuffer];
[recordSession appendVideoPixelBuffer:pixelBuffer atTime:time duration:duration completion:^(BOOL success) {
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
completion(success);
}];
}
}
I debugged the code and I think the issue is with
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
Can anyone please help me?

AVAssetWriter frame rate

I used the following code to encode a video from several local pictures, but the problem is that I have 30 pictures and only get a 1-second video. Is there any way to get a 30-second video at a 24 fps frame rate?
- (BOOL)encodeReadySamplesFromOutput:(AVAssetReaderOutput *)output toInput:(AVAssetWriterInput *)input
{
NSLog(#"Frame init m == %d",m);
while (input.isReadyForMoreMediaData)
{
CMSampleBufferRef sampleBuffer = [output copyNextSampleBuffer];
if (sampleBuffer)
{
BOOL handled = NO;
BOOL error = NO;
CMItemCount count;
CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, 0, nil, &count);
CMSampleTimingInfo *timingInfo = malloc(sizeof(CMSampleTimingInfo) * count);
CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, count, timingInfo, &count);
for (CMItemCount i = 0; i < count; i++)
{
timingInfo[i].decodeTimeStamp = kCMTimeInvalid;
timingInfo[i].presentationTimeStamp = CMTimeMake(m, 24);
// timingInfo[i].duration = CMTimeMake(1, 12);
}
CMSampleBufferRef completedSampleBuffer;
CMSampleBufferCreateCopyWithNewTiming(kCFAllocatorDefault, sampleBuffer, count, timingInfo, &completedSampleBuffer);
free(timingInfo);
if (self.reader.status != AVAssetReaderStatusReading || self.writer.status != AVAssetWriterStatusWriting)
{
handled = YES;
error = YES;
}
if (!handled && self.videoOutput == output)
{
// update the video progress
++m;
NSLog(#"Frame m == %d",m);
lastSamplePresentationTime = CMSampleBufferGetPresentationTimeStamp(completedSampleBuffer);
CMTimeValue value = lastSamplePresentationTime.value;
CMTimeScale scale = lastSamplePresentationTime.timescale;
NSLog(#"Frame value == %lld", value);
NSLog(#"Frame scale == %d",scale);
self.progress = duration == 0 ? 1 : CMTimeGetSeconds(lastSamplePresentationTime) / duration;
if ([self.delegate respondsToSelector:@selector(exportSession:renderFrame:withPresentationTime:toBuffer:)])
{
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(completedSampleBuffer);
CVPixelBufferRef renderBuffer = NULL;
CVPixelBufferPoolCreatePixelBuffer(NULL, self.videoPixelBufferAdaptor.pixelBufferPool, &renderBuffer);
[self.delegate exportSession:self renderFrame:pixelBuffer withPresentationTime:lastSamplePresentationTime toBuffer:renderBuffer];
if (![self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer withPresentationTime:lastSamplePresentationTime])
{
error = YES;
}
CVPixelBufferRelease(renderBuffer);
handled = YES;
}
}
if (!handled && ![input appendSampleBuffer:completedSampleBuffer])
{
error = YES;
}
CFRelease(sampleBuffer);
CFRelease(completedSampleBuffer);
if (error)
{
return NO;
}
}
else
{
[input markAsFinished];
return NO;
}
}
return YES;
}
Not unless you get a lot more pictures or repeat the ones you have.
In either case, you're going to have to calculate presentation time yourself, with something like CMTimeMake(m, 24), e.g.:
[self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer withPresentationTime:CMTimeMake(m, 24)];
If you dropped the 24 fps requirement (why do you need that?) you could get a 30-second video of 30 images at 1 fps by using CMTimeMake(m, 1) instead in appendPixelBuffer:withPresentationTime:.
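For instance, here is a rough sketch of the repeat-frames approach, reusing the question's m counter, videoPixelBufferAdaptor and writer; pixelBufferForImageAtIndex: is a hypothetical helper that returns a pixel buffer for one of the 30 pictures. Each picture is appended 24 times, so the output covers 30 seconds at 24 fps:
// Rough sketch: picture i fills the one-second span starting at i seconds.
int m = 0; // running output frame index, as in the question
for (int i = 0; i < 30; i++)
{
    CVPixelBufferRef renderBuffer = [self pixelBufferForImageAtIndex:i]; // hypothetical helper
    for (int repeat = 0; repeat < 24; repeat++)
    {
        while (!self.videoPixelBufferAdaptor.assetWriterInput.isReadyForMoreMediaData)
        {
            [NSThread sleepForTimeInterval:0.05]; // crude back-pressure wait
        }
        if (![self.videoPixelBufferAdaptor appendPixelBuffer:renderBuffer
                                        withPresentationTime:CMTimeMake(m, 24)])
        {
            NSLog(@"append failed: %@", self.writer.error);
        }
        m++;
    }
    CVPixelBufferRelease(renderBuffer);
}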

is it possible to set GIF image with video?

I am trying to combine a video with a GIF image. For this I am using MainCompositionInst.animationTool = [AVVideoCompositionCoreAnimationTool videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer]; and in the video layer I set the GIF image, but unfortunately it was not animating. So my question is: is it possible to do this? Please suggest.
Thanks in advance.
Apple's support for GIF is fairly limited.
You could use this code to convert from GIF to Video:
(With the current code the GIF will be cropped to 480x480. For some resolutions the output image's colors are distorted, so try to use a fixed frame that you know works.)
Usage:
#import "SCGIFConverter.h"
NSURL *tempFileURL = //create a NSURL to a tempfile for output
[SCGIFConverter processGIFData:data toFilePath:tempFileURL completed:^(NSString *outputFilePath, NSError *error)
{
//Now you can access your tempFileURL to read the movie
//outputFilePath can be 'nil' if there was a problem
}];
SCGIFConverter.h
FOUNDATION_EXTERN NSString * const kGIF2MP4ConversionErrorDomain;
typedef enum {
kGIF2MP4ConversionErrorInvalidGIFImage = 0,
kGIF2MP4ConversionErrorAlreadyProcessing,
kGIF2MP4ConversionErrorBufferingFailed,
kGIF2MP4ConversionErrorInvalidResolution,
kGIF2MP4ConversionErrorTimedOut,
} kGIF2MP4ConversionError;
typedef void (^kGIF2MP4ConversionCompleted) (NSString* outputFilePath, NSError* error);
@interface SCGIFConverter : NSObject
+ (BOOL) processGIFData: (NSData*) data
toFilePath: (NSURL*) outFilePath
completed: (kGIF2MP4ConversionCompleted)handler;
@end
SCGIFConverter.m
#import <AVFoundation/AVFoundation.h>
#import <ImageIO/ImageIO.h>
#import <MobileCoreServices/MobileCoreServices.h>
#import "SCGIFConverter.h"
#define FPS 30
NSString * const kGIF2MP4ConversionErrorDomain = @"GIF2MP4ConversionError";
@implementation SCGIFConverter
+ (BOOL) processGIFData: (NSData*) data
toFilePath: (NSURL*) outFilePath
completed: (kGIF2MP4ConversionCompleted) completionHandler {
[[NSFileManager defaultManager] removeItemAtURL:outFilePath error:nil];
CGImageSourceRef source = CGImageSourceCreateWithData((__bridge CFDataRef)data, NULL);
CGImageMetadataRef meta = CGImageSourceCopyMetadataAtIndex(source, 0, NULL);
NSLog(#"%#",meta);
unsigned char *bytes = (unsigned char*)data.bytes;
NSError* error = nil;
if( CGImageSourceGetStatus(source) != kCGImageStatusComplete ) {
error = [NSError errorWithDomain: kGIF2MP4ConversionErrorDomain
code: kGIF2MP4ConversionErrorInvalidGIFImage
userInfo: nil];
CFRelease(source);
completionHandler(outFilePath.absoluteString, error);
return NO;
}
size_t sourceWidth = bytes[6] + (bytes[7]<<8), sourceHeight = bytes[8] + (bytes[9]<<8);
sourceWidth = 480;
sourceHeight = 480;
//size_t sourceFrameCount = CGImageSourceGetCount(source);
__block size_t currentFrameNumber = 0;
__block Float64 totalFrameDelay = 0.f;
AVAssetWriter* videoWriter = [[AVAssetWriter alloc] initWithURL: outFilePath
fileType: AVFileTypeQuickTimeMovie
error: &error];
if( error ) {
CFRelease(source);
completionHandler(outFilePath.absoluteString, error);
return NO;
}
if( sourceWidth > 6400 || sourceWidth == 0) {
CFRelease(source);
error = [NSError errorWithDomain: kGIF2MP4ConversionErrorDomain
code: kGIF2MP4ConversionErrorInvalidResolution
userInfo: nil];
completionHandler(outFilePath.absoluteString, error);
return NO;
}
if( sourceHeight > 4800 || sourceHeight == 0 ) {
CFRelease(source);
error = [NSError errorWithDomain: kGIF2MP4ConversionErrorDomain
code: kGIF2MP4ConversionErrorInvalidResolution
userInfo: nil];
completionHandler(outFilePath.absoluteString, error);
return NO;
}
size_t totalFrameCount = CGImageSourceGetCount(source);
if( totalFrameCount <= 0 ) {
CFRelease(source);
error = [NSError errorWithDomain: kGIF2MP4ConversionErrorDomain
code: kGIF2MP4ConversionErrorInvalidGIFImage
userInfo: nil];
completionHandler(outFilePath.absoluteString, error);
return NO;
}
NSDictionary *videoSettings = @{
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : @(sourceWidth),
AVVideoHeightKey : @(sourceHeight)
};
AVAssetWriterInput* videoWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType: AVMediaTypeVideo
outputSettings: videoSettings];
videoWriterInput.expectsMediaDataInRealTime = YES;
NSAssert([videoWriter canAddInput: videoWriterInput], @"Video writer can not add video writer input");
[videoWriter addInput: videoWriterInput];
NSDictionary* attributes = @{
(NSString*)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32ARGB),
(NSString*)kCVPixelBufferWidthKey : @(sourceWidth),
(NSString*)kCVPixelBufferHeightKey : @(sourceHeight),
(NSString*)kCVPixelBufferCGImageCompatibilityKey : @YES,
(NSString*)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES
};
AVAssetWriterInputPixelBufferAdaptor* adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput: videoWriterInput
sourcePixelBufferAttributes: attributes];
[videoWriter startWriting];
[videoWriter startSessionAtSourceTime: CMTimeMakeWithSeconds(0, FPS)];
while(YES) {
if( videoWriterInput.isReadyForMoreMediaData ) {
#if DEBUG
//NSLog(#"Drawing frame %lu/%lu", currentFrameNumber, totalFrameCount);
#endif
NSDictionary* options = @{(NSString*)kCGImageSourceTypeIdentifierHint : (id)kUTTypeGIF};
CGImageRef imgRef = CGImageSourceCreateImageAtIndex(source, currentFrameNumber, (__bridge CFDictionaryRef)options);
if( imgRef ) {
CFDictionaryRef propertiesT = CGImageSourceCopyProperties(source, NULL);
CFDictionaryRef properties = CGImageSourceCopyPropertiesAtIndex(source, currentFrameNumber, NULL);
CFDictionaryRef gifProperties = CFDictionaryGetValue(properties, kCGImagePropertyGIFDictionary);
if( gifProperties ) {
CVPixelBufferRef pxBuffer = [self newBufferFrom: imgRef
withPixelBufferPool: adaptor.pixelBufferPool
andAttributes: adaptor.sourcePixelBufferAttributes];
if( pxBuffer ) {
NSNumber* delayTime = CFDictionaryGetValue(gifProperties, kCGImagePropertyGIFDelayTime);
if (currentFrameNumber!=0) {
totalFrameDelay += delayTime.floatValue;
}
CMTime time = CMTimeMakeWithSeconds(totalFrameDelay, FPS);
if( ![adaptor appendPixelBuffer: pxBuffer withPresentationTime: time] ) {
NSLog(#"Could not save pixel buffer!: %#", videoWriter.error);
CFRelease(properties);
CGImageRelease(imgRef);
CVBufferRelease(pxBuffer);
break;
}
CVBufferRelease(pxBuffer);
}
}
if( properties ) CFRelease(properties);
CGImageRelease(imgRef);
currentFrameNumber++;
}
else {
//was no image returned -> end of file?
[videoWriterInput markAsFinished];
void (^videoSaveFinished)(void) = ^{
AVAssetWriter * retainedVideoWriter = videoWriter;
completionHandler(outFilePath.absoluteString, nil);
retainedVideoWriter = nil;
};
if( [videoWriter respondsToSelector: @selector(finishWritingWithCompletionHandler:)]) {
[videoWriter finishWritingWithCompletionHandler: videoSaveFinished];
}
else {
[videoWriter finishWriting];
videoSaveFinished();
}
break;
}
}
else {
//NSLog(#"Was not ready...");
[NSThread sleepForTimeInterval: 0.1];
}
};
CFRelease(source);
return YES;
};
+ (CVPixelBufferRef) newBufferFrom: (CGImageRef) frame
withPixelBufferPool: (CVPixelBufferPoolRef) pixelBufferPool
andAttributes: (NSDictionary*) attributes {
NSParameterAssert(frame);
size_t width = 480;//CGImageGetWidth(frame);
size_t height = 480;//CGImageGetHeight(frame);
size_t frameHeight = height;
size_t frameWidth = CGImageGetWidth(frame)*height/CGImageGetHeight(frame);
if (frameWidth<width) {
frameWidth = width;
frameHeight = CGImageGetHeight(frame)*width/CGImageGetWidth(frame);
}
CGFloat relax = 0.12;
if (frameWidth>width) {
CGFloat factor = MAX(width/frameWidth,1-relax);
frameWidth*=factor;
}
if (frameHeight>height) {
CGFloat factor = MAX(height/frameHeight,1-relax);
frameHeight*=factor;
}
size_t bpc = 8;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CVPixelBufferRef pxBuffer = NULL;
CVReturn status = kCVReturnSuccess;
if( pixelBufferPool )
status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pxBuffer);
else {
status = CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef)attributes, &pxBuffer);
}
NSAssert(status == kCVReturnSuccess, @"Could not create a pixel buffer");
CVPixelBufferLockBaseAddress(pxBuffer, 0);
void *pxData = CVPixelBufferGetBaseAddress(pxBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pxBuffer);
CGContextRef context = CGBitmapContextCreate(pxData,
width,
height,
bpc,
bytesPerRow,
colorSpace,
kCGImageAlphaNoneSkipFirst);
NSAssert(context, @"Could not create a context");
CGContextDrawImage(context,
CGRectMake(-(frameWidth-(CGFloat)width)/2, -(frameHeight-(CGFloat)height)/2, frameWidth, frameHeight), frame);
CVPixelBufferUnlockBaseAddress(pxBuffer, 0);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
return pxBuffer;
}
@end

How to find a memory leak in iOS / Xcode?

It's my RTSP streaming iOS application with an FFmpeg decoder, and it streams fine, but the memory keeps increasing while it runs. Please help me: is it a memory leak? And how can I track the leak?
This is my video streaming class, RTSPPlayer.m:
#import "RTSPPlayer.h"
#import "Utilities.h"
#import "AudioStreamer.h"
@interface RTSPPlayer ()
@property (nonatomic, retain) AudioStreamer *audioController;
@end
@interface RTSPPlayer (private)
-(void)convertFrameToRGB;
-(UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height;
-(void)setupScaler;
@end
@implementation RTSPPlayer
@synthesize audioController = _audioController;
@synthesize audioPacketQueue,audioPacketQueueSize;
@synthesize _audioStream,_audioCodecContext;
@synthesize emptyAudioBuffer;
@synthesize outputWidth, outputHeight;
- (void)setOutputWidth:(int)newValue
{
if (outputWidth != newValue) {
outputWidth = newValue;
[self setupScaler];
}
}
- (void)setOutputHeight:(int)newValue
{
if (outputHeight != newValue) {
outputHeight = newValue;
[self setupScaler];
}
}
- (UIImage *)currentImage
{
if (!pFrame->data[0]) return nil;
[self convertFrameToRGB];
return [self imageFromAVPicture:picture width:outputWidth height:outputHeight];
}
- (double)duration
{
return (double)pFormatCtx->duration / AV_TIME_BASE;
}
- (double)currentTime
{
AVRational timeBase = pFormatCtx->streams[videoStream]->time_base;
return packet.pts * (double)timeBase.num / timeBase.den;
}
- (int)sourceWidth
{
return pCodecCtx->width;
}
- (int)sourceHeight
{
return pCodecCtx->height;
}
- (id)initWithVideo:(NSString *)moviePath usesTcp:(BOOL)usesTcp
{
if (!(self=[super init])) return nil;
AVCodec *pCodec;
// Register all formats and codecs
avcodec_register_all();
av_register_all();
avformat_network_init();
// Set the RTSP Options
AVDictionary *opts = 0;
if (usesTcp)
av_dict_set(&opts, "rtsp_transport", "tcp", 0);
if (avformat_open_input(&pFormatCtx, [moviePath UTF8String], NULL, &opts) !=0 ) {
av_log(NULL, AV_LOG_ERROR, "Couldn't open file\n");
goto initError;
}
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx,NULL) < 0) {
av_log(NULL, AV_LOG_ERROR, "Couldn't find stream information\n");
goto initError;
}
// Find the first video stream
videoStream=-1;
audioStream=-1;
for (int i=0; i<pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
NSLog(#"found video stream");
videoStream=i;
}
if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
audioStream=i;
NSLog(#"found audio stream");
}
}
if (videoStream==-1 && audioStream==-1) {
goto initError;
}
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
av_log(NULL, AV_LOG_ERROR, "Unsupported codec!\n");
goto initError;
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
goto initError;
}
if (audioStream > -1 ) {
NSLog(#"set up audiodecoder");
[self setupAudioDecoder];
}
// Allocate video frame
pFrame = avcodec_alloc_frame();
outputWidth = pCodecCtx->width;
self.outputHeight = pCodecCtx->height;
return self;
initError:
// [self release];
return nil;
}
- (void)setupScaler
{
// Release old picture and scaler
avpicture_free(&picture);
sws_freeContext(img_convert_ctx);
// Allocate RGB picture
avpicture_alloc(&picture, PIX_FMT_RGB24, outputWidth, outputHeight);
// Setup scaler
static int sws_flags = SWS_FAST_BILINEAR;
img_convert_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
outputWidth,
outputHeight,
PIX_FMT_RGB24,
sws_flags, NULL, NULL, NULL);
}
- (void)seekTime:(double)seconds
{
AVRational timeBase = pFormatCtx->streams[videoStream]->time_base;
int64_t targetFrame = (int64_t)((double)timeBase.den / timeBase.num * seconds);
avformat_seek_file(pFormatCtx, videoStream, targetFrame, targetFrame, targetFrame, AVSEEK_FLAG_FRAME);
avcodec_flush_buffers(pCodecCtx);
}
- (void)dealloc
{
// Free scaler
sws_freeContext(img_convert_ctx);
// Free RGB picture
avpicture_free(&picture);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the YUV frame
av_free(pFrame);
// Close the codec
if (pCodecCtx) avcodec_close(pCodecCtx);
// Close the video file
if (pFormatCtx) avformat_close_input(&pFormatCtx);
[_audioController _stopAudio];
// [_audioController release];
_audioController = nil;
// [audioPacketQueue release];
audioPacketQueue = nil;
// [audioPacketQueueLock release];
audioPacketQueueLock = nil;
// [super dealloc];
}
- (BOOL)stepFrame
{
// AVPacket packet;
int frameFinished=0;
while (!frameFinished && av_read_frame(pFormatCtx, &packet) >=0 ) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
if (packet.stream_index==audioStream) {
// NSLog(#"audio stream");
[audioPacketQueueLock lock];
audioPacketQueueSize += packet.size;
[audioPacketQueue addObject:[NSMutableData dataWithBytes:&packet length:sizeof(packet)]];
[audioPacketQueueLock unlock];
if (!primed) {
primed=YES;
[_audioController _startAudio];
}
if (emptyAudioBuffer) {
[_audioController enqueueBuffer:emptyAudioBuffer];
}
}
}
return frameFinished!=0;
}
- (void)convertFrameToRGB
{
sws_scale(img_convert_ctx,
pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
picture.data,
picture.linesize);
}
- (UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height
{
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, pict.data[0], pict.linesize[0]*height,kCFAllocatorNull);
CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGImageRef cgImage = CGImageCreate(width,
height,
8,
24,
pict.linesize[0],
colorSpace,
bitmapInfo,
provider,
NULL,
NO,
kCGRenderingIntentDefault);
CGColorSpaceRelease(colorSpace);
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CGDataProviderRelease(provider);
CFRelease(data);
return image;
}
- (void)setupAudioDecoder
{
if (audioStream >= 0) {
_audioBufferSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
_audioBuffer = av_malloc(_audioBufferSize);
_inBuffer = NO;
_audioCodecContext = pFormatCtx->streams[audioStream]->codec;
_audioStream = pFormatCtx->streams[audioStream];
AVCodec *codec = avcodec_find_decoder(_audioCodecContext->codec_id);
if (codec == NULL) {
NSLog(#"Not found audio codec.");
return;
}
if (avcodec_open2(_audioCodecContext, codec, NULL) < 0) {
NSLog(#"Could not open audio codec.");
return;
}
if (audioPacketQueue) {
// [audioPacketQueue release];
audioPacketQueue = nil;
}
audioPacketQueue = [[NSMutableArray alloc] init];
if (audioPacketQueueLock) {
// [audioPacketQueueLock release];
audioPacketQueueLock = nil;
}
audioPacketQueueLock = [[NSLock alloc] init];
if (_audioController) {
[_audioController _stopAudio];
// [_audioController release];
_audioController = nil;
}
_audioController = [[AudioStreamer alloc] initWithStreamer:self];
} else {
pFormatCtx->streams[audioStream]->discard = AVDISCARD_ALL;
audioStream = -1;
}
}
- (void)nextPacket
{
_inBuffer = NO;
}
- (AVPacket*)readPacket
{
if (_currentPacket.size > 0 || _inBuffer) return &_currentPacket;
NSMutableData *packetData = [audioPacketQueue objectAtIndex:0];
_packet = [packetData mutableBytes];
if (_packet) {
if (_packet->dts != AV_NOPTS_VALUE) {
_packet->dts += av_rescale_q(0, AV_TIME_BASE_Q, _audioStream->time_base);
}
if (_packet->pts != AV_NOPTS_VALUE) {
_packet->pts += av_rescale_q(0, AV_TIME_BASE_Q, _audioStream->time_base);
}
[audioPacketQueueLock lock];
audioPacketQueueSize -= _packet->size;
if ([audioPacketQueue count] > 0) {
[audioPacketQueue removeObjectAtIndex:0];
}
[audioPacketQueueLock unlock];
_currentPacket = *(_packet);
}
return &_currentPacket;
}
- (void)closeAudio
{
[_audioController _stopAudio];
primed=NO;
}
@end
Presented as an answer for formatting and images.
Use Instruments to check for leaks and for memory loss due to retained but not leaked memory; the latter is unused memory that is still pointed to. Use Mark Generation (Heapshot) in the Allocations instrument in Instruments.
For how to use Heapshot to find memory creep, see: bbum blog
Basically, the method is to run the Instruments Allocations tool, take a heapshot, run an iteration of your code, and take another heapshot, repeating 3 or 4 times. This will indicate memory that is allocated but not released during the iterations.
To figure out the results, disclose each heapshot to see the individual allocations.
If you need to see where retains, releases and autoreleases occur for an object, use Instruments:
Run in Instruments; in Allocations, set "Record reference counts" on (for Xcode 5 and lower you have to stop recording to set the option). Run the app, stop recording, drill down, and you will be able to see where all retains, releases and autoreleases occurred.

OpenGL GLKit color picking: glReadPixels always returns zero-colored pixels (0,0,0)

The following code can be a complete template for color picking with GLKit. I just need to understand why all the pixels in the offscreen buffer are colored (0,0,0), as printed to NSLog below.
Note: the vertices array is defined in a header file as a const array; it is displayed correctly on screen in another section of my project (my problem is only with the offscreen buffer).
Header file:
typedef struct {
float Position[3];
float Color[4];
float TexCoord[2];
float Normal[3];
} Vertex;
//Vertices array format: {{vertex.x, vertex.y, vertex.z}, {color.R, color.G, color.B, color.alpha}, {texture.U, texture.V}, {normal.x, normal.y, normal.z}},
const Vertex parparit51OBJVertices[] = {
{{0.057, -0.088, -0.155},{1,1,1,1},{0.848, 0.810}, {0.329, -0.157, -0.931}},
{{0.056, -0.035, -0.165},{1,1,1,1},{0.848, 0.811}, {0.338, -0.139, -0.931}}, ......
In the viewController code:
GLuint _pickFBO = 0;
int32_t glVertexAttributeBufferID = 0;
- (IBAction) tapGesture:(id)sender
{
if ([(UITapGestureRecognizer *)sender state] == UIGestureRecognizerStateEnded) {
NSLog( #"In tap ended" );
CGPoint tapLocation = [(UITapGestureRecognizer *)sender locationInView:self.view];
int tt = [self findMeshByPoint:tapLocation];
}
}
- (NSUInteger)findMeshByPoint:(CGPoint)point
{
//In openGL the y axis starts from the bottom of the screen
point.y = self.view.bounds.size.height - point.y;
GLKView *glView = (GLKView *)self.view;
NSAssert([glView isKindOfClass:[GLKView class]],
#"View controller's view is not a GLKView");
// Make the view's context current
[EAGLContext setCurrentContext:glView.context];
_height = ((GLKView *)self.view).drawableHeight;
_width = ((GLKView *)self.view).drawableWidth;
self.effect.useConstantColor = GL_TRUE;
self.effect.colorMaterialEnabled = GL_TRUE;
self.effect.light0.diffuseColor = GLKVector4Make(1.0f,1.0f,1.0f,1.0f);
glBindVertexArrayOES(0);
glDisable(GL_DITHER);
glEnable(GL_DEPTH_TEST);
glLineWidth(2.0F);
// Important to turn light off !!!
self.effect.light0.enabled = GL_TRUE;
glDisableVertexAttribArray(GLKVertexAttribTexCoord0);
//this crashes the code
//glEnableVertexAttribArray(GLKVertexAttribColor);
self.effect.constantColor = GLKVector4Make( 0.0f, //This should be meshId/255.0f
0.8f, 0.8f, 1.0f);
if(0 == _glVertexAttributeBufferID)
{
GLuint glName;
glGenBuffers(1, // STEP 1
&glName);
glBindBuffer(GL_ARRAY_BUFFER, // STEP 2
glName);
glBufferData( // STEP 3
GL_ARRAY_BUFFER, // Initialize buffer contents
sizeof(parparit51OBJVertices), parparit51OBJVertices,
GL_STATIC_DRAW); // Hint: cache in GPU memory
_glVertexAttributeBufferID = glName;
GLenum err = glGetError();
if (err != GL_NO_ERROR) {
NSLog(#"Error creating buffer %i. glError: 0x%04X", glName, err);
}
}
else
{
glBindBuffer(GL_ARRAY_BUFFER,
_glVertexAttributeBufferID);
}
[self buildFBO];
glBindFramebuffer(GL_FRAMEBUFFER, _pickFBO);
//glViewport(0, 0, _width, _height);
//???
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 12 * sizeof(GLfloat), 0);
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 12 * sizeof(GLfloat), (GLvoid*) (sizeof(float) * 9));
GLKMatrix4 modelViewMatrixForParparit = GLKMatrix4MakeTranslation(0.0f, 0.3f, -3.5f );
modelViewMatrixForParparit = GLKMatrix4Scale(modelViewMatrixForParparit, 1.0, 1.0, 1.0);
self.effect.transform.modelviewMatrix = modelViewMatrixForParparit;
self.effect.constantColor = GLKVector4Make( 0.8f, 0.3f, 0.3f, 1.0f );
[self.effect prepareToDraw];
glDrawArrays(GL_TRIANGLES, 0, sizeof(parparit51OBJVertices) / sizeof(Vertex));
const GLfloat width = [glView drawableWidth];
const GLfloat height = [glView drawableHeight];
NSAssert(0 < width && 0 < height, @"Invalid drawable size");
int blackPixelsCounter = 0;
int coloredPixelsCounter = 0;
GLubyte savePixelColor[4] = {0, };
bool bFoundDifferentColors = NO;
GLubyte pixelColor[4]; // Red, Green, Blue, Alpha color
glReadPixels(50,
50,
1,
1,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixelColor);
//#ifdef DEBUG
{ // Report any errors
GLenum error = glGetError();
if(GL_NO_ERROR != error)
{
NSLog(#"GL Error: 0x%x", error);
}
}
//#endif
savePixelColor[0] = pixelColor[0];
savePixelColor[1] = pixelColor[1];
savePixelColor[2] = pixelColor[2];
for (GLint xx=0; xx<_width; xx++) {
for (GLint yy=0; yy<_height; yy++) {
glReadPixels(xx,
yy,
1,
1,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixelColor);
//#ifdef DEBUG
{ // Report any errors
GLenum error = glGetError();
if(GL_NO_ERROR != error)
{
NSLog(#"GL Error: 0x%x", error);
}
}
//#endif
if ( (savePixelColor[0] != pixelColor[0]) || (savePixelColor[1] != pixelColor[1]) || (savePixelColor[2] != pixelColor[2]) )
{
bFoundDifferentColors = YES;
}
if ( (pixelColor[0] !=0) || (pixelColor[1] !=0) || (pixelColor[2] !=0) ) {
//NSLog(#"pixelColor[0]=%i, pixelColor[1]=%i, pixelColor[2]=%i", pixelColor[0], pixelColor[1], pixelColor[2] );
coloredPixelsCounter++;
}
else
{
blackPixelsCounter++;
}
}
}
NSLog( #"colored pixels=%i black pixels=%i", coloredPixelsCounter, blackPixelsCounter );
if ( bFoundDifferentColors )
{
NSLog( #"Found at least 2 different pixels colors in picking buffer !" );
}
else
{
NSLog( #"All pixels have the same color: %i, %i, %i", savePixelColor[0], savePixelColor[1], savePixelColor[2]);
}
NSLog( #"******* 9" );
//--- at the end !!! -------
// Restore OpenGL state that pickTerrainEffect changed
glBindFramebuffer(GL_FRAMEBUFFER, 0); // default frame buffer
//glViewport(0, 0, _width, _height); // full area of glView
//#ifdef DEBUG
{ // Report any errors
GLenum error = glGetError();
if(GL_NO_ERROR != error)
{
NSLog(#"GL Error: 0x%x", error);
}
}
//#endif
NSLog( #"******* 10" );
return 0;
}
//tap-11
-(void) buildFBO
{
NSLog(#"before: buildFBO._pickFBO=%i", _pickFBO );
if ( 0 == _pickFBO )
{
NSLog(#"buildFBO._pickFBO=%i", _pickFBO );
GLuint colorRenderbuffer;
//GLuint framebuffer;
glGenFramebuffers(1, &_pickFBO);
glBindFramebuffer(GL_FRAMEBUFFER, _pickFBO);
glGenRenderbuffers(1, &colorRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8_OES, _width, _height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0_OES, GL_RENDERBUFFER, colorRenderbuffer);
GLuint depthRenderbuffer;
glGenRenderbuffers(1, &depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, _width, _height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) !=
GL_FRAMEBUFFER_COMPLETE)
{
NSLog(#"failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
//+++tbd+++UtilityPickTerrainEffectDestroyFBO(fboName);
return;
}
//#ifdef DEBUG
// { // Report any errors
GLenum error = glGetError();
if(GL_NO_ERROR != error)
{
NSLog(#"GL Error: 0x%x", error);
}
// }
//#endif
}
}
I think you need to close the texture out before reading from it. My color picker implementation is working fine. But I remember that for about 2 days I was also getting only 0,0,0,0.
Here's a clue for you. Is the pixel you're getting (0,0,0,0) or (0,0,0,1)? Because you only mention that it's (0,0,0). Since you set your clear color to (0,0,0,1), if you're getting an alpha of zero it's not reading the buffer at all.
I recommend doing it in two steps: one method to draw the color picker, another to read from it. Here's my exact code for reading my color buffer:
-(void)process3DTouch
{
if (colorPickerNeedsRedrawn) {
[self drawColorPicker];
}
glBindFramebuffer(GL_FRAMEBUFFER, pickerFramebuffer); // speed note - will bind twice if we just had to draw it
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureInfo[TEX_PICKER].texture, 0);
Byte pixelColor[4] = {0,0,0,0};
glReadPixels(colorPickerTouchPosition.x * backingScale, (backingHeight - (colorPickerTouchPosition.y * backingScale)), 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixelColor);
ColorSpec pickerColor = ColorSpecMake((float)pixelColor[0]/255, (float)pixelColor[1]/255, (float)pixelColor[2]/255, (float)pixelColor[3]/255);
//NSLog(#"what is pixelColor at %f,%f = %f %f %f %f", colorPickerTouchPosition.x, colorPickerTouchPosition.y, pickerColor.r,pickerColor.g,pickerColor.b,pickerColor.a);
if (pickerColor.a == 0.0) {
whatTouched=TOUCHED_NOTHING;
whatSecondaryTouched=TOUCHED_NOTHING;
NSLog(#"touched nothing");
processColorPicker=false;
return;
}
// now look up the item index from the color
int itemIndex = [self itemIndexFromColor:pickerColor];
[self handleTouchedItem: itemIndex];
processColorPicker=false;
}
Another good debugging method for color pickers is to draw the color picker and then draw the color picker texture to the screen so you can see if you have something else going on in the drawing of the picker.
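One more suggestion: to make the picked color meaningful (the //This should be meshId/255.0f comment in the question), encode the mesh index into the constant color before drawing and decode it back from the bytes glReadPixels returns. A small sketch, with helper names of my own invention:
// Sketch: pack a mesh index (1-based; 0 = background) into the red/green bytes,
// which survive a round trip through an RGBA8 renderbuffer. Supports up to 65535 meshes.
static GLKVector4 PickingColorForMeshIndex(NSUInteger meshIndex)
{
    GLubyte r = (meshIndex >> 8) & 0xFF;
    GLubyte g = meshIndex & 0xFF;
    return GLKVector4Make(r / 255.0f, g / 255.0f, 0.0f, 1.0f);
}
// Decode the index from the RGBA bytes returned by glReadPixels.
static NSUInteger MeshIndexForPickedPixel(const GLubyte pixelColor[4])
{
    return ((NSUInteger)pixelColor[0] << 8) | pixelColor[1];
}
Before the glDrawArrays call for mesh i you would set self.effect.constantColor = PickingColorForMeshIndex(i + 1) (with lighting and texturing disabled), and after glReadPixels call MeshIndexForPickedPixel(pixelColor); a result of 0 means the background was hit.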
