It's about picking a 3D object using color picking. I draw my mesh (update: it resides in a static header file and is displayed fine on screen in the same project) to a new offscreen framebuffer, and then I use glReadPixels to identify whether the user touched it.
Following the code of the opengles_ch10_1 project from the book "Learning OpenGL ES for iOS" by Erik M. Buck, I wrote the following code, which always logs the color 0,0,0 for the picked (tapped) pixel.
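The idea, following the book, is that each mesh is drawn into the offscreen framebuffer with a constant color that encodes its ID (meshId / 255.0f), so reading one pixel back and taking the red byte recovers the ID. Roughly (a sketch with illustrative names):
GLubyte pixel[4];
glReadPixels(tapX, tapY, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel);
NSUInteger pickedMeshId = pixel[0]; // 0 means background: nothing was hit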
My code is:
- (IBAction) tapGesture:(id)sender
{
if ([(UITapGestureRecognizer *)sender state] == UIGestureRecognizerStateEnded) {
CGPoint tapLocation = [(UITapGestureRecognizer *)sender locationInView:self.view];
int tt = [self findMeshByPoint:tapLocation];
//NSLog( @"tap value: %i", tt );
}
}
- (NSUInteger)findMeshByPoint:(CGPoint)point
{
//In openGL the y axis starts from the bottom of the screen
point.y = self.view.bounds.size.height - point.y;
GLKView *glView = (GLKView *)self.view;
NSAssert([glView isKindOfClass:[GLKView class]],
#"View controller's view is not a GLKView");
// Make the view's context current
[EAGLContext setCurrentContext:glView.context];
glBindVertexArrayOES(0);
// self.effect.constantColor = GLKVector4Make( 1.0f, //This should be meshId/255.0f
if(0 == _glVertexAttributeBufferID)
{
GLuint glName;
glGenBuffers(1, // STEP 1
&glName);
glBindBuffer(GL_ARRAY_BUFFER, // STEP 2
glName);
glBufferData( // STEP 3
GL_ARRAY_BUFFER, // Initialize buffer contents
sizeof(parparit51OBJVertices), parparit51OBJVertices,
GL_STATIC_DRAW); // Hint: cache in GPU memory
_glVertexAttributeBufferID = glName;
}
else
{
glBindBuffer(GL_ARRAY_BUFFER,
_glVertexAttributeBufferID);
}
//glEnableVertexAttribArray(TETerrainPositionAttrib);
glEnableVertexAttribArray( GLKVertexAttribPosition );
if(0 == _program)
{
[self loadShadersWithName:@"UtilityPickTerrainShader"];
[self buildFBO];
NSAssert(0 != _program,
#"prepareOpenGL failed to load shaders");
}
glUseProgram(_program);
// Pre-calculate the mvpMatrix
GLKMatrix4 modelViewProjectionMatrix2 =
GLKMatrix4Multiply(
self.effect.transform.projectionMatrix,
self.effect.transform.modelviewMatrix);
// Standard matrices
glUniformMatrix4fv(uniforms[UtilityPickTerrainMVPMatrix], 1, 0,
modelViewProjectionMatrix2.m);
// glUniform2fv(uniforms[UtilityPickTerrainDimensionFactors], 1,
// &_temp1 ); //self.factors.v); //I removed this from the shaders
glUniform1f(uniforms[UtilityPickTerrainModelIndex],
1.0f ); // self.modelIndex / 255.0f);
glBindFramebuffer(GL_FRAMEBUFFER, _pickFBO);
glViewport(0, 0, 512, 512);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 12 * sizeof(GLfloat), 0);
NSLog( #"******* 7" );
[self.effect prepareToDraw];
glDrawArrays(GL_TRIANGLES, 0, sizeof(parparit51OBJVertices) / sizeof(Vertex));
NSLog( #"******* 7.5" );
const GLfloat width = [glView drawableWidth];
const GLfloat height = [glView drawableHeight];
NSAssert(0 < width && 0 < height, @"Invalid drawable size");
// Get info for picked location
const GLKVector2 scaledProjectionPosition = {
point.x / width,
point.y / height
};
NSLog( #"******* 8" );
GLubyte pixelColor[4]; // Red, Green, Blue, Alpha color
GLint readLocationX = MIN((512 - 1),
(512 - 1) * scaledProjectionPosition.x);
GLint readLocationY = MIN((512 - 1),
(512 - 1) * scaledProjectionPosition.y);
glReadPixels(readLocationX,
readLocationY,
1,
1,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixelColor);
NSLog(#"pixelColor[0]=%i, pixelColor[1]=%i, pixelColor[2]=%i", pixelColor[0], pixelColor[1], pixelColor[2] );
// Restore OpenGL state that pickTerrainEffect changed
glBindFramebuffer(GL_FRAMEBUFFER, 0); // default frame buffer
glViewport(0, 0, width, height); // full area of glView
return 0;
}
-(void) buildFBO
{
if ( 0 == _pickFBO )
{
GLuint colorTexture;
// Create a texture object to apply to model
glGenTextures(1, &colorTexture);
glBindTexture(GL_TEXTURE_2D, colorTexture);
// Set up filter and wrap modes for this texture object
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR_MIPMAP_LINEAR);
// Allocate a texture image we can render into
// Pass NULL for the data parameter since we don't need to
// load image data. We will be generating the image by
// rendering to this texture.
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGBA,
512,
512,
0,
GL_RGBA,
GL_UNSIGNED_BYTE,
NULL);
GLuint depthRenderbuffer;
glGenRenderbuffers(1, &depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,
512, 512);
glGenFramebuffers(1, &_pickFBO);
glBindFramebuffer(GL_FRAMEBUFFER, _pickFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, colorTexture, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) !=
GL_FRAMEBUFFER_COMPLETE)
{
NSLog(#"failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
//+++tbd+++UtilityPickTerrainEffectDestroyFBO(fboName);
return;
}
//#ifdef DEBUG
// { // Report any errors
GLenum error = glGetError();
if(GL_NO_ERROR != error)
{
NSLog(#"GL Error: 0x%x", error);
}
// }
//#endif
}
}
Your error is that you are trying to access a null pointer -- you are almost certainly passing an array that you never actually initialized to your GLKEffect, or handing it to OpenGL with a glBufferData call, given that the error occurs inside prepareToDraw or glDrawArrays. There are several possible explanations for this that I can see, though I can't confirm any of them, since the relevant information is how you allocate the data used in prepareToDraw or glDrawArrays.
The first is that "parparit51OBJVertices" might be allocated dynamically on the heap (are you calling malloc or the like? How do you specify the size of the array?), in which case your calls to sizeof will return the size of the pointer (4 or 8 bytes) rather than the size of the array, which could lead to the EXC_BAD_ACCESS.
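To see the sizeof pitfall in isolation (a standalone C sketch, not from the question's project):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    float stackVertices[36];                          // a real array: sizeof sees all of it
    float *heapVertices = malloc(36 * sizeof(float)); // a pointer: sizeof sees only the pointer
    printf("%zu\n", sizeof(stackVertices)); // 144
    printf("%zu\n", sizeof(heapVertices));  // 4 or 8, depending on the platform
    free(heapVertices);
    return 0;
}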
The other part of this that seems suspect is that you are calling glDrawArrays and passing in the number of vertices, but before that you bind a VBO of indices -- at best this is wasted work, since glDrawArrays ignores indices. Worse, if you are trying to draw an OBJ, as your variable name implies, there is likely a considerable amount of geometry you aren't drawing at all, so even if you fix your EXC_BAD_ACCESS problem you will still get bad results.
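If the mesh really is indexed, the draw would need to use the element buffer, something like this (a sketch; GL_UNSIGNED_SHORT is an assumption about how the OBJ indices are stored):
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferID);
glDrawElements(GL_TRIANGLES,
               sizeof(parparit51OBJIndices) / sizeof(parparit51OBJIndices[0]),
               GL_UNSIGNED_SHORT, // must match the element type of parparit51OBJIndices
               0);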
I had an EXC_BAD_ACCESS error.
I solved it by removing the code that handles the indices:
if(0 == _indexBufferID)
{
// Indices haven't been sent to GPU yet
// Create an element array buffer for mesh indices
glGenBuffers(1, &_indexBufferID);
NSAssert(0 != _indexBufferID,
#"Failed to generate element array buffer");
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
sizeof( parparit51OBJIndices ),
parparit51OBJIndices,
GL_STATIC_DRAW);
}
else
{
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBufferID);
}
Now I still need to understand why the updated code above always prints 0,0,0 for any picked pixel.
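One way to narrow it down (a sketch, not from the book): clear the pick FBO to a known color and immediately read a pixel back. If this does not print 255, 0, 0, the FBO or the readback is broken before any mesh is drawn; if it does, the problem is in the draw itself (shaders, matrices, or attribute setup).
glBindFramebuffer(GL_FRAMEBUFFER, _pickFBO);
glViewport(0, 0, 512, 512);
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
GLubyte probe[4];
glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, probe);
NSLog(@"probe: %d, %d, %d", probe[0], probe[1], probe[2]);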
Related
I'm going to show pixel data on an OpenGL view.
But I can't see anything; I only see the empty pink GL screen.
Please check my code and let me know what is wrong.
@implementation GLView
+ (Class) layerClass
{
return [CAEAGLLayer class];
}
- (id)initWithFrame:(CGRect)frame
{
if ((self = [super initWithFrame:frame]))
{
// Do OpenGL Core Animation layer setup
CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
eaglLayer.opaque = YES;
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithBool:NO], kEAGLDrawablePropertyRetainedBacking, kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat, nil];
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
if (!context || ![EAGLContext setCurrentContext:context]
|| ![self createFramebuffers])
return nil;
}
return self;
}
- (BOOL)createFramebuffers
{
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
// Onscreen framebuffer object
glGenFramebuffers(1, &viewFramebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, viewFramebuffer);
glGenRenderbuffers(1, &viewRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, viewRenderbuffer);
[context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer*)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &backingHeight);
NSLog(#"Backing width: %d, height: %d", backingWidth, backingHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, viewRenderbuffer);
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
NSLog(#"Failure with framebuffer generation");
return NO;
}
return YES;
}
- (void)setDisplayFramebuffer;
{
if (context)
{
// [EAGLContext setCurrentContext:context];
if (!viewFramebuffer)
[self createFramebuffers];
glBindFramebuffer(GL_FRAMEBUFFER, viewFramebuffer);
// glBindRenderbuffer(GL_RENDERBUFFER, viewRenderbuffer);
glViewport(0, 0, backingWidth, backingHeight);
}
}
- (BOOL)presentFramebuffer;
{
BOOL success = FALSE;
if (context)
{
// [EAGLContext setCurrentContext:context];
glBindRenderbuffer(GL_RENDERBUFFER, viewRenderbuffer);
success = [context presentRenderbuffer:GL_RENDERBUFFER];
}
return success;
}
and in the view controller:
m_glView = [[GLView alloc] initWithFrame:CGRectMake(0, 0, m_viewPlayer.frame.size.width, m_viewPlayer.frame.size.height)];
[m_viewPlayer addSubview:m_glView];
[self loadVertexShader:@"DirectDisplayShader" fragmentShader:@"DirectDisplayShader" forProgram:&m_directDisplayProgram];
and in the timer loop:
CMSampleBufferRef buf = [m_trackOutput copyNextSampleBuffer];
if (buf == nil)
return;
[self processFrame:buf]; // draw frame to opengl view
CFRelease(buf);
- (void)processFrame:(CMSampleBufferRef)sampleBuffer
{
if (m_videoFrameTexture)
glDeleteTextures(1, &m_videoFrameTexture);
// Get a CMSampleBuffer's Core Video image buffer for the media data
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the base address of the pixel buffer
CVPixelBufferLockBaseAddress(imageBuffer, 0);
CGFloat width = CVPixelBufferGetWidth(imageBuffer);
CGFloat height = CVPixelBufferGetHeight(imageBuffer);
// Create a new texture from the camera frame data, display that using the shaders
glGenTextures(1, &m_videoFrameTexture);
glBindTexture(GL_TEXTURE_2D, m_videoFrameTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// This is necessary for non-power-of-two textures
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Using BGRA extension to pull in video frame data directly
// glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(imageBuffer));
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(imageBuffer));
[self drawFrame];
// glDeleteTextures(1, &videoFrameTexture);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
- (void)drawFrame
{
[m_glView setDisplayFramebuffer];
[self drawCapturedScreen];
[m_glView presentFramebuffer];
}
- (void)drawCapturedScreen
{
glUseProgram(m_directDisplayProgram);
glBindTexture(GL_TEXTURE_2D, m_videoFrameTexture);
glUniform1i(uniforms[UNIFORM_VIDEOFRAME], 0);
// Update attribute values.
static const GLfloat squareVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, 1.0f,
};
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
float m_fScale = 1.0f; //
GLfloat kRate = 1.0f/m_fScale;
GLfloat kX = (1.0-kRate)/2;
GLfloat kY = (1.0-kRate)/2;
GLfloat kS = kX+kRate;
GLfloat kT = kY+kRate;
{
{
GLfloat textureVertices[] = {
kS, kT,
kS, kY,
kX, kT,
kX, kY,
};
glVertexAttribPointer(ATTRIB_TEXTUREPOSITON, 2, GL_FLOAT, 0, 0, textureVertices);
glEnableVertexAttribArray(ATTRIB_TEXTUREPOSITON);
}
}
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
Have you managed to render anything in OpenGL so far? I would suggest breaking what you're doing down into smaller steps and get those working first. First try rendering just using GL_POINTS and see if you can get anything and then build up from there. Make your shaders as simple as possible too to check for issues there. You should try to get the bare minimum rendering working and then build up the complexity (for example, after points try lines, then triangles, then textured triangles). By breaking down the rendering you can isolate what's causing the draw call to not show anything.
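For example, the bare-minimum first step might look like this (a sketch reusing the question's attribute and program names; note that in ES 2.0 the vertex shader must also write gl_PointSize or the point will not appear):
static const GLfloat point[] = { 0.0f, 0.0f };
glUseProgram(m_directDisplayProgram);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, GL_FALSE, 0, point);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glDrawArrays(GL_POINTS, 0, 1);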
I am trying to render a simple scene into an FBO backed by a texture as its color attachment, and then draw a quad to the screen using this texture, in iOS.
This will let me do some post-processing of the final scene. There are a few questions on SO that address similar (but not quite the same) problems, and I've tried whatever I could understand. Nothing works.
I have two shader programs. The first, _program, simply takes the vertex positions and renders them with a single color. The second, quadProgram, takes a texture, the texture coordinates, and the quad coordinates. (In the interest of brevity I am omitting the code for these shaders.)
Both shaders work correctly and independently produce correct results. However, when I try to render the "rendered texture" onto the quad instead of a sample wood texture, I simply get a black screen. Here are the relevant bits of code. I set everything up like so:
- (void)setupGL
{
[EAGLContext setCurrentContext:self.context];
_program = [self createProgramWithVertexShader:@"Shader" fragShader:@"Shader"];
GLuint GLKVertexAttribPosition = glGetAttribLocation(_program, "position");
quadProgram = [self createProgramWithVertexShader:@"PostShader" fragShader:@"PostShader"];
quadTexCoord = glGetAttribLocation(quadProgram, "a_texcoord");
quadPosition = glGetAttribLocation(quadProgram, "a_position");
diffuseTexture = glGetUniformLocation(quadProgram, "s_diffuse");
glEnable(GL_DEPTH_TEST);
glGenVertexArraysOES(1, &_vertexArray);
glBindVertexArrayOES(_vertexArray);
glGenBuffers(1, &_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(gCubeVertexData), gCubeVertexData, GL_STATIC_DRAW);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 24, BUFFER_OFFSET(0));
glBindVertexArrayOES(0);
// ============
// ---- for render to texture
width = self.view.bounds.size.width;
height = self.view.bounds.size.height;
// create fbo
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
// create and attach backing texture
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
// create and attach depthbuffer
glGenRenderbuffers(1, &depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);
// Check if framebuffer was loaded correctly
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER) ;
if(status != GL_FRAMEBUFFER_COMPLETE) {
NSLog(#"failed to make complete framebuffer object %x", status);
}
// sample texture
NSError *theError;
NSString *filePath = [[NSBundle mainBundle] pathForResource:@"wood_floor_256" ofType:@"jpg"]; // 1
spriteTexture = [GLKTextureLoader textureWithContentsOfFile:filePath options:nil error:&theError]; // 2
}
And then my main loop:
- (void)update
{
// First save the default frame buffer.
static GLint default_frame_buffer = 0;
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &default_frame_buffer);
// render to texture
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glClearColor(0.65f, 0.65f, 0.65f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArrayOES(_vertexArray);
glUseProgram(_program);
glDrawArrays(GL_TRIANGLES, 0, 36);
// render to screen
glBindFramebuffer(GL_FRAMEBUFFER, default_frame_buffer);
glClearColor(0.65f, 0.65f, 0.65f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(quadProgram);
const float quadPositions[] = { 1.0, 1.0, 0.0,
-1.0, 1.0, 0.0,
-1.0, -1.0, 0.0,
-1.0, -1.0, 0.0,
1.0, -1.0, 0.0,
1.0, 1.0, 0.0 };
const float quadTexcoords[] = { 1.0, 1.0,
0.0, 1.0,
0.0, 0.0,
0.0, 0.0,
1.0, 0.0,
1.0, 1.0 };
// stop using VBO and other bufs.
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArrayOES(0);
glBindTexture(GL_TEXTURE_2D, texture);
glActiveTexture(GL_TEXTURE_2D);
// setup buffer offsets
glVertexAttribPointer(quadPosition, 3, GL_FLOAT, GL_FALSE, 3*sizeof(float), quadPositions);
glVertexAttribPointer(quadTexCoord, 2, GL_FLOAT, GL_FALSE, 2*sizeof(float), quadTexcoords);
// ensure the proper arrays are enabled
glEnableVertexAttribArray(quadPosition);
glEnableVertexAttribArray(quadTexCoord);
// draw
glDrawArrays(GL_TRIANGLES, 0, 2*3);
}
I don't even know how to tell if the backing texture contains the rendered screen.
The render-to-texture pass should render the following into the FBO:
If I replace the two lines:
glBindTexture(GL_TEXTURE_2D, texture);
glActiveTexture(GL_TEXTURE_2D);
with
glBindTexture(spriteTexture.target, spriteTexture.name);
glEnable(spriteTexture.target);
I get the following:
This indicates that the code that renders a textured quad to the screen is correct.
I have no idea how to get the first screen rendered to the quad.
Since the texture you are using is NPOT, it needs a non-default wrap parameter.
Add the following right below the other glTexParameteri call. It should solve the problem:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
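For reference, ES 2.0 treats an NPOT texture as incomplete (it samples as black) unless both wrap modes are GL_CLAMP_TO_EDGE and the minification filter uses no mipmaps, so the full set of parameters for the FBO texture is:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmap filter for NPOT
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);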
I'm developing an iOS OpenGL ES application.
I'm doing the usual EAGLView / ES2Render stuff.
On startup, framebuffer creation succeeds, using the following code:
- (BOOL) createFramebuffers
{
[EAGLContext setCurrentContext:_mainContext];
// [ A ] On-screen
// 1. Framebuffer
glGenFramebuffers(1, &_mainFramebuffer);
bindFramebuffer(_mainFramebuffer);
// 2. Color buffer
glGenRenderbuffers(1, &_mainColorbuffer);
bindRenderbuffer(_mainColorbuffer);
// Adjust size to view's layer:
CAEAGLLayer* layer = (CAEAGLLayer*)[_view layer];
if (![_mainContext renderbufferStorage:GL_RENDERBUFFER fromDrawable:layer]) {
// something went horribly wrong
NSLog(#"-[ES2Renderer createFramebuffers]: Failed to obtain renderbuffer storage from layer!");
return NO;
}
// Query new size:
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
// Attach to color:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _mainColorbuffer);
// 3. Depth buffer
glGenRenderbuffers(1, &_depthBuffer);
bindRenderbuffer(_depthBuffer);
if (_useStencilBuffer) {
// Depth + Stencil
// Allocate storage:
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8_OES, _backingWidth, _backingHeight);
// Attach to depth:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthBuffer);
// Attach to stencil:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, _depthBuffer);
}
else{
// Depth only
// Allocate storage:
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24_OES, _backingWidth, _backingHeight);
// Attach to depth:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthBuffer);
}
// 4. Validate the set:
GLenum framebufferStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (framebufferStatus != GL_FRAMEBUFFER_COMPLETE) {
// Something went wrong!
NSLog(#"-[ES2Renderer createFramebuffers]: Failed to make complete framebuffer object: %#",
[self stringFromFramebufferStauts:framebufferStatus]);
return NO;
}
// [ B ] Off-screen (Render-to-texture)
// 1. Framebuffer
glGenFramebuffers(1, &_transFramebuffer);
bindFramebuffer(_transFramebuffer);
// 2. Depth buffer
glGenRenderbuffers(1, &_transDepthBuffer);
bindRenderbuffer(_transDepthBuffer);
if (_useStencilBuffer) {
// Allocate storage:
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8_OES, _backingWidth, _backingHeight);
// Attach to depth:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _transDepthBuffer);
// Attach to stencil:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, _transDepthBuffer);
}
else{
// Allocate storage
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24_OES, _backingWidth, _backingHeight);
// Attach to depth:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _transDepthBuffer);
}
// 3. Textures (color buffers)
GLuint* texPtrs[2] = {&_transTexture1, &_transTexture2};
for (NSUInteger i=0; i < 2; i++) {
GLuint* texPtr = texPtrs[i];
// Create:
glGenTextures(1, texPtr);
// Bind:
bindTexture2D(*texPtr);
// Configure for pixel-aligned use:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Allocate storage:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _backingWidth, _backingHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// Attach:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, *texPtr, 0);
framebufferStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
// Validate:
if ( framebufferStatus != GL_FRAMEBUFFER_COMPLETE) {
// Something went wrong!
NSLog(#"-[ES2Renderer createFramebuffers]: Failed to make complete framebuffer object: %#",
[self stringFromFramebufferStauts:framebufferStatus]);
return NO;
}
}
// Final State:
bindFramebuffer(_mainFramebuffer);
bindRenderbuffer(_mainColorbuffer);
bindTexture2D(0);
NSLog(#"-[ES2Renderer createFramebuffers] Succeeded.");
return YES;
}
Soon after, UIView's -layoutSubviews is called, and I in turn execute the following -resizeFromLayer: method:
- (BOOL) resizeFromLayer:(CAEAGLLayer *)layer
{
// [ A ] On screen framebuffer
bindFramebuffer(_mainFramebuffer);
// 1. Resize color buffer
bindRenderbuffer(_mainColorbuffer);
if (![_mainContext renderbufferStorage:GL_RENDERBUFFER fromDrawable:layer]) {
// Something went wrong
return NO; // <-- SECOND TIME ON, THIS HAPPENS
}
// Query new size:
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
// 2. Resize depth buffer
bindRenderbuffer(_depthBuffer);
if (_useStencilBuffer) {
// (Depth & Stencil)
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8_OES, _backingWidth, _backingHeight);
}
else{
// (Depth only)
glRenderbufferStorage(GL_FRAMEBUFFER, GL_DEPTH_COMPONENT24_OES, _backingWidth, _backingHeight);
}
// ...Validate:
GLenum framebufferStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (framebufferStatus != GL_FRAMEBUFFER_COMPLETE) {
// Something went wrong!
NSLog(#"-[ES2Renderer resizeFromLayer:]: Failed to make complete framebuffer object: %#",
[self stringFromFramebufferStauts:glCheckFramebufferStatus(GL_FRAMEBUFFER)]);
return NO;
}
// [ B ] Off screen (render-to-texture) framebuffer
bindFramebuffer(_transFramebuffer);
// 1. Resize depth buffer
bindRenderbuffer(_transDepthBuffer);
if (_useStencilBuffer) {
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8_OES, _backingWidth, _backingHeight);
}
else{
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24_OES, _backingWidth, _backingHeight);
}
// 2. Resize textures
GLuint* texPtrs[2] = {&_transTexture1, &_transTexture2};
for (NSUInteger i=0; i < 2; i++) {
GLuint* texPtr = texPtrs[i];
// Bind:
bindTexture2D(*texPtr);
// Configure for pixel-aligned use:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Allocate storage:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _backingWidth, _backingHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// Attach:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, *texPtr, 0);
// Validate:
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
// Something went wrong!
NSString* statusString = [self stringFromFramebufferStauts:glCheckFramebufferStatus(GL_FRAMEBUFFER)];
NSLog(#"-[ES2Renderer resizeFromLayer:]: Failed to make complete framebuffer object: %#", statusString);
return NO;
}
}
bindFramebuffer(_mainFramebuffer);
bindRenderbuffer(_mainColorbuffer);
// Pass new ortho projection to shaders
[self initializeModelViewMatrix];
[self initializeSpriteProgram];
// Set new viewport
glViewport(0, 0, _backingWidth, _backingHeight);
NSLog(#"-[ES2Renderer resizeFromLayer:]: Succeeded.");
return YES;
}
Nothing I'm doing is special: I have a separate framebuffer for rendering scene transitions, with two textures to attach as color, plus a depth buffer.
The second time -renderbufferStorage:fromDrawable: is called (-layoutSubviews -> -resizeFromLayer:), it invariably fails (returns NO). Calling glGetError() right before it reports no error, but calling it right after returns GL_INVALID_OPERATION.
If I ignore this and proceed, glGetRenderbufferParameteriv() still gets me the right width and height (640 and 1136, respectively, on an iPhone 5), but glCheckFramebufferStatus() will return GL_FRAMEBUFFER_UNSUPPORTED.
Alternatively, I skipped the above resizeFromLayer: and replaced it with this:
- (BOOL) resizeFromLayer:(CAEAGLLayer *)layer
{
[self destroyFramebuffers];
return [self createFramebuffers];
}
...but the same error persists (-renderbufferStorage:fromDrawable: fails; this time inside -createFramebuffers).
For now I just return YES (my app only supports portrait, so no screen-size change ever actually happens), but I really want to fix this because one day I'll need to support landscape, etc.
Another possible reason is that the size of your layer is too large. In addition, make sure you are using a new framebuffer and renderbuffer each time, and that you have destroyed your old ones before creating new ones.
You can delete them like this (Swift):
if let displayFramebuffer = self.displayFramebuffer {
var temporaryFramebuffer = displayFramebuffer
glDeleteFramebuffers(1, &temporaryFramebuffer)
self.displayFramebuffer = nil
}
if let displayRenderbuffer = self.displayRenderbuffer {
var temporaryRenderbuffer = displayRenderbuffer
glDeleteRenderbuffers(1, &temporaryRenderbuffer)
self.displayRenderbuffer = nil
}
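In Objective-C the same teardown, using the ivar names from the question, might look like:
if (_mainFramebuffer) {
    glDeleteFramebuffers(1, &_mainFramebuffer);
    _mainFramebuffer = 0;
}
if (_mainColorbuffer) {
    glDeleteRenderbuffers(1, &_mainColorbuffer);
    _mainColorbuffer = 0;
}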
One possible reason that renderbufferStorage:fromDrawable: is failing is that _mainContext is not the current context at that time. Even though it may seem that no other context could have stolen the 'current' status, I recommend calling [EAGLContext setCurrentContext:_mainContext] before any GL or EAGL code that operates on objects associated with that context (e.g. at the beginning of the resizeFromLayer: method).
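In practice, that means something like this at the top of -resizeFromLayer:, before any GL calls (a sketch):
if (![EAGLContext setCurrentContext:_mainContext]) {
    NSLog(@"-[ES2Renderer resizeFromLayer:]: Failed to make _mainContext current");
    return NO;
}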
Josh Bernfeld's answer inspired me. I checked the size of MyView, and its bounds were CGRect.zero, as were the CAEAGLLayer's.
For me, initializing MyView with a nonzero CGRect solved the problem.
Hope it works for you.
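A simple guard for this case (a sketch): skip framebuffer creation while the layer still has zero size, and retry once layout has run.
CAEAGLLayer *layer = (CAEAGLLayer *)self.layer;
if (CGRectIsEmpty(layer.bounds)) {
    return NO; // renderbufferStorage:fromDrawable: fails on a zero-sized layer
}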
First of all, sorry for my broken English; I'm not a native speaker and should have worked harder at school :S
These days I'm trying to build an AR system using OpenGL ES on iOS.
I retrieve camera frames using AVCaptureSession and send them both to a tracking algorithm and to my OpenGL ES display class (which waits for the rotation and translation computed by the tracking algorithm). In the meantime, I'm reading a video to display on an OpenGL plane placed at the same position as a real plane in the scene (I don't know if I'm being clear).
My goal is to use only one VBO and one shader to render the whole thing (the image taken from the camera in the background, and the plane with a video texture positioned using the rotation and translation given by my tracking algorithm).
I don't know why, but nothing is displayed. Here is the render code (called each frame):
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
[self setProjectionMatrix];
glUniformMatrix4fv(_projectionUniform, 1, 0, projection.glMatrix);
//Wait for rotation and translation
@synchronized(self.trackeriOS){
transfo = [trackeriOS getMat];
}
[self dumpMatrix:transfo];
[self fillModelViewMatrix:transfo];
glUniformMatrix4fv(_modelViewUniform, 1, 0, finalModelView);
videoTexture = [self setupTextureFromBuffer:self.buffer];
if (videoTexture == -1) {
NSLog(#"swap");
}
if (cameraVideoTexture == -1) {
NSLog(#"swap");
}
else {
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, videoTexture);
glUniform1i(_textureUniform, 0);
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]), GL_UNSIGNED_BYTE, 0);
glUniformMatrix4fv(_projectionUniform, 1, 0, modelViewIdentity);
glUniformMatrix4fv(_modelViewUniform, 1, 0, modelViewIdentity);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glUniform1i(_cameraVideotextureUniform, 1);
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]), GL_UNSIGNED_BYTE, 0);
[_context presentRenderbuffer:GL_RENDERBUFFER];
CVBufferRelease(self.buffer);
glDeleteTextures(1, &cameraVideoTexture);
glDeleteTextures(1,&videoTexture);
}
Thank you so much for your help.
Edit 2: This is how I create my OpenGL texture from a CVImageBufferRef given by the AVFoundation delegate function while reading camera video data:
- (void)processNewCameraFrame:(CVImageBufferRef)cameraFrame;
{
dispatch_async(backgroundImageDisplay, ^(void) {
self.cameraVideoBuffer = cameraFrame;
CVPixelBufferLockBaseAddress(cameraFrame, 0);
int bufferHeight = CVPixelBufferGetHeight(cameraFrame);
int bufferWidth = CVPixelBufferGetWidth(cameraFrame);
glGenTextures(1, &cameraVideoTexture);
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame));
CVPixelBufferUnlockBaseAddress(cameraFrame, 0);
CMTime outputItemTime = [videoOutput itemTimeForHostTime:CACurrentMediaTime()];
if ([[self videoOutput] hasNewPixelBufferForItemTime:outputItemTime]) {
self.buffer = [[self videoOutput] copyPixelBufferForItemTime:outputItemTime itemTimeForDisplay:NULL];
//The above render function
[self render];
}
});
}
Edit: I forgot to say that I set up my VBO like this:
- (void)setupVBOs {
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Indices), Indices, GL_STATIC_DRAW);
NSLog(#"OpenGL View, set up VBO 1");
}
And I set up the entry point to my shader like this :
-(void)setTextureProgramShader {
_positionSlot = glGetAttribLocation(programHandle1, "Position");
glEnableVertexAttribArray(_positionSlot);
_projectionUniform = glGetUniformLocation(programHandle1, "Projection");
_modelViewUniform = glGetUniformLocation(programHandle1, "Modelview");
_texCoordSlot = glGetAttribLocation(programHandle1, "TexCoordIn");
glEnableVertexAttribArray(_texCoordSlot);
_textureUniform = glGetUniformLocation(programHandle1, "Texture");
}
Finally, the vertex shader is:
attribute vec4 Position; // 1
uniform mat4 Projection;
uniform mat4 Modelview;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
void main(void) { // 4
gl_Position = Projection * Modelview * Position; // 6
TexCoordOut = TexCoordIn;
}
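For completeness, the fragment shader that goes with this is presumably a plain texture lookup along these lines (a sketch; the actual shader isn't shown above):
varying lowp vec2 TexCoordOut;
uniform sampler2D Texture;
void main(void) {
    gl_FragColor = texture2D(Texture, TexCoordOut);
}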
I'm trying to take a screenshot on my iPad with OpenGL ES. This works, but there are blank spots in it. These blank spots seem to be the rendered object. I've tried using the other buffers as well, but none of them seem to contain the actual 3D object.
I'm using the example code of the String SDK.
Image of the issue:
EAGLView.m
- (void)createFramebuffer
{
if (context && !defaultFramebuffer)
{
[EAGLContext setCurrentContext:context];
// Handle scale
if ([self respondsToSelector:@selector(setContentScaleFactor:)])
{
float screenScale = [UIScreen mainScreen].scale;
self.contentScaleFactor = screenScale;
}
// Create default framebuffer object.
glGenFramebuffers(1, &defaultFramebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebuffer);
// Create color render buffer and allocate backing store.
glGenRenderbuffers(1, &colorRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
[context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer *)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &framebufferWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &framebufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorRenderbuffer);
// Create and attach depth buffer
glGenRenderbuffers(1, &depthRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, framebufferWidth, framebufferHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);
// Bind color buffer
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
NSLog(#"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
}
}
Screenshot code
EAGLView.m
- (UIImage*)snapshot:(UIView*)eaglview
{
GLint backingWidth, backingHeight;
// Bind the color renderbuffer used to render the OpenGL ES view
// If your application only creates a single color renderbuffer which is already bound at this point,
// this call is redundant, but it is needed if you're dealing with multiple renderbuffers.
// Note, replace "_colorRenderbuffer" with the actual name of the renderbuffer object defined in your class.
//glBindRenderbufferOES(GL_RENDERBUFFER_OES, _colorRenderbuffer);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth);
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight);
NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
// Create a CGImage with the pixel data
// If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
// otherwise, use kCGImageAlphaPremultipliedLast
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast,
ref, NULL, true, kCGRenderingIntentDefault);
// OpenGL ES measures data in PIXELS
// Create a graphics context with the target size measured in POINTS
NSInteger widthInPoints, heightInPoints;
if (NULL != UIGraphicsBeginImageContextWithOptions) {
// On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
// Set the scale parameter to your OpenGL ES view's contentScaleFactor
// so that you get a high-resolution snapshot when its value is greater than 1.0
CGFloat scale = eaglview.contentScaleFactor;
widthInPoints = width / scale;
heightInPoints = height / scale;
UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
}
else {
// On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
widthInPoints = width;
heightInPoints = height;
UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
}
CGContextRef cgcontext = UIGraphicsGetCurrentContext();
// UIKit coordinate system is upside down to GL/Quartz coordinate system
// Flip the CGImage by rendering it to the flipped bitmap context
// The size of the destination area is measured in POINTS
CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);
// Retrieve the UIImage from the current context
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// Clean up
free(data);
CFRelease(ref);
CFRelease(colorspace);
CGImageRelease(iref);
return image;
}
String_OGL_TutorialViewController.m
- (void)render
{
[(EAGLView *)self.view setFramebuffer];
glDisable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
const int maxMarkerCount = 10;
struct MarkerInfoMatrixBased markerInfo[10];
int markerCount = [stringOGL getMarkerInfoMatrixBased: markerInfo maxMarkerCount: maxMarkerCount];
for (int i = 0; i < markerCount; i++)
{
float diffuse[4] = {0, 0, 0, 0};
diffuse[markerInfo[i].imageID % 3] = 1;
if ([context API] == kEAGLRenderingAPIOpenGLES2)
{
glUseProgram(program);
glUniform4fv(uniforms[UNIFORM_COLOR], 1, diffuse);
const float translationMatrix[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, -cubeScale, 1};
float modelViewMatrix[16];
float modelViewProjectionMatrix[16];
[String_OGL_TutorialViewController multiplyMatrix: translationMatrix withMatrix: markerInfo[i].transform into: modelViewMatrix];
[String_OGL_TutorialViewController multiplyMatrix: modelViewMatrix withMatrix: projectionMatrix into: modelViewProjectionMatrix];
glUniformMatrix4fv(uniforms[UNIFORM_MVP], 1, GL_FALSE, modelViewProjectionMatrix);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, ((float *)NULL) + 6 * 4 * 3);
// Validate program before drawing. This is a good check, but only really necessary in a debug build.
// DEBUG macro must be defined in your debug configurations if that's not already the case.
#if defined(DEBUG)
if (![self validateProgram:program])
{
NSLog(#"Failed to validate program: %d", program);
return;
}
#endif
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, NULL);
}
else
{
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 12, NULL);
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, 12, ((float *)NULL) + 6 * 4 * 3);
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMatrix);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMultMatrixf(markerInfo[i].transform);
glTranslatef(0, 0, -cubeScale);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, NULL);
}
UIImage *img = [(EAGLView *)self.view snapshot: self.view];
UIImageWriteToSavedPhotosAlbum(img, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
[stringOGL pause];
}
}