I am trying to draw a texture with transparency onto a background gradient that is created by a vertex shader interpolating colors between vertices. However, only the opaque parts of the texture are being drawn.
I am using the blending function:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
Rendering code:
struct vertex {
float position[3];
float color[4];
float texCoord[2];
};
typedef struct vertex vertex;
const vertex vertices[] = {
{{1, -1, 0}, {0, 167.0/255.0, 253.0/255.0, 1}, {1, 0}}, // BR (0)
{{1, 1, 0}, {0, 222.0/255.0, 1.0, 1}, {1, 1}}, // TR (1)
{{-1, 1, 0}, {0, 222.0/255.0, 1.0, 1}, {0, 1}}, // TL (2)
{{-1, -1, 0}, {0, 167.0/255.0, 253.0/255.0, 1}, {0, 0}}, // BL (3)
};
const GLubyte indicies[] = {
3, 2, 0, 1
};
-(void) render {
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*3));
glVertexAttribPointer(textureCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*7));
glDrawElements(GL_TRIANGLE_STRIP, sizeof(indicies)/sizeof(indicies[0]), GL_UNSIGNED_BYTE, 0);
// Not sure if required for blending to work..
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(textureUniform, 0);
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*3));
glVertexAttribPointer(textureCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*7));
glDrawElements(GL_TRIANGLE_STRIP, sizeof(indicies)/sizeof(indicies[0]), GL_UNSIGNED_BYTE, 0);
[context presentRenderbuffer:GL_RENDERBUFFER];
}
I'm unsure whether I need to draw to the render buffer twice for the blend function to work, so at the moment I draw once without the texture bound and then again with it bound.
Fragment Shader:
varying lowp vec4 destinationColor;
varying lowp vec2 texCoordOut;
uniform sampler2D tex;
void main() {
lowp vec4 tex2D = texture2D(tex, texCoordOut);
gl_FragColor = vec4(tex2D.rgb+destinationColor.rgb, tex2D.a*destinationColor.a);
}
Vertex Shader:
attribute vec4 position;
attribute vec4 sourceColor;
varying vec4 destinationColor;
attribute vec2 texCoordIn;
varying vec2 texCoordOut;
void main() {
destinationColor = sourceColor;
gl_Position = position;
texCoordOut = texCoordIn;
}
Texture loading code:
-(GLuint) loadTextureFromImage:(UIImage*)image {
CGImageRef textureImage = image.CGImage;
size_t width = CGImageGetWidth(textureImage);
size_t height = CGImageGetHeight(textureImage);
GLubyte* spriteData = (GLubyte*) malloc(width*height*4);
CGColorSpaceRef cs = CGImageGetColorSpace(textureImage);
CGContextRef c = CGBitmapContextCreate(spriteData, width, height, 8, width*4, cs, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(cs);
CGContextScaleCTM(c, 1, -1);
CGContextTranslateCTM(c, 0, -CGContextGetClipBoundingBox(c).size.height);
CGContextDrawImage(c, (CGRect){CGPointZero, {width, height}}, textureImage);
CGContextRelease(c);
GLuint glTex;
glGenTextures(1, &glTex);
glBindTexture(GL_TEXTURE_2D, glTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (int)width, (int)height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
glBindTexture(GL_TEXTURE_2D, 0);
free(spriteData);
return glTex;
}
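As an aside, the bitmap context above is created with kCGImageAlphaPremultipliedLast, so Core Graphics hands back premultiplied alpha; for what it's worth, the blend function that matches a premultiplied source is:
glEnable(GL_BLEND);
// Premultiplied source: the colour channels are already multiplied by alpha,
// so the source factor is GL_ONE rather than GL_SRC_ALPHA.
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);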
Any ideas what I am doing wrong?
The answer you gave yourself might suffice in your particular situation, but I don't think it is a very nice solution. You will probably run into many problems when you want to render more than two objects.
You draw the same object twice. First without a texture bound and then with the texture bound - with the blending done in the shader. But how would you do that with a third object?
I recommend using a different set of vertices for both objects. Something like this:
const vertex gradient_background[] = {
{{1, -1, 0}, {0, 167.0/255.0, 253.0/255.0, 1}, {1, 0}},
{{1, 1, 0}, {0, 222.0/255.0, 1.0, 1}, {1, 1}},
{{-1, 1, 0}, {0, 222.0/255.0, 1.0, 1}, {0, 1}},
{{-1, -1, 0}, {0, 167.0/255.0, 253.0/255.0, 1}, {0, 0}}
};
const vertex textured_object[] = {
{{1, -1, 0}, {0, 0, 0, 0}, {1, 0}},
{{1, 1, 0}, {0, 0, 0, 0}, {1, 1}},
{{-1, 1, 0}, {0, 0, 0, 0}, {0, 1}},
{{-1, -1, 0}, {0, 0, 0, 0}, {0, 0}}
};
Then adjust your render function accordingly, and unbind the texture (bind texture 0) after drawing:
-(void) render {
...
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*3));
glVertexAttribPointer(textureCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*7));
glDrawElements(GL_TRIANGLE_STRIP, sizeof(indicies)/sizeof(indicies[0]), GL_UNSIGNED_BYTE, 0);
...
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(textureUniform, 0);
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*3));
glVertexAttribPointer(textureCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*7));
glDrawElements(GL_TRIANGLE_STRIP, sizeof(indicies)/sizeof(indicies[0]), GL_UNSIGNED_BYTE, 0);
// Don't forget to unbind texture for next draw
glBindTexture(GL_TEXTURE_2D, 0);
...
}
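The elided parts also need each object in its own vertex buffer, bound before its attribute pointers are set. A minimal setup sketch (the buffer handle names gradientVBO and objectVBO are my own):
// One VBO per object, filled once at setup time.
GLuint gradientVBO, objectVBO;
glGenBuffers(1, &gradientVBO);
glBindBuffer(GL_ARRAY_BUFFER, gradientVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(gradient_background), gradient_background, GL_STATIC_DRAW);
glGenBuffers(1, &objectVBO);
glBindBuffer(GL_ARRAY_BUFFER, objectVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(textured_object), textured_object, GL_STATIC_DRAW);
// In render, bind gradientVBO before the first set of glVertexAttribPointer calls
// and objectVBO before the second set.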
Fragment shader:
varying lowp vec4 destinationColor;
varying lowp vec2 texCoordOut;
uniform sampler2D tex;
void main() {
lowp vec4 tex2D = texture2D(tex, texCoordOut); // Returns (0, 0, 0, 1) when texture 0 is bound
gl_FragColor = destinationColor + tex2D;
}
Then use
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
Or any other blending function you wish.
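Draw order matters with this approach: the opaque background has to be rendered before the blended object. Roughly (a sketch, not your exact code):
glDisable(GL_BLEND);                               // background is fully opaque
// ... draw gradient_background ...
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// ... draw textured_object ...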
Okay, well, I feel silly. Attempting to use the blend function outside the fragment shader didn't do anything, as the texture had already been drawn. I just needed to use the equivalent inside the fragment shader:
varying lowp vec4 destinationColor;
varying lowp vec2 texCoordOut;
uniform sampler2D tex;
void main() {
lowp vec4 tex2D = texture2D(tex, texCoordOut);
lowp vec4 result = tex2D + vec4(1.0 - tex2D.a) * destinationColor;
gl_FragColor = result;
}
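Because Core Graphics produces premultiplied alpha, this is the premultiplied form of the "over" blend. For comparison, with a straight-alpha (non-premultiplied) texture over an opaque background, the same idea would look something like this:
varying lowp vec4 destinationColor;
varying lowp vec2 texCoordOut;
uniform sampler2D tex;
void main() {
    lowp vec4 tex2D = texture2D(tex, texCoordOut);
    // Straight-alpha "over": mix the background and texture colours by the texture's alpha.
    gl_FragColor = vec4(mix(destinationColor.rgb, tex2D.rgb, tex2D.a), 1.0);
}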
Related
My app (iOS 8.0) renders streaming video in a GLKViewController with OpenGL ES.
The problem is that the video is cut off in landscape mode.
Problem capture: http://i.stack.imgur.com/3SrpF.png
Maybe I need to scale the texture, using the vertex shader.
typedef struct {
float Position[3];
float Color[4];
float TexCoordp[2];
} Vertex;
const Vertex Vertices[] = {
{{1, -1, 0}, {1, 1, 1, 1}, {1, 1}},
{{1, 1, 0}, {1, 1, 1, 1}, {1, 0}},
{{-1, 1, 0}, {1, 1, 1, 1}, {0, 0}},
{{-1, -1, 0}, {1, 1, 1, 1}, {0, 1}}
};
const GLubyte Indices[] = {
0, 1, 2,
2, 3, 0
};
#pragma mark - shaders
#define STRINGIZE(x) #x
#define STRINGIZE2(x) STRINGIZE(x)
#define SHADER_STRING(text) @ STRINGIZE2(text)
NSString *const vertexShaderString = SHADER_STRING
(
attribute vec4 Position; // 1
attribute vec4 SourceColor; // 2
varying vec4 DestinationColor; // 3
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
void main(void) { // 4
DestinationColor = SourceColor; // 5
gl_Position = Position; // 6
TexCoordOut = TexCoordIn; // New
}
);
setup GL Code
- (void)setupGL
{
[EAGLContext setCurrentContext:self.context];
glGenBuffers(1, &_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glGenBuffers(1, &_indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Indices), Indices, GL_STATIC_DRAW);
// init the update render semaphore
_textureUpdateRenderSemaphore = dispatch_semaphore_create((long)1);
}
Maybe I need to change "Vertices", but I can't find any clue.
Thank you so much for your help.
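One possible direction, sketched under the assumption that the video's pixel size (videoWidth x videoHeight) and the view's size are available as floats, is to aspect-fit the quad instead of always using the full-screen -1..1 positions:
// Aspect-fit: shrink the quad on one axis so the video keeps its proportions
// (this goes wherever the vertex data is built, before glBufferData).
float viewAspect  = viewWidth  / viewHeight;
float videoAspect = videoWidth / videoHeight;
float sx = 1.0f, sy = 1.0f;
if (videoAspect > viewAspect) {
    sy = viewAspect / videoAspect;   // video is wider: letterbox (shrink vertically)
} else {
    sx = videoAspect / viewAspect;   // video is taller: pillarbox (shrink horizontally)
}
const Vertex FittedVertices[] = {
    {{ sx, -sy, 0}, {1, 1, 1, 1}, {1, 1}},
    {{ sx,  sy, 0}, {1, 1, 1, 1}, {1, 0}},
    {{-sx,  sy, 0}, {1, 1, 1, 1}, {0, 0}},
    {{-sx, -sy, 0}, {1, 1, 1, 1}, {0, 1}}
};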
I have a cube which I'm drawing on screen, and I have enabled depth testing so that it isn't see-through. From the front and the back the cube looks good,
but when translating and rotating the cube the results are strange. Here are some photos to demonstrate:
front:
sides:
This see-through effect is not welcome, of course.
Here is some relevant code:
Indices & vertices:
const Vertex Vertices[] = {
{{1, -1, 0}, {1, 1, 1, 1}, {1, 0}},
{{1, 1, 0}, {1, 1, 1, 1}, {1, 1}},
{{-1, 1, 0}, {1, 1, 1, 1}, {0, 1}},
{{-1, -1, 0}, {1, 1, 1, 1}, {0, 0}},
{{1, -1, -1}, {1, 1, 1, 1}, {1, 0}},
{{1, 1, -1}, {1, 1, 1, 1}, {1, 1}},
{{-1, 1, -1}, {1, 1, 1, 1}, {0, 1}},
{{-1, -1, -1}, {1, 1, 1, 1}, {0, 0}}
};
const GLubyte Indices[] = {
// Front
0, 1, 2
2, 3, 0,
// Back
4, 6, 5,
4, 7, 6,
// Left
2, 7, 3,
7, 6, 2,
// Right
0, 4, 1,
4, 1, 5,
// Top
6, 2, 1,
1, 6, 5,
// Bottom
0, 3, 7,
0, 7, 4,
};
Layer setup:
- (void)setupLayer {
_eaglLayer = (CAEAGLLayer*) self.layer;
_eaglLayer.opaque = YES;
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_NEVER);
}
Depth buffer setup:
- (void)setupDepthBuffer {
glGenRenderbuffers(1, &_depthRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _depthRenderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, self.frame.size.width, self.frame.size.height);
}
Rendering:
- (void)render:(CADisplayLink*)displayLink {
glClearColor(0, 0, 0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
CC3GLMatrix *projection = [CC3GLMatrix matrix];
float h = 4.0f * self.frame.size.height / self.frame.size.width;
[projection populateFromFrustumLeft:-2 andRight:2 andBottom:-h/2 andTop:h/2 andNear:4 andFar:10];
glUniformMatrix4fv(_projectionUniform, 1, 0, projection.glMatrix);
CC3GLMatrix *modelView = [CC3GLMatrix matrix];
[modelView populateFromTranslation:CC3VectorMake(sin(CACurrentMediaTime()), 0, -5)];
_currentRotation += displayLink.duration * 90;
[modelView rotateBy:CC3VectorMake(_currentRotation, _currentRotation, 0)];
glUniformMatrix4fv(_modelViewUniform, 1, 0, modelView.glMatrix);
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE,
sizeof(Vertex), 0);
glVertexAttribPointer(_colorSlot, 4, GL_FLOAT, GL_FALSE,
sizeof(Vertex), (GLvoid*) (sizeof(float)* 3));
glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE,
sizeof(Vertex), (GLvoid*) (sizeof(float) * 7));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _floorTexture);
glUniform1i(_textureUniform, 0);
glDrawElements(GL_TRIANGLE_STRIP, sizeof(Indices)/sizeof(Indices[0]),
GL_UNSIGNED_BYTE, 0);
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
EDIT:
adding the texture mapping function:
- (GLuint)setupTexture:(NSString *)fileName {
// 1
CGImageRef spriteImage = [UIImage imageNamed:fileName].CGImage;
if (!spriteImage) {
NSLog(#"Failed to load image %#", fileName);
exit(1);
}
// 2
size_t width = CGImageGetWidth(spriteImage);
size_t height = CGImageGetHeight(spriteImage);
GLubyte * spriteData = (GLubyte *) calloc(width*height*4, sizeof(GLubyte));
CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width*4,
CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
// 3
CGContextDrawImage(spriteContext, CGRectMake(0, 0, width, height), spriteImage);
CGContextRelease(spriteContext);
// 4
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
free(spriteData);
return texName;
}
What am I doing wrong here?
glDepthFunc(GL_NEVER); - at the very least, replace this with GL_LEQUAL. With GL_NEVER, no fragment ever passes the depth test.
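A corrected depth setup would then look something like this; note that the depth renderbuffer also has to be attached to the framebuffer, which is not shown in the question (sketched here assuming a single framebuffer object):
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);   // pass fragments that are nearer (or equally near)
// When the framebuffer is created, attach the depth renderbuffer as well:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                          GL_RENDERBUFFER, _depthRenderBuffer);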
I am using the following code to render a model stored in a .h file with OpenGL.
However, the result comes out as disjointed triangles rather than the whole model. Please see the attached image. Can anyone guide me as to why this is happening? I am new to OpenGL.
I want to develop an app like - https://itunes.apple.com/us/app/mclaren-p1/id562173543?mt=8
- (void)renderFrameQCAR
{
[self setFramebuffer];
// Clear colour and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Render video background and retrieve tracking state
QCAR::State state = QCAR::Renderer::getInstance().begin();
QCAR::Renderer::getInstance().drawVideoBackground();
//NSLog(#"active trackables: %d", state.getNumActiveTrackables());
if (QCAR::GL_11 & qUtils.QCARFlags) {
glEnable(GL_TEXTURE_2D);
glEnable(GL_LIGHTING);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
}
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the pose matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
for (int i = 0; i < state.getNumTrackableResults(); ++i) {
// Get the trackable
const QCAR::TrackableResult* result = state.getTrackableResult(i);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
// Choose the texture based on the target name
int targetIndex = 0; // "stones"
if (!strcmp(trackable.getName(), "chips"))
targetIndex = 1;
else if (!strcmp(trackable.getName(), "tarmac"))
targetIndex = 2;
Object3D *obj3D = [objects3D objectAtIndex:targetIndex];
// Render using the appropriate version of OpenGL
if (QCAR::GL_11 & qUtils.QCARFlags) {
// Load the projection matrix
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(qUtils.projectionMatrix.data);
// Load the model-view matrix
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelViewMatrix.data);
glTranslatef(0.0f, 0.0f, -kObjectScale);
glScalef(kObjectScale, kObjectScale, kObjectScale);
// Draw object
glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*)obj3D.texCoords);
glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*)obj3D.vertices);
glVertexPointer(3, GL_FLOAT, 0, MclarenInfoVerts);
glNormalPointer(GL_FLOAT, 0, MclarenInfoNormals);
glTexCoordPointer(2, GL_FLOAT, 0, MclarenInfoTexCoords);
// draw data
glDrawArrays(GL_TRIANGLES, 0, MclarenInfoNumVerts);
}
#ifndef USE_OPENGL1
else {
// OpenGL 2
QCAR::Matrix44F modelViewProjection;
ShaderUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale, &modelViewMatrix.data[0]);
ShaderUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale, &modelViewMatrix.data[0]);
ShaderUtils::multiplyMatrix(&qUtils.projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.vertices);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.normals);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.texCoords);
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (const GLfloat*)&modelViewProjection.data[0]);
glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
glVertexPointer(3, GL_FLOAT, 0, MclarenInfoVerts);
glNormalPointer(GL_FLOAT, 0, MclarenInfoNormals);
glTexCoordPointer(2, GL_FLOAT, 0, MclarenInfoTexCoords);
// draw data
glDrawArrays(GL_TRIANGLES, 0, MclarenInfoNumVerts);
ShaderUtils::checkGlError("EAGLView renderFrameQCAR");
}
#endif
}
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
if (QCAR::GL_11 & qUtils.QCARFlags) {
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
#ifndef USE_OPENGL1
else {
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
}
#endif
QCAR::Renderer::getInstance().end();
[self presentFramebuffer];
}
OK, I found the issue. It turned out my designer was giving me a file with quadrilateral faces, whereas Vuforia/Qualcomm/Unity etc. only recognise a triangulated .obj. For anyone who gets stuck here :) just ask your designer to export the mesh triangulated, not as quads.
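If re-exporting the model isn't an option, a quad index list can also be triangulated in code. A minimal sketch (quadIndices and quadCount are hypothetical names for the designer's data):
// Each quad (v0, v1, v2, v3) becomes two triangles: (v0, v1, v2) and (v0, v2, v3).
unsigned int triCount = quadCount * 2;
GLushort *triIndices = (GLushort *)malloc(triCount * 3 * sizeof(GLushort));
for (unsigned int q = 0; q < quadCount; ++q) {
    const GLushort *quad = &quadIndices[q * 4];
    GLushort *tri = &triIndices[q * 6];
    tri[0] = quad[0]; tri[1] = quad[1]; tri[2] = quad[2];
    tri[3] = quad[0]; tri[4] = quad[2]; tri[5] = quad[3];
}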
Basically, what I want to do is generate my vertex coordinates programmatically instead of storing them in a statically predefined array. Unfortunately I have not been able to convert a very simple example to a dynamic array.
Everything works fine if I stick to static arrays:
typedef struct {
GLfloat Position[3];
GLfloat Color[4];
GLfloat TexCoord[2];
float Normal[3];
} Vertex;
Vertex sphereVertices[] = {
{{1, -1, 1}, {1, 0, 0, 1}, {1, 0}, {0, 0, 1}},
{{1, 1, 1}, {0, 1, 0, 1}, {1, 1}, {0, 0, 1}},
{{-1, 1, 1}, {0, 0, 1, 1}, {0, 1}, {0, 0, 1}},
{{-1, -1, 1}, {0, 0, 0, 1}, {0, 0}, {0, 0, 1}}
};
GLubyte sphereIndices [] = {
0, 1, 2,
2, 3, 0
};
...
glGenBuffers(1, &sphereIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, sphereIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(sphereIndices), sphereIndices, GL_STATIC_DRAW);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const GLvoid *) offsetof(Vertex, Position));
...
glDrawElements(GL_TRIANGLES, 6 * sizeof(GLubyte), GL_UNSIGNED_BYTE, 0);
As soon as I switch my indices to a dynamically allocated array, only the first triangle shows up.
GLubyte *sphereIndices;
+(void)initialize {
sphereIndices = malloc(6 * sizeof(GLubyte));
sphereIndices[0] = 0;
sphereIndices[1] = 1;
sphereIndices[2] = 2;
sphereIndices[3] = 2;
sphereIndices[4] = 3;
sphereIndices[5] = 0;
}
Probably this has something to do with pointers. Does anybody know what I am doing wrong?
Thanks!
I was able to solve the problem myself. As I already suspected, the problem had to do with my incomplete understanding of pointers.
As far as I found out, there is no way to get the size of a dynamically allocated array from the pointer alone. Because of that, sizeof(sphereIndices) always returns 4, which is the size of the pointer, not the size of the data. That's why glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(sphereIndices), sphereIndices, GL_STATIC_DRAW) only sends 4 indices to OpenGL instead of 6.
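A quick illustration of the difference (the pointer size here assumes 32-bit iOS):
GLubyte staticIndices[6];                                 // a real array
GLubyte *dynamicIndices = malloc(6 * sizeof(GLubyte));    // just a pointer
size_t a = sizeof(staticIndices);    // 6: the size of the whole array
size_t b = sizeof(dynamicIndices);   // 4: the size of the pointer, whatever was allocated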
I fixed the problem by introducing an additional variable to keep track of the number of indices.
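A minimal sketch of that fix (the count variable name is my own):
GLubyte *sphereIndices;
GLsizei sphereIndexCount;
+(void)initialize {
    sphereIndexCount = 6;
    sphereIndices = malloc(sphereIndexCount * sizeof(GLubyte));
    const GLubyte quad[] = { 0, 1, 2, 2, 3, 0 };
    memcpy(sphereIndices, quad, sphereIndexCount * sizeof(GLubyte));
}
...
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sphereIndexCount * sizeof(GLubyte), sphereIndices, GL_STATIC_DRAW);
...
glDrawElements(GL_TRIANGLES, sphereIndexCount, GL_UNSIGNED_BYTE, 0);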
I am learning OpenGL ES 2.0.
I am trying to apply an orthographic projection in OpenGL ES 2.0.
I draw a square, but I don't actually get a square shape on the screen.
I am not sure which part I am missing.
Thank you for your help!
There are some methods in setupRenderingEnv that I did not post, but those just set up the framebuffers and they work fine.
m_program is also created fine.
Again, thank you for your help.
// my vertex shader
attribute vec4 Position;
attribute vec4 SourceColor;
uniform mat4 Projection;
varying vec4 DestinationColor;
void main(void)
{
DestinationColor = SourceColor;
gl_Position = Projection * Position;
}
// my drawing file
typedef struct
{
float Position[3];
float Color[4];
}Vertex;
const Vertex Vertices[] =
{
{{100, -100, 0}, {1, 0, 0, 1}},
{{100, 100, 0}, {0, 1, 0, 1}},
{{-100, 100, 0}, {0, 0, 1, 1}},
{{-100, -100, 0}, {0, 0, 0, 1}}
};
const GLubyte Indices[] =
{
0, 1, 2,
2, 3, 0
};
- (void)setupRenderingEnv
{
[super setupRenderingEnv];
[self setupVertexBufferObjects];
[self setupRunLoop];
[self applyOrthoWithX:self.frame.size.width andY:self.frame.size.height];
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
}
//-- used for applying ortho in opengl es 2.0
- (void)applyOrthoWithX:(float)maxX andY:(float)maxY
{
float a = 1.0f / maxX;
float b = 1.0f / maxY;
float ortho[16] =
{
a, 0, 0, 0,
0, b, 0, 0,
0, 0, -1, 0,
0, 0, 0, 1
};
GLint projectionUniform = glGetUniformLocation(super.m_programHandle, "Projection");
glUniformMatrix4fv(projectionUniform, 1, 0, &ortho[0]);
}
//-- overriding drawCandle. it render image, draw candle
- (void)drawCandle
{
glClearColor(0, 104.0/255, 55.0/255, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
GLuint positionSlot = glGetAttribLocation(super.m_programHandle, "Position");
GLuint colorSlot = glGetAttribLocation(super.m_programHandle, "SourceColor");
glEnableVertexAttribArray(positionSlot);
glEnableVertexAttribArray(colorSlot);
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE,
sizeof(Vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE,
sizeof(Vertex), (GLvoid *)(sizeof(float) * 3));
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]), GL_UNSIGNED_BYTE, 0);
glDisableVertexAttribArray(positionSlot);
glDisableVertexAttribArray(colorSlot);
[super drawCandle];
}
What shape is your viewport? If it's not square then that's the problem. The matrix you're creating - scaling by inverse width and inverse height - is going to make the width always be 1 unit wide and the height 1 unit tall. If the width and height aren't the same number of pixels, then squares won't draw square. You need to account for the aspect ratio of your viewport. Off the top of my head, I think it would be something more like this:
float ortho [ 16 ] = {
a / b, 0, 0, 0,
0, b, 0, 0,
0, 0, -1, 0,
0, 0, 0, 1
};
(I might have a/b inverted - can't remember)
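For comparison, a conventional orthographic matrix expressed in pixel units keeps squares square because both axes use the same units. A generic sketch (not tied to the poster's class; column-major, as glUniformMatrix4fv expects with transpose set to 0):
// Maps x in [left, right] and y in [bottom, top] to [-1, 1].
void buildOrtho(float left, float right, float bottom, float top,
                float nearZ, float farZ, float m[16])
{
    memset(m, 0, 16 * sizeof(float));
    m[0]  =  2.0f / (right - left);
    m[5]  =  2.0f / (top - bottom);
    m[10] = -2.0f / (farZ - nearZ);
    m[12] = -(right + left) / (right - left);
    m[13] = -(top + bottom) / (top - bottom);
    m[14] = -(farZ + nearZ) / (farZ - nearZ);
    m[15] =  1.0f;
}
// e.g. buildOrtho(0, viewWidthInPixels, 0, viewHeightInPixels, -1, 1, ortho);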