I have got the H.264 data and decoded it into YUV buffers, and I can display the YUV data with OpenGL on iOS 5 and iOS 6, but when I try to run it on my iPad (iOS 4.2.1) it does not display correctly: the whole screen is just green. I don't know why. Here is my code:
-(void)playVideoData:(void *)data
{
if (!_textureY)
{
glGenTextures(1, &_textureY);
glGenTextures(1, &_textureU);
glGenTextures(1, &_textureV);
}
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _textureY);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED_EXT, _videoW, _videoH, 0, GL_RED_EXT, GL_UNSIGNED_BYTE, y);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, _textureU);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED_EXT, _videoW/2, _videoH/2, 0, GL_RED_EXT, GL_UNSIGNED_BYTE, u);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, _textureV);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED_EXT, _videoW/2, _videoH/2, 0, GL_RED_EXT, GL_UNSIGNED_BYTE, v);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
[self render];
}
- (void)render
{
glViewport(viewportx,viewporty, VIEWWIDTH, VIEWHEIGHT);
glClearColor(0.0, 0, 0.0, 0);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(programId);
// Update uniform value
//glUniform1f(uniforms[UNIFORM_TRANSLATE], 0.0f);
GLuint textureUniformY = glGetUniformLocation(programId, "SamplerY");
GLuint textureUniformU = glGetUniformLocation(programId, "SamplerU");
GLuint textureUniformV = glGetUniformLocation(programId, "SamplerV");
// Update attribute values
glVertexAttribPointer(ARDRONE_ATTRIB_POSITION, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ARDRONE_ATTRIB_POSITION);
glVertexAttribPointer(ARDRONE_ATTRIB_TEXCOORD, 2, GL_FLOAT, 0, 0, coordVertices);
glEnableVertexAttribArray(ARDRONE_ATTRIB_TEXCOORD);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _textureY);
glUniform1i(textureUniformY, 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, _textureU);
glUniform1i(textureUniformU, 1);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, _textureV);
glUniform1i(textureUniformV, 2);
}
-(void)drawFrame2
{
if (context != nil)
{
//make it the current context for rendering
[EAGLContext setCurrentContext:context];
//if our framebuffers have not been created yet, do that now!
if (!defaultFramebuffer)
[self createFramebuffer];
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebuffer);
[self playVideoData];
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER];
}
else
NSLog(#"Context not set!");
}
I have already put the data into the y, u, and v buffers.
Here are my fsh and vsh:
vsh:
attribute vec4 position; // 1
//uniform float translate;
attribute vec2 TexCoordIn; // New
varying vec2 TexCoordOut; // New
void main(void)
{
gl_Position = position; // 6
TexCoordOut = TexCoordIn;
}
fsh:
varying lowp vec2 TexCoordOut;
uniform sampler2D SamplerY;
uniform sampler2D SamplerU;
uniform sampler2D SamplerV;
void main(void)
{
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = texture2D(SamplerY, TexCoordOut).r;
yuv.y = texture2D(SamplerU, TexCoordOut).r - 0.5;
yuv.z = texture2D(SamplerV, TexCoordOut).r - 0.5;
rgb = mat3( 1, 1, 1,
0, -0.39465, 2.03211,
1.13983, -0.58060, 0) * yuv;
gl_FragColor = vec4(rgb, 1);
}
On iOS 4.2 there are no errors and the shaders compile OK, but it just cannot display...
This is because the GL_RED_EXT extension that you are using to upload your textures was only added in iOS 5.0 and isn't present in iOS 4.2. You won't be able to upload your YUV textures in this manner, so you'll need to rewrite that part of your code. Also, I believe this extension is only supported on the iPad 2 and newer devices, not the original iPad and older iPhones, so even on iOS 5+ you won't be able to use it on the older hardware.
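A minimal fallback sketch for the upload path, assuming the same _textureY/_textureU/_textureV handles and y/u/v buffers as the question's code: on iOS 4.x each plane can be uploaded as a single-channel GL_LUMINANCE texture instead, which replicates the value into the .r/.g/.b channels, so the fragment shader's .r reads keep working unchanged:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _textureY);
// GL_LUMINANCE is available on all devices, unlike GL_RED_EXT
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, _videoW, _videoH, 0,
             GL_LUMINANCE, GL_UNSIGNED_BYTE, y);
// Repeat for _textureU and _textureV at _videoW/2 x _videoH/2 with the u/v buffers.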
Related
I have drawing code that first draws the background without a texture, then overlays the texture on top of it. I'm currently using glBindTexture(GL_TEXTURE_2D, 0) to un-bind the texture after performing the second pass of drawing.
However, this is generating the following warning when I analyse the GPU performance in Xcode:
When I remove the code to un-bind the texture, the warning disappears, but things aren't drawn correctly. The texture that I'm drawing doesn't have mipmapping enabled, so it's not coming from there.
Drawing Code:
glClear(GL_COLOR_BUFFER_BIT);
{ // Drawing
glViewport(0, 0, frameWidth, frameHeight);
GLuint vertexCount = animBuffer.vertexCounts[animIndex];
glBindBuffer(GL_ARRAY_BUFFER, animBuffer.vertexBuffers[animIndex]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, animBuffer.indexBuffers[animIndex]);
glVertexAttribPointer(positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(vertex), 0);
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*3));
glVertexAttribPointer(textureCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(vertex), (GLvoid*)(sizeof(float)*7));
glDrawElements(GL_TRIANGLE_STRIP, vertexCount, GL_UNSIGNED_SHORT, 0);
// glEnable(GL_TEXTURE_2D) (tried this, gives another warning)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(textureUniform, 0);
glBindBuffer(GL_ARRAY_BUFFER, textureColorBuffer); // Transparent color buffer to allow the texture to be overlaid.
glVertexAttribPointer(colorSlot, 4, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(sizeof(float)*3));
glDrawElements(GL_TRIANGLE_STRIP, vertexCount, GL_UNSIGNED_SHORT, 0);
// Unbind texture.... this line is causing the error.
glBindTexture(GL_TEXTURE_2D, 0);
// glDisable(GL_TEXTURE_2D) (tried this, gives another warning)
}
[context presentRenderbuffer:GL_RENDERBUFFER];
Texture Loading Code:
CGImageRef textureImage = image.CGImage;
size_t width = CGImageGetWidth(textureImage);
size_t height = CGImageGetHeight(textureImage);
GLubyte* spriteData = malloc(width*height*4);
CGColorSpaceRef cs = CGImageGetColorSpace(textureImage);
CGContextRef c = CGBitmapContextCreate(spriteData, width, height, 8, width*4, cs, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(cs);
CGContextScaleCTM(c, 1, -1);
CGContextTranslateCTM(c, 0, -CGContextGetClipBoundingBox(c).size.height);
CGContextDrawImage(c, (CGRect){CGPointZero, {width, height}}, textureImage);
CGContextRelease(c);
GLuint glTex;
glGenTextures(1, &glTex);
glBindTexture(GL_TEXTURE_2D, glTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei)width, (GLsizei)height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
glBindTexture(GL_TEXTURE_2D, 0);
free(spriteData);
return glTex;
How should I go about un-binding textures in order to remove this warning?
EDIT:
I have tried using glEnable(GL_TEXTURE_2D) & glDisable(GL_TEXTURE_2D) before and after the textured drawing, although this now gives me the following warning:
I think the easiest way to avoid this warning is to keep the texture bound, but just send a uniform to the fragment shader indicating whether to blend the texture or not.
So, before drawing just add:
glUniform1i(drawTexInput, GL_FALSE); // Or GL_TRUE to draw.
And in the fragment shader:
varying lowp vec4 destinationColor;
uniform bool drawTex;
varying lowp vec2 texCoordOut;
uniform sampler2D tex;
void main() {
lowp vec4 result;
if (drawTex) {
lowp vec4 tex2D = texture2D(tex, texCoordOut);
result = tex2D + vec4(1.0 - tex2D.a) * destinationColor; // Alpha blending
} else {
result = destinationColor;
}
gl_FragColor = result;
}
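For completeness, a sketch of the Objective-C side; drawTexInput is assumed to be the location of the drawTex uniform declared in the shader above, and program is a placeholder for your shader program handle:
GLint drawTexInput = glGetUniformLocation(program, "drawTex");
glUniform1i(drawTexInput, GL_TRUE);  // textured pass
// ... draw the textured geometry ...
glUniform1i(drawTexInput, GL_FALSE); // untextured pass, texture stays bound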
I'm attempting to convert camera frames in NV12 format (YUV 4:2:0 with the U and V samples interleaved) to YV12/I420 format (YUV 4:2:0 with separate U and V planes).
For performance reasons, I'm trying to do this conversion in an OpenGL ES 2.0 shader.
The shaders are fairly simple. I separate out the Y and UV channels, then pass the UV to a shader that splits it into a U channel in gl_FragData[0] and a V channel in gl_FragData[1].
Here is the Fragment Shader:
precision mediump float;
uniform sampler2D t_texture_uv;
varying highp vec2 uvVarying;
void main()
{
/*
vec2 uvu,uvv;
int x,y;
x = uvVarying.x;
y = uvVarying.y;
uvu = vec2(x*2,y);
uvv = vec2((x*2)+1,y);
gl_FragData[0] = texture2D(t_texture_uv, uvu);
gl_FragData[1] = texture2D(t_texture_uv, uvv);
*/
// this is the same as the logic above, but is less processor intensive
gl_FragData[0] = texture2D(t_texture_uv, vec2( uvVarying.x*2 ,uvVarying.y)); // u-plane
gl_FragData[1] = texture2D(t_texture_uv, vec2((uvVarying.x*2)+1,uvVarying.y)); // v-plane
}
And the vertex shader:
attribute vec2 position;
attribute vec2 uv;
uniform mat4 proj_matrix;
varying vec2 uvVarying;
void main()
{
gl_Position = proj_matrix * vec4(vec3(position.xy, 0.0), 1.0);
uvVarying = uv;
}
My problem is that I'm lost as to how to retrieve the data from the gl_FragData buffers back in my Objective-C code. I tried glReadPixels, but it doesn't seem to work.
This is my code, but I think I've gone down the wrong path.
- (void) nv12_to_yv12_converter:(unsigned char*)YBuf UVBuff:(unsigned char*)UVBuf Width:(GLsizei)UVWidth Height:(GLsizei)UVHeight {
unsigned char *Ubufout;
unsigned char *Vbufout;
EAGLContext *context;
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
if (!context || ![EAGLContext setCurrentContext:context]) {
NSLog(#"Failed to create ES context");
}
[self loadShaders];
glGenFramebuffers(1, &_framebuffer);
glGenRenderbuffers(1, &_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, _framebuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, UVWidth, UVHeight);
// [context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer*)self.view.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _renderbuffer);
_vertices[0] = -1.0f; // x0
_vertices[1] = -1.0f; // y0
_vertices[2] = 1.0f; // ..
_vertices[3] = -1.0f;
_vertices[4] = -1.0f;
_vertices[5] = 1.0f;
_vertices[6] = 1.0f; // x3
_vertices[7] = 1.0f; // y3
int orientation = 0;
static const GLfloat texCoords[] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
glViewport(0, 0, _backingWidth, _backingHeight);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(_program);
if (!validateProgram(_program))
{
NSLog(#"Failed to validate program");
return;
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glDeleteTextures(1, &_textures[0]);
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &_textures[0]);
glBindTexture(GL_TEXTURE_2D, _textures[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glEnable(GL_TEXTURE_2D);
glUniform1i(glGetUniformLocation(_program, "t_texture_uv"), 0);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_LUMINANCE,
16,
8,
0,
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
UVBuf);
_uniformMatrix = glGetUniformLocation(_program, "proj_matrix");
GLfloat modelviewProj[16];
mat4f_LoadOrtho(-1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, modelviewProj);
glUniformMatrix4fv(_uniformMatrix, 1, GL_FALSE, modelviewProj);
//draw RECT
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, _vertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
//ATTRIB_TEXTUREPOSITON
glVertexAttribPointer(ATTRIB_UV, 2, GL_FLOAT, 0, 0, texCoords);
glEnableVertexAttribArray(ATTRIB_UV);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glReadPixels(0, 0, UVWidth, UVHeight, GL_COLOR_ATTACHMENT0, GL_UNSIGNED_BYTE, &Ubufout);
glReadPixels(0, 0, UVWidth, UVHeight, GL_COLOR_ATTACHMENT0+1, GL_UNSIGNED_BYTE, &Vbufout);
NSLog(#"Complete");
}
Can anyone shed some light on how to retrieve the gl_FragData buffers?
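For reference, glReadPixels on OpenGL ES 2.0 takes a pixel format/type pair (GL_RGBA/GL_UNSIGNED_BYTE is the one pair the spec guarantees), not an attachment enum, and it reads from the currently bound framebuffer into caller-allocated memory; writing to gl_FragData[1] also requires a second color attachment, which plain ES 2.0 only offers through the GL_EXT_draw_buffers extension. A minimal read-back sketch, assuming an RGBA color attachment of UVWidth x UVHeight:
unsigned char *pixels = malloc(UVWidth * UVHeight * 4);
glReadPixels(0, 0, UVWidth, UVHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
// pixels now holds the rendered output; pull out the channel(s) you need
free(pixels);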
I am working on video calling in PJSIP and I am facing an issue with rendering the video using OpenGL.
I am able to see the real-time video, but with a green tint; I presume it is an issue with the OpenGL texture.
I don't have any knowledge of OpenGL, so I am pasting some OpenGL code snippets that I use in my application.
Please tell me where I am going wrong; all I need to do is somehow remove the green tint from the video.
Below is the code, to the best of my knowledge:
//YUV toRGB Vertex Shader
attribute vec4 position;
attribute vec2 textureCoordinate;
varying vec2 coordinate;
void main()
{
gl_Position = position;
coordinate = textureCoordinate.xy;
}
//YUV to RGB Fragment Shader
uniform sampler2D SamplerY;
uniform sampler2D SamplerUV;
varying highp vec2 coordinate;
void main()
{
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = texture2D(SamplerY, coordinate).r;
yuv.yz = texture2D(SamplerUV, coordinate).rg - vec2(0.5, 0.5);
rgb = mat3(1, 1, 1,
0, -.21482, 2.12798,
1.28033, -.38059, 0) * yuv;
gl_FragColor = vec4(rgb, 1);
}
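Note that this fragment shader samples a two-channel SamplerUV, while the texture code below uploads three separate single-channel planes (UNIFORM_Y/UNIFORM_U/UNIFORM_V). A sketch of a fragment shader matching that three-plane upload, reusing the same conversion matrix (the sampler names are assumptions):
uniform sampler2D SamplerY;
uniform sampler2D SamplerU;
uniform sampler2D SamplerV;
varying highp vec2 coordinate;
void main()
{
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = texture2D(SamplerY, coordinate).r;
yuv.y = texture2D(SamplerU, coordinate).r - 0.5;
yuv.z = texture2D(SamplerV, coordinate).r - 0.5;
rgb = mat3(1, 1, 1,
           0, -.21482, 2.12798,
           1.28033, -.38059, 0) * yuv;
gl_FragColor = vec4(rgb, 1);
}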
static void uninitialize_gl_buffers(struct ios_stream *strm)
{
TRACE_((THIS_FILE, "uninitialize_gl_buffers"));
glDeleteFramebuffers(1, &strm->frameBufferHandle);
glDeleteRenderbuffers(1, &strm->colorBufferHandle);
}
static void initialize_gl_textures(struct ios_stream *strm)
{
TRACE_((THIS_FILE, "initialize_gl_textures"));
glGenTextures(1, &strm->lumaTexture);
glGenTextures(1, &strm->chromaUTexture);
glGenTextures(1, &strm->chromaVTexture);
}
static void uninitialize_gl_textures(struct ios_stream *strm)
{
TRACE_((THIS_FILE, "uninitialize_gl_textures"));
if (strm->lumaTexture) {
glDeleteTextures(1, &strm->lumaTexture);
strm->lumaTexture = 0;
}
if (strm->chromaUTexture) {
glDeleteTextures(1, &strm->chromaUTexture);
strm->chromaUTexture = 0;
}
if (strm->chromaVTexture) {
glDeleteTextures(1, &strm->chromaVTexture);
strm->chromaVTexture = 0;
}
}
static void allocate_gl_textures(struct ios_stream *strm,
int frameWidth, int frameHeight)
{
TRACE_((THIS_FILE, "allocate_gl_textures %dx%d", frameWidth, frameHeight));
// Y planes (init)
glActiveTexture(GL_TEXTURE0);
glGenTextures( 1, &strm->lumaTexture );
glBindTexture(GL_TEXTURE_2D, strm->lumaTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frameWidth, frameHeight, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, 0);
// U-planes (init)
glActiveTexture(GL_TEXTURE1);
glGenTextures( 1, &strm->chromaUTexture );
glBindTexture(GL_TEXTURE_2D, strm->chromaUTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frameWidth/2, frameHeight/2, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, 0);
// V-plane (init)
glActiveTexture(GL_TEXTURE2);
glGenTextures( 1, &strm->chromaVTexture );
glBindTexture(GL_TEXTURE_2D, strm->chromaVTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, frameWidth/2, frameHeight/2, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, 0);
}
static void update_gl_textures (struct ios_stream *strm)
{
//const pj_uint8_t *buffer = (const pj_uint8_t *)strm->buf;
const pj_uint8_t *buffer = (const pj_uint8_t *)strm->frame->buf;
pj_uint32_t width = strm->size.w;
pj_uint32_t height = strm->size.h;
//TRACE_((THIS_FILE, "update_gl_textures %dx%d (%d)", width, height, buffer));
// Y planes (update)
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, strm->lumaTexture);
glUniform1i(uniformLocations[UNIFORM_Y], 0);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
width, // source width
height, // source height
GL_LUMINANCE, GL_UNSIGNED_BYTE,
buffer);
// U-planes (update)
buffer += width*height;
width >>= 1;
height >>=1;
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, strm->chromaUTexture);
glUniform1i(uniformLocations[UNIFORM_U], 1);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
width, // source width
height, // source height
GL_LUMINANCE, GL_UNSIGNED_BYTE,
buffer);
// V-planes (update)
buffer += width*height;
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, strm->chromaVTexture);
glUniform1i(uniformLocations[UNIFORM_V], 2);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
width, // source width
height, // source height
GL_LUMINANCE, GL_UNSIGNED_BYTE,
buffer);
}
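A sketch of how the UNIFORM_Y/U/V locations used above would be fetched, assuming the three-sampler shader variant sketched earlier (program is a placeholder for the shader program handle):
uniformLocations[UNIFORM_Y] = glGetUniformLocation(program, "SamplerY");
uniformLocations[UNIFORM_U] = glGetUniformLocation(program, "SamplerU");
uniformLocations[UNIFORM_V] = glGetUniformLocation(program, "SamplerV");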
First of all, sorry for my broken English; I'm not a native speaker and should have worked harder at school. :S
These days I'm trying to build an AR system using OpenGL ES on iOS.
I retrieve camera frames using AVCaptureSession and send them both to a tracking algorithm and to my OpenGL ES display class (which waits for the rotation and translation from the tracking algorithm). In the meantime, I'm reading a video to display on an OpenGL plane placed at the same position as a real plane in the scene (I don't know if I am being clear).
My goal is to use only one VBO and one shader to render the whole thing (the image taken from the camera in the background, and the plane with the video texture positioned using the rotation and translation given by my tracking algorithm).
I don't know why, but nothing is displayed. Here is the render code (called each frame):
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
[self setProjectionMatrix];
glUniformMatrix4fv(_projectionUniform, 1, 0, projection.glMatrix);
//Wait for rotation and translation
@synchronized(self.trackeriOS) {
transfo = [trackeriOS getMat];
}
[self dumpMatrix:transfo];
[self fillModelViewMatrix:transfo];
glUniformMatrix4fv(_modelViewUniform, 1, 0, finalModelView);
videoTexture = [self setupTextureFromBuffer:self.buffer];
if (videoTexture == -1) {
NSLog(#"swap");
}
if (cameraVideoTexture == -1) {
NSLog(#"swap");
}
else {
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, videoTexture);
glUniform1i(_textureUniform, 0);
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]), GL_UNSIGNED_BYTE, 0);
glUniformMatrix4fv(_projectionUniform, 1, 0, modelViewIdentity);
glUniformMatrix4fv(_modelViewUniform, 1, 0, modelViewIdentity);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glUniform1i(_cameraVideotextureUniform, 1);
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]), GL_UNSIGNED_BYTE, 0);
[_context presentRenderbuffer:GL_RENDERBUFFER];
CVBufferRelease(self.buffer);
glDeleteTextures(1, &cameraVideoTexture);
glDeleteTextures(1,&videoTexture);
}
Thank you so much for your help.
Edit 2: This is how I create my OpenGL texture from a CVImageBufferRef given by the AVFoundation delegate function while reading the camera video data:
- (void)processNewCameraFrame:(CVImageBufferRef)cameraFrame;
{
dispatch_async(backgroundImageDisplay, ^(void) {
self.cameraVideoBuffer = cameraFrame;
CVPixelBufferLockBaseAddress(cameraFrame, 0);
int bufferHeight = CVPixelBufferGetHeight(cameraFrame);
int bufferWidth = CVPixelBufferGetWidth(cameraFrame);
glGenTextures(1, &cameraVideoTexture);
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame));
CVPixelBufferUnlockBaseAddress(cameraFrame, 0);
CMTime outputItemTime = [videoOutput itemTimeForHostTime:CACurrentMediaTime()];
if ([[self videoOutput] hasNewPixelBufferForItemTime:outputItemTime]) {
self.buffer = [[self videoOutput] copyPixelBufferForItemTime:outputItemTime itemTimeForDisplay:NULL];
//The above render function
[self render];
}
});
}
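One detail worth checking in a capture path like this: CVPixelBuffer rows are often padded, so the stride can be larger than bufferWidth * 4, and OpenGL ES 2.0 has no GL_UNPACK_ROW_LENGTH to compensate. A sketch of the check, using the same variable names as the code above:
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(cameraFrame);
if (bytesPerRow != bufferWidth * 4) {
    // Rows are padded: either upload row by row with glTexSubImage2D,
    // or size the texture as bytesPerRow / 4 pixels wide and crop the
    // padding with adjusted texture coordinates.
}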
Edit: I forgot to say that I set up my VBO like this:
- (void)setupVBOs {
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Indices), Indices, GL_STATIC_DRAW);
NSLog(#"OpenGL View, set up VBO 1");
}
And I set up the entry points to my shader like this:
-(void)setTextureProgramShader {
_positionSlot = glGetAttribLocation(programHandle1, "Position");
glEnableVertexAttribArray(_positionSlot);
_projectionUniform = glGetUniformLocation(programHandle1, "Projection");
_modelViewUniform = glGetUniformLocation(programHandle1, "Modelview");
_texCoordSlot = glGetAttribLocation(programHandle1, "TexCoordIn");
glEnableVertexAttribArray(_texCoordSlot);
_textureUniform = glGetUniformLocation(programHandle1, "Texture");
}
Finally, the vertex shader is:
attribute vec4 Position; // 1
uniform mat4 Projection;
uniform mat4 Modelview;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
void main(void) { // 4
gl_Position = Projection * Modelview * Position; // 6
TexCoordOut = TexCoordIn;
}
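As an aside on the code above: processNewCameraFrame generates a brand-new texture for every frame, and the render method deletes it again. A common alternative, sketched here under the assumption that the frame size is constant, is to allocate cameraVideoTexture once and only refresh its contents per frame:
// One-time setup (after the GL context exists)
glGenTextures(1, &cameraVideoTexture);
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bufferWidth, bufferHeight, 0,
             GL_BGRA, GL_UNSIGNED_BYTE, NULL);
// Per frame, while the pixel buffer is locked
glBindTexture(GL_TEXTURE_2D, cameraVideoTexture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bufferWidth, bufferHeight,
                GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame));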
Sorry for my English.
I want to display video from a file, where the frames are 4 bytes per pixel, BGRA, 1280x720.
On the Mac I just took out each frame and drew it with glDrawPixels, and that runs on the Mac, but in OpenGL ES everything works differently.
Here's the code from the Mac:
int pos = 0;
NSData *data = [[NSData alloc] initWithContentsOfFile:@"video.raw"];
glViewport(0,0,width,height);
glLoadIdentity();
glOrtho(0, width, 0, height, -1.0, 1.0);
glPixelZoom(1, -1);
glClear(GL_COLOR_BUFFER_BIT);
//glRasterPos2i(0, height);
glRasterPos2i(0, 0);
glDrawPixels(1280, 720, GL_BGRA, GL_UNSIGNED_BYTE, [data bytes]+pos);
glFinish();
Push that data to a texture with glTexSubImage2D and render the texture. Note, though, that the texture has to have power-of-two dimensions, so in your case you can make it (2048, 1024) but update only the (1280, 720) part:
CGSize videoSize = CGSizeMake(1280.0f, 720.0f); // frame size from the question
CGSize textureSize;
GLuint dimension = 1;
while (videoSize.width > dimension) {
dimension <<= 1;
}
textureSize = CGSizeMake(dimension, .0f);
dimension = 1;
while (videoSize.height > dimension) {
dimension <<= 1;
}
textureSize = CGSizeMake(textureSize.width, dimension);
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureSize.width, textureSize.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
GLfloat textureCoordinates[] = {
.0f, .0f,
.0f, videoSize.height/textureSize.height,
videoSize.width/textureSize.width, .0f,
videoSize.width/textureSize.width, videoSize.height/textureSize.height
};
To update the texture:
void *data; // points at the next 1280x720 BGRA frame
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, videoSize.width, videoSize.height, GL_RGBA, GL_UNSIGNED_BYTE, data);
Then just draw your textured quad.
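One caveat about the formats above: the frames in the question are BGRA, so uploading them as GL_RGBA swaps the red and blue channels. On iOS, the GL_APPLE_texture_format_BGRA8888 extension lets you keep GL_RGBA as the internal format and pass GL_BGRA as the external one (a sketch of the changed calls):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureSize.width, textureSize.height,
             0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, videoSize.width, videoSize.height,
                GL_BGRA, GL_UNSIGNED_BYTE, data);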