OpenGL + CUDA interop - image not displaying in window - OpenCV

Background: I read an image from disk using OpenCV, pass it to the GPU using CUDA, and now I am trying to get OpenGL to render it.
I am not using GLUT here; instead I build a Win32 application that creates its own window, inside which I render the image. I flipped the OpenCV image, and OpenGL renders it nicely when I simply pass flipped.data to glTexImage2D(). However, the same image is not rendered when I go through CUDA + OpenGL interop.
My actual images are bigger than the current one. I am using an OpenGL pixel buffer object (PBO) and an OpenGL texture to render the image; the texture lets me specify which part of the image I want to display. My grayscale image is 1024x256 (width x height) with 8-bit depth (unsigned char / GL_UNSIGNED_BYTE).
Question: I can't quite figure out what is going wrong in my code. I tried to carefully follow the CUDA C Programming Guide and register/map the CUDA resource with the PBO and the texture, as well as with the actual input data. Since my input image data comes from OpenCV, I simply copied flipped's data into the device pointer dev_inp, which I (correctly?) obtained from the mapped CUDA resource via cudaGraphicsResourceGetMappedPointer(). Yet the window does not display anything and remains black. There are no viewport changes, and the coordinates I specify between glBegin()..glEnd() are correct, as they properly map flipped's data to the texture.
Am I missing something else here? Am I mapping the CUDA resource incorrectly to the PBO or the device pointer?
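For reference, here is a minimal sketch of the register/map/copy/unmap sequence the CUDA runtime API expects for a PBO, as I understand it from the programming guide (resource, pbo and host_data are placeholders, not my exact variables):
// Sketch: register the PBO once after glBufferData, then each frame map it,
// fetch the device pointer, copy host data in, and unmap before OpenGL uses it.
cudaGraphicsResource *resource = NULL;
cudaGraphicsGLRegisterBuffer(&resource, pbo, cudaGraphicsMapFlagsNone); // once
unsigned char *dev_ptr = NULL;
size_t num_bytes = 0;
cudaGraphicsMapResources(1, &resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dev_ptr, &num_bytes, resource);
cudaMemcpy(dev_ptr, host_data, num_bytes, cudaMemcpyHostToDevice); // or run a kernel on dev_ptr
cudaGraphicsUnmapResources(1, &resource, 0);
// only after unmapping: glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo) + glTexSubImage2D(..., NULL)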
OpenGL + CUDA interop portion: This is specifically the CUDA + OpenGL interop part of my code. The function DrawOpenGLScene() is called from the WndProc() method.
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}
Entire code:
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
void DrawOpenGLScene(void);
HGLRC SetUpOpenGLContext(HWND hWnd);
GLuint tex;
GLuint pbo;
struct cudaGraphicsResource *cuda_resource;
int WINAPI WinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance,
LPSTR lpszCmdLine, int nCmdShow)
{
static char szClassName[] = "Myclass";
static char szTitle[]="A Simple Win32 API OpenGL Program";
WNDCLASS wc;
MSG msg;
HWND hWnd;
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = (WNDPROC)WndProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = hInstance;
wc.hIcon = NULL;
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = (HBRUSH)GetStockObject (BLACK_BRUSH);
wc.lpszMenuName = NULL;
wc.lpszClassName = szClassName;
if (!RegisterClass (&wc))
return 0;
hWnd = CreateWindow(szClassName, szTitle,
WS_OVERLAPPEDWINDOW |
// NEED THESE for OpenGL calls to work!
WS_CLIPCHILDREN | WS_CLIPSIBLINGS,
0, 0, 1024, 256,
NULL, NULL, hInstance, NULL);
ShowWindow(hWnd, nCmdShow);
UpdateWindow( hWnd );
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage( &msg );
DispatchMessage( &msg );
}
return(msg.wParam);
}
LRESULT CALLBACK WndProc( HWND hWnd, UINT msg,
WPARAM wParam, LPARAM lParam )
{
HDC hDC;
static HGLRC hRC; // Note this is STATIC!
PAINTSTRUCT ps;
switch (msg)
{
case WM_CREATE:
// Select a pixel format and create a rendering context
hRC = SetUpOpenGLContext(hWnd);
break;
case WM_PAINT:
// Draw the scene
// Get a DC, make RC current & associate it with this DC
hDC = BeginPaint(hWnd, &ps);
wglMakeCurrent(hDC, hRC);
DrawOpenGLScene(); // Draw
// We're done with the RC, so deselect it
wglMakeCurrent(NULL, NULL);
EndPaint(hWnd, &ps);
break;
case WM_DESTROY:
//cudaGLUnregisterBufferObject(pbo);
cudaGraphicsUnregisterResource(cuda_resource);
// Clean up and terminate
wglDeleteContext(hRC);
PostQuitMessage(0);
break;
default:
return DefWindowProc(hWnd, msg, wParam, lParam);
}
return (0);
}
//*******************************************************
// SetUpOpenGL sets the pixel format and a rendering
// context then returns the RC
//*******************************************************
HGLRC SetUpOpenGLContext(HWND hWnd)
{
static PIXELFORMATDESCRIPTOR pfd = {
sizeof (PIXELFORMATDESCRIPTOR), // struct size
1, // Version number
PFD_DRAW_TO_WINDOW | // Flags, draw to a window,
PFD_SUPPORT_OPENGL, // use OpenGL
PFD_TYPE_RGBA, // RGBA pixel values
24, // 24-bit color
0, 0, 0, // RGB bits & shift sizes.
0, 0, 0, // Don't care about them
0, 0, // No alpha buffer info
0, 0, 0, 0, 0, // No accumulation buffer
32, // 32-bit depth buffer
0, // No stencil buffer
0, // No auxiliary buffers
PFD_MAIN_PLANE, // Layer type
0, // Reserved (must be 0)
0, // No layer mask
0, // No visible mask
0 // No damage mask
};
int nMyPixelFormatID;
HDC hDC;
HGLRC hRC;
hDC = GetDC(hWnd);
nMyPixelFormatID = ChoosePixelFormat(hDC, &pfd);
SetPixelFormat(hDC, nMyPixelFormatID, &pfd);
hRC = wglCreateContext(hDC);
ReleaseDC(hWnd, hDC);
return hRC;
}
//***********************************************************
// initCUDADevice uses CUDA commands to initiate the CUDA
// enabled graphics card. This is prior to resource mapping,
// and rendering.
//***********************************************************
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}

In case someone else runs into the same problem, this thread can serve as a reference.
I solved my problem by changing only a couple of calls in DrawOpenGLScene().
It turns out that cudaGraphicsResourceGetMappedPointer() returns a device pointer derived from the OpenGL PBO and places that pointer in dev_inp. The mapped size it reports, size = sizeof(unsigned char) * flipped.rows * flipped.cols, comes from the earlier calls to glBufferData() and cudaGraphicsGLRegisterBuffer(), so no separate allocation is needed.
Once this is done, the pointer to the memory I had previously allocated with cudaMalloc() is overwritten by the call to cudaGraphicsResourceGetMappedPointer() that places the PBO pointer in dev_inp, so that allocation is simply lost. Removing the cudaMalloc() and cudaFree() calls allowed the program to run as originally intended.
To deallocate the memory, one should delete the PBO: OpenGL is the "owner" of that memory, and CUDA merely shares access to it.
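Following that ownership, a minimal cleanup sketch (assuming the resource is still registered at teardown) would be:
// CUDA only borrows the PBO's storage, so stop sharing first, then let OpenGL free it.
gpuErrchk(cudaGraphicsUnregisterResource(cuda_resource));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glDeleteBuffers(1, &pbo);   // OpenGL releases the actual memory
glDeleteTextures(1, &tex);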
The modified DrawOpenGLScene() routine is pasted below:
#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
// Clear Color and Depth Buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reset transformations
glLoadIdentity();
// ====================================================================================
// initiate GPU by setting it correctly
initCUDADevice();
// ====================================================================================
// read the image that needs to be textured
Mat image, flipped;
image = imread("K:/OCT experiment images/PA_175.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("OpenCV - image", image); // displays output
// ====================================================================================
// allocate the PBO, texture, and CUDA resource
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// generate and bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// put flipped.data at the end for cpu rendering
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0 );
// put tex at the end for cpu rendering
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy OpenCV flipped image data into the device pointer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned char *dev_inp;
//gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
//
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
gpuErrchk( cudaGraphicsUnregisterResource(cuda_resource));
gpuErrchk( cudaThreadSynchronize());
//gpuErrchk(cudaFree(dev_inp));
// ====================================================================================
// map the texture coords to the vertex coords
glBegin(GL_QUADS);
// Front Face
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering
glDisable(GL_TEXTURE_2D);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glDeleteBuffers(1, &pbo);
}

Related

iOS YUV 420v using GL_TEXTURE_2D shows wrong colour in OpenGL shader

Goal: To use GL_TEXTURE_2D instead of CVOpenGLESTextureRef to push the YUV data (format '420v', kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange) to the shaders. Why? Because I need to use glTexSubImage2D to manipulate pixels, and that call has no effect when the target is CVOpenGLESTextureGetTarget(<name>); I must use GL_TEXTURE_2D.
Problem:
I am using a custom video compositor to manipulate an AVPlayer video. When I use CVOpenGLESTextureRef like in Apple's AVCustomEdit sample code, which uses 2 separate shaders, one for Y (luma) and one for UV (chroma), video looks normal like this:
But trying to use GL_TEXTURE_2D instead makes video just show green and pink colors like this:
And like this if I use the GL_TEXTURE_2D with the fragment shader that combines both Y and UV textures it looks even worse like this:
My code:
First the track buffer and destination buffer are created:
CVPixelBufferRef foregroundSourceBuffer = [request sourceFrameByTrackID:currentInstruction.foregroundTrackID];
CVPixelBufferRef dstBuffer = [_renderContext newPixelBuffer];
Then they get passed to the render function which contains the following relevant code:
CVOpenGLESTextureRef foregroundLumaTexture = [self lumaTextureForPixelBuffer:foregroundPixelBuffer];
CVOpenGLESTextureRef foregroundChromaTexture = [self chromaTextureForPixelBuffer:foregroundPixelBuffer];
CVOpenGLESTextureRef destLumaTexture = [self lumaTextureForPixelBuffer:destinationPixelBuffer];
CVOpenGLESTextureRef destChromaTexture = [self chromaTextureForPixelBuffer:destinationPixelBuffer];
The luma texture function returns this:
CVOpenGLESTextureRef luma = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RED_EXT,
(int)CVPixelBufferGetWidth(pixelBuffer),
(int)CVPixelBufferGetHeight(pixelBuffer),
GL_RED_EXT,
GL_UNSIGNED_BYTE,
0,
&lumaTexture);
The chroma texture function returns this:
CVOpenGLESTextureRef chroma = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RG_EXT,
(int)CVPixelBufferGetWidthOfPlane(pixelBuffer, 1),
(int)CVPixelBufferGetHeightOfPlane(pixelBuffer, 1),
GL_RG_EXT,
GL_UNSIGNED_BYTE,
1,
&chromaTexture);
Now the relevant body of the render function:
glBindFramebuffer(GL_FRAMEBUFFER, self.offscreenBufferHandle);
glViewport(0, 0, (int)CVPixelBufferGetWidthOfPlane(destinationPixelBuffer, 0), (int)CVPixelBufferGetHeightOfPlane(destinationPixelBuffer, 0));
#ifdef USE_GL_TEXTURE_2D
int bufferWidth = CVPixelBufferGetWidth(foregroundPixelBuffer);
int bufferHeight = CVPixelBufferGetHeight(foregroundPixelBuffer);
GLuint frameTextureY;
GLuint frameTextureUV;
glGenTextures(1, &frameTextureY);
glGenTextures(1, &frameTextureUV);
if(CVPixelBufferLockBaseAddress(foregroundPixelBuffer, 0) == kCVReturnSuccess){
glBindTexture(GL_TEXTURE_2D, frameTextureY);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, bufferWidth, bufferHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddressOfPlane(foregroundPixelBuffer, 0));
glBindTexture(GL_TEXTURE_2D, frameTextureUV);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, bufferWidth/2, bufferHeight/2, 0, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddressOfPlane(foregroundPixelBuffer, 1));
CVPixelBufferUnlockBaseAddress(foregroundPixelBuffer, 0);
}
#endif
glActiveTexture(GL_TEXTURE0);
#ifdef USE_GL_TEXTURE_2D
glUseProgram(self.programYUV_2);
glBindTexture(GL_TEXTURE_2D, frameTextureY);
glUniformMatrix4fv(uniforms[UNIFORM_RENDER_TRANSFORM_YUV_2], 1, GL_FALSE, preferredRenderTransform);
#else
glUseProgram(self.programY);
glBindTexture(CVOpenGLESTextureGetTarget(foregroundLumaTexture), CVOpenGLESTextureGetName(foregroundLumaTexture));
glUniformMatrix4fv(uniforms[UNIFORM_RENDER_TRANSFORM_Y], 1, GL_FALSE, preferredRenderTransform);
#endif
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Attach the destination texture as a color attachment to the off screen frame buffer
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, CVOpenGLESTextureGetTarget(destLumaTexture), CVOpenGLESTextureGetName(destLumaTexture), 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
NSLog(#"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
goto bail;
}
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
#ifdef USE_GL_TEXTURE_2D
glUniform1i(uniforms[UNIFORM_TEXTURE_YUV_2_Y], 0);
glVertexAttribPointer(ATTRIB_VERTEX_Y_UV_INONESHADER, 2, GL_FLOAT, 0, 0, quadVertexData1);
glEnableVertexAttribArray(ATTRIB_VERTEX_Y_UV_INONESHADER);
glVertexAttribPointer(ATTRIB_TEXCOORD_Y_UV_INONESHADER, 2, GL_FLOAT, 0, 0, quadTextureData1);
glEnableVertexAttribArray(ATTRIB_TEXCOORD_Y_UV_INONESHADER);
#else
glUniform1i(uniforms[UNIFORM_TEXTURE_Y], 0);
glVertexAttribPointer(ATTRIB_VERTEX_Y, 2, GL_FLOAT, 0, 0, quadVertexData1);
glEnableVertexAttribArray(ATTRIB_VERTEX_Y);
glVertexAttribPointer(ATTRIB_TEXCOORD_Y, 2, GL_FLOAT, 0, 0, quadTextureData1);
glEnableVertexAttribArray(ATTRIB_TEXCOORD_Y);
#endif
glDrawArrays(GL_TRIANGLE_STRIP, 0, 5);
glActiveTexture(GL_TEXTURE1);
#ifdef USE_GL_TEXTURE_2D
//no need to use different program
glBindTexture(GL_TEXTURE_2D, frameTextureUV);
glUniformMatrix4fv(uniforms[UNIFORM_RENDER_TRANSFORM_YUV_2], 1, GL_FALSE, preferredRenderTransform);
#else
glUseProgram(self.programUV);
glBindTexture(CVOpenGLESTextureGetTarget(foregroundChromaTexture), CVOpenGLESTextureGetName(foregroundChromaTexture));
glUniformMatrix4fv(uniforms[UNIFORM_RENDER_TRANSFORM_UV], 1, GL_FALSE, preferredRenderTransform);
#endif
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glViewport(0, 0, (int)CVPixelBufferGetWidthOfPlane(destinationPixelBuffer, 1), (int)CVPixelBufferGetHeightOfPlane(destinationPixelBuffer, 1));
// Attach the destination texture as a color attachment to the off screen frame buffer
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, CVOpenGLESTextureGetTarget(destChromaTexture), CVOpenGLESTextureGetName(destChromaTexture), 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
NSLog(#"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
goto bail;
}
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
#ifdef USE_GL_TEXTURE_2D
glUniform1i(uniforms[UNIFORM_TEXTURE_YUV_2_UV], 1);
glVertexAttribPointer(ATTRIB_VERTEX_Y_UV_INONESHADER, 2, GL_FLOAT, 0, 0, quadVertexData1);
glEnableVertexAttribArray(ATTRIB_VERTEX_Y_UV_INONESHADER);
glVertexAttribPointer(ATTRIB_TEXCOORD_Y_UV_INONESHADER, 2, GL_FLOAT, 0, 0, quadTextureData1);
glEnableVertexAttribArray(ATTRIB_TEXCOORD_Y_UV_INONESHADER);
#else
glUniform1i(uniforms[UNIFORM_TEXTURE_UV], 1);
glVertexAttribPointer(ATTRIB_VERTEX_UV, 2, GL_FLOAT, 0, 0, quadVertexData1);
glEnableVertexAttribArray(ATTRIB_VERTEX_UV);
glVertexAttribPointer(ATTRIB_TEXCOORD_UV, 2, GL_FLOAT, 0, 0, quadTextureData1);
glEnableVertexAttribArray(ATTRIB_TEXCOORD_UV);
#endif
glDrawArrays(GL_TRIANGLE_STRIP, 0, 5);
glFlush();
bail:
#ifdef USE_GL_TEXTURE_2D
glDeleteTextures(1, &frameTextureY);
glDeleteTextures(1, &frameTextureUV);
#endif
CFRelease(foregroundLumaTexture);
CFRelease(foregroundChromaTexture);
CFRelease(destLumaTexture);
CFRelease(destChromaTexture);
// Periodic texture cache flush every frame
CVOpenGLESTextureCacheFlush(self.videoTextureCache, 0);
Here are my fragment shaders, that I use depending on different test cases (whether I draw the Y and UV separately or together in one):
static const char kFragmentShaderY[] = {
"varying highp vec2 texCoordVarying; \n \
uniform sampler2D s_texture_y; \n \
void main() \n \
{ \n \
gl_FragColor.r = texture2D(s_texture_y, texCoordVarying).r; \n \
}"
};
static const char kFragmentShaderUV[] = {
"varying highp vec2 texCoordVarying; \n \
uniform sampler2D s_texture_uv; \n \
void main() \n \
{ \n \
gl_FragColor.rg = texture2D(s_texture_uv, texCoordVarying).rg; \n \
}"
};
static const char kFragmentShaderYUV_2Textures[] = {
"varying highp vec2 texCoordVarying; \n \
uniform sampler2D s_texture_y; \n \
uniform sampler2D s_texture_uv; \n \
\n \
void main() \n \
{ \n \
mediump vec3 yuv;// = vec3(1.1643 * (texture2D(s_texture_y, texCoordVarying).r - 0.0625), \n \
lowp vec3 rgb; \n \
yuv.x = texture2D(s_texture_y, texCoordVarying).r; \n \
yuv.yz = texture2D(s_texture_uv, texCoordVarying).rg - vec2(0.5, 0.5); \n \
\n \
rgb = mat3( 1, 1, 1, \n \
0, -.21482, 2.12798, \n \
1.28033, -.38059, 0) * yuv; \n \
gl_FragColor = vec4(rgb, 1.0); \n \
}"
};
Using GL_TEXTURE_2D, if I use the fragment shader containing both the Y and UV textures, the video looks like #3 above. If I use the two separate fragment shaders (one for Y, one for UV), the picture is #2 above (ALMOST right, but the chroma colors are just greens and pinks). Mind you, I do comment out some of the code above to be able to use the 2 separate fragment shaders, and of course I bind to GL_TEXTURE_2D rather than the CV texture target, and so on.
Again, my problem is that I need to use GL_TEXTURE_2D instead of CVOpenGLESTextureGetTarget, but the chroma colour is wrong when I do. I wonder what I am doing wrong. Is it something to do with the YUV format being kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange instead of kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, perhaps? I have also experimented with making 3 GL_LUMINANCE textures, and many other permutations, with no luck.
It turns out the problem was with using GL_LUMINANCE and GL_LUMINANCE_ALPHA, which are apparently deprecated formats. When I switched them to GL_RED_EXT and GL_RG_EXT, it worked and the chroma colors are finally right. I hope this question and answer will save other people time.
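For anyone reading along, here is a sketch of what the switched-over glTexImage2D calls might look like, using the sizes from the question (this is my reading of the fix, not verified code, and it assumes the EXT_texture_rg extension is available):
// Y plane: one 8-bit channel per texel (GL_RED_EXT instead of GL_LUMINANCE)
glBindTexture(GL_TEXTURE_2D, frameTextureY);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED_EXT, bufferWidth, bufferHeight, 0,
             GL_RED_EXT, GL_UNSIGNED_BYTE,
             CVPixelBufferGetBaseAddressOfPlane(foregroundPixelBuffer, 0));
// Interleaved CbCr plane: two 8-bit channels per texel (GL_RG_EXT instead of GL_LUMINANCE_ALPHA)
glBindTexture(GL_TEXTURE_2D, frameTextureUV);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RG_EXT, bufferWidth / 2, bufferHeight / 2, 0,
             GL_RG_EXT, GL_UNSIGNED_BYTE,
             CVPixelBufferGetBaseAddressOfPlane(foregroundPixelBuffer, 1));
// The fragment shaders keep reading .r for Y and .rg for CbCr, as already posted above.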

Loading a BMP and Mapping to Square with openGL C++

I am simply trying to load a 2D image with OpenGL and map it to a square. I have looked at many online forums and tried many different approaches and many different files, but each attempt produces a different error. I would prefer not to use an external library.
The image loads incorrectly:
Image trying to load:
#include <SFML/Graphics.hpp>
#include <SFML/OpenGL.hpp>
#include <math.h>
#include <string>
#include <fstream>
#include <sstream>
#include <vector>
#include <iostream>
#include <stdint.h>
using namespace std;
#define size 200
#define other 100
int main()
{
GLuint _vertexBufferID;
GLfloat objects[] = {
other,other,0.0f,
size, other,0.0f,
size,size,0.0f,
other,size,0.0f
};
GLfloat texture[] = {
0.0f,0.0f,
1.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f
};
sf::Window window(sf::VideoMode(1000, 800, 32), "SFML OpenGL");//!< Create the main window
glEnable(GL_DEPTH_TEST);//!< Enable Z-buffer read and write
glDepthMask(GL_TRUE);
glClearColor(1.0f,1.0f,1.0f,1.0f);
glViewport(0.0f,0.0f,1000,800);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,1000,0,800);
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3,GL_FLOAT,0,objects);
glTexCoordPointer(2, GL_FLOAT, 0, texture);
unsigned char* datBuff[2] = {nullptr, nullptr}; // Header buffers
unsigned char* pixels = nullptr; // Pixels
BITMAPFILEHEADER* bmpHeader = nullptr; // Header
BITMAPINFOHEADER* bmpInfo = nullptr; // Info
// The file... We open it with its constructor
std::ifstream file("other.bmp", std::ios::binary);
// Allocate byte memory that will hold the two headers
datBuff[0] = new unsigned char[sizeof(BITMAPFILEHEADER)];
datBuff[1] = new unsigned char[sizeof(BITMAPINFOHEADER)];
file.read((char*)datBuff[0], sizeof(BITMAPFILEHEADER));
file.read((char*)datBuff[1], sizeof(BITMAPINFOHEADER));
// Construct the values from the buffers
bmpHeader = (BITMAPFILEHEADER*) datBuff[0];
bmpInfo = (BITMAPINFOHEADER*) datBuff[1];
// First allocate pixel memory
pixels = new unsigned char[bmpInfo->biSizeImage];
// Go to where image data starts, then read in image data
file.seekg(bmpHeader->bfOffBits);
file.read((char*)pixels, bmpInfo->biSizeImage);
unsigned char tmpRGB = 0; // Swap buffer
for (unsigned long i = 0; i < bmpInfo->biSizeImage; i += 3)
{
tmpRGB = pixels[i];
pixels[i] = pixels[i + 2];
pixels[i + 2] = tmpRGB;
}
GLuint textureBuf;
glGenTextures(1, &textureBuf); // Generate a texture
glBindTexture(GL_TEXTURE_2D, textureBuf); // Bind that texture temporarily
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bmpInfo->biWidth, bmpInfo->biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
while (window.isOpen())//!< Start game loop
{
sf::Event Event;//!< Process events
while (window.pollEvent(Event))
{ }
window.setActive();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);//!< Clear colour and depth buffer
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_QUADS,0,4);//sizeof(vertexData));
window.display();
}
return EXIT_SUCCESS;
}
To follow up on my comment guess, and assuming you can't just use an off-the-shelf BMP loader for some reason, try dumping your current attempt to load the full headers in favour of:
uint32_t data_offset;
file.seekg(10);
file.read((char *)&data_offset, 4);
uint32_t image_width, image_height;
file.seekg(18);
file.read((char *)&image_width, 4);
file.read((char *)&image_height, 4);
uint32_t image_size;
file.seekg(34);
file.read((char *)&image_size, 4);
file.seekg(data_offset);
file.read((char*)pixels, image_size);
Coupled with the rest of your code this still assumes, amongst other sins:
the machine you're running on is little endian;
the BMP is an uncompressed 24-bit BGR image (per your byte swap later);
the info header is one that either is or is an extension of BITMAPINFOHEADER.
... but if it solves the immediate problem then that would confirm that the issue is merely a false assumption about how your compiler will layout structs in memory.
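To make those assumptions explicit you could also sanity-check the header before uploading; a hypothetical sketch (the helper names read_u16/read_u32 are mine, not part of the original code) using the same <fstream> stream:
// Read little-endian fields at fixed BMP offsets and verify the loader's assumptions.
uint16_t read_u16(std::ifstream &f, std::streamoff off) {
    uint16_t v = 0; f.seekg(off); f.read((char *)&v, 2); return v; // still assumes little endian
}
uint32_t read_u32(std::ifstream &f, std::streamoff off) {
    uint32_t v = 0; f.seekg(off); f.read((char *)&v, 4); return v;
}
// usage, right after opening the file:
//   if (read_u16(file, 28) != 24 || read_u32(file, 30) != 0)  // biBitCount, biCompression
//       std::cerr << "not an uncompressed 24-bit BMP; loader assumptions do not hold\n";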

How to render a textured object to a framebuffer texture, acquire with OpenCL, convert to OpenCV

So I'm trying to combine the usefulness of all 3 libraries. I load an object with a texture:
// Load the model of the store, create a program with the shaders
GLint store = OpGL::initModel(MESH_PATH);
GLuint storeProgram = OpGL::initProgram(VS_GLSL_PATH, FS_GLSL_PATH);
glUseProgram (storeProgram);
// Find the location in the shader, for the texture image
GLuint TEX_ID = glGetUniformLocation(storeProgram, "tex_glsl");
GLuint TEX = OpGL::loadTexture(TEXTURE_IMAGE_PATH, 25);
// Bind texture in Texture Unit 0
glBindTexture(GL_TEXTURE_2D, TEX);
// Set
glUniform1i(TEX_ID, 0); // use texture 0
set up a framebuffer with a texture in GL:
GLuint g_fb = 0; // frame buffer
glGenFramebuffers (1, &g_fb);
glBindFramebuffer(GL_FRAMEBUFFER, g_fb);
GLuint g_fb_tex = 0;
glGenTextures (1, &g_fb_tex);
glBindTexture (GL_TEXTURE_2D, g_fb_tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D ( GL_TEXTURE_2D,0, GL_RGBA,640,480, 0,GL_RGBA,GL_UNSIGNED_BYTE,NULL );
glFramebufferTexture2D (GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_fb_tex, 0);
GLuint g_db = 0; // depth buffer
glGenRenderbuffers(1, &g_db);
glBindRenderbuffer(GL_RENDERBUFFER, g_db);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 640, 480);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, g_db);
/* tell the framebuffer to expect a colour output attachment*/
GLenum draw_bufs[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers (1, draw_bufs);
create storage in CL:
cl_mem CL_image; //location for the gl rendering to reside in CL
CL_image = clCreateFromGLTexture2D(context, CL_MEM_READ_WRITE, GL_TEXTURE_2D, 0, g_fb_tex, &err);
create a UMat:
cv::UMat Umat;
re-bind the original texture:
glBindTexture(GL_TEXTURE_2D, TEX);
glBindFramebuffer(GL_FRAMEBUFFER, g_fb); // just as a precaution
render:
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram (storeProgram);
glBindVertexArray (store);
glDrawArrays (GL_TRIANGLES, 0, 79227);
glfwPollEvents ();
glFlush();
// pass the images to CL
err = clEnqueueAcquireGLObjects(queue, 1, &CL_image, 0, NULL, NULL);
cl_event wait;
cv::ocl::convertFromImage(CL_image, Umat);
cv::flip(Umat,Umat,0);
cv::imshow("CVforCLimage", Umat);
cv::waitKey(1);
err = clEnqueueReleaseGLObjects(queue, 1, &CL_image, 0, 0, 0);
err = clFinish(queue);
Everything renders fine if I just send it to the screen (glBindFramebuffer(GL_FRAMEBUFFER, 0);)... but I get a blue object instead of an orange object when I render it to CV. Almost as though the original texture I loaded is not making it to the rendering.
Thanks for the help!

fragment shader: texture2D() and texelFetch()

My program displays an image captured from a webcam with OpenCV and rendered with OpenGL.
The program below generally works, but I have some questions, listed after the code.
main:
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW\glfw3.h>
#include <iostream>
#include <fstream> //std::ifstream
#include <algorithm> //std::max()
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
cv::VideoCapture capture0;
cv::VideoCapture capture1;
void captureFromWebcam(cv::Mat &frame, cv::VideoCapture &capture)
{
capture.read(frame);
}
bool initializeCapturing()
{
capture0.open(0);
capture1.open(1);
if(!capture0.isOpened() | !capture1.isOpened())
{
std::cout << "Ein oder mehrere VideoCaptures konnten nicht geöffnet werden" << std::endl;
if(!capture0.isOpened())
capture0.release();
if(!capture1.isOpened())
capture1.release();
return false;
}
return true;
}
void releaseCapturing()
{
capture0.release();
capture1.release();
}
GLuint LoadShaders(const char * vertex_file_path,const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if(VertexShaderStream.is_open())
{
std::string Line = "";
while(getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(std::getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &VertexShaderErrorMessage[0]);
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &FragmentShaderErrorMessage[0]);
// Link the program
fprintf(stdout, "Linking program\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage( std::max(InfoLogLength, int(1)) );
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
fprintf(stdout, "%s\n", &ProgramErrorMessage[0]);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
int main ()
{
int w = 640,h=480;
glfwInit();
//configure glfw
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(w, h, "OpenGL", NULL, nullptr); // windowed
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
glewInit();
initializeCapturing();
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// An array of 3-component vectors representing the 6 vertices of two triangles (a vertex is a point in three-dimensional space)
static const GLfloat g_vertex_buffer_data[] = {
//x,y,z
-1.0f, -1.0f, 0.0f, // bottom left
1.0f, 1.0f, 0.0f, // top right
-1.0f, 1.0f, 0.0f, // top left
-1.0f, -1.0f, 0.0f, // bottom left
1.0f, 1.0f, 0.0f, // top right
1.0f,-1.0f,0.0f // bottom right
};
static const GLfloat vertex_buffer_coordinates[] ={
0.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f,
0.0f,0.0f,
1.0f,1.0f,
1.0f,0.0f,
};
GLuint coordinateBuffer;
glGenBuffers(1,&coordinateBuffer);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_buffer_coordinates), vertex_buffer_coordinates, GL_STATIC_DRAW);
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
GLuint shader_programm = LoadShaders("vertex.shader","fragment.shader");
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
// what happens when the texture coordinates are outside the [0, 1] range?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// what happens when the texture is stretched/shrunk?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
cv::Mat frame;
captureFromWebcam(frame,capture0);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,frame.size().width,frame.size().height,0,GL_RGB,GL_UNSIGNED_BYTE,frame.data);
glUniform1i(glGetUniformLocation(shader_programm, "myTextureSampler"), 0);
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer : texture coordinates
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
const GLfloat color[] = {0.0f,0.2f,0.0f,1.0f};
glClearBufferfv(GL_COLOR,0,color);
glUseProgram(shader_programm);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 2*3); // Starting from vertex 0; 6 vertices total -> 2 triangles
//glDrawArrays(GL_POINTS,0,1);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glfwSwapBuffers(window);
}
glDeleteVertexArrays(1,&VertexArrayID);
glDeleteProgram(shader_programm);
glDeleteVertexArrays(1,&VertexArrayID);
releaseCapturing();
glfwTerminate();
return 1;
}
vertex shader:
#version 330 core
layout (location = 0) in vec3 vertexPosition_modelspace; //input vom vertexbuffer
layout (location = 1) in vec2 UVcoord;
out vec2 UV;
void main(void)
{
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0; // zoom factor
UV = UVcoord;
}
Fragment shader:
#version 330 core
in vec2 UV;
out vec4 color;
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
void main(void)
{
//color = texture2D(myTextureSampler,UV);
color = texelFetch(myTextureSampler,ivec2(gl_FragCoord.xy),0);
}
The commented-out line in the fragment shader that uses texture2D() won't work! The output looks like the linked image. What is wrong?
What are the differences between texture2D() and texelFetch(), and what is best practice?
The image shown with texelFetch is bluish. Any idea why that happens? (The loaded cv::Mat has no tint.)
GLSL's texture (texture2D in older GLSL versions) addresses a texture using normalized coordinates, i.e. values in the range [0, 1], and performs filtering. texelFetch addresses texels by absolute integer index within a specific mipmap level and does not filter.
Judging by your screenshot, the texture coordinates you pass to texture are wrong, or are processed wrongly; the texelFetch code does not use explicitly specified texture coordinates, but uses the viewport pixel coordinate instead.
Looking at your glVertexAttribPointer call for the texture coordinates, you tell OpenGL that there are 3 components per texture coordinate, while the array has only 2 per vertex. That is likely your problem.
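A sketch of the corrected call, assuming everything else stays as posted:
// Texture coordinates are 2 floats (u, v) per vertex, so the size argument must be 2, not 3.
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glVertexAttribPointer(
    1,        // attribute 1, matches "layout (location = 1) in vec2 UVcoord"
    2,        // size: 2 components per texture coordinate
    GL_FLOAT, // type
    GL_FALSE, // normalized?
    0,        // stride
    (void*)0  // array buffer offset
);
With correct UVs, the texture2D(myTextureSampler, UV) path in the fragment shader should then work as well.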

a lot of GREEN Color at YUV420p --> RGB in OpenGL 2.0 Shader on iOS

I want to make a movie player for iOS using ffmpeg and OpenGL ES 2.0
but I have a problem: the output RGB image has a lot of GREEN color.
Here are the code and images; the frame is 480x320 (width x height) and the texture is 512x512.
I get the YUV420p raw data from an ffmpeg AVFrame:
for (int i = 0, nDataLen = 0; i < 3; i++) {
int nShift = (i == 0) ? 0 : 1;
uint8_t *pYUVData = (uint8_t *)_frame->data[i];
for (int j = 0; j < (mHeight >> nShift); j++) {
memcpy(&pData->pOutBuffer[nDataLen], pYUVData, (mWidth >> nShift));
pYUVData += _frame->linesize[i];
nDataLen += (mWidth >> nShift);
}
}
and prepare textures for the Y, U & V channels.
//: U Texture
if (sampler1Texture) glDeleteTextures(1, &sampler1Texture);
glActiveTexture(GL_TEXTURE1);
glGenTextures(1, &sampler1Texture);
glBindTexture(GL_TEXTURE_2D, sampler1Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// This is necessary for non-power-of-two textures
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_LUMINANCE,
texW / 2,
texH / 2,
0,
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
NULL);
//: V Texture
if (sampler2Texture) glDeleteTextures(1, &sampler2Texture);
glActiveTexture(GL_TEXTURE2);
glGenTextures(1, &sampler2Texture);
glBindTexture(GL_TEXTURE_2D, sampler2Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// This is necessary for non-power-of-two textures
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_LUMINANCE,
texW / 2,
texH / 2,
0,
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
NULL);
//: Y Texture
if (sampler0Texture) glDeleteTextures(1, &sampler0Texture);
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &sampler0Texture);
glBindTexture(GL_TEXTURE_2D, sampler0Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// This is necessary for non-power-of-two textures
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glEnable(GL_TEXTURE_2D);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_LUMINANCE,
texW,
texH,
0,
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
NULL);
Rendering part is below.
int _idxU = mFrameW * mFrameH;
int _idxV = _idxU + (_idxU / 4);
// U data
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, sampler1Texture);
glUniform1i(sampler1Uniform, 1);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW / 2, // source width
mFrameH / 2, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
&_frameData[_idxU]);
// V data
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, sampler2Texture);
glUniform1i(sampler2Texture, 2);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW / 2, // source width
mFrameH / 2, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
&_frameData[_idxV]);
// Y data
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, sampler0Texture);
glUniform1i(sampler0Uniform, 0);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW, // source width
mFrameH, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
_frameData);
The vertex shader & fragment shader are below.
attribute vec4 Position;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
varying vec2 TexCoordOut_UV;
uniform mat4 Projection;
uniform mat4 Modelview;
void main()
{
gl_Position = Projection * Modelview * Position;
TexCoordOut = TexCoordIn;
}
uniform sampler2D sampler0; // Y Texture Sampler
uniform sampler2D sampler1; // U Texture Sampler
uniform sampler2D sampler2; // V Texture Sampler
varying highp vec2 TexCoordOut;
void main()
{
highp float y = texture2D(sampler0, TexCoordOut).r;
highp float u = texture2D(sampler2, TexCoordOut).r - 0.5;
highp float v = texture2D(sampler1, TexCoordOut).r - 0.5;
//y = 0.0;
//u = 0.0;
//v = 0.0;
highp float r = y + 1.13983 * v;
highp float g = y - 0.39465 * u - 0.58060 * v;
highp float b = y + 2.03211 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
The Y texture (grayscale) is correct, but U & V have a lot of green color.
So the final RGB image (Y+U+V) has a lot of GREEN color.
What's the problem?
Please help.
thanks.
Swap the u and v uniforms (assign each chroma sampler the other texture unit) and you will get the correct result.
So the fragment shader (stays the same):
uniform sampler2D sampler0; // Y Texture Sampler
uniform sampler2D sampler1; // U Texture Sampler
uniform sampler2D sampler2; // V Texture Sampler
varying highp vec2 TexCoordOut;
void main()
{
highp float y = texture2D(sampler0, TexCoordOut).r;
highp float u = texture2D(sampler2, TexCoordOut).r - 0.5;
highp float v = texture2D(sampler1, TexCoordOut).r - 0.5;
highp float r = y + 1.13983 * v;
highp float g = y - 0.39465 * u - 0.58060 * v;
highp float b = y + 2.03211 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
and rendering code:
// RENDERING
int _idxU = mFrameW * mFrameH;
int _idxV = _idxU + (_idxU / 4);
// U data
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, sampler1Texture);
GLint sampler1Uniform = glGetUniformLocation(programStandard, "sampler2");
glUniform1i(sampler1Uniform, 1);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW / 2, // source width
mFrameH / 2, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
&_frameData[_idxU]);
// V data
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, sampler2Texture);
GLint sampler2Uniform = glGetUniformLocation(programStandard, "sampler1");
glUniform1i(sampler2Uniform, 2);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW / 2, // source width
mFrameH / 2, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
&_frameData[_idxV]);
// Y data
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, sampler0Texture);
GLint sampler0Uniform = glGetUniformLocation(programStandard, "sampler0");
glUniform1i(sampler0Uniform, 0);
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
mFrameW, // source width
mFrameH, // source height
GL_LUMINANCE,
GL_UNSIGNED_BYTE,
_frameData);
//draw RECT
glVertexAttribPointer(ATTRIB_VERTEX, 3, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
//ATTRIB_TEXTUREPOSITON
glVertexAttribPointer(ATTRIB_TEXTUREPOSITON, 2, GL_FLOAT, 0, 0, textureCoords);
glEnableVertexAttribArray(ATTRIB_TEXTUREPOSITON);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
free(_frameData);
[(EAGLView *)self.view presentFramebuffer];
Conclusion: u <-> v uniforms.
Since iOS supports rgb_422 textures, instead of using three luminance textures you could use one rgb_422 texture: http://www.opengl.org/registry/specs/APPLE/rgb_422.txt.
EDIT:
Whoops, YUV420p is different from YUV422. In that case you must convert the YUV data to RGB before uploading it as a texture, due to its planar layout.
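A rough CPU-side sketch of that YUV420p-to-RGB conversion, using the same coefficients as the fragment shader above (the plane pointers, strides, and function name are assumptions for illustration, not code from this thread):
// Sketch: convert one YUV420p frame (separate Y, U, V planes) to packed RGB
// before uploading it with glTexImage2D(..., GL_RGB, GL_UNSIGNED_BYTE, rgb).
static unsigned char clamp_u8(float x) {
    return (unsigned char)(x < 0.0f ? 0.0f : (x > 255.0f ? 255.0f : x));
}
void yuv420p_to_rgb(const unsigned char *y_plane, const unsigned char *u_plane,
                    const unsigned char *v_plane, int width, int height,
                    unsigned char *rgb /* width * height * 3 bytes */)
{
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            float y = y_plane[row * width + col];
            // U and V are subsampled 2x2, so one chroma sample covers a 2x2 block of luma.
            float u = u_plane[(row / 2) * (width / 2) + (col / 2)] - 128.0f;
            float v = v_plane[(row / 2) * (width / 2) + (col / 2)] - 128.0f;
            unsigned char *out = &rgb[(row * width + col) * 3];
            out[0] = clamp_u8(y + 1.13983f * v);                 // R
            out[1] = clamp_u8(y - 0.39465f * u - 0.58060f * v);  // G
            out[2] = clamp_u8(y + 2.03211f * u);                 // B
        }
    }
}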
