Fragment shader: texture2D() and texelFetch() - OpenCV

My program captures an image from a webcam with OpenCV and displays it with OpenGL.
The program below generally works, but I have some questions listed after the code.
main:
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
#include <fstream> //std::ifstream
#include <string> //std::string, std::getline
#include <vector> //std::vector
#include <algorithm> //std::max()
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
cv::VideoCapture capture0;
cv::VideoCapture capture1;
void captureFromWebcam(cv::Mat &frame, cv::VideoCapture &capture)
{
capture.read(frame);
}
bool initializeCapturing()
{
capture0.open(0);
capture1.open(1);
if(!capture0.isOpened() || !capture1.isOpened())
{
std::cout << "One or more VideoCaptures could not be opened" << std::endl;
if(capture0.isOpened())
capture0.release();
if(capture1.isOpened())
capture1.release();
return false;
}
}
return true;
}
void releaseCapturing()
{
capture0.release();
capture1.release();
}
GLuint LoadShaders(const char * vertex_file_path,const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if(VertexShaderStream.is_open())
{
std::string Line = "";
while(getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(std::getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage( std::max(InfoLogLength, 1) );
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &VertexShaderErrorMessage[0]);
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage( std::max(InfoLogLength, 1) );
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &FragmentShaderErrorMessage[0]);
// Link the program
fprintf(stdout, "Linking program\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage( std::max(InfoLogLength, int(1)) );
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
fprintf(stdout, "%s\n", &ProgramErrorMessage[0]);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
int main ()
{
int w = 640,h=480;
glfwInit();
//configure glfw
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(w, h, "OpenGL", nullptr, nullptr); // windowed
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
glewInit();
initializeCapturing();
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// An array of vertices (singular: vertex -> a point in three-dimensional space); two triangles forming a full-screen quad
static const GLfloat g_vertex_buffer_data[] = {
//x,y,z
-1.0f, -1.0f, 0.0f, //bottom left
1.0f, 1.0f, 0.0f, //top right
-1.0f, 1.0f, 0.0f, //top left
-1.0f, -1.0f, 0.0f, //bottom left
1.0f, 1.0f, 0.0f, //top right
1.0f,-1.0f,0.0f //bottom right
};
static const GLfloat vertex_buffer_coordinates[] ={
0.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f,
0.0f,0.0f,
1.0f,1.0f,
1.0f,0.0f,
};
GLuint coordinateBuffer;
glGenBuffers(1,&coordinateBuffer);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_buffer_coordinates), vertex_buffer_coordinates, GL_STATIC_DRAW);
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
GLuint shader_programm = LoadShaders("vertex.shader","fragment.shader");
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
//what happens when the texture coordinates are out of range?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
//what happens when the texture is stretched/shrunk?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
cv::Mat frame;
captureFromWebcam(frame,capture0);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,frame.size().width,frame.size().height,0,GL_RGB,GL_UNSIGNED_BYTE,frame.data);
glUseProgram(shader_programm); // the program must be bound before setting its uniforms
glUniform1i(glGetUniformLocation(shader_programm, "myTextureSampler"), 0);
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer : UV coordinates
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
const GLfloat color[] = {0.0f,0.2f,0.0f,1.0f};
glClearBufferfv(GL_COLOR,0,color);
glUseProgram(shader_programm);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 2*3); // Starting from vertex 0; 6 vertices total -> 2 triangles
//glDrawArrays(GL_POINTS,0,1);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glfwSwapBuffers(window);
}
glDeleteVertexArrays(1,&VertexArrayID);
glDeleteProgram(shader_programm);
releaseCapturing();
glfwTerminate();
return 0;
}
vertex shader:
#version 330 core
layout (location = 0) in vec3 vertexPosition_modelspace; //input vom vertexbuffer
layout (location = 1) in vec2 UVcoord;
out vec2 UV;
void main(void)
{
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0; //homogeneous coordinate
UV = UVcoord;
}
Fragment shader:
#version 330 core
in vec2 UV;
out vec4 color;
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
void main(void)
{
//color = texture2D(myTextureSampler,UV);
color = texelFetch(myTextureSampler,ivec2(gl_FragCoord.xy),0);
}
The commented-out line in the fragment shader using texture2D() does not work; it produces the distorted output shown in the linked screenshot. What is wrong?
What are the differences between texture2D() and texelFetch(), and what is best practice?
The image shown with texelFetch() is bluish. Any idea why that happens? (The loaded cv::Mat has no tint.)

The GLSL texture function addresses the texture using normalized coordinates, i.e. values in the range [0, 1], and performs filtering; texture2D is its older, deprecated name. texelFetch addresses a texel by absolute integer index within a specific mipmap level and does not filter.
Judging by your screenshot, the texture coordinates you pass to texture are wrong or wrongly processed; the texelFetch code does not use explicitly specified texture coordinates, but the viewport pixel coordinate, which is why it still looks reasonable.
Looking at your glVertexAttribPointer call for the texture coordinates, you tell OpenGL that there are 3 elements per texture coordinate, while the array has only 2. So that's likely your problem.
As for the bluish image: OpenCV stores pixels in BGR order, but you upload the data as GL_RGB, which swaps the red and blue channels. Upload with GL_BGR as the pixel format, or convert the Mat first with cv::cvtColor.
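For concreteness, here is a minimal sketch of both fixes on the C++ side (reusing the buffers and the frame from the question; GL_BGR is a desktop-GL pixel format, and the unpack alignment matters for widths that are not a multiple of 4):
// UV coordinates are 2 floats each, so pass 2 components, not 3:
glVertexAttribPointer(
1, // attribute 1: the UV coordinates
2, // size: (u, v)
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// OpenCV stores pixels in BGR order; tell OpenGL so instead of GL_RGB:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // cv::Mat rows are tightly packed
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, frame.cols, frame.rows, 0, GL_BGR, GL_UNSIGNED_BYTE, frame.data);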

Related

How to fill the texture within a rectangle and fill (0.0, 0.0, 0.0) outside the rectangle in a destination texture?

There are 2 textures.
Destination texture: 720x1080
Source texture: 300x300
Help required:
We need to put the source texture onto the destination texture at a given location (x center, y center, height and width) and fill the rest of the destination texture with (0, 0, 0).
The challenge we are facing is that the source texture is getting enlarged to the size of the destination texture.
I tried to do it in the shader code as follows.
// Quad positions and texture coordinates
static const GLfloat square_vertices[] = {
-1.0f, -1.0f, // bottom left
1.0f, -1.0f, // bottom right
-1.0f, 1.0f, // top left
1.0f, 1.0f, // top right
};
static const GLfloat texture_vertices[] = {
0.0f, 0.0f, // bottom left
1.0f, 0.0f, // bottom right
0.0f, 1.0f, // top left
1.0f, 1.0f, // top right
};
// program
glUseProgram(upsample_program_);
// vertex storage
GLuint vbo[2];
glGenBuffers(2, vbo);
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// vbo 0
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 4 * 2 * sizeof(GLfloat), square_vertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, nullptr);
// vbo 1
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, 4 * 2 * sizeof(GLfloat), texture_vertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glVertexAttribPointer(ATTRIB_TEXTURE_POSITION, 2, GL_FLOAT, 0, 0, nullptr);
// draw
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// cleanup
glDisableVertexAttribArray(ATTRIB_VERTEX);
glDisableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(2, vbo);
// Frame drawing
output_width=720;
output_height=1080;
// Upsample small mask into output.
GlTexture output_texture = CreateDestinationTexture(
output_width, output_height,GpuBufferFormat::kBGRA32);
{
gpu_helper_.BindFramebuffer(output_texture); // GL_TEXTURE0
glActiveTexture(GL_TEXTURE1);
//small_mask_texture is the texture we need to render inside the rect.
glBindTexture(GL_TEXTURE_2D, small_mask_texture.id());
GlRender();
glBindTexture(GL_TEXTURE_2D, 0);
glUniform2f(glGetUniformLocation(upsample_program_, "rcenters"),normx_center,normy_center);
glUniform2f(glGetUniformLocation(upsample_program_, "rhw"),normalized_width,normalized_height);
glFlush();
}
Here is my shader code;
#if __VERSION__ < 130
#define in varying
#endif // __VERSION__ < 130
#ifdef GL_ES
#define fragColor gl_FragColor
precision highp float;
#else
#define lowp
#define mediump
#define highp
#define texture2D texture
out vec4 fragColor;
#endif // defined(GL_ES)
in vec2 sample_coordinate;
uniform sampler2D input_data;
uniform vec2 rcenters;
uniform vec2 rhw;
void main() {
vec4 pix = texture2D(input_data, sample_coordinate);
float xcenter = rcenters.s;
float ycenter = rcenters.t;
float rwidth = rhw.s;
float rheight = rhw.t;
if(rwidth != 0.0 && rheight != 0.0){
float cox = sample_coordinate.s;
float coy = sample_coordinate.t;
float xmin = xcenter-(rwidth/2.0);
float ymin = ycenter-(rheight/2.0);
float xmax = xcenter+(rwidth/2.0);
float ymax = ycenter+(rheight/2.0);
if((xmin<cox && xmax>cox) && (ymin<coy && ymax>coy)){
fragColor = pix;
}
else{
fragColor = vec4(0.0,0.0,0.0,1.0);
}
}
else{
fragColor = vec4(0.0,0.0,0.0,1.0);
}
}
With this solution I am getting the source texture enlarged to the size of the destination texture. I understand that I need to do something with the texture coordinates in the vertex stage, but I don't know what.
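One way to keep the source texture at its intended size is to draw the quad only over the target sub-rectangle and let a black clear fill the rest, instead of deciding per fragment. A minimal sketch on the C++ side, reusing the normalized center/size values (normx_center, normy_center, normalized_width, normalized_height) from the code above:
// Clear the destination to black first; the quad then covers only the rect.
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Map the normalized rect (center and size in [0, 1]) into NDC ([-1, 1]).
// A width of w in [0, 1] space corresponds to a half-extent of w in NDC.
float cx = 2.0f * normx_center - 1.0f;
float cy = 2.0f * normy_center - 1.0f;
float hw = normalized_width;
float hh = normalized_height;
const GLfloat rect_vertices[] = {
cx - hw, cy - hh, // bottom left
cx + hw, cy - hh, // bottom right
cx - hw, cy + hh, // top left
cx + hw, cy + hh, // top right
};
// Upload rect_vertices instead of square_vertices and keep texture_vertices
// at the full 0..1 range, so the source is sampled exactly once into the rect.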

OpenGL ES Depthbuffer not working

I hope someone can help me; I have been sitting on this problem for a long time now. I'm working on a 3D game with OpenGL ES. I have a map/world and some objects, and I want to add shadows with shadow mapping.
My problem is that the shadow map information does not match my light-space information during rendering.
I'm not sure whether the problem is in my FBO, so here it is:
- (void)createShadowMap:(GLboolean)c Depth:(GLboolean)d Stencil:(GLboolean)s{
//Get ID
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO);
//Texture
glGenTextures(1, &_colorBuffer);
glBindTexture(GL_TEXTURE_2D, _colorBuffer);
glTexImage2D ( GL_TEXTURE_2D, 0, GL_RGBA,_screenWidth, _screenHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
glTexParameterf ( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf ( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf ( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf ( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//SetupFBO
glGenFramebuffers(1, &_frameBuffer);
glGenRenderbuffers(1, &_depthBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _depthBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, _screenWidth, _screenHeight);
glBindFramebuffer(GL_FRAMEBUFFER, _frameBuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthBuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _colorBuffer, 0);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
exit(1);
}
glBindFramebuffer ( GL_FRAMEBUFFER, defaultFBO );
glBindTexture ( GL_TEXTURE_2D, 0 );
}
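As an aside, a common alternative on ES 2.0 is to render depth straight into a texture via the OES_depth_texture extension instead of encoding it into a color buffer; the second pass can then sample the depth directly. A sketch, assuming the extension is available; _shadowTex and _shadowFBO are illustrative names:
glGenTextures(1, &_shadowTex);
glBindTexture(GL_TEXTURE_2D, _shadowTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, _screenWidth, _screenHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glGenFramebuffers(1, &_shadowFBO);
glBindFramebuffer(GL_FRAMEBUFFER, _shadowFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, _shadowTex, 0);
// No color attachment is needed for a pure depth pass on most implementations.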
But I think the problem is in the calculation; I just can't find it. So here are the shaders for the depth map:
void main()
{
vTexCoord = projectionMatrix * viewMatrix * modelMatrix * position;
gl_Position = projectionMatrix * viewMatrix * modelMatrix * position;
}
Fragment:
void main()
{
highp float depth = gl_FragCoord.z;
highp float linearDepth = (2.0 * 0.1) / (60.0 + 1.0 - depth * (60.0 - 1.0));
lowp vec4 color = vec4(vec3(linearDepth), 1.0);
gl_FragColor = color;
}
And now the shader while rendering:
Vert:
vec4 lightSight = projectionMatrix * lightSpaceMatrix * modelMatrix * position;
projCoords = lightSight;
Frag:
highp vec3 depth = projCoords.xyz/projCoords.w;
depth = (depth +1.0)*0.5;
highp float currentDepth = (projCoords.xyz/projCoords.w).z;
highp float closestDepth = texture2D(shadowMap, depth).r;
lowp float shadow;
if (closestDepth > currentDepth) {
shadow = 0.0;
} else {
shadow = 0.5;
}
lowp vec4 color = vec4(vec3(shadow), 1.0);

OpenGL + CUDA interop - image not displaying in window

Background: I read an image from disk using OpenCV, passed it to the GPU using CUDA, and now I am trying to get OpenGL to render the image.
I am not using GLUT here; I create the window myself through the 32-bit Windows API and render the image inside it. I flipped the OpenCV image and got OpenGL to render it nicely when I simply passed flipped.data to the glTexImage2D() function. However, the same image is not rendered when I use CUDA + OpenGL.
My actual images are bigger than the current one. I am using an OpenGL pixel buffer object (PBO) and an OpenGL texture to render the image. Using the texture allows me to specify the part of the image I want to display. My grayscale image has dimensions 1024x256 (w x h) and 8-bit depth (unsigned char/GL_UNSIGNED_BYTE).
Question: I can't quite figure out what is going wrong in my code. I tried to carefully follow the CUDA C Programming Guide and register/map the CUDA resource with the PBO and the texture as well as with the actual input data. Since my input image data comes from OpenCV, I simply copied flipped's data into the device pointer dev_inp, which I (correctly?) mapped to the CUDA resource using cudaGraphicsResourceGetMappedPointer(). Yet the window does not display anything and remains black. There are no viewport changes, and the coordinates I specify between glBegin() and glEnd() are correct, as they properly map flipped's data to the texture.
Am I missing something else here? Am I mapping the CUDA resource incorrectly to the PBO or the device pointer?
OpenGL + CUDA interop portion: this portion is only the CUDA + OpenGL interoperation in my code. The function DrawOpenGLScene() is called from the WndProc() method.
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: 1-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}
Entire code:
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
void DrawOpenGLScene(void);
HGLRC SetUpOpenGLContext(HWND hWnd);
GLuint tex;
GLuint pbo;
struct cudaGraphicsResource *cuda_resource;
int WINAPI WinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance,
LPSTR lpszCmdLine, int nCmdShow)
{
static char szClassName[] = "Myclass";
static char szTitle[]="A Simple Win32 API OpenGL Program";
WNDCLASS wc;
MSG msg;
HWND hWnd;
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = (WNDPROC)WndProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = hInstance;
wc.hIcon = NULL;
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = (HBRUSH)GetStockObject (BLACK_BRUSH);
wc.lpszMenuName = NULL;
wc.lpszClassName = szClassName;
if (!RegisterClass (&wc))
return 0;
hWnd = CreateWindow(szClassName, szTitle,
WS_OVERLAPPEDWINDOW |
// NEED THESE for OpenGL calls to work!
WS_CLIPCHILDREN | WS_CLIPSIBLINGS,
0, 0, 1024, 256,
NULL, NULL, hInstance, NULL);
ShowWindow(hWnd, nCmdShow);
UpdateWindow( hWnd );
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage( &msg );
DispatchMessage( &msg );
}
return(msg.wParam);
}
LRESULT CALLBACK WndProc( HWND hWnd, UINT msg,
WPARAM wParam, LPARAM lParam )
{
HDC hDC;
static HGLRC hRC; // Note this is STATIC!
PAINTSTRUCT ps;
switch (msg)
{
case WM_CREATE:
// Select a pixel format and create a rendering context
hRC = SetUpOpenGLContext(hWnd);
break;
case WM_PAINT:
// Draw the scene
// Get a DC, make RC current & associate it with this DC
hDC = BeginPaint(hWnd, &ps);
wglMakeCurrent(hDC, hRC);
DrawOpenGLScene(); // Draw
// We're done with the RC, so deselect it
wglMakeCurrent(NULL, NULL);
EndPaint(hWnd, &ps);
break;
case WM_DESTROY:
//cudaGLUnregisterBufferObject(pbo);
cudaGraphicsUnregisterResource(cuda_resource);
// Clean up and terminate
wglDeleteContext(hRC);
PostQuitMessage(0);
break;
default:
return DefWindowProc(hWnd, msg, wParam, lParam);
}
return (0);
}
//*******************************************************
// SetUpOpenGL sets the pixel format and a rendering
// context then returns the RC
//*******************************************************
HGLRC SetUpOpenGLContext(HWND hWnd)
{
static PIXELFORMATDESCRIPTOR pfd = {
sizeof (PIXELFORMATDESCRIPTOR), // struct size
1, // Version number
PFD_DRAW_TO_WINDOW | // Flags, draw to a window,
PFD_SUPPORT_OPENGL, // use OpenGL
PFD_TYPE_RGBA, // RGBA pixel values
24, // 24-bit color
0, 0, 0, // RGB bits & shift sizes.
0, 0, 0, // Don't care about them
0, 0, // No alpha buffer info
0, 0, 0, 0, 0, // No accumulation buffer
32, // 32-bit depth buffer
0, // No stencil buffer
0, // No auxiliary buffers
PFD_MAIN_PLANE, // Layer type
0, // Reserved (must be 0)
0, // No layer mask
0, // No visible mask
0 // No damage mask
};
int nMyPixelFormatID;
HDC hDC;
HGLRC hRC;
hDC = GetDC(hWnd);
nMyPixelFormatID = ChoosePixelFormat(hDC, &pfd);
SetPixelFormat(hDC, nMyPixelFormatID, &pfd);
hRC = wglCreateContext(hDC);
ReleaseDC(hWnd, hDC);
return hRC;
}
//***********************************************************
// initCUDADevice uses CUDA commands to initiate the CUDA
// enabled graphics card. This is prior to resource mapping,
// and rendering.
//***********************************************************
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: 1-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}
In case someone else runs into the same problem, this thread can serve to help them.
I solved my problem by changing only a couple of calls in DrawOpenGLScene().
It turns out that cudaGraphicsResourceGetMappedPointer() returns a pointer into the memory backing the OpenGL PBO and places that pointer in dev_inp. The size of that mapping, size = sizeof(unsigned char) * flipped.rows * flipped.cols, was already established by the earlier calls to glBufferData() and cudaGraphicsGLRegisterBuffer().
Once this happens, the pointer to the memory I had previously allocated with cudaMalloc() is overwritten by the call to cudaGraphicsResourceGetMappedPointer(), so that allocation is simply leaked and never displayed. Removing the cudaMalloc() and cudaFree() calls allowed the program to run as originally intended.
In order to deallocate the memory, one should delete the PBO, as OpenGL is the "owner" of the memory; CUDA just shares access to the memory owned by OpenGL.
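Distilled, the intended lifecycle is (a sketch of just the relevant calls, using the names from the code below):
// Once: create and size the PBO, then register it with CUDA.
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, flipped.rows * flipped.cols, NULL, GL_STREAM_DRAW);
cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone);
// Per frame: map the resource, fetch the PBO-backed pointer (no cudaMalloc!),
// write into it, and unmap before OpenGL touches the buffer again.
cudaGraphicsMapResources(1, &cuda_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource);
cudaMemcpy(dev_inp, flipped.data, size, cudaMemcpyHostToDevice);
cudaGraphicsUnmapResources(1, &cuda_resource, 0);
// Finally: upload from the bound PBO into the texture (note the NULL offset).
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);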
The modified DrawOpenGLScene() routine is pasted below:
#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
// Clear Color and Depth Buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reset transformations
glLoadIdentity();
// ====================================================================================
// initiate GPU by setting it correctly
initCUDADevice();
// ====================================================================================
// read the image that needs to be textured
Mat image, flipped;
image = imread("K:/OCT experiment images/PA_175.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("OpenCV - image", image); // displays output
// ====================================================================================
// allocate the PBO, texture, and CUDA resource
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: 1-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// generate and bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// put flipped.data at the end for cpu rendering
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0 );
// put tex at the end for cpu rendering
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy OpenCV flipped image data into the device pointer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned char *dev_inp;
//gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
//
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
gpuErrchk( cudaGraphicsUnregisterResource(cuda_resource));
gpuErrchk( cudaThreadSynchronize());
//gpuErrchk(cudaFree(dev_inp));
// ====================================================================================
// map the texture coords to the vertex coords
glBegin(GL_QUADS);
// Front Face
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering
glDisable(GL_TEXTURE_2D);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glDeleteBuffers(1, &pbo);

No transparency with simple OpenGL ES2.0 stencils

I am attempting to make a stencil mask in OpenGL. I have been following the model from this source (http://open.gl/depthstencils and, more specifically, http://open.gl/content/code/c5_reflection.txt), and as far as I can tell, I have followed the example properly. My code draws one square stencil and then another square on top of it. I expected to see only the parts of the second, rotating green square that cover the same space as the first. What I actually see is the two overlapping squares, one rotating, with no transparency. One notable difference from the example is that I am not using a texture. Is that a problem? I figured this would be a simpler example.
I'm fairly new to ES 2.0, so if I'm doing something obviously stupid, please let me know.
Initialization:
GLuint attributes[] = { GLKVertexAttribPosition, GLKVertexAttribColor, GLKVertexAttribTexCoord0 };
const char *attributeNames[] = { "position", "color", "texcoord0" };
// These are all global GLuint variables
// vshSrc and fshSrc are const char* filenames (the files are found properly)
_myProgram = loadProgram(vshSrc, fshSrc, 3, attributes, attributeNames);
_myProgramUniformMVP = glGetUniformLocation(_myProgram, "modelViewProjectionMatrix");
_myProgramUniformTex = glGetUniformLocation(_myProgram, "tex");
_myProgramUniformOverrideColor = glGetUniformLocation(_myProgram, "overrideColor");
The draw loop:
glEnable(GL_DEPTH_TEST);
glUseProgram(_myProgram);
glDisable(GL_BLEND);
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
GLfloat gSquare[20] = { // not using the textures currently
// posX, posY, posZ, texX, texY,
-0.5f, -0.5f, 0, 0.0f, 0.0f,
0.5f, -0.5f, 0, 1.0f, 0.0f,
-0.5f, 0.5f, 0, 0.0f, 1.0f,
0.5f, 0.5f, 0, 1.0f, 1.0f
};
// Projection matrix
float aspect = fabsf(self.view.bounds.size.width / self.view.bounds.size.height);
GLKMatrix4 projectionMatrix = GLKMatrix4MakePerspective(GLKMathDegreesToRadians(65.0f), aspect, 0.1f, 100.0f);
// Put the squares where they can be seen
GLKMatrix4 baseModelViewMatrix = GLKMatrix4MakeTranslation(0.0f, 0.0f, -4.0f);
glEnable(GL_STENCIL_TEST);
// Build the stencil
glStencilFunc(GL_ALWAYS, 1, 0xFF); // Set any stencil to 1
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
glStencilMask(0xFF); // Write to stencil buffer
glDepthMask(GL_FALSE); // Don't write to depth buffer
glClear(GL_STENCIL_BUFFER_BIT); // Clear stencil buffer (0 by default)
GLKMatrix4 mvp = GLKMatrix4Multiply(projectionMatrix, baseModelViewMatrix);
// Draw a stationary red square for the stencil (though the color shouldn't matter)
glUniformMatrix4fv(_myProgramUniformMVP, 1, 0, mvp.m);
glUniform1i(_myProgramUniformTex, 0);
glUniform4f(_myProgramUniformOverrideColor, 1.0f, 1.0f, 1.0f,0.0f);
glVertexAttrib4f(GLKVertexAttribColor, 1, 0, 0, 1);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 20, gSquare);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// Prepare the mask
glStencilFunc(GL_EQUAL, 1, 0xFF); // Pass test if stencil value is 1
glStencilMask(0x00); // Don't write anything to stencil buffer
glDepthMask(GL_TRUE); // Write to depth buffer
glUniform4f(_myProgramUniformOverrideColor, 0.3f, 0.3f, 0.3f,1.0f);
// A slow rotating green square to be masked by the stencil
static float rotation = 0;
rotation += 0.01;
baseModelViewMatrix = GLKMatrix4Rotate(baseModelViewMatrix, rotation, 0, 0, 1);
mvp = GLKMatrix4Multiply(projectionMatrix, baseModelViewMatrix);
glUniformMatrix4fv(_myProgramUniformMVP, 1, 0, mvp.m);//The transformation matrix
glUniform1i(_myProgramUniformTex, 0); // The texture
glVertexAttrib4f(GLKVertexAttribColor, 0, 1, 0, 1);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 20, gSquare);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisable(GL_STENCIL_TEST);
EDIT: The following shader stuff is irrelevant to the problem I was having. The stenciling does not take place in the shader.
Vertex Shader:
attribute vec4 position;
attribute vec4 color;
attribute vec2 texcoord0;
varying lowp vec4 colorVarying;
varying lowp vec2 texcoord;
uniform mat4 modelViewProjectionMatrix;
uniform vec4 overrideColor;
void main()
{
colorVarying = overrideColor * color;
texcoord = texcoord0;
gl_Position = modelViewProjectionMatrix * position;
}
Fragment Shader:
varying lowp vec4 colorVarying;
varying lowp vec2 texcoord;
uniform sampler2D tex;
void main()
{
gl_FragColor = colorVarying * texture2D(tex, texcoord);
}
It was necessary to initialize a stencil buffer. Here is the code that fixed it.
glGenRenderbuffersOES(1, &depthStencilRenderbuffer);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, depthStencilRenderbuffer);
glRenderbufferStorageOES(GL_RENDERBUFFER_OES, GL_DEPTH24_STENCIL8_OES, framebufferWidth, framebufferHeight);
glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_DEPTH_ATTACHMENT_OES, GL_RENDERBUFFER_OES, depthStencilRenderbuffer);
glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_STENCIL_ATTACHMENT_OES, GL_RENDERBUFFER_OES, depthStencilRenderbuffer);
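For reference, on desktop OpenGL 3.x or ES 3.0 the same combined depth-stencil attachment can be created without the OES suffixes; a sketch, not part of the original fix:
glGenRenderbuffers(1, &depthStencilRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthStencilRenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, framebufferWidth, framebufferHeight);
// One call attaches both the depth and the stencil aspect of the renderbuffer:
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, depthStencilRenderbuffer);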

OpenGL 3.3 rendering nothing

I'm trying to create my own lib to simplify code, so I'm rewriting the tutorials found on the web using my lib, but I'm having some trouble and I don't know why it's rendering nothing.
So this is my main file:
#include "../../lib/OpenGLControl.h"
#include "../../lib/Object.h"
#include <iostream>
using namespace std;
using namespace sgl;
int main(){
OpenGLControl sglControl;
sglControl.initOpenGL("02 - My First Triangle",1024,768,3,3);
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
//triangle configuration
vector<glm::vec3> vertices;
vertices.push_back(glm::vec3(-1.0f,-1.0f,0.0f));
vertices.push_back(glm::vec3(1.0f,-1.0f,0.0f));
vertices.push_back(glm::vec3(0.0f,1.0f,0.0f));
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
Object triangle(vertices);
do{
glClear(GL_COLOR_BUFFER_BIT);
triangle.render(GL_TRIANGLES);
glfwSwapBuffers();
}
while( glfwGetKey( GLFW_KEY_ESC ) != GLFW_PRESS &&
glfwGetWindowParam( GLFW_OPENED ) );
glDeleteVertexArrays(1, &VertexArrayID);
glfwTerminate();
return 0;
}
And these are my Object class functions:
#include "../lib/Object.h"
sgl::Object::Object(){
this->hasColor = false;
}
sgl::Object::Object(std::vector<glm::vec3> vertices){
for(int i = 0; i < vertices.size(); i++)
this->vertices.push_back(vertices[i]);
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, this->vertices.size()*sizeof(glm::vec3),&vertices[0], GL_STATIC_DRAW);
}
sgl::Object::~Object(){
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDeleteBuffers(1,&(this->vertexBuffer));
glDeleteBuffers(1,&(this->colorBuffer));
}
void sgl::Object::render(GLenum mode){
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexBuffer);
glVertexAttribPointer(
0, // The attribute we want to configure
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
std::cout << vertices.size() << std::endl;
glDrawArrays(mode, 0, vertices.size());
glDisableVertexAttribArray(0);
}
void sgl::Object::setColor(std::vector<glm::vec3> color){
for(int i = 0; i < color.size(); i++)
this->color.push_back(color[i]);
glGenBuffers(1, &(this->colorBuffer));
glBindBuffer(GL_ARRAY_BUFFER, this->colorBuffer);
glBufferData(GL_ARRAY_BUFFER, color.size()*sizeof(glm::vec3),&color[0], GL_STATIC_DRAW);
this->hasColor = true;
}
void sgl::Object::setVertices(std::vector<glm::vec3> vertices){
for(int i = 0; i < vertices.size(); i++)
this->vertices.push_back(vertices[i]);
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, vertices.size()*sizeof(glm::vec3),&vertices[0], GL_STATIC_DRAW);
}
The tutorial that I am rewriting is this one:
/*
Copyright 2010 Etay Meiri
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Tutorial 03 - First triangle
*/
#include <stdio.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include "math_3d.h"
GLuint VBO;
static void RenderSceneCB()
{
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(0);
glutSwapBuffers();
}
static void InitializeGlutCallbacks()
{
glutDisplayFunc(RenderSceneCB);
}
static void CreateVertexBuffer()
{
Vector3f Vertices[3];
Vertices[0] = Vector3f(-1.0f, -1.0f, 0.0f);
Vertices[1] = Vector3f(1.0f, -1.0f, 0.0f);
Vertices[2] = Vector3f(0.0f, 1.0f, 0.0f);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGBA);
glutInitWindowSize(1024, 768);
glutInitWindowPosition(100, 100);
glutCreateWindow("Tutorial 03");
InitializeGlutCallbacks();
// Must be done after glut is initialized!
GLenum res = glewInit();
if (res != GLEW_OK) {
fprintf(stderr, "Error: '%s'\n", glewGetErrorString(res));
return 1;
}
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
CreateVertexBuffer();
glutMainLoop();
return 0;
}
If someone can find the error, please help me!
I found the problem: in the constructor, when I call glBufferData I was passing the wrong data. The right way to do it is this:
sgl::Object::Object(std::vector<glm::vec3> vertices){
for(int i = 0; i < vertices.size(); i++)
this->vertices.push_back(vertices[i]);
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, this->vertices.size()*sizeof(glm::vec3),&(this->vertices[0]), GL_STATIC_DRAW);
}
