Loading a BMP and Mapping to a Square with OpenGL C++ - texture-mapping

I am simply trying to load a 2D image with OpenGL and map it to a square. I have looked at many online forums, tried many different approaches, and tried loading several different files, but each attempt gives a different error. I would prefer not to use an external library.
The image loads incorrectly (the resulting screenshot and the image I am trying to load are not shown here). My code:
#include <SFML/Graphics.hpp>
#include <SFML/OpenGL.hpp>
#include <math.h>
#include <string>
#include <fstream>
#include <sstream>
#include <vector>
#include <iostream>
#include <stdint.h>
using namespace std;
#define size 200
#define other 100
int main()
{
GLuint _vertexBufferID;
GLfloat objects[] = {
other,other,0.0f,
size, other,0.0f,
size,size,0.0f,
other,size,0.0f
};
GLfloat texture[] = {
0.0f,0.0f,
1.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f
};
sf::Window window(sf::VideoMode(1000, 800, 32), "SFML OpenGL");//!< Create the main window
glEnable(GL_DEPTH_TEST);//!< Enable Z-buffer read and write
glDepthMask(GL_TRUE);
glClearColor(1.0f,1.0f,1.0f,1.0f);
glViewport(0.0f,0.0f,1000,800);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,1000,0,800);
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3,GL_FLOAT,0,objects);
glTexCoordPointer(2, GL_FLOAT, 0, texture);
unsigned char* datBuff[2] = {nullptr, nullptr}; // Header buffers
unsigned char* pixels = nullptr; // Pixels
BITMAPFILEHEADER* bmpHeader = nullptr; // Header
BITMAPINFOHEADER* bmpInfo = nullptr; // Info
// The file... We open it with its constructor
std::ifstream file("other.bmp", std::ios::binary);
// Allocate byte memory that will hold the two headers
datBuff[0] = new unsigned char[sizeof(BITMAPFILEHEADER)];
datBuff[1] = new unsigned char[sizeof(BITMAPINFOHEADER)];
file.read((char*)datBuff[0], sizeof(BITMAPFILEHEADER));
file.read((char*)datBuff[1], sizeof(BITMAPINFOHEADER));
// Construct the values from the buffers
bmpHeader = (BITMAPFILEHEADER*) datBuff[0];
bmpInfo = (BITMAPINFOHEADER*) datBuff[1];
// First allocate pixel memory
pixels = new unsigned char[bmpInfo->biSizeImage];
// Go to where image data starts, then read in image data
file.seekg(bmpHeader->bfOffBits);
file.read((char*)pixels, bmpInfo->biSizeImage);
unsigned char tmpRGB = 0; // Swap buffer
for (unsigned long i = 0; i < bmpInfo->biSizeImage; i += 3)
{
tmpRGB = pixels[i];
pixels[i] = pixels[i + 2];
pixels[i + 2] = tmpRGB;
}
GLuint textureBuf;
glGenTextures(1, &textureBuf); // Generate a texture
glBindTexture(GL_TEXTURE_2D, textureBuf); // Bind that texture temporarily
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bmpInfo->biWidth, bmpInfo->biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels);
while (window.isOpen())//!< Start game loop
{
sf::Event Event;//!< Process events
while (window.pollEvent(Event))
{ }
window.setActive();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);//!< Clear colour and depth buffer
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_QUADS,0,4);//sizeof(vertexData));
window.display();
}
return EXIT_SUCCESS;
}

To follow up on the guess in my comment, and assuming you can't just use an off-the-shelf BMP loader for some reason, try dropping your current attempt to read the full headers in favour of:
uint32_t data_offset;
file.seekg(10);
file.read((char *)&data_offset, 4);
uint32_t image_width, image_height;
file.seekg(18);
file.read((char *)&image_width, 4);
file.read((char *)&image_height, 4);
uint32_t image_size;
file.seekg(34);
file.read((char *)&image_size, 4);
pixels = new unsigned char[image_size]; // allocate now that the size is known
file.seekg(data_offset);
file.read((char*)pixels, image_size);
Coupled with the rest of your code this still assumes, amongst other sins:
the machine you're running on is little endian;
the BMP is an uncompressed 24-bit BGR image (per your byte swap later);
the info header is one that either is or is an extension of BITMAPINFOHEADER.
... but if it solves the immediate problem then that would confirm that the issue is merely a false assumption about how your compiler will lay out structs in memory.
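If you would rather keep reading the headers in one go, the usual (compiler-specific) workaround is to define your own packed header structs so their in-memory layout matches the on-disk layout. A minimal sketch, assuming a compiler that honours #pragma pack; the struct names here are stand-ins, not the Windows typedefs:
#include <cstdint>
// Packed stand-ins for the BMP headers. Declared without 1-byte packing, most
// compilers pad the 14-byte file header to 16 bytes, which shifts every field
// that is read after it.
#pragma pack(push, 1)
struct BmpFileHeader {
    uint16_t bfType;       // 'BM'
    uint32_t bfSize;
    uint16_t bfReserved1;
    uint16_t bfReserved2;
    uint32_t bfOffBits;    // offset to the pixel data
};
struct BmpInfoHeader {
    uint32_t biSize;
    int32_t  biWidth;
    int32_t  biHeight;
    uint16_t biPlanes;
    uint16_t biBitCount;
    uint32_t biCompression;
    uint32_t biSizeImage;
    int32_t  biXPelsPerMeter;
    int32_t  biYPelsPerMeter;
    uint32_t biClrUsed;
    uint32_t biClrImportant;
};
#pragma pack(pop)
static_assert(sizeof(BmpFileHeader) == 14, "file header must be 14 bytes");
static_assert(sizeof(BmpInfoHeader) == 40, "info header must be 40 bytes");
Reading each field at its known byte offset, as in the snippet above this one, sidesteps struct layout entirely and is the more portable of the two approaches.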

Related

Can opencv draw a float-coordinate rect in cv::Mat?

I am trying to use cv::rectangle() to draw a rect in a cv::Mat, but can I draw a rect whose four corner coordinates have float precision (like Qt does with QPainter)?
Try the Blend2D library; it is fast and integrates easily with OpenCV.
Simple example:
#include <blend2d.h>
#include "opencv2/opencv.hpp"
int main(int argc, char* argv[])
{
BLImage img(480, 480, BL_FORMAT_PRGB32);
BLContext ctx(img);
// Read an image from file.
cv::Mat I = cv::imread("F:/ImagesForTest/lena.jpg");
cv::cvtColor(I, I, cv::COLOR_RGB2RGBA);
BLImage texture;
//BLResult err = texture.readFromFile("texture.jpeg");
texture.create(512, 512, BL_FORMAT_XRGB32);
memcpy((uchar*)texture.impl->pixelData, (uchar*)I.data, 512 * 512 * 4);
// Create a pattern and use it to fill a rounded-rect.
BLPattern pattern(texture);
ctx.setFillStyle(pattern);
ctx.setCompOp(BL_COMP_OP_SRC_COPY);
ctx.fillAll();
// Coordinates can be specified now or changed later.
BLGradient linear(BLLinearGradientValues(0, 0, 0, 480));
// Color stops can be added in any order.
linear.addStop(0.0, BLRgba32(0xFFFFFFFF));
linear.addStop(0.5, BLRgba32(0xFF5FAFDF));
linear.addStop(1.0, BLRgba32(0xFF2F5FDF));
// `setFillStyle()` can be used for both colors and styles.
ctx.setFillStyle(linear);
ctx.setCompOp(BL_COMP_OP_MODULATE);
ctx.fillRoundRect(40.0, 40.0, 400.0, 400.0, 45.5);
ctx.setStrokeStyle(BLRgba32(0xFFFF0000));
ctx.setStrokeWidth(3);
ctx.strokeLine(0,0,480,480);
ctx.end();
//BLImageCodec codec;
//codec.findByName("BMP");
//img.writeToFile("bl-getting-started-2.bmp", codec);
cv::Mat cvImg(img.height(), img.width(), CV_8UC4, img.impl->pixelData);
cv::imshow("res", cvImg);
cv::waitKey(0);
return 0;
}

How to render a textured object to a framebuffer texture, acquire it with OpenCL, and convert it to OpenCV

So I'm trying to combine the usefulness of all 3 libraries, I load an object with a texture:
// Load the model of the store, create a program with the shaders
GLint store = OpGL::initModel(MESH_PATH);
GLuint storeProgram = OpGL::initProgram(VS_GLSL_PATH, FS_GLSL_PATH);
glUseProgram (storeProgram);
// Find the location in the shader, for the texture image
GLuint TEX_ID = glGetUniformLocation(storeProgram, "tex_glsl");
GLuint TEX = OpGL::loadTexture(TEXTURE_IMAGE_PATH, 25);
// Bind texture in Texture Unit 0
glBindTexture(GL_TEXTURE_2D, TEX);
// Set
glUniform1i(TEX_ID, 0); // use texture 0
set up a framebuffer with a texture in GL:
GLuint g_fb = 0; // frame buffer
glGenFramebuffers (1, &g_fb);
glBindFramebuffer(GL_FRAMEBUFFER, g_fb);
GLuint g_fb_tex = 0;
glGenTextures (1, &g_fb_tex);
glBindTexture (GL_TEXTURE_2D, g_fb_tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D ( GL_TEXTURE_2D,0, GL_RGBA,640,480, 0,GL_RGBA,GL_UNSIGNED_BYTE,NULL );
glFramebufferTexture2D (GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_fb_tex, 0);
GLuint g_db = 0; // depth buffer
glGenRenderbuffers(1, &g_db);
glBindRenderbuffer(GL_RENDERBUFFER, g_db);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 640, 480);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, g_db);
/* tell the framebuffer to expect a colour output attachment*/
GLenum draw_bufs[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers (1, draw_bufs);
create storage in CL:
cl_mem CL_image; //location for the gl rendering to reside in CL
CL_image = clCreateFromGLTexture2D(context, CL_MEM_READ_WRITE, GL_TEXTURE_2D, 0, g_fb_tex, &err);
create a UMat:
cv::UMat Umat;
re-bind the original texture:
glBindTexture(GL_TEXTURE_2D, TEX);
glBindFramebuffer(GL_FRAMEBUFFER, g_fb); // just as a precaution
render:
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram (storeProgram);
glBindVertexArray (store);
glDrawArrays (GL_TRIANGLES, 0, 79227);
glfwPollEvents ();
glFlush();
// pass the images to CL
err = clEnqueueAcquireGLObjects(queue, 1, &CL_image, 0, NULL, NULL);
cl_event wait;
cv::ocl::convertFromImage(CL_image, Umat);
cv::flip(Umat,Umat,0);
cv::imshow("CVforCLimage", Umat);
cv::waitKey(1);
err = clEnqueueReleaseGLObjects(queue, 1, &out_toCL_image, 0, 0, 0);
err = clFinish(queue);
Everything renders fine if I just send it to the screen (glBindFramebuffer(GL_FRAMEBUFFER, 0);)... but I get a blue object instead of an orange object when I render it to CV. Almost as though the original texture I loaded is not making it to the rendering.
Thanks for the help!

How to process a JPEG binary data in OpenCV?

I am trying to process JPEG binary data in OpenCV, but I get Segmentation fault (core dumped).
I read the JPEG file with fread and stored it in a buffer.
After reading, I copied the buffer data into a cv::Mat variable.
When I try a grayscale conversion on the copied data using the cvtColor OpenCV function, I get the segmentation fault.
int main( int argc, char** argv )
{
Mat threshold_output;
Mat gray_image;
unsigned char *pre_image;
FILE *read_image;
FILE *write_image;
int filesize;
size_t data, write;
read_image = fopen(argv[1] , "rb"); //Read Jpeg as Binary
write_image = fopen("output11.jpg", "wb"); //Write JPEG
if(read_image == NULL)
{
printf("Image Not Found\r\n");
}
fseek(read_image, 0, SEEK_END);
int fileLen = ftell(read_image);
fseek(read_image, 0, SEEK_SET);
pre_image = (unsigned char *)malloc(fileLen);
data = fread(pre_image, 1, fileLen, read_image);
write = fwrite(pre_image, 1, fileLen, write_image);
// Printed and verify the values
printf("File Size %d\r\n", fileLen);
printf("Read bytes %zu\r\n", data);
printf("Write bytes %zu\r\n", data);
fclose(read_image);
fclose(write_image);
/* Copy the Jpeg Binary buffer to a MAt Variable*/
cv::Mat image(Size(640, 480), CV_8UC3, pre_image); //Seg Fault comes here
/* Convert Grayscale */
cvtColor( image, gray_image, CV_BGR2GRAY);
/* Threshold conversion */
threshold( gray_image, threshold_output, 80, 255, THRESH_BINARY );
namedWindow( "Thresholded", CV_WINDOW_AUTOSIZE );
imshow( "Thresholded", image );
waitKey(0);
return 0;
}
I have attached the code for reference. I have verified that both fread and fwrite work properly.
The error only occurs when I call cvtColor.
As #Micka already pointed out, you should use cv::imdecode.
You can use it with your FILE*. You may prefer fstreams since you're using C++, or you can rely directly on OpenCV's own file reading.
The code below shows these options for reading files. Code for writing is similar (a short imencode sketch follows the example).
Remember that if you want to write the binary stream, you should use imencode.
#include <opencv2\opencv.hpp>
#include <fstream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main()
{
////////////////////////////////
// Method 1: using FILE*
////////////////////////////////
FILE* read_image = fopen("path_to_image", "rb");
if (read_image == NULL)
{
printf("Image Not Found\n");
}
fseek(read_image, 0, SEEK_END);
int fileLen = ftell(read_image);
fseek(read_image, 0, SEEK_SET);
unsigned char* pre_image = (unsigned char *)malloc(fileLen);
size_t data = fread(pre_image, 1, fileLen, read_image);
// Printed and verify the values
printf("File Size %d\n", fileLen);
printf("Read bytes %d\n", data);
fclose(read_image);
vector<unsigned char> buffer(pre_image, pre_image + data);
Mat img = imdecode(buffer, IMREAD_ANYCOLOR);
////////////////////////////////
//// Method 2: using fstreams
////////////////////////////////
//ifstream ifs("path_to_image", iostream::binary);
//filebuf* pbuf = ifs.rdbuf();
//size_t size = pbuf->pubseekoff(0, ifs.end, ifs.in);
//pbuf->pubseekpos(0, ifs.in);
//vector<char> buffer(size);
//pbuf->sgetn(buffer.data(), size);
//ifs.close();
//Mat img = imdecode(buffer, IMREAD_ANYCOLOR);
////////////////////////////////
//// Method 3: using imread
////////////////////////////////
//Mat img = imread("path_to_image", IMREAD_ANYCOLOR);
// Work with img as you want
imshow("img", img);
waitKey();
return 0;
}
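For completeness, writing the encoded stream back out is the mirror image using imencode. A minimal sketch, reusing the headers and namespaces from the example above; the output filename is just an example:
// Encode a cv::Mat into an in-memory JPEG and write the raw bytes to disk.
vector<unsigned char> out_buf;
if (imencode(".jpg", img, out_buf))
{
    ofstream ofs("output11.jpg", ios::binary);
    ofs.write((const char*)out_buf.data(), out_buf.size());
}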
OpenCV uses channel layouts like BGR and can't perform computer vision operations on ENCODED images, since encoded images don't consist of pixel data but of compressed data that has to be transformed into pixels first. OpenCV assumes images are already decoded so that it can work on pixel data.
BUT: you can take a binary image buffer (like your pre_image) and let OpenCV DECODE it.
Use cv::imdecode to do that, and afterwards you'll get a valid cv::Mat image. http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#imdecode

fragment shader: texture2D() and texelFetch()

My program displays an image captured from a webcam with OpenCV and renders it with OpenGL.
The program below generally works, but I have some questions, listed after the code.
main:
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW\glfw3.h>
#include <iostream>
#include <fstream> //std::ifstream
#include <algorithm> //std::max()
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
cv::VideoCapture capture0;
cv::VideoCapture capture1;
void captureFromWebcam(cv::Mat &frame, cv::VideoCapture &capture)
{
capture.read(frame);
}
bool initializeCapturing()
{
capture0.open(0);
capture1.open(1);
if(!capture0.isOpened() | !capture1.isOpened())
{
std::cout << "Ein oder mehrere VideoCaptures konnten nicht geöffnet werden" << std::endl;
if(!capture0.isOpened())
capture0.release();
if(!capture1.isOpened())
capture1.release();
return false;
}
return true;
}
void releaseCapturing()
{
capture0.release();
capture1.release();
}
GLuint LoadShaders(const char * vertex_file_path,const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if(VertexShaderStream.is_open())
{
std::string Line = "";
while(getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(std::getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &VertexShaderErrorMessage[0]);
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &FragmentShaderErrorMessage[0]);
// Link the program
fprintf(stdout, "Linking program\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage( std::max(InfoLogLength, int(1)) );
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
fprintf(stdout, "%s\n", &ProgramErrorMessage[0]);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
int main ()
{
int w = 640,h=480;
glfwInit();
//configure glfw
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(w, h, "OpenGL", NULL, nullptr); // windowed
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
glewInit();
initializeCapturing();
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// An array of vertices (a vertex is a point in three-dimensional space); two triangles covering the viewport
static const GLfloat g_vertex_buffer_data[] = {
//x,y,z
-1.0f, -1.0f, 0.0f, //bottom left
1.0f, 1.0f, 0.0f, //top right
-1.0f, 1.0f, 0.0f, //top left
-1.0f, -1.0f, 0.0f, //bottom left
1.0f, 1.0f, 0.0f, //top right
1.0f,-1.0f,0.0f //bottom right
};
static const GLfloat vertex_buffer_coordinates[] ={
0.0f,0.0f,
1.0f,1.0f,
0.0f,1.0f,
0.0f,0.0f,
1.0f,1.0f,
1.0f,0.0f,
};
GLuint coordinateBuffer;
glGenBuffers(1,&coordinateBuffer);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_buffer_coordinates), vertex_buffer_coordinates, GL_STATIC_DRAW);
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
GLuint shader_programm = LoadShaders("vertex.shader","fragment.shader");
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
//what happens when the texture coordinates are outside the [0,1] range?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
//what happens when the texture is stretched or shrunk?
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
cv::Mat frame;
captureFromWebcam(frame,capture0);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,frame.size().width,frame.size().height,0,GL_RGB,GL_UNSIGNED_BYTE,frame.data);
glUniform1i(glGetUniformLocation(shader_programm, "myTextureSampler"), 0);
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer : texture coordinates
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
const GLfloat color[] = {0.0f,0.2f,0.0f,1.0f};
glClearBufferfv(GL_COLOR,0,color);
glUseProgram(shader_programm);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 2*3); // Starting from vertex 0; 6 vertices total -> 2 triangles
//glDrawArrays(GL_POINTS,0,1);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glfwSwapBuffers(window);
}
glDeleteVertexArrays(1,&VertexArrayID);
glDeleteProgram(shader_programm);
glDeleteVertexArrays(1,&VertexArrayID);
releaseCapturing();
glfwTerminate();
return 1;
}
vertex shader:
#version 330 core
layout (location = 0) in vec3 vertexPosition_modelspace; //input from the vertex buffer
layout (location = 1) in vec2 UVcoord;
out vec2 UV;
void main(void)
{
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0; //zoom factor
UV = UVcoord;
}
Fragment shader:
#version 330 core
in vec2 UV;
out vec4 color;
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
void main(void)
{
//color = texture2D(myTextureSampler,UV);
color = texelFetch(myTextureSampler,ivec2(gl_FragCoord.xy),0);
}
The commented-out line in the fragment shader using texture2D() does not work; it produces the garbled output shown in the linked screenshot. What is wrong?
What are the differences between texture2D() and texelFetch(), and which is best practice?
The image shown with texelFetch is bluish. Any idea why that happens? (The loaded cv::Mat has no tint.)
GLSL's texture (texture2D in older GLSL versions) addresses a texture using normalized coordinates, i.e. values in the range [0, 1], and performs filtering. texelFetch addresses a texel by absolute integer index within a specific mipmap level and does not filter.
Judging by your screenshot, the texture coordinates you pass to texture are wrong or wrongly processed; the texelFetch path does not use the explicitly specified texture coordinates at all, it uses the viewport pixel coordinate, which is why it still looks right.
Looking at your glVertexAttribPointer call for the texture coordinates, you tell OpenGL that there are 3 elements per texture coordinate, while the array has only 2. That is most likely your problem.
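A corrected version of that attribute setup would look like the sketch below (same buffer and attribute index as in the code above):
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, coordinateBuffer);
glVertexAttribPointer(
    1,        // attribute 1, matches "layout (location = 1) in vec2 UVcoord"
    2,        // 2 floats per texture coordinate (u, v), not 3
    GL_FLOAT, // type
    GL_FALSE, // not normalized
    0,        // tightly packed
    (void*)0  // offset 0 into the bound buffer
);
With the component count fixed, the texture()/texture2D() path should receive the intended UVs.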

OpenGL + CUDA interop - image not displaying in window

Background: I read an image from disk using OpenCV, passed it to the GPU using CUDA, and now, I am trying to get OpenGL to render the image.
I am not using GLUT here; I create the window directly through the 32-bit Windows API and render the image inside it. I flipped the OpenCV image and got OpenGL to render it nicely when I simply passed flipped.data to the glTexImage2D() function. However, the same image is not rendered when I use CUDA + OpenGL.
My actual images are bigger than the current one. I am using the OpenGL pixel buffer object, and the OpenGL texture to render the image. Utilizing the texture allows me to specify the part of the image I want to display. My grayscale image has dimensions w1024xh256, and it has an 8-bit depth (unsigned char/GL_UNSIGNED_BYTE).
Question: I can't quite figure out what is going wrong in my code. I tried to carefully follow the CUDA C programming guide, and register/map the CUDA resource with the PBO and the texture as well as with the actual input data. Since my input image data comes from OpenCV, I simply copied flipped's data into the device pointer dev_inp. I (correctly?) mapped the dev_inp to the CUDA resource using cudaGraphicsResourceGetMappedPointer() as well. Yet, the window does not display anything, and remains black. There are no viewport changes, and the coordinates that I specify at glBegin().. glEnd() are correct as they properly map flipped's data to the texture.
Am I missing something else here? Am I mapping the CUDA resource incorrectly to the PBO or the device pointer?
OpenGL + CUDA interop portion: This portion covers only the CUDA + OpenGL interoperation in my code. The function DrawOpenGLScene() is called from the WndProc() method.
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image (rows * cols bytes)
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}
Entire code:
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
void DrawOpenGLScene(void);
HGLRC SetUpOpenGLContext(HWND hWnd);
GLuint tex;
GLuint pbo;
struct cudaGraphicsResource *cuda_resource;
int WINAPI WinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance,
LPSTR lpszCmdLine, int nCmdShow)
{
static char szClassName[] = "Myclass";
static char szTitle[]="A Simple Win32 API OpenGL Program";
WNDCLASS wc;
MSG msg;
HWND hWnd;
wc.style = CS_HREDRAW | CS_VREDRAW;
wc.lpfnWndProc = (WNDPROC)WndProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.hInstance = hInstance;
wc.hIcon = NULL;
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.hbrBackground = (HBRUSH)GetStockObject (BLACK_BRUSH);
wc.lpszMenuName = NULL;
wc.lpszClassName = szClassName;
if (!RegisterClass (&wc))
return 0;
hWnd = CreateWindow(szClassName, szTitle,
WS_OVERLAPPEDWINDOW |
// NEED THESE for OpenGL calls to work!
WS_CLIPCHILDREN | WS_CLIPSIBLINGS,
0, 0, 1024, 256,
NULL, NULL, hInstance, NULL);
ShowWindow(hWnd, nCmdShow);
UpdateWindow( hWnd );
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage( &msg );
DispatchMessage( &msg );
}
return(msg.wParam);
}
LRESULT CALLBACK WndProc( HWND hWnd, UINT msg,
WPARAM wParam, LPARAM lParam )
{
HDC hDC;
static HGLRC hRC; // Note this is STATIC!
PAINTSTRUCT ps;
switch (msg)
{
case WM_CREATE:
// Select a pixel format and create a rendering context
hRC = SetUpOpenGLContext(hWnd);
break;
case WM_PAINT:
// Draw the scene
// Get a DC, make RC current & associate it with this DC
hDC = BeginPaint(hWnd, &ps);
wglMakeCurrent(hDC, hRC);
DrawOpenGLScene(); // Draw
// We're done with the RC, so deselect it
wglMakeCurrent(NULL, NULL);
EndPaint(hWnd, &ps);
break;
case WM_DESTROY:
//cudaGLUnregisterBufferObject(pbo);
cudaGraphicsUnregisterResource(cuda_resource);
// Clean up and terminate
wglDeleteContext(hRC);
PostQuitMessage(0);
break;
default:
return DefWindowProc(hWnd, msg, wParam, lParam);
}
return (0);
}
//*******************************************************
// SetUpOpenGL sets the pixel format and a rendering
// context then returns the RC
//*******************************************************
HGLRC SetUpOpenGLContext(HWND hWnd)
{
static PIXELFORMATDESCRIPTOR pfd = {
sizeof (PIXELFORMATDESCRIPTOR), // struct size
1, // Version number
PFD_DRAW_TO_WINDOW | // Flags, draw to a window,
PFD_SUPPORT_OPENGL, // use OpenGL
PFD_TYPE_RGBA, // RGBA pixel values
24, // 24-bit color
0, 0, 0, // RGB bits & shift sizes.
0, 0, 0, // Don't care about them
0, 0, // No alpha buffer info
0, 0, 0, 0, 0, // No accumulation buffer
32, // 32-bit depth buffer
0, // No stencil buffer
0, // No auxiliary buffers
PFD_MAIN_PLANE, // Layer type
0, // Reserved (must be 0)
0, // No layer mask
0, // No visible mask
0 // No damage mask
};
int nMyPixelFormatID;
HDC hDC;
HGLRC hRC;
hDC = GetDC(hWnd);
nMyPixelFormatID = ChoosePixelFormat(hDC, &pfd);
SetPixelFormat(hDC, nMyPixelFormatID, &pfd);
hRC = wglCreateContext(hDC);
ReleaseDC(hWnd, hDC);
return hRC;
}
//***********************************************************
// initCUDADevice uses CUDA commands to initiate the CUDA
// enabled graphics card. This is prior to resource mapping,
// and rendering.
//***********************************************************
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
initCUDADevice();
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_160.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("flip", image); // displays output
//cout << "depth: " << flipped.depth() << endl;
// ===================================================================================
// opengl setup
// first, the context was created
// now, clear the window with the rendering context
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image (rows * cols bytes)
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_DYNAMIC_COPY);
//gpuErrchk(cudaGLRegisterBufferObject( pbo ));
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// put flipped.data at the end, and it'll work for normal texturing
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// put tex at the end, and it'll work for normal texturing
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy data from openCV
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
//cudaGLMapBufferObject((void**)dev_inp, pbo);
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size; // = sizeof(unsigned char)*flipped.rows*flipped.cols;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
//cudaGLUnmapBufferObject(pbo);
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering to happen
//glBindTexture(GL_TEXTURE_2D, 0);
}
In case someone else runs into the same problem, this thread can serve to help them.
I solved my problem by changing only a couple of calls in DrawOpenGLScene().
It turns out that cudaGraphicsResourceGetMappedPointer() returns a pointer derived from the OpenGL PBO and places that pointer in dev_inp. The mapping's size (size = sizeof(unsigned char) * flipped.rows * flipped.cols) was already established by the earlier calls to glBufferData() and cudaGraphicsGLRegisterBuffer(), so no separate allocation is needed.
Once that happens, the pointer I had previously obtained from cudaMalloc() is simply overwritten by the call to cudaGraphicsResourceGetMappedPointer() that places the mapped pointer in dev_inp (and the cudaMalloc'd block just leaks). Removing the cudaMalloc() and cudaFree() calls allowed the program to run as originally intended.
To deallocate the memory, one should delete the PBO, since OpenGL "owns" the memory and CUDA only shares access to it.
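Condensed, the corrected upload path looks like this (a minimal sketch using the same names as the full routine below; error checking via gpuErrchk omitted):
// The PBO's storage was already allocated by glBufferData and shared with CUDA
// via cudaGraphicsGLRegisterBuffer, so no cudaMalloc/cudaFree is needed here.
unsigned char *dev_inp = nullptr;
size_t size = 0;
cudaGraphicsMapResources(1, &cuda_resource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource); // dev_inp now points into the PBO
cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char) * flipped.rows * flipped.cols, cudaMemcpyHostToDevice);
cudaGraphicsUnmapResources(1, &cuda_resource, 0);
// With the PBO bound as GL_PIXEL_UNPACK_BUFFER, glTexSubImage2D then reads from it.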
The modified DrawOpenGLScene() routine is pasted below:
#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
//********************************************************
// DrawOpenGLScene uses OpenGL commands to draw the scene
// This is where we put the OpenGL drawing commands
//********************************************************
void DrawOpenGLScene()
{
// Clear Color and Depth Buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reset transformations
glLoadIdentity();
// ====================================================================================
// initiate GPU by setting it correctly
initCUDADevice();
// ====================================================================================
// read the image that needs to be textured
Mat image, flipped;
image = imread("K:/OCT experiment images/PA_175.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("OpenCV - image", image); // displays output
// ====================================================================================
// allocate the PBO, texture, and CUDA resource
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer: single-channel 8-bit image (rows * cols bytes)
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// generate and bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// put flipped.data at the end for cpu rendering
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0 );
// put tex at the end for cpu rendering
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy OpenCV flipped image data into the device pointer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned char *dev_inp;
//gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
//
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
gpuErrchk( cudaGraphicsUnregisterResource(cuda_resource));
gpuErrchk( cudaThreadSynchronize());
//gpuErrchk(cudaFree(dev_inp));
// ====================================================================================
// map the texture coords to the vertex coords
glBegin(GL_QUADS);
// Front Face
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering
glDisable(GL_TEXTURE_2D);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glDeleteBuffers(1, &pbo);
}
