GLSL strange IF behaviour - ios

I am writing an iPad app, running on an iPad with a Retina display, using OpenGL ES 3.0.
I am trying to use transform feedback for the first time, and the vertex shader is acting really strangely: the boolean expression inside the if statement seems to always evaluate to true. For example, this shader:
#version 300 es

in ivec2 coords;
out highp uint vertexID;
uniform sampler2D depthTexture;
uniform int yResolution;

void main () {
    ivec2 textCoords = coords;
    textCoords.y = yResolution - 1 - coords.y;
    bool val = true;
    bool val2 = false;
    if (val == val2) {
        vertexID = uint(6);
        return;
    }
    vertexID = uint(4);
    return;
}
When I map the VBO and check the values I get 6!
I'll post the drawing and mapping code below:
glUseProgram(_ValidInputPixelsProg);
uniforms[UNIFORM_VALID_INPUT_PIXEL_DEPTH_TEXTURE] = glGetUniformLocation(_ValidInputPixelsProg, "depthTexture");
uniforms[UNIFORM_VALID_INPUT_PIXEL_Y_RESOLUTION] = glGetUniformLocation(_ValidInputPixelsProg, "yResolution");
glUniform1i(uniforms[UNIFORM_VALID_INPUT_PIXEL_DEPTH_TEXTURE], 10);
glUniform1i(uniforms[UNIFORM_VALID_INPUT_PIXEL_Y_RESOLUTION], RESOLUTION_Y);
glEnable(GL_RASTERIZER_DISCARD);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, inputTFB);
glBindVertexArray(validPixelsVA);
glBindBuffer(GL_ARRAY_BUFFER, _inputTFBBuffer);
glBeginTransformFeedback(GL_POINTS);
glDrawArrays(GL_POINTS, 0, RESOLUTION_INNER_XY);
glEndTransformFeedback();
glBindVertexArray(0);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
glDisable(GL_RASTERIZER_DISCARD);
glFlush();
glBindBuffer(GL_ARRAY_BUFFER, indexValidInputPixels);
GLuint* ptr = (GLuint*)(glMapBufferRange(GL_ARRAY_BUFFER, 0, RESOLUTION_INNER_XY * sizeof(GLuint), GL_MAP_READ_BIT));
ptr += 320 + 120;
NSLog(@"\nshader value = %u \n", *ptr);
glUnmapBuffer(GL_ARRAY_BUFFER);
Anybody know what I am doing wrong?

Have the if statement compare against a literal instead of another variable:
if (val == false)
{
    vertexID = uint(6);
    return;
}
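Beyond the comparison itself, unexpected values coming back from transform feedback are often a setup problem rather than a shader problem: the captured varyings have to be registered against the program before it is linked, otherwise nothing is recorded and the mapped buffer just shows stale data (or glBeginTransformFeedback fails with GL_INVALID_OPERATION). The posted code doesn't show program creation, so this is only a sketch in C of that step, assuming a program object named prog built from the shader above:

/* Register "vertexID" for capture *before* glLinkProgram. If this step
 * is missing or happens after linking, the draw records nothing and the
 * mapped buffer keeps whatever was in it before. */
const GLchar *varyings[] = { "vertexID" };
glTransformFeedbackVaryings(prog, 1, varyings, GL_INTERLEAVED_ATTRIBS);
glLinkProgram(prog);

Calling glGetError() around glBeginTransformFeedback/glDrawArrays is a quick way to tell whether the draw was actually recorded or silently rejected.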

Related

OpenGL in Dart not showing triangle

I've written a simple "hello world triangle" in OpenGL/C++, and it works perfectly. Then, once I was sure that the code worked, I decided to switch to Dart and convert all my code.
These are the plugins that I've used: OpenGL - GLFW (mingw x64 dll)
And this is the OpenGL code without all the GLFW abstraction:
int shader;

@override
void onCreate() {
  List<double> pos = [
    -0.5, -0.5,
     0.0,  0.5,
     0.5, -0.5,
  ];
  Pointer<Float> pPos = allocate<Float>(count: pos.length);
  for(int i = 0; i < pos.length; i++)
    pPos.elementAt(i).value = pos[i];

  Pointer<Uint32> buffer = allocate();
  glGenBuffers(1, buffer);
  glBindBuffer(GL_ARRAY_BUFFER, buffer.value);
  glBufferData(GL_ARRAY_BUFFER, 6 * sizeOf<Float>(), pPos, GL_STATIC_DRAW);

  glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeOf<Float>(), 0);
  glEnableVertexAttribArray(0);

  String vs =
  """
  #version 330 core
  layout(location = 0) in vec4 position;
  void main()
  {
    gl_Position = position;
  }
  """;

  String fs =
  """
  #version 330 core
  layout(location = 0) out vec4 color;
  void main()
  {
    color = vec4(1.0, 0.0, 0.0, 1.0);
  }
  """;

  shader = _createShader(vs, fs);
}

@override
void onUpdate() {
  glDrawArrays(GL_TRIANGLES, 0, 3);
}
These are the functions to compile the shaders:
int _compileShader(String source, int type) {
  int id = glCreateShader(type);
  Pointer<Pointer<Utf8>> ptr = allocate<Pointer<Utf8>>();
  ptr.elementAt(0).value = NativeString.fromString(source).cast();
  glShaderSource(id, 1, ptr, nullptr);
  glCompileShader(id);

  Pointer<Int32> result = allocate<Int32>();
  glGetShaderiv(id, GL_COMPILE_STATUS, result);
  if(result.value == GL_FALSE) {
    Pointer<Int32> length = allocate<Int32>();
    glGetShaderiv(id, GL_INFO_LOG_LENGTH, length);
    Pointer<Utf8> message = allocate<Utf8>(count: length.value);
    glGetShaderInfoLog(id, length.value, length, message);
    glDeleteShader(id);
    print("Failed to compile ${type == GL_VERTEX_SHADER ? "vertex" : "fragment"} shader");
    print(message.ref);
    return 0;
  }
  return id;
}

int _createShader(String vertexShader, String fragmentShader) {
  int program = glCreateProgram();
  int vs = _compileShader(vertexShader, GL_VERTEX_SHADER);
  int fs = _compileShader(fragmentShader, GL_FRAGMENT_SHADER);
  glAttachShader(program, vs);
  glAttachShader(program, fs);
  glLinkProgram(program);
  glValidateProgram(program);
  glDeleteShader(vs);
  glDeleteShader(fs);
  return program;
}
Since this exact code works perfectly in C++, the only thing I can think of is that I've messed up some pointer with ffi. Can you find any mistake I don't see?
EDIT:
One could think that OpenGL cannot interact with GLFW, but glClear does work.
This is the GitHub repository, if you need to investigate more code.
I've been doing something very similar and have the same problem. I thought I'd tracked it down to the call to glDrawArrays, which crashed with a blue screen of death on Windows and a "thread stuck in device driver" error message. However, with further investigation I believe it is caused by the attribute array:
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeOf<Float>(), 0);
glEnableVertexAttribArray(0);
Commenting out those lines lets it run without crashing but no triangle appears. Uncommenting results in the BSOD.
I also found an interesting post from 2013 which appears to be relevant: Link
That's as far as I know at the moment.
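For reference, the C prototype the Dart binding has to match is shown below. The last parameter is a byte offset smuggled through a pointer type, so if the FFI signature declares it as a pointer and the call site passes a plain integer 0 (as in the code above), the marshalling can go wrong in exactly the crash-prone way described here. This is a sketch of the equivalent C call for this layout, not the Dart fix:

/* C prototype: the final argument is an offset passed as a pointer. */
void glVertexAttribPointer(GLuint index, GLint size, GLenum type,
                           GLboolean normalized, GLsizei stride,
                           const void *pointer);

/* Equivalent C call for a tightly packed vec2 float attribute: */
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE,
                      2 * sizeof(GLfloat), (const void *)0);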

get current pixel position on webGL2 fragment shader

I created a simple WebGL script that applies a pixel color depending on the (x, y) pixel position.
What I get:
Here's what I did:
#ifdef GL_ES
precision mediump float;
#endif

uniform float width;
uniform float height;
uniform float time;

void main() {
    vec2 u_resolution = vec2(width, height);
    vec2 st = gl_FragCoord.xy / u_resolution;
    gl_FragColor = vec4(st.x, st.y, 0.5, 1.0);
}
Codepen: Hello WebGL
I'm trying to convert it to WebGL2, but I don't know how to get the current pixel position.
Here's what I tried:
#version 300 es
#ifdef GL_ES
precision mediump float;
#endif

uniform float width;
uniform float height;
uniform float time;
out vec4 color;

void main() {
    vec2 u_resolution = vec2(width, height);
    vec2 st = color.xy / u_resolution;
    color = vec4(st.x, st.y, 0.5, 1.0);
}
Codepen: Hello WebGL2
How do I get the current pixel position in WebGL2?
gl_FragCoord is still the correct way in WebGL2
var canvas = document.body.appendChild(document.createElement("canvas"));
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
var gl = canvas.getContext("webgl2");
//************** Shader sources **************
var vertexSource = `
#version 300 es
in vec2 position;
void main() {
  gl_Position = vec4(position, 0.0, 1.0);
}
`;
var fragmentSource = `
#version 300 es
#ifdef GL_ES
precision mediump float;
#endif
uniform float width;
uniform float height;
uniform float time;
out vec4 color;
void main() {
  vec2 u_resolution = vec2(width, height);
  vec2 st = gl_FragCoord.xy / u_resolution;
  color = vec4(st.x, st.y, 0.5, 1.0);
}`;

window.addEventListener("resize", onWindowResize, false);
function onWindowResize() {
  canvas.width = window.innerWidth;
  canvas.height = window.innerHeight;
  gl.viewport(0, 0, canvas.width, canvas.height);
  gl.uniform1f(widthHandle, window.innerWidth);
  gl.uniform1f(heightHandle, window.innerHeight);
}

//Compile shader and combine with source
function compileShader(shaderSource, shaderType) {
  var shader = gl.createShader(shaderType);
  gl.shaderSource(shader, shaderSource);
  gl.compileShader(shader);
  if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
    throw "Shader compile failed with: " + gl.getShaderInfoLog(shader);
  }
  return shader;
}

//From https://codepen.io/jlfwong/pen/GqmroZ
//Utility to complain loudly if we fail to find the attribute/uniform
function getAttribLocation(program, name) {
  var attributeLocation = gl.getAttribLocation(program, name);
  if (attributeLocation === -1) {
    throw "Cannot find attribute " + name + ".";
  }
  return attributeLocation;
}
function getUniformLocation(program, name) {
  var attributeLocation = gl.getUniformLocation(program, name);
  if (attributeLocation === -1) {
    throw "Cannot find uniform " + name + ".";
  }
  return attributeLocation;
}
//************** Create shaders **************
//Create vertex and fragment shaders
var vertexShader = compileShader(vertexSource.trim(), gl.VERTEX_SHADER);
var fragmentShader = compileShader(fragmentSource.trim(), gl.FRAGMENT_SHADER);
//Create shader programs
var program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
gl.useProgram(program);
//Set up rectangle covering entire canvas
var vertexData = new Float32Array([
  -1.0,  1.0, // top left
  -1.0, -1.0, // bottom left
   1.0,  1.0, // top right
   1.0, -1.0  // bottom right
]);
//Create vertex buffer
var vertexDataBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexDataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertexData, gl.STATIC_DRAW);
// Layout of our data in the vertex buffer
var positionHandle = getAttribLocation(program, "position");
gl.enableVertexAttribArray(positionHandle);
gl.vertexAttribPointer(
  positionHandle,
  2,        // position is a vec2 (2 values per component)
  gl.FLOAT, // each component is a float
  false,    // don't normalize values
  2 * 4,    // two 4 byte float components per vertex (32 bit float is 4 bytes)
  0         // how many bytes inside the buffer to start from
);
//Set uniform handle
var timeHandle = getUniformLocation(program, "time");
var widthHandle = getUniformLocation(program, "width");
var heightHandle = getUniformLocation(program, "height");
gl.uniform1f(widthHandle, window.innerWidth);
gl.uniform1f(heightHandle, window.innerHeight);
function draw() {
  //Send uniforms to program
  gl.uniform1f(timeHandle, performance.now());
  //Draw a triangle strip connecting vertices 0-4
  gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
  requestAnimationFrame(draw);
}
draw();
html {
  overflow: hidden;
}
canvas {
  display: block;
}
Some other random tips.
These ifdefs are irrelevant
#ifdef GL_ES
precision mediump float;
#endif
Just
precision mediump float;
is fine.
I'm guessing this is obvious, but why pass in width and height separately?
How about just
uniform vec2 u_resolution;
No reason to call performance.now. The time is passed to your requestAnimationFrame callback
function draw(time) {
  //Send uniforms to program
  gl.uniform1f(timeHandle, time);

  ...

  requestAnimationFrame(draw);
}
requestAnimationFrame(draw);
The code checks for compile errors but not link errors
You should check for link errors
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
  throw "Program link failed with: " + gl.getProgramInfoLog(program);
}
There will be link errors if your varyings don't match, and further, the spec doesn't require compilation to ever fail, even on bad shaders; it only requires that bad shaders fail to link.
window.innerWidth: see this. Prefer sizing the canvas from canvas.clientWidth and canvas.clientHeight, as the resize() function in the sample below does.
gl.getUniformLocation returns null if the uniform does not exist. The code is checking for -1, which is correct for attributes but not for uniforms.
Throwing when attributes and uniforms don't exist: of course it's helpful to know they don't exist, but it's common to debug shaders by commenting things out or editing. For example, let's say nothing appears on the screen. If it were me, the first thing I'd do is change the fragment shader to this:
const fragmentSource = `
#version 300 es
precision mediump float;
uniform vec2 u_resolution;
uniform float time;
out vec4 color;
void main() {
  vec2 st = gl_FragCoord.xy / u_resolution;
  color = vec4(st.x, st.y, 0.5, 1.0);
  color = vec4(1, 0, 0, 1); // <----------------------
}`;
Just output a solid color to check whether the issue is in the fragment shader or the vertex shader. The moment I do that, most WebGL implementations will optimize out u_resolution, and the code that throws when a location can't be found effectively makes the program undebuggable.
In fact, the code only runs currently because of the previous bug of checking for -1 instead of null. With that bug fixed, the code crashes because time is optimized out.
var canvas = document.body.appendChild(document.createElement("canvas"));
var gl = canvas.getContext("webgl2");
//************** Shader sources **************
var vertexSource = `
#version 300 es
in vec2 position;
void main() {
  gl_Position = vec4(position, 0.0, 1.0);
}
`;
var fragmentSource = `
#version 300 es
precision mediump float;
uniform vec2 u_resolution;
uniform float time;
out vec4 color;
void main() {
  vec2 st = gl_FragCoord.xy / u_resolution;
  color = vec4(st.x, st.y, 0.5, 1.0);
}`;

function resize() {
  if (canvas.width !== canvas.clientWidth || canvas.height !== canvas.clientHeight) {
    canvas.width = canvas.clientWidth;
    canvas.height = canvas.clientHeight;
    gl.viewport(0, 0, canvas.width, canvas.height);
    gl.uniform2f(resHandle, canvas.width, canvas.height);
  }
}

//Compile shader and combine with source
function compileShader(shaderSource, shaderType) {
  var shader = gl.createShader(shaderType);
  gl.shaderSource(shader, shaderSource);
  gl.compileShader(shader);
  if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
    throw "Shader compile failed with: " + gl.getShaderInfoLog(shader);
  }
  return shader;
}

//From https://codepen.io/jlfwong/pen/GqmroZ
//Utility to complain loudly if we fail to find the attribute/uniform
function getAttribLocation(program, name) {
  var attributeLocation = gl.getAttribLocation(program, name);
  if (attributeLocation === -1) {
    console.warn("Cannot find attribute", name);
  }
  return attributeLocation;
}
function getUniformLocation(program, name) {
  var uniformLocation = gl.getUniformLocation(program, name);
  if (uniformLocation === null) {
    console.warn("Cannot find uniform", name);
  }
  return uniformLocation;
}
//************** Create shaders **************
//Create vertex and fragment shaders
var vertexShader = compileShader(vertexSource.trim(), gl.VERTEX_SHADER);
var fragmentShader = compileShader(fragmentSource.trim(), gl.FRAGMENT_SHADER);
//Create shader programs
var program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
  throw "Program link failed with: " + gl.getProgramInfoLog(program);
}
gl.useProgram(program);
//Set up rectangle covering entire canvas
var vertexData = new Float32Array([
  -1.0,  1.0, // top left
  -1.0, -1.0, // bottom left
   1.0,  1.0, // top right
   1.0, -1.0  // bottom right
]);
//Create vertex buffer
var vertexDataBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexDataBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertexData, gl.STATIC_DRAW);
// Layout of our data in the vertex buffer
var positionHandle = getAttribLocation(program, "position");
gl.enableVertexAttribArray(positionHandle);
gl.vertexAttribPointer(
  positionHandle,
  2,        // position is a vec2 (2 values per component)
  gl.FLOAT, // each component is a float
  false,    // don't normalize values
  2 * 4,    // two 4 byte float components per vertex (32 bit float is 4 bytes)
  0         // how many bytes inside the buffer to start from
);
//Set uniform handle
var timeHandle = getUniformLocation(program, "time");
var resHandle = getUniformLocation(program, "u_resolution");
function draw(time) {
  resize();
  //Send uniforms to program
  gl.uniform1f(timeHandle, time);
  //Draw a triangle strip connecting vertices 0-4
  gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
  requestAnimationFrame(draw);
}
requestAnimationFrame(draw);
html, body {
  height: 100%;
  margin: 0;
}
canvas {
  width: 100%;
  height: 100%;
  display: block;
}

iOS - Render a YUV420p image using a BiPlanar pixel format in OpenGL ES 2

I am trying to render a yuv420p encoded video to an OpenGL ES2 texture using Swift 3 on an iPhone 6S with iOS 10.3.3.
Texture Setup:
var formatType = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
var lumaTexture: CVOpenGLESTexture?
var chromaTexture: CVOpenGLESTexture?
var mediapixelBuffer: CVPixelBuffer?
var ioSurfaceBuffer: CVPixelBuffer?
media.videoSamplesBuffer = media.assetReaderOutput?.copyNextSampleBuffer()
mediapixelBuffer = CMSampleBufferGetImageBuffer(media.videoSamplesBuffer!)!
CVPixelBufferLockBaseAddress(mediapixelBuffer!, .readOnly)
let bufferWidth0: Int = CVPixelBufferGetWidthOfPlane(mediapixelBuffer!, 0)
let bufferWidth1: Int = CVPixelBufferGetWidthOfPlane(mediapixelBuffer!, 1)
let bufferHeight0: Int = CVPixelBufferGetHeightOfPlane(mediapixelBuffer!, 0)
let bufferHeight1: Int = CVPixelBufferGetHeightOfPlane(mediapixelBuffer!, 1)
let bytesPerRow0: Int = CVPixelBufferGetBytesPerRowOfPlane(mediapixelBuffer!, 0)
let bytesPerRow1: Int = CVPixelBufferGetBytesPerRowOfPlane(mediapixelBuffer!, 1)
let pixelBufferBaseAddress = CVPixelBufferGetBaseAddress(mediapixelBuffer!)
let pixelBufferPlaneAddress0 = CVPixelBufferGetBaseAddressOfPlane(mediapixelBuffer!, 0)
let pixelBufferPlaneAddress1 = CVPixelBufferGetBaseAddressOfPlane(mediapixelBuffer!, 1)
let ioBufferRet = CVPixelBufferCreate(
    kCFAllocatorDefault,
    bufferWidth_,
    bufferHeight_,
    self.formatType,
    attr,
    &ioSurfaceBuffer)
if ioBufferRet != 0 { print("error at `CVPixelBufferCreate`", ioBufferRet) }
CVPixelBufferLockBaseAddress(ioSurfaceBuffer!, .readOnly)
var copyBufferPlaneAddress0 = CVPixelBufferGetBaseAddressOfPlane(ioSurfaceBuffer!, 0)
var copyBufferPlaneAddress1 = CVPixelBufferGetBaseAddressOfPlane(ioSurfaceBuffer!, 1)
memcpy(copyBufferPlaneAddress0, pixelBufferPlaneAddress0, bufferHeight0 * bytesPerRow0/2) // Y
memcpy(copyBufferPlaneAddress1, pixelBufferPlaneAddress1, bufferHeight1 * bytesPerRow1/2) // UV
glActiveTexture(GLenum(GL_TEXTURE0))
if nil != ioSurfaceBuffer && nil != media.vidTexCachePtr {
    var cvRet = CVOpenGLESTextureCacheCreateTextureFromImage(
        kCFAllocatorDefault,
        media.vidTexCachePtr!,
        ioSurfaceBuffer!,
        nil,
        GLenum(GL_TEXTURE_2D),
        GLint(GL_RED_EXT),
        GLsizei(bufferWidth0),
        GLsizei(bufferHeight0),
        GLenum(GL_RED_EXT),
        GLenum(GL_UNSIGNED_BYTE),
        0,
        &lumaTexture)
    if cvRet != 0 { print("0 error at `CVOpenGLESTextureCacheCreateTextureFromImage`", cvRet) }
}
if nil != lumaTexture {
    glBindTexture(CVOpenGLESTextureGetTarget(lumaTexture!), CVOpenGLESTextureGetName(lumaTexture!))
}
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)
glActiveTexture(GLenum(GL_TEXTURE1))
if nil != ioSurfaceBuffer && nil != media.vidTexCachePtr {
    var cvRet = CVOpenGLESTextureCacheCreateTextureFromImage(
        kCFAllocatorDefault,
        media.vidTexCachePtr!,
        ioSurfaceBuffer!,
        nil,
        GLenum(GL_TEXTURE_2D),
        GLint(GL_RG_EXT),
        GLsizei(bufferWidth1),
        GLsizei(bufferHeight1),
        GLenum(GL_RG_EXT),
        GLenum(GL_UNSIGNED_BYTE),
        1,
        &chromaTexture)
    if cvRet != 0 { print("1 error at `CVOpenGLESTextureCacheCreateTextureFromImage`", cvRet) }
}
if nil != chromaTexture {
    glBindTexture(CVOpenGLESTextureGetTarget(chromaTexture!), CVOpenGLESTextureGetName(chromaTexture!))
}
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)
CVPixelBufferUnlockBaseAddress(mediapixelBuffer!, .readOnly)
CVPixelBufferUnlockBaseAddress(ioSurfaceBuffer!, .readOnly)
Fragment Shader:
#version 100
precision mediump float;

varying vec2 vUV;
uniform sampler2D SamplerY;
uniform sampler2D SamplerUV;

void main() {
    mediump vec3 yuv;
    lowp vec3 rgb;
    yuv.x = texture2D(SamplerY, vUV).r;
    yuv.yz = texture2D(SamplerUV, vUV).rg - vec2(0.5, 0.5);
    // Using BT.709 which is the standard for HDTV
    rgb = mat3(      1,       1,      1,
                     0, -.18732, 1.8556,
               1.57481, -.46813,      0) * yuv;
    gl_FragColor = vec4(rgb, 1);
}
The separate luminance texture looks right, but the separate chroma texture seems to contain only the Cr channel. I know that because the video is 4:2:0 the second chroma channel is empty, so maybe I shouldn't "see" the Cb channel, but the final result (which should show color-bar colors) looks like this.
It is missing the red. (I assume this is because the output is BGRA; if it were RGBA, the blue would be missing.) How do I get the red back?
This post describes a similar issue to the one I am experiencing, but the solution employs 3 planes (Y, U, and V separately) while I am trying to achieve this with 2 planes (Y and UV). I tried using the kCVPixelFormatType_420YpCbCr8Planar format type to get access to 3 planes, but then CVOpenGLESTextureCacheCreateTextureFromImage fails to create an IOSurface. I've also tried a few different YUV->RGB shader equations and looked into using ffmpeg to supply the CVPixelBuffer, but I can't get it to build for my iPhone architecture (arm64). Thank you in advance; any help would be much appreciated!
So it turns out the SamplerUV texture was not actually being sent to the shader (though it was viewable in the GPU frame capture, which was misleading). I assumed (incorrectly) that because SamplerY was automatically sent to the shader, the second texture, SamplerUV, would be as well. So the result I was seeing before was the luma texture being used for both the Y and UV textures.
The missing lines that fixed the issue:
var SamplerY: GLint = 0
var SamplerUV: GLint = 1
SamplerY = glGetUniformLocation(shaderProgram, "SamplerY")
SamplerUV = glGetUniformLocation(shaderProgram, "SamplerUV")
glUniform1i(SamplerY, 0)
glUniform1i(SamplerUV, 1)
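The underlying rule is that sampler uniforms default to texture unit 0, so until SamplerUV is explicitly pointed at unit 1, both samplers read whatever is bound to GL_TEXTURE0 (here, the luma texture). For what it's worth, the same fix expressed as plain C GL calls (assuming a linked program object named shaderProgram) is:

/* Point each sampler uniform at the texture unit its texture is bound to. */
GLint samplerY  = glGetUniformLocation(shaderProgram, "SamplerY");
GLint samplerUV = glGetUniformLocation(shaderProgram, "SamplerUV");
glUseProgram(shaderProgram);
glUniform1i(samplerY, 0);   /* luma texture on GL_TEXTURE0   */
glUniform1i(samplerUV, 1);  /* chroma texture on GL_TEXTURE1 */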

Update texture from vector directx9

I'm trying to render two textures, one for RGB and another one for the alpha channel, and I blend them together with a shader.
The alpha channel texture doesn't overlap properly with the RGB one. It seems to be stretched.
The alpha channel texture changes at every frame, and I need to fill it starting from an array of uint8_t with the following function:
D3DLOCKED_RECT locked_rect;
HRESULT hr = alpha_tex->LockRect(0, &locked_rect, nullptr, 0);
if (!FAILED(hr)) {
    ret_code = 0;
    BYTE *p_dst = (BYTE *)locked_rect.pBits;
    for (uint y = 0; y < height; y++) {
        memcpy(p_dst, alpha_array, width);
        alpha_array += width;
        p_dst += locked_rect.Pitch;
    }
    alpha_tex->UnlockRect(0);
}
where alpha_array is a uint8_t array containing the alpha values.
To render the texture I use the following function:
hwctx->d3d9device->Clear(0, 0, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0xffeeeeee, 1.0f, 0);
hwctx->d3d9device->BeginScene();
ctx->mFX->SetTechnique(ctx->mhTech);
ctx->texRGB->GetSurfaceLevel(0, &ctx->surfRGB);
hwctx->d3d9device->StretchRect((IDirect3DSurface9*)s->vdrFrame->data[3], NULL, ctx->surfRGB, NULL, D3DTEXF_LINEAR);
ctx->mFX->SetTexture(ctx->mhTexRGB, ctx->texRGB);
ctx->mFX->SetTexture(ctx->mhTexAlpha, ctx->texAlpha);
// Enable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, true);
hwctx->d3d9device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
hwctx->d3d9device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
UINT numPasses = 0;
ctx->mFX->Begin(&numPasses, 0);
for (UINT i = 0; i < numPasses; ++i) {
    ctx->mFX->BeginPass(i);
    hwctx->d3d9device->DrawPrimitive(D3DPT_TRIANGLEFAN, 0, 2);
    ctx->mFX->EndPass();
}
ctx->mFX->End();
hwctx->d3d9device->EndScene();
hwctx->d3d9device->Present(0, 0, 0, 0);
// Disable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
I combine the textures with the following vertex/pixel shader:
uniform extern texture gTexRGB;
uniform extern texture gTexAlpha;

sampler TexRGB = sampler_state {
    Texture = <gTexRGB>;
    AddressU = WRAP;
    AddressV = WRAP;
};
sampler TexAlpha = sampler_state {
    Texture = <gTexAlpha>;
    AddressU = WRAP;
    AddressV = WRAP;
};

struct OutputVS {
    float4 posH : POSITION0;
    float2 tex0 : TEXCOORD0;
};

OutputVS TextureBlendingVS(float2 tex0 : TEXCOORD0) {
    // Zero out our output.
    OutputVS outVS = (OutputVS)0;
    // Pass on texture coordinates to be interpolated in rasterization.
    outVS.tex0 = tex0;
    // Done--return the output.
    return outVS;
}

float4 TextureBlendingPS(float2 tex0 : TEXCOORD0) : COLOR {
    float3 rgb = tex2D(TexRGB, tex0).rgb;
    float alpha = tex2D(TexAlpha, tex0).a;
    return float4(rgb, alpha);
}

technique DirLightTexTech {
    pass P0 {
        // Specify the vertex and pixel shader associated with this pass.
        vertexShader = compile vs_2_0 TextureBlendingVS();
        pixelShader = compile ps_2_0 TextureBlendingPS();
    }
}
The size of the textures is the same, but during rendering something goes wrong.
Please help me. :)

Porting a depth peeling example to modern OpenGL

I am trying to port a depth peeling example (an order-independent transparency technique) to so-called modern OpenGL (3.3+), but since I am a beginner, it is not that easy.
Here you can find a working version (the GL2) and the one in progress (the GL3):
https://github.com/elect86/modern-jogl-examples/tree/master/modern-jogl-examples/src/depthPeeling
I can't see any layer behind; I guess there are some problems with the alpha value.
I tried to debug it, and in the core part
private void renderDepthPeeling(GL3 gl3) {
    /**
     * (1) Initialize min depth buffer.
     */
    gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, colorBlenderFboId[0]);
    gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
    gl3.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
    gl3.glEnable(GL3.GL_DEPTH_TEST);
    dpInit.bind(gl3);
    {
        gl3.glUniform1f(dpInit.getAlphaUnLoc(), opacity);
        drawModel(gl3);
    }
    dpInit.unbind(gl3);
    /**
     * (2) Depth peeling + blending.
     */
    int layersNumber = (passesNumber - 1) * 2;
//    System.out.println("layersNumber: " + layersNumber);
    for (int layer = 1; layer < 2; layer++) {
        int currentId = layer % 2;
        int previousId = 1 - currentId;
//        gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, fboId[currentId]);
        gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, 0);
        gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
        gl3.glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
        gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
        gl3.glDisable(GL3.GL_BLEND);
        gl3.glEnable(GL3.GL_DEPTH_TEST);
        {
            dpPeel.bind(gl3);
            {
                gl3.glActiveTexture(GL3.GL_TEXTURE0);
                gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, depthTextureId[previousId]);
                gl3.glUniform1i(dpPeel.getDepthTexUnLoc(), 0);
                {
                    gl3.glUniform1f(dpPeel.getAlphaUnLoc(), opacity);
                    drawModel(gl3);
                }
                gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, 0);
            }
            dpPeel.unbind(gl3);
            gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, colorBlenderFboId[0]);
            gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
        }
        gl3.glDisable(GL3.GL_DEPTH_TEST);
        gl3.glEnable(GL3.GL_BLEND);
        {
            gl3.glBlendEquation(GL3.GL_FUNC_ADD);
            gl3.glBlendFuncSeparate(GL3.GL_DST_ALPHA, GL3.GL_ONE, GL3.GL_ZERO, GL3.GL_ONE_MINUS_SRC_ALPHA);
            dpBlend.bind(gl3);
            dpBlend.bindTextureRECT(gl3, "TempTex", colorTextureId[currentId], 0);
            {
//                gl3.glCallList(quadDisplayList);
                drawFullScreenQuad(gl3);
            }
            dpBlend.unbind(gl3);
        }
        gl3.glDisable(GL3.GL_BLEND);
    }
    /**
     * (3) Final pass.
     */
//    gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, 0);
//    gl3.glDrawBuffer(GL3.GL_BACK);
//    gl3.glDisable(GL3.GL_DEPTH_TEST);
//
//    dpFinal.bind(gl3);
//    {
//        gl3.glUniform3f(dpFinal.getBackgroundColorUnLoc(), 1.0f, 1.0f, 1.0f);
//
////        dpFinal.bindTextureRECT(gl3, "ColorTex", colorBlenderTextureId[0], 0);
//        gl3.glActiveTexture(GL3.GL_TEXTURE0);
//        gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, colorBlenderTextureId[0]);
//        gl3.glUniform1i(dpFinal.getColorTexUnLoc(), 0);
//        {
////            gl3.glCallList(quadDisplayList);
//            drawFullScreenQuad(gl3);
//        }
//        gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, 0);
//    }
//    dpFinal.unbind(gl3);
}
Comparing the GL2 and GL3 versions of the program, the first and the last passes (1 and 3) look correct, so the problem lies in pass 2.
I modified the for loop so that it runs only one iteration,
for (int layer = 1; layer < 2; layer++) {
and
// gl2.glBindFramebuffer(GL2.GL_FRAMEBUFFER, fboId[currentId]);
gl2.glBindFramebuffer(GL2.GL_FRAMEBUFFER, 0);
in order to see the intermediate result visually.
Well, in the GL2 I get
while in the GL3
My dpPeel program is based on dpPeel_VS
#version 330

layout (location = 0) in vec4 position;

layout(std140) uniform mvpMatrixes {
    mat4 projectionMatrix;
    mat4 cameraMatrix;
};

void main(void)
{
    gl_Position = projectionMatrix * cameraMatrix * position;
}
And dpPeel_FS plus shade_FS
#version 330

// sampler2DRect: "samplerRect" is not a valid type name in GLSL 330.
uniform sampler2DRect DepthTex;

vec4 ShadeFragment();

out vec4 outputColor;

void main(void)
{
    // Bit-exact comparison between FP32 z-buffer and fragment depth
    float frontDepth = texture(DepthTex, gl_FragCoord.xy).r;
    if (gl_FragCoord.z <= frontDepth) {
        discard;
    }
    // Shade all the fragments behind the z-buffer
    vec4 color = ShadeFragment();
    outputColor = vec4(color.rgb * color.a, color.a);
}
#version 330

uniform float Alpha;

vec4 ShadeFragment()
{
    vec4 color;
    color.rgb = vec3(.4, .85, .0);
    color.a = Alpha;
    return color;
}
Do you see the error(s)?
OK, solved: it was a problem in the blending pass and the full-screen quad.
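Since the full-screen quad was part of the problem, here is a minimal sketch of a core-profile full-screen quad to replace the old glCallList path, written as C-style GL (the JOGL calls are the same minus the gl3. prefix). It assumes a pass-through vertex shader with the position attribute at location 0; the names are illustrative, not from the repository:

/* One-time setup: two-triangle strip covering clip space. */
static const GLfloat quad[] = {
    -1.0f, -1.0f,
     1.0f, -1.0f,
    -1.0f,  1.0f,
     1.0f,  1.0f,
};
GLuint vao, vbo;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(quad), quad, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (const void *)0);

/* Per frame, inside drawFullScreenQuad(): */
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);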
If I wanted to add lighting now, where should I apply it?
