I am trying to implement a texture cubic projection inside my WebGL shader, like in the picture below:
What I tried so far:
I am passing the bounding box of my object (the box in the middle of the picture) as follows:
uniform vec3 u_bbmin;
uniform vec3 u_bbmax;
... so the eight vertices of my projection box are:
vec3 v1 = vec3(u_bbmin.x, u_bbmin.y, u_bbmin.z);
vec3 v2 = vec3(u_bbmax.x, u_bbmin.y, u_bbmin.z);
vec3 v3 = vec3(u_bbmin.x, u_bbmax.y, u_bbmin.z);
...other combinations
vec3 v8 = vec3(u_bbmax.x, u_bbmax.y, u_bbmax.z);
At the end, to sample from my texture I need a map in the form of:
varying vec3 v_modelPos;
...
uniform sampler2D s_texture;
vec2 tCoords = vec2(0.0);
tCoords.s = s(x,y,z);
tCoords.t = t(x,y,z);
vec4 color = texture2D(s_texture, tCoords);
I was able to implement spherical and cylindrical projections, but I am stuck now on how to get this kind of cubic map. The texture shall stretch over the whole bounding box; aspect ratio doesn't matter.
Maybe I am missing some key points and I need some hints. What should the math for a cubic projection look like?
I honestly don't know if this is correct or not but ...
Looking up how cube mapping works there's a table in the OpenGL ES 2.0 spec
Major Axis Direction| Target |sc |tc |ma |
--------------------+---------------------------+---+---+---+
+rx |TEXTURE_CUBE_MAP_POSITIVE_X|−rz|−ry| rx|
−rx |TEXTURE_CUBE_MAP_NEGATIVE_X| rz|−ry| rx|
+ry |TEXTURE_CUBE_MAP_POSITIVE_Y| rx| rz| ry|
−ry |TEXTURE_CUBE_MAP_NEGATIVE_Y| rx|−rz| ry|
+rz |TEXTURE_CUBE_MAP_POSITIVE_Z| rx|−ry| rz|
−rz |TEXTURE_CUBE_MAP_NEGATIVE_Z|−rx|−ry| rz|
--------------------+---------------------------+---+---+---+
Table 3.21: Selection of cube map images based on major axis direction of texture coordinates
Using that I wrote this function
#define RX 0
#define RY 1
#define RZ 2
#define S 0
#define T 1
// Picks the dominant (major) axis of `normal` and fills in the entries of
// `uvmat` that route model-space coordinates into the s/t texture
// coordinates, following Table 3.21 ("Selection of cube map images based
// on major axis direction of texture coordinates") of the OpenGL ES 2.0
// spec. Only the sign entries are written; the caller must pass in a
// zeroed matrix (the remaining entries stay untouched).
void majorAxisDirection(vec3 normal, inout mat4 uvmat) {
vec3 absnorm = abs(normal);
if (absnorm.x > absnorm.y && absnorm.x > absnorm.z) {
// x major
if (normal.x >= 0.0) {
// +x face: sc = -rz, tc = -ry
uvmat[RZ][S] = -1.;
uvmat[RY][T] = -1.;
} else {
// -x face: sc = rz, tc = -ry
uvmat[RZ][S] = 1.;
uvmat[RY][T] = -1.;
}
} else if (absnorm.y > absnorm.z) {
// y major
if (normal.y >= 0.0) {
// +y face: sc = rx, tc = rz
uvmat[RX][S] = 1.;
uvmat[RZ][T] = 1.;
} else {
// -y face: sc = rx, tc = -rz
uvmat[RX][S] = 1.;
uvmat[RZ][T] = -1.;
}
} else {
// z major
if (normal.z >= 0.0) {
// +z face: sc = rx, tc = -ry
uvmat[RX][S] = 1.;
uvmat[RY][T] = -1.;
} else {
// -z face: sc = -rx, tc = -ry
uvmat[RX][S] = -1.;
uvmat[RY][T] = -1.;
}
}
}
You pass in a matrix and it sets it up to move the correct X, Y, or Z to the X and Y columns (to convert to s and t). In other words you pass in normal and it returns s and t.
This would effectively give a unit cube projected on the positive side of the origin. Adding in another matrix we can move and scale that cube.
If you want it to fit the cube exactly then you need to set the scale, translation and orientation to match the cube.
"use strict";
/* global document, twgl, requestAnimationFrame */
const vs = `
uniform mat4 u_model;
uniform mat4 u_viewProjection;
attribute vec4 position;
attribute vec3 normal;
attribute vec2 texcoord;
varying vec2 v_texCoord;
varying vec3 v_normal;
varying vec3 v_position;
void main() {
v_texCoord = texcoord;
vec4 position = u_model * position;
gl_Position = u_viewProjection * position;
v_position = position.xyz;
v_normal = (u_model * vec4(normal, 0)).xyz;
}
`;
const fs = `
precision mediump float;
varying vec3 v_position;
varying vec2 v_texCoord;
varying vec3 v_normal;
uniform mat4 u_cubeProjection;
uniform sampler2D u_diffuse;
#define RX 0
#define RY 1
#define RZ 2
#define S 0
#define T 1
#if BOX_PROJECTION
void majorAxisDirection(vec3 normal, inout mat4 uvmat) {
vec3 absnorm = abs(normal);
if (absnorm.x > absnorm.y && absnorm.x > absnorm.z) {
// x major
if (normal.x >= 0.0) {
uvmat[RZ][S] = -1.;
uvmat[RY][T] = -1.;
} else {
uvmat[RZ][S] = 1.;
uvmat[RY][T] = -1.;
}
} else if (absnorm.y > absnorm.z) {
// y major
if (normal.y >= 0.0) {
uvmat[RX][S] = 1.;
uvmat[RZ][T] = 1.;
} else {
uvmat[RX][S] = 1.;
uvmat[RZ][T] = -1.;
}
} else {
// z major
if (normal.z >= 0.0) {
uvmat[RX][S] = 1.;
uvmat[RY][T] = -1.;
} else {
uvmat[RX][S] = -1.;
uvmat[RY][T] = -1.;
}
}
}
#else // cube projection
void majorAxisDirection(vec3 normal, inout mat4 uvmat) {
vec3 absnorm = abs(normal);
if (absnorm.x > absnorm.y && absnorm.x > absnorm.z) {
// x major
uvmat[RZ][S] = 1.;
uvmat[RY][T] = -1.;
} else if (absnorm.y > absnorm.z) {
uvmat[RX][S] = 1.;
uvmat[RZ][T] = 1.;
} else {
uvmat[RX][S] = 1.;
uvmat[RY][T] = -1.;
}
}
#endif
void main() {
vec3 normal = normalize(v_normal);
mat4 uvmat = mat4(
vec4(0, 0, 0, 0),
vec4(0, 0, 0, 0),
vec4(0, 0, 0, 0),
vec4(0, 0, 0, 1));
majorAxisDirection(normal, uvmat);
uvmat = mat4(
abs(uvmat[0]),
abs(uvmat[1]),
abs(uvmat[2]),
abs(uvmat[3]));
vec2 uv = (uvmat * u_cubeProjection * vec4(v_position, 1)).xy;
gl_FragColor = texture2D(u_diffuse, uv);
}
`;
const m4 = twgl.m4;
const gl = twgl.getWebGLContext(document.getElementById("c"));
// compile shaders, look up locations
const cubeProjProgramInfo = twgl.createProgramInfo(gl,
[vs, '#define BOX_PROJECTION 0\n' + fs]);
const boxProjProgramInfo = twgl.createProgramInfo(gl,
[vs, '#define BOX_PROJECTION 1\n' + fs]);
let progNdx = 1;
const programInfos = [
cubeProjProgramInfo,
boxProjProgramInfo,
];
// create buffers
const cubeBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 2);
const sphereBufferInfo = twgl.primitives.createSphereBufferInfo(gl, 1, 60, 40);
const ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = 256;
ctx.canvas.height = 256;
ctx.fillStyle = `hsl(${360}, 0%, 30%)`;
ctx.fillRect(0, 0, 256, 256);
for (let y = 0; y < 4; ++y) {
for (let x = 0; x < 4; x += 2) {
ctx.fillStyle = `hsl(${(x + y) / 16 * 360}, 100%, 75%)`;
ctx.fillRect((x + (y & 1)) * 64, y * 64, 64, 64);
}
}
ctx.lineWidth = 10;
ctx.strokeRect(0, 0, 256, 256);
ctx.font = "240px sans-serif";
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.fillStyle = 'red';
ctx.fillText("F", 128, 128);
const texture = twgl.createTexture(gl, {
src: ctx.canvas,
wrap: gl.CLAMP_TO_EDGE,
min: gl.LINEAR, // no mips
});
// Create a child element of the given tag under `parent` and return it.
function addElem(parent, type) {
  const elem = parent.appendChild(document.createElement(type));
  return elem;
}
// Builds one slider row (range input + numeric readout + label) that edits
// obj[prop], mapping the slider's integer 0..1000 range onto [min, max].
// Returns an update() function so external code can resync the widgets
// after obj[prop] has been changed elsewhere.
function makeRange(parent, obj, prop, min, max, name) {
  const divElem = addElem(parent, 'div');
  const inputElem = addElem(divElem, 'input');
  Object.assign(inputElem, {
    type: 'range',
    min: 0,
    max: 1000,
    value: (obj[prop] - min) / (max - min) * 1000,
  });
  const valueElem = addElem(divElem, 'span');
  valueElem.textContent = obj[prop].toFixed(2);
  const labelElem = addElem(divElem, 'label');
  labelElem.textContent = name;
  // Push the current obj[prop] back into the slider and readout.
  function update() {
    // fixed: this statement previously ended with a comma operator
    // instead of a semicolon (worked by accident, read as one expression)
    inputElem.value = (obj[prop] - min) / (max - min) * 1000;
    valueElem.textContent = obj[prop].toFixed(2);
  }
  inputElem.addEventListener('input', (e) => {
    obj[prop] = (e.target.value / 1000 * (max - min) + min);
    update();
  });
  return update;
}
const models = [
cubeBufferInfo,
sphereBufferInfo,
cubeBufferInfo,
];
const rotateSpeeds = [
1,
1,
0,
];
let modelNdx = 0;
const ui = document.querySelector('#ui');
const cubeMatrix = m4.translation([0.5, 0.5, 0.5]);
const updaters = [
makeRange(ui, cubeMatrix, 0, -2, 2, 'sx'),
makeRange(ui, cubeMatrix, 5, -2, 2, 'sy'),
makeRange(ui, cubeMatrix, 10, -2, 2, 'sz'),
makeRange(ui, cubeMatrix, 12, -2, 2, 'tx'),
makeRange(ui, cubeMatrix, 13, -2, 2, 'ty'),
makeRange(ui, cubeMatrix, 14, -2, 2, 'tz'),
];
document.querySelectorAll('input[name=shape]').forEach((elem) => {
elem.addEventListener('change', (e) => {
if (e.target.checked) {
modelNdx = parseInt(e.target.value);
if (modelNdx == 2) {
m4.scaling([1/2, 1/2, 1/2], cubeMatrix);
m4.translate(cubeMatrix, [1, 1, 1], cubeMatrix);
updaters.forEach(f => f());
}
}
})
});
document.querySelectorAll('input[name=proj]').forEach((elem) => {
elem.addEventListener('change', (e) => {
if (e.target.checked) {
progNdx = parseInt(e.target.value);
}
})
});
const uniforms = {
u_diffuse: texture,
u_cubeProjection: cubeMatrix,
};
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const programInfo = programInfos[progNdx];
const bufferInfo = models[modelNdx];
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 10;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 4, -4];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
const model = m4.rotationY(time * rotateSpeeds[modelNdx]);
uniforms.u_viewProjection = viewProjection;
uniforms.u_model = model;
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
gl.drawElements(gl.TRIANGLES, bufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body {
margin: 0;
font-family: monospace;
color: white;
}
canvas {
display: block;
width: 100vw;
height: 100vh;
background: #444;
}
#ui {
position: absolute;
left: 0;
top: 0;
}
#ui span {
display: inline-block;
width: 4em;
text-align: right;
}
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<div id="ui">
  <div>
    <!-- ids must be unique per document so each <label for=...> activates
         its own radio; the original reused "sphere"/"cube" in both groups -->
    <input type="radio" name="proj" id="proj-cubic" value="0">
    <label for="proj-cubic">cubic projection</label>
    <input type="radio" name="proj" id="proj-box" value="1" checked>
    <label for="proj-box">box projection</label>
  </div>
  <div>
    <input type="radio" name="shape" id="shape-sphere" value="1">
    <label for="shape-sphere">sphere</label>
    <input type="radio" name="shape" id="shape-cube" value="0" checked>
    <label for="shape-cube">cube</label>
    <input type="radio" name="shape" id="shape-cube-match" value="2">
    <label for="shape-cube-match">cube match</label>
  </div>
</div>
The key point here is: normals shall be in object space. Please note that gman's answer is more elegant than mine, by using a matrix for the uv computation. I am using instead the bounding box coordinates, which are already passed to the vertex shader as uniforms for other general purposes.
Moreover, I don't even need to distinguish all the six major axis, I just only need three sides projection, so this can be simplified down. Of course, the texture will be mirrored on the opposite faces.
// Bounding-box extent along each axis (box supplied via u_bbmin/u_bbmax).
float sX = u_bbmax.x - u_bbmin.x;
float sY = u_bbmax.y - u_bbmin.y;
float sZ = u_bbmax.z - u_bbmin.z;
/* --- BOX PROJECTION - THREE SIDES --- */
// Project along the dominant component of the object-space normal; only
// three planes are used, so opposite faces receive a mirrored copy.
if( (abs(modelNormal.x) > abs(modelNormal.y)) && (abs(modelNormal.x) > abs(modelNormal.z)) ) {
uvCoords = modelPos.yz / vec2(sY, -sZ); // X axis
} else if( (abs(modelNormal.z) > abs(modelNormal.x)) && (abs(modelNormal.z) > abs(modelNormal.y)) ) {
uvCoords = modelPos.xy / vec2(sX, -sY); // Z axis
} else {
uvCoords = modelPos.xz / vec2(sX, -sZ); // Y axis
}
// Recenter the projected range onto [0, 1] — assumes modelPos is expressed
// relative to the box center (TODO confirm how v_modelPos is produced).
uvCoords += vec2(0.5);
Explanation:
The direction of the texture projection is determined by the
order of the modelPos coordinates.
Example: the texture can be rotated by 90 degrees by using
modelPos.yx instead of modelPos.xy.
The orientation of the texture projection is determined by the sign of
the modelPos coordinates.
Example: the texture can be mirrored on the Y-axis by using
vec2(sX, sY) instead of vec2(sX, -sY).
Result:
EDIT:
It is worth to link here another answer from gman which contain additional information about this topic and also some cool optimization techniques to avoid conditionals inside GLSL shaders: How to implement textureCube using 6 sampler2D.
Related
I'm rendering a variable number of circles in the plane with variable size, color, and position using instancing. I'm hoping to reach on the order of 10k-100k circles/labels.
in float instanceSize;
in vec3 instanceColor;
in vec2 instanceCenter;
The buffer backing the instanceCenter attribute changes every frame, animating the circles, but the rest is mostly static.
I have a quad per circle and I'm creating the circle in the fragment shader.
Now I'm looking into labeling the shapes with labels with font size proportional to circle size, centered on the circle, moving with the circles. From what I've read the most performant way to do so is to use a glyph texture with a quad for every letter using either a bitmap texture atlas or a signed distance field texture atlas. The examples I've seen seem to do a lot of work on the Javascript side and then use a draw call for every string like: https://webgl2fundamentals.org/webgl/lessons/webgl-text-glyphs.html
Is there a way to render the text with one draw call (with instancing, or otherwise?), while reusing the Float32Array backing instanceCenter every frame? It seems like more work would need to be done in the shaders but I'm not exactly sure how. Because each label has a variable number of glyphs I'm not sure how to associate a single instanceCenter with a single label.
All that aside, more basically I'm wondering how one centers text at a point?
Any help appreciated
Off the top of my head you could store your messages in a texture and add a message texcoord and length per instance. You can then compute the size of the rectangle needed to draw the message in the vertex shader and use that to center as well.
attribute float msgLength;
attribute vec2 msgTexCoord;
...
widthOfQuad = max(minSizeForCircle, msgLength * glphyWidth)
In the fragment shader read the message from the texture and use it look up glyphs (image based or SDF based).
varying vec2 v_msgTexCoord; // passed in from vertex shader
varying float v_msgLength; // passed in from vertex shader
varying vec2 uv; // uv that goes 0 to 1 across quad
float glyphIndex = texture2D(
messageTexture,
v_msgTexCoord + vec2(uv.x * v_msgLength / widthOfMessageTexture)).r;
// now convert glyphIndex to tex coords to look up glyph in glyph texture
glyphUV = (up to you)
textColor = texture2D(glyphTexture,
glyphUV + glyphSize * vec2(fract(uv.x * v_msgLength), uv.y) / glyphTextureSize);
Or something like that. I have no idea how slow it would be
async function main() {
const gl = document.querySelector('canvas').getContext('webgl');
twgl.addExtensionsToContext(gl);
// Maps a character to its index in the 8x8 glyph atlas:
// A-Z occupy slots 0..25, digits 0-9 occupy 26..35, and 255 means
// "no glyph" (rendered blank).
function convertToGlyphIndex(c) {
  const ch = c.toUpperCase();
  if (ch >= 'A' && ch <= 'Z') {
    return ch.charCodeAt(0) - 0x41;
  }
  if (ch >= '0' && ch <= '9') {
    return ch.charCodeAt(0) - 0x30 + 26;
  }
  return 255;
}
const messages = [
'pinapple',
'grape',
'banana',
'strawberry',
];
const glyphImg = await loadImage("https://webglfundamentals.org/webgl/resources/8x8-font.png");
const glyphTex = twgl.createTexture(gl, {
src: glyphImg,
minMag: gl.NEAREST,
});
// being lazy about size, making them all the same.
const glyphsAcross = 8;
// too lazy to pack these in a texture in a more compact way
// so just put one message per row
const longestMsg = Math.max(...messages.map(m => m.length));
const messageData = new Uint8Array(longestMsg * messages.length * 4);
messages.forEach((message, row) => {
for (let i = 0; i < message.length; ++i) {
const c = convertToGlyphIndex(message[i]);
const offset = (row * longestMsg + i) * 4;
const u = c % glyphsAcross;
const v = c / glyphsAcross | 0;
messageData[offset + 0] = u;
messageData[offset + 1] = v;
}
});
const messageTex = twgl.createTexture(gl, {
src: messageData,
width: longestMsg,
height: messages.length,
minMag: gl.NEAREST,
});
const vs = `
attribute vec4 position; // a centered quad (-1 + 1)
attribute vec2 texcoord;
attribute float messageLength; // instanced
attribute vec4 center; // instanced
attribute vec2 messageUV; // instanced
uniform vec2 glyphDrawSize;
varying vec2 v_texcoord;
varying vec2 v_messageUV;
varying float v_messageLength;
void main() {
vec2 size = vec2(messageLength * glyphDrawSize.x, glyphDrawSize.y);
gl_Position = position * vec4(size, 1, 0) + center;
v_texcoord = texcoord;
v_messageUV = messageUV;
v_messageLength = messageLength;
}
`;
const fs = `
precision highp float;
varying vec2 v_texcoord;
varying vec2 v_messageUV;
varying float v_messageLength;
uniform sampler2D messageTex;
uniform vec2 messageTexSize;
uniform sampler2D glyphTex;
uniform vec2 glyphTexSize;
uniform vec2 glyphSize;
void main() {
vec2 msgUV = v_messageUV + vec2(v_texcoord.x * v_messageLength / messageTexSize.x, 0);
vec2 glyphOffset = texture2D(messageTex, msgUV).xy * 255.0;
vec2 glyphsAcrossDown = glyphTexSize / glyphSize;
vec2 glyphUVOffset = glyphOffset / glyphsAcrossDown;
vec2 glyphUV = fract(v_texcoord * vec2(v_messageLength, 1)) * glyphSize / glyphTexSize;
vec4 glyphColor = texture2D(glyphTex, glyphUVOffset + glyphUV);
// do some math here for a circle
// TBD
if (glyphColor.a < 0.1) discard;
gl_FragColor = glyphColor;
}
`;
const prgInfo = twgl.createProgramInfo(gl, [vs, fs]);
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
center: {
numComponents: 2,
divisor: 1,
data: [
-0.4, 0.1,
-0.3, -0.5,
0.6, 0,
0.1, 0.5,
],
},
messageLength: {
numComponents: 1,
divisor: 1,
data: messages.map(m => m.length),
},
messageUV: {
numComponents: 2,
divisor: 1,
data: messages.map((m, i) => [0, i / messages.length]).flat(),
},
});
gl.clearColor(0, 0, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(prgInfo.program);
twgl.setBuffersAndAttributes(gl, prgInfo, bufferInfo);
twgl.setUniformsAndBindTextures(prgInfo, {
glyphDrawSize: [16 / gl.canvas.width, 16 / gl.canvas.height],
messageTex,
messageTexSize: [longestMsg, messages.length],
glyphTex,
glyphTexSize: [glyphImg.width, glyphImg.height],
glyphSize: [8, 8],
});
// ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, messages.length);
gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, messages.length);
}
// Loads an image from `url`, resolving with the decoded element and
// rejecting on a network/decode error. crossOrigin is set so the pixels
// stay usable as a WebGL texture source.
function loadImage(url) {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.crossOrigin = "anonymous";
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = url;
  });
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
note that if the glyphs were different sizes it seems like it would get extremely slow, at least off the top of my head, the only way to find each glyph as you draw a quad would be to loop over all the glyphs in the message for every pixel.
On the other hand, you could build a mesh of glyphs similar to the article, for each message, for every glyph in that message, add a per vertex message id or message uv that you use to look up offsets or matrices from a texture. In this way you can move every message independently but make it all happen in a single draw call. This would
allow non-monospaced glyphs. As an example of storing positions or matrices in a texture see this article on skinning. It stores bone matrices in a texture.
async function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('OES_texture_float');
if (!ext) {
alert('need OES_texture_float');
return;
}
twgl.addExtensionsToContext(gl);
// Glyph-atlas index for one character: letters A-Z map to 0-25,
// digits 0-9 map to 26-35; 255 marks a character with no glyph.
function convertToGlyphIndex(c) {
  const u = c.toUpperCase();
  const code = u.charCodeAt(0);
  if (u >= 'A' && u <= 'Z') return code - 0x41;
  if (u >= '0' && u <= '9') return code - 0x30 + 26;
  return 255;
}
const messages = [
'pinapple',
'grape',
'banana',
'strawberry',
];
const glyphImg = await loadImage("https://webglfundamentals.org/webgl/resources/8x8-font.png");
const glyphTex = twgl.createTexture(gl, {
src: glyphImg,
minMag: gl.NEAREST,
});
// being lazy about size, making them all the same.
const glyphsAcross = 8;
const glyphsDown = 5;
const glyphWidth = glyphImg.width / glyphsAcross;
const glyphHeight = glyphImg.height / glyphsDown;
const glyphUWidth = glyphWidth / glyphImg.width;
const glyphVHeight = glyphHeight / glyphImg.height;
// too lazy to pack these in a texture in a more compact way
// so just put one message per row
const positions = [];
const texcoords = [];
const messageIds = [];
const matrixData = new Float32Array(messages.length * 16);
const msgMatrices = [];
const quadPositions = [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
];
const quadTexcoords = [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
];
messages.forEach((message, id) => {
msgMatrices.push(matrixData.subarray(id * 16, (id + 1) * 16));
for (let i = 0; i < message.length; ++i) {
const c = convertToGlyphIndex(message[i]);
const u = (c % glyphsAcross) * glyphUWidth;
const v = (c / glyphsAcross | 0) * glyphVHeight;
for (let j = 0; j < 6; ++j) {
const offset = j * 2;
positions.push(
quadPositions[offset ] * 0.5 + i - message.length / 2,
quadPositions[offset + 1] * 0.5,
);
texcoords.push(
u + quadTexcoords[offset ] * glyphUWidth,
v + quadTexcoords[offset + 1] * glyphVHeight,
);
messageIds.push(id);
}
}
});
const matrixTex = twgl.createTexture(gl, {
src: matrixData,
type: gl.FLOAT,
width: 4,
height: messages.length,
minMag: gl.NEAREST,
wrap: gl.CLAMP_TO_EDGE,
});
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
attribute float messageId;
uniform sampler2D matrixTex;
uniform vec2 matrixTexSize;
uniform mat4 viewProjection;
varying vec2 v_texcoord;
void main() {
vec2 uv = (vec2(0, messageId) + 0.5) / matrixTexSize;
mat4 model = mat4(
texture2D(matrixTex, uv),
texture2D(matrixTex, uv + vec2(1.0 / matrixTexSize.x, 0)),
texture2D(matrixTex, uv + vec2(2.0 / matrixTexSize.x, 0)),
texture2D(matrixTex, uv + vec2(3.0 / matrixTexSize.x, 0)));
gl_Position = viewProjection * model * position;
v_texcoord = texcoord;
}
`;
const fs = `
precision highp float;
varying vec2 v_texcoord;
uniform sampler2D glyphTex;
void main() {
vec4 glyphColor = texture2D(glyphTex, v_texcoord);
// do some math here for a circle
// TBD
if (glyphColor.a < 0.1) discard;
gl_FragColor = glyphColor;
}
`;
const prgInfo = twgl.createProgramInfo(gl, [vs, fs]);
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: positions,
},
texcoord: texcoords,
messageId: {
numComponents: 1,
data: messageIds
},
});
gl.clearColor(0, 0, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(prgInfo.program);
const m4 = twgl.m4;
const viewProjection = m4.ortho(0, gl.canvas.width, 0, gl.canvas.height, -1, 1);
msgMatrices.forEach((mat, i) => {
m4.translation([80 + i * 30, 30 + i * 25, 0], mat);
m4.scale(mat, [16, 16, 1], mat)
});
// update the matrices
gl.bindTexture(gl.TEXTURE_2D, matrixTex);
gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, 4, messages.length, gl.RGBA, gl.FLOAT, matrixData);
twgl.setBuffersAndAttributes(gl, prgInfo, bufferInfo);
twgl.setUniformsAndBindTextures(prgInfo, {
viewProjection,
matrixTex,
matrixTexSize: [4, messages.length],
glyphTex,
});
gl.drawArrays(gl.TRIANGLES, 0, positions.length / 2);
}
// Promise wrapper around Image loading; resolves with the loaded element.
// crossOrigin="anonymous" keeps the image usable as a WebGL texture.
function loadImage(url) {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.crossOrigin = "anonymous";
    img.onload = () => resolve(img);
    img.onerror = reject;
    img.src = url;
  });
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Also see https://stackoverflow.com/a/54720138/128511
I have been working on a voxel engine using WebGL. It uses gl.POINTS to draw voxels as squares whose size is based on your distance to the point.
Here is the basics of how it work
Vertex:
//Get position using the projection matrix and block XYZ
gl_Position = uMatrix * uModelMatrix * vec4(aPixelPosition[0],-aPixelPosition[2],aPixelPosition[1],1.0);
//Set size of point based on screen height and divide it by depth to size based on distance
gl_PointSize = (uScreenSize[1]) / gl_Position[2];
And here is how that looks when it is from a non-problematic angle.
You can see, it looks just how I want it to (of course it's not as good as real cubes, but it performs amazingly on mobile). Now let's go inside this hollow cube and see what it looks like. This picture is me looking into the corner.
I changed the background color to highlight the issue. Basically if you are looking directly at the blocks, they work fine, but if they are at an angle to you, they are too small and leave large gaps. This picture is me looking at a wall directly
You can see facing the back wall works perfect, but all the other walls look bad.
So clearly I am doing something wrong, or not thinking about something properly. I have tried a lot of different things to try and repair it but none of my fixes work proper.
I have tried making it so blocks towards the edge of the screen are bigger, this fixes the problem but it also makes blocks bigger that don't need to be. Like for example looking at a flat wall, the edges would become much bigger even though looking at a flat wall doesn't have the issue.
I have also tried making the squares much bigger and this fixes it but then they overlap everywhere and it doesn't look nearly as clean.
You can see the example of the problem here (just takes a second to generate the structure)
https://sebastian97.itch.io/voxel-glitchy
WASD- movement
Arrow keys / mouse - Look
Assuming you have your projection matrix separated out I think you want gl_PointSize to be
// Compute the view-space position first; the point size is divided by the
// view-space depth (-z, since the camera looks down -z), not by the
// clip-space z that ends up in gl_Position.
vec4 modelViewPosition = view * model * position;
gl_PointSize = someSize / -modelViewPosition.z;
gl_Position = projection * modelViewPosition;
'use strict';
/* global window, twgl, requestAnimationFrame, document */
const height = 120;
const width = 30
const position = [];
const color = [];
const normal = [];
for (let z = 0; z < width; ++z) {
for (let x = 0; x < width; ++x) {
position.push(x, 0, z);
color.push(r(0.5), 1, r(0.5));
normal.push(0, 1, 0);
}
}
for (let y = 1; y < height ; ++y) {
for (let x = 0; x < width; ++x) {
position.push(x, -y, 0);
color.push(0.6, 0.6, r(0.5));
normal.push(0, 0, -1);
position.push(x, -y, width - 1);
color.push(0.6, 0.6, r(0.5));
normal.push(0, 0, 1);
position.push(0, -y, x);
color.push(0.6, 0.6, r(0.5));
normal.push(-1, 0, 0);
position.push(width - 1, -y, x);
color.push(0.6, 0.6, r(0.5));
normal.push(1, 0, 0);
}
}
// Random float in [min, max). With a single argument, returns a value
// in [0, arg).
function r(min, max) {
  const lo = max === undefined ? 0 : min;
  const hi = max === undefined ? min : max;
  return lo + Math.random() * (hi - lo);
}
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute vec3 color;
uniform mat4 projection;
uniform mat4 modelView;
varying vec3 v_normal;
varying vec3 v_color;
void main() {
vec4 modelViewPosition = modelView * position;
gl_Position = projection * modelViewPosition;
gl_PointSize = 850.0 / -modelViewPosition.z;
v_normal = mat3(modelView) * normal;
v_color = color;
}
`;
const fs = `
precision highp float;
varying vec3 v_normal;
varying vec3 v_color;
void main() {
vec3 lightDirection = normalize(vec3(1, 2, 3)); // arbitrary light direction
float l = dot(lightDirection, normalize(v_normal)) * .5 + .5;
gl_FragColor = vec4(v_color * l, 1);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position,
normal,
color: { numComponents: 3, data: color },
});
const keys = [];
const eye = [10, 10, 55];
const target = [0, 0, 0];
const up = [0, 1, 0];
const speed = 50;
const kUp = 38;
const kDown = 40;
const kLeft = 37;
const kRight = 39;
const kForward = 87;
const kBackward = 83;
const kSlideLeft = 65;
const kSlideRight = 68;
const keyMove = new Map();
keyMove.set(kForward, { ndx: 8, eye: 1, target: -1 });
keyMove.set(kBackward, { ndx: 8, eye: 1, target: 1 });
keyMove.set(kSlideLeft, { ndx: 0, eye: 1, target: -1 });
keyMove.set(kSlideRight, { ndx: 0, eye: 1, target: 1 });
keyMove.set(kLeft, { ndx: 0, eye: 0, target: -1 });
keyMove.set(kRight, { ndx: 0, eye: 0, target: 1 });
keyMove.set(kUp, { ndx: 4, eye: 0, target: -1 });
keyMove.set(kDown, { ndx: 4, eye: 0, target: 1 });
let then = 0;
function render(time) {
time *= 0.001; // seconds
const deltaTime = time - then;
then = time;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = Math.PI * 0.25;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 1000;
const projection = m4.perspective(fov, aspect, near, far);
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const modelView = m4.translate(view, [width / -2, 0, width / -2]);
keyMove.forEach((move, key) => {
if (keys[key]) {
const dir = camera.slice(move.ndx, move.ndx + 3);
const delta = v3.mulScalar(dir, deltaTime * speed * move.target);
v3.add(target, delta, target);
if (move.eye) {
v3.add(eye, delta, eye);
}
}
});
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
projection,
modelView,
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo, gl.POINTS);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
window.addEventListener('keydown', (e) => {
e.preventDefault();
keys[e.keyCode] = true;
});
window.addEventListener('keyup', (e) => {
keys[e.keyCode] = false;
});
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
#i { position: absolute; top: 0; left: 5px; font-family: monospace; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
<div id="i">ASWD ⬆️⬇️⬅️➡️</div>
My problem is that I'm using the TWGL library to make shaders with textures, and when the images load, a blue box always appears before each texture is ready. I couldn't find anything about this problem in the documentation or in other projects.
How can I remove that blue box?
"use strict";
// Small math helper namespace.
class MathUtils {
  constructor() {}
  // Linear interpolation between a and b: n = 0 gives a, n = 1 gives b.
  lerp(a, b, n) {
    return a + (b - a) * n;
  }
}
const main = () => {
const canvas = document.getElementById("canvas");
const gl = canvas.getContext("webgl");
const mathUtils = new MathUtils();
const mouse = { x: 0, y: 0 };
const lastmouse = [0, 0];
if (!gl) {
return;
}
const textures = [
"https://i.ibb.co/9WvFgbZ/fancycrave-165873-unsplash.jpg",
"https://i.ibb.co/NSfVqTq/map2.jpg",
"https://i.ibb.co/cy79kN4/blur.jpg"
];
const textLoaded = [];
for (let tex of textures) {
textLoaded.push(
twgl.createTexture(gl, {
src: tex,
crossOrigin: ""
})
);
}
let originalImage = { width: 1, height: 1 }; // replaced after loading
const text0 = twgl.createTexture(
gl,
{
src: textures[0],
crossOrigin: ""
},
(err, texture, source) => {
originalImage = source;
}
);
let uniforms = {};
let anim = { value: 1 };
// compile shaders, link program, lookup location
const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
// Render one frame: draws the distortion-shader quad scaled so the image
// covers the canvas ("cover" fit), eases the mouse uniform, and schedules
// the next frame. `time` is the DOMHighResTimeStamp from requestAnimationFrame.
// Fix: the original computed `mat` once with m3.scaling(...) and then
// unconditionally overwrote it below — the first computation was dead code.
const render = time => {
  time *= 0.001; // ms -> seconds
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.clearColor(0, 0, 0, 0);
  gl.clear(gl.COLOR_BUFFER_BIT);
  gl.useProgram(programInfo.program);
  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const imageAspect = originalImage.width / originalImage.height;
  // this assumes we want to fill vertically; vertical is negative to flip Y
  let horizontalDrawAspect = imageAspect / canvasAspect;
  let verticalDrawAspect = -1;
  // does it fill horizontally?
  if (horizontalDrawAspect < 1) {
    // no it does not so scale so we fill horizontally and
    // adjust vertical to match
    verticalDrawAspect /= horizontalDrawAspect;
    horizontalDrawAspect = 1;
  }
  const mat = m3.scaling(horizontalDrawAspect, verticalDrawAspect);
  uniforms = {
    u_text: textLoaded[0],
    u_map: textLoaded[1],
    u_blur: textLoaded[2],
    u_matrix: mat,
    u_time: time * 10,
    u_res: [gl.canvas.clientWidth, gl.canvas.clientHeight],
    u_mouse: lastmouse
  };
  // calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
  twgl.setUniforms(programInfo, uniforms);
  // calls gl.drawArrays or gl.drawElements
  twgl.drawBufferInfo(gl, bufferInfo);
  // ease the mouse uniform toward the latest pointer position
  lastmouse[0] = mathUtils.lerp(lastmouse[0], mouse.x, 0.1);
  lastmouse[1] = mathUtils.lerp(lastmouse[1], mouse.y, 0.1);
  requestAnimationFrame(render);
};
requestAnimationFrame(render);
canvas.addEventListener("mousemove", ({ clientX, clientY }) => {
mouse.x = -clientX / innerWidth;
mouse.y = -clientY / innerHeight;
});
};
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="canvas"></canvas>
<script id="vs" type="f">
// Vertex shader: transforms the quad position by a 2D (mat3) matrix and
// passes the texture coordinate through to the fragment shader.
attribute vec2 position;
attribute vec2 texcoord;
uniform mat3 u_matrix;
varying vec2 v_texcoord;
void main() {
gl_Position = vec4(u_matrix * vec3(position, 1), 1);
v_texcoord = texcoord;
}
</script>
<script id="fs" type="f">
// Fragment shader: animated ripple distortion plus mouse parallax.
// u_map carries per-pixel control data:
//   red channel   -> depth (0.5 = neutral), drives the parallax offset
//   green channel -> distortion-strength mask
precision mediump float;
uniform vec2 u_mouse;
uniform vec2 u_res;
uniform float u_time;
uniform float u_dpi;
uniform sampler2D u_text;
uniform sampler2D u_map;
uniform sampler2D u_blur;
varying vec2 v_texcoord;
void main(){
float distAmount = .003;
vec2 uv = v_texcoord;
// small pointer-driven offset, later scaled by the depth value
vec2 parallax = u_mouse * 0.07;
float freq = 70.0;
float amp = 0.004;
vec4 map = texture2D(u_map, uv);
// recenter depth around 0 so parallax can push either way
// (note: "dethMap" is presumably a typo for "depthMap")
float dethMap = map.r - .5;
float distMap = map.g;
// two phase-shifted waves produce the ripple offsets
float x = uv.y * freq + u_time * 3.;
float y = uv.x * freq + u_time * .3;
float distX = cos(x+y) * amp * cos(y);
float distY = sin(x-y) * amp * cos(y);
vec2 distPosition = vec2(uv.x + distX * distMap, uv.y + distY * distMap);
vec2 turbulance = distPosition + (parallax * dethMap);
vec4 ori_img = texture2D(u_text, turbulance);
vec4 blur_img = texture2D(u_blur, turbulance);
// cross-fade toward the blurred copy; the mix factor grows with the
// distorted coordinate's distance from the uv origin
vec4 color = mix(ori_img, blur_img, length(distPosition) * distAmount);
gl_FragColor = color;
}
</script>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>
I don't see any blue box
You can set the default color twgl uses to make textures immediately usable by calling
twgl.setDefaults({
textureColor: [0, 0, 0, 0], // make initial color transparent black
});
before loading textures.
You can set any individual texture's temporary initial color via the color option
const texture = twgl.createTexture(gl, {
src: 'https://somedomain.com/path/to/someimage.jpg',
color: [0, 0, 0, 0], // make initial color transparent black
});
Also note you can load all images in one call to twgl.createTextures and that twgl should handle cross origin automatically
// replaced after loading
let srcs = {
text: { width: 1, height: 1 },
};
const textures = twgl.createTextures(gl, {
text: {src: "https://i.ibb.co/9WvFgbZ/fancycrave-165873-unsplash.jpg" },
map: {src: "https://i.ibb.co/NSfVqTq/map2.jpg" },
blur: {src: "https://i.ibb.co/cy79kN4/blur.jpg" },
}, (err, textures, sources) => {
srcs = sources;
});
const imageAspect = srcs.text.width / srcs.text.height;
uniforms = {
u_text: textures.text,
u_map: textures.map,
u_blur: textures.blur,
...
"use strict";
/**
 * Small math helper collection.
 * Fix: removed the useless empty constructor — the implicit default
 * constructor is identical.
 */
class MathUtils {
  /**
   * Linear interpolation between a and b.
   * @param {number} a start value (returned when n = 0)
   * @param {number} b end value (returned when n = 1)
   * @param {number} n interpolation factor
   * @returns {number} a + (b - a) * n
   */
  lerp(a, b, n) {
    return n * (b - a) + a;
  }
}
// Entry point: sets up WebGL, loads the three textures (image, distortion
// map, blur copy) in a single twgl.createTextures call, then starts the
// render loop.
// Fix: removed the dead `let mat = m3.scaling(imageAspect / canvasAspect, -1)`
// inside render() — it was unconditionally overwritten before use.
const main = () => {
  const canvas = document.getElementById("canvas");
  const gl = canvas.getContext("webgl");
  const mathUtils = new MathUtils();
  const mouse = { x: 0, y: 0 };
  const lastmouse = [0, 0];
  if (!gl) {
    return; // WebGL not supported
  }
  // make textures transparent black until the images arrive
  twgl.setDefaults({
    textureColor: [0, 0, 0, 0],
  });
  // placeholder sizes; replaced by the real image sizes after loading
  let srcs = {
    text: { width: 1, height: 1 },
  };
  const textures = twgl.createTextures(gl, {
    text: { src: "https://i.ibb.co/9WvFgbZ/fancycrave-165873-unsplash.jpg" },
    map: { src: "https://i.ibb.co/NSfVqTq/map2.jpg" },
    blur: { src: "https://i.ibb.co/cy79kN4/blur.jpg" },
  }, (err, textures, sources) => {
    srcs = sources;
  });
  let uniforms = {};
  let anim = { value: 1 };
  // compile shaders, link program, lookup location
  const programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
  // calls gl.createBuffer, gl.bindBuffer, gl.bufferData for a quad
  const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
  const render = time => {
    time *= 0.001; // seconds
    twgl.resizeCanvasToDisplaySize(gl.canvas);
    gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
    gl.clearColor(0, 0, 0, 0);
    gl.clear(gl.COLOR_BUFFER_BIT);
    gl.useProgram(programInfo.program);
    // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
    twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
    const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const imageAspect = srcs.text.width / srcs.text.height;
    // cover-style fit: assume we fill vertically (negative flips Y),
    // then scale up if that leaves horizontal gaps
    let horizontalDrawAspect = imageAspect / canvasAspect;
    let verticalDrawAspect = -1;
    // does it fill horizontally?
    if (horizontalDrawAspect < 1) {
      // no it does not so scale so we fill horizontally and
      // adjust vertical to match
      verticalDrawAspect /= horizontalDrawAspect;
      horizontalDrawAspect = 1;
    }
    const mat = m3.scaling(horizontalDrawAspect, verticalDrawAspect);
    uniforms = {
      u_text: textures.text,
      u_map: textures.map,
      u_blur: textures.blur,
      u_matrix: mat,
      u_time: time * 10,
      u_res: [gl.canvas.clientWidth, gl.canvas.clientHeight],
      u_mouse: lastmouse
    };
    // calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
    twgl.setUniforms(programInfo, uniforms);
    // calls gl.drawArrays or gl.drawElements
    twgl.drawBufferInfo(gl, bufferInfo);
    // ease the mouse uniform toward the latest pointer position
    lastmouse[0] = mathUtils.lerp(lastmouse[0], mouse.x, 0.1);
    lastmouse[1] = mathUtils.lerp(lastmouse[1], mouse.y, 0.1);
    requestAnimationFrame(render);
  };
  requestAnimationFrame(render);
  // track the (negated, normalized) pointer position
  canvas.addEventListener("mousemove", ({ clientX, clientY }) => {
    mouse.x = -clientX / innerWidth;
    mouse.y = -clientY / innerHeight;
  });
};
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="canvas"></canvas>
<script id="vs" type="f">
// Vertex shader: transforms the quad position by a 2D (mat3) matrix and
// passes the texture coordinate through to the fragment shader.
attribute vec2 position;
attribute vec2 texcoord;
uniform mat3 u_matrix;
varying vec2 v_texcoord;
void main() {
gl_Position = vec4(u_matrix * vec3(position, 1), 1);
v_texcoord = texcoord;
}
</script>
<script id="fs" type="f">
// Fragment shader: animated ripple distortion plus mouse parallax.
// u_map carries per-pixel control data:
//   red channel   -> depth (0.5 = neutral), drives the parallax offset
//   green channel -> distortion-strength mask
precision mediump float;
uniform vec2 u_mouse;
uniform vec2 u_res;
uniform float u_time;
uniform float u_dpi;
uniform sampler2D u_text;
uniform sampler2D u_map;
uniform sampler2D u_blur;
varying vec2 v_texcoord;
void main(){
float distAmount = .003;
vec2 uv = v_texcoord;
// small pointer-driven offset, later scaled by the depth value
vec2 parallax = u_mouse * 0.07;
float freq = 70.0;
float amp = 0.004;
vec4 map = texture2D(u_map, uv);
// recenter depth around 0 so parallax can push either way
// (note: "dethMap" is presumably a typo for "depthMap")
float dethMap = map.r - .5;
float distMap = map.g;
// two phase-shifted waves produce the ripple offsets
float x = uv.y * freq + u_time * 3.;
float y = uv.x * freq + u_time * .3;
float distX = cos(x+y) * amp * cos(y);
float distY = sin(x-y) * amp * cos(y);
vec2 distPosition = vec2(uv.x + distX * distMap, uv.y + distY * distMap);
vec2 turbulance = distPosition + (parallax * dethMap);
vec4 ori_img = texture2D(u_text, turbulance);
vec4 blur_img = texture2D(u_blur, turbulance);
// cross-fade toward the blurred copy; the mix factor grows with the
// distorted coordinate's distance from the uv origin
vec4 color = mix(ori_img, blur_img, length(distPosition) * distAmount);
gl_FragColor = color;
}
</script>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>
How do I transition between a 3D view and a 2D view in WebGL?
I have a 3D view of a scene and I want to show a 2D view as well, like a map view. How do I switch between the 2 types of views?
Generally to do switch from 3d to 2d you just use an orthographic projection instead of a perspective projection.
If you want to animate the transition lerping between the 2 seems to work
const ortho = someOrthoFunc(left, right, top, bottom, orthoZNear, orthZFar);
const persp = somePerspFunc(fov, aspect, perspZNear, perspZFar);
const projection = [];
for (let i = 0; i < 16; ++i) {
projection[i] = lerp(ortho[i], persp[i], mixAmount);
}
// Linear interpolation: l = 0 -> a, l = 1 -> b.
function lerp(a, b, l) {
  const delta = b - a;
  return a + delta * l;
}
Where mixAmount is 0 when you want the orthographic view (2d-ish) and mixAmount is 1 when you want the perspective view (3d) and you can animate that between 0 and 1.
Note that if you want the orthographic view match the perspective view you need to choose top, bottom, left, right values that match which fit your app. For transitioning between 2 different views (say first person on the ground vs looking straight down) you can pick whatever settings you want. But say you were looking down and just wanted to view to go from 3D to 2D with the same view. In that case you need to pick a left, right, top, bottom that matches the perspective view for a given number of units. For top and bottom that's probably how ever many units fit vertically the "ground" distance from the camera.
See this answer where distance is the distance to the ground. The formula will then give you half the number of units visible at that distance, which you can then plug into top and bottom. For left and right just multiply by the aspect ratio of the canvas's display size.
The other thing that changes is the camera. A common way to position a camera is using a lookAt function which, depending on the library might generate a view matrix or a camera matrix.
To look down
const cameraPosition = [x, groundHeight + distanceAboveGround, z];
const target = [x, groundHeight, z];
const up = [0, 0, 1];
const camera = someLookAtFunction(cameraPosition, target, up);
You'd have a different set of cameraPosition, target, up for the 3d camera. You can animate the transition between them by lerping those 3 variables.
// Vertex shader for the projection-transition demo: standard
// worldViewProjection transform plus a pass-through texcoord.
const vs = `
uniform mat4 u_worldViewProjection;
attribute vec4 a_position;
attribute vec2 a_texcoord;
varying vec4 v_position;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = u_worldViewProjection * a_position;
}
`;
// Fragment shader: plain texture lookup.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texcoord);
}
`;
"use strict";
// prefix attributes with a_ so twgl matches the shader attribute names
twgl.setDefaults({attribPrefix: "a_"});
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.getElementById("c").getContext("webgl");
// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for positions, texcoords
const bufferInfo = twgl.primitives.createCubeBufferInfo(gl);
// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
// four RGBA pixels used as a tiny color-block texture
const tex = twgl.createTexture(gl, {
min: gl.NEAREST,
mag: gl.NEAREST,
src: [
255, 0, 0, 255,
0, 192, 0, 255,
0, 0, 255, 255,
255, 224, 0, 255,
],
});
// UI state; modes: 0 = first option, 1 = second, 2 = animate between them
const settings = {
projectionMode: 2,
cameraMode: 2,
fov: 30,
};
// Renders a 3x3 grid of spinning textured cubes, blending between an
// orthographic and a perspective projection (and between a top-down and
// an angled camera) according to `settings`.
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = settings.fov * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const perspZNear = 0.5;
const perspZFar = 10;
const persp = m4.perspective(fov, aspect, perspZNear, perspZFar);
// the size to make the orthographic view is arbitrary.
// here we're choosing the number of units at ground level
// away from the top perspective camera
const heightAboveGroundInTopView = 7;
const halfSizeToFitOnScreen = heightAboveGroundInTopView * Math.tan(fov / 2);
const top = -halfSizeToFitOnScreen;
const bottom = +halfSizeToFitOnScreen;
const left = top * aspect;
const right = bottom * aspect;
const orthoZNear = 0.5;
const orthoZFar = 10;
const ortho = m4.ortho(left, right, top, bottom, orthoZNear, orthoZFar);
// mix factors: 0 = fully ortho / top camera, 1 = fully perspective / angled
let perspMixAmount;
let camMixAmount;
switch (settings.projectionMode) {
case 0: // 2d
perspMixAmount = 0;
break;
case 1: // 3d
perspMixAmount = 1;
break;
case 2: // animated
perspMixAmount = Math.sin(time) * .5 + .5;
break;
}
switch (settings.cameraMode) {
case 0: // top
camMixAmount = 0;
break;
case 1: // angle
camMixAmount = 1;
break;
case 2: // animated
camMixAmount = Math.sin(time) * .5 + .5;
break;
}
// element-wise lerp between the two 4x4 projection matrices
const projection = [];
for (let i = 0; i < 16; ++i) {
projection[i] = lerp(ortho[i], persp[i], perspMixAmount);
}
const perspEye = [1, 4, -6];
const perspTarget = [0, 0, 0];
const perspUp = [0, 1, 0];
const orthoEye = [0, heightAboveGroundInTopView, 0];
const orthoTarget = [0, 0, 0];
const orthoUp = [0, 0, 1];
// lerp the camera parameters, then build the view matrix from the result
const eye = v3.lerp(orthoEye, perspEye, camMixAmount);
const target = v3.lerp(orthoTarget, perspTarget, camMixAmount);
const up = v3.lerp(orthoUp, perspUp, camMixAmount);
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const t = time * .1;
// draw a 3x3 grid of cubes, each with its own rotation phase
for (let z = -1; z <= 1; ++z) {
for (let x = -1; x <= 1; ++x) {
const world = m4.translation([x * 1.4, 0, z * 1.4]);
m4.rotateY(world, t + z + x, world);
// calls gl.uniformXXX
twgl.setUniforms(programInfo, {
u_texture: tex,
u_worldViewProjection: m4.multiply(viewProjection, world),
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
}
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
// wire the UI controls to `settings`
setupRadioButtons("proj", "projectionMode");
setupRadioButtons("cam", "cameraMode");
setupSlider("#fovSlider", "#fov", "fov");
// Wires a range <input> to settings[property] and mirrors the current
// value into a text label.
// sliderId/labelId: CSS selectors; property: key in the `settings` object.
// Fix: parseInt now passes an explicit radix.
function setupSlider(sliderId, labelId, property) {
  const slider = document.querySelector(sliderId);
  const label = document.querySelector(labelId);
  function updateLabel() {
    label.textContent = settings[property];
  }
  slider.addEventListener('input', e => {
    settings[property] = parseInt(slider.value, 10);
    updateLabel();
  });
  updateLabel();
  slider.value = settings[property];
}
// Wires a radio-button group (matched by its `name` attribute) so that
// checking a button stores its numeric value in settings[property].
// Fix: parseInt now passes an explicit radix.
function setupRadioButtons(name, property) {
  document.querySelectorAll(`input[name=${name}]`).forEach(elem => {
    elem.addEventListener('change', e => {
      if (e.target.checked) {
        settings[property] = parseInt(e.target.value, 10);
      }
    });
  });
}
// Linear interpolation: l = 0 -> a, l = 1 -> b.
function lerp(a, b, l) {
  const delta = b - a;
  return a + delta * l;
}
body { margin: 0; }
canvas { display: block; width: 100vw; height: 100vh; }
#ui {
position: absolute;
left: 10px;
top: 10px;
z-index: 2;
background: rgba(255, 255, 255, 0.9);
padding: .5em;
}
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas id="c"></canvas>
<div id="ui">
<div>projection:</div>
<div><input type="radio" name="proj" value="0" /><label for="2d">orthographic</label></div>
<div><input type="radio" name="proj" value="1" /><label for="3d">perspective</label></div>
<div><input type="radio" name="proj" value="2" checked/><label for="animated">animated</label></div>
<div> </div>
<div>camera:</div>
<div><input type="radio" name="cam" value="0" /><label for="top">top</label></div>
<div><input type="radio" name="cam" value="1" /><label for="angle">angle</label></div>
<div><input type="radio" name="cam" value="2" checked/><label for="animated">animated</label></div>
<div> </div>
<div>field of view[<span id="fov"></span>]</div>
<div><input id="fovSlider" type="range" min="10" max="90" value="60"/></div>
</div>
I am trying to implement Conway's Game of Life in 3D. Basically, I am experimenting with it in an extra dimension.
I am instantiating a list of cubes at the start of the game and giving each one an index associated with a logic object. Inside a function driven by requestAnimationFrame, I call twgl.drawObjectList for each cube that is alive and skip the dead ones.
The problem is that the FPS drops below 1 when I make a 50*50*50 (125000 cubes) game. Is this normal? Am I doing the correct approach?
Edit:
// Creates a new game-of-life grid of xDimV * yDimV * zDimV cells, or loads
// dimensions from a previously saved state (a 3D array indexed [z][y][x]),
// builds one draw object per cell, then starts the render loop.
function newGame (xDimV, yDimV, zDimV, gameSelected = false) {
// No game to load
if (!gameSelected) {
xDim = xDimV;
yDim = yDimV;
zDim = zDimV;
} else {
// derive dimensions from the saved state's nesting
xDim = gameSelected[0][0].length;
yDim = gameSelected[0].length;
zDim = gameSelected.length;
}
myGame = Object.create(game);
// NOTE(review): "consutructor" looks like a typo — confirm the actual
// method name defined on the `game` prototype before renaming
myGame.consutructor(xDim , yDim , zDim, gameSelected);
objects = [];
for (var z = 0; z < zDim; z++) {
for (var y = 0; y < yDim; y++){
for (var x = 0; x < xDim; x++){
// per-cube uniforms; the matrices are rewritten every frame in render()
var uniforms = {
u_colorMult: chroma.hsv(emod(baseHue + rand(0, 120), 360), rand(0.5,
1), rand(0.5, 1)).gl(),
u_world: m4.identity(),
u_worldInverseTranspose: m4.identity(),
u_worldViewProjection: m4.identity(),
};
var drawObjects = [];
drawObjects.push({
programInfo: programInfo,
bufferInfo: cubeBufferInfo,
uniforms: uniforms,
});
// translation centers the grid around the origin on two axes;
// index [z, y, x] links this cube back to the life array
objects.push({
translation: [(x*scale)-xDim*scale/2, (z*scale), (y*scale)-yDim*scale/2],
scale: scale,
uniforms: uniforms,
bufferInfo: cubeBufferInfo,
programInfo: programInfo,
drawObject: drawObjects,
index: [z, y, x],
});
}
}
}
requestAnimationFrame(render);
}
// timestamp of the previous frame, used to compute elapsed time
var then = 0;
// Per-frame update for the question's version: rebuilds every cube's
// matrices and issues one draw call per live cell (the bottleneck the
// answer below addresses).
function render(time) {
time *= 0.001;
var elapsed = time - then;
then = time;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// NOTE(review): clearColor is set after clear, so this color only takes
// effect on the next frame's clear; components are clamped to [0, 1],
// so 255 behaves as 1.0
gl.clearColor(255, 255, 0, 0.1);
var fovy = 30 * Math.PI / 180;
var projection = m4.perspective(fovy, gl.canvas.clientWidth / gl.canvas.clientHeight, 0.5, 10000);
var eye = [cameraX, cameraY, cameraZ];
var target = [cameraX, cameraY, 10];
var up = [0, 1, 0];
var camera = m4.lookAt(eye, target, up);
var view = m4.inverse(camera);
var viewProjection = m4.multiply(projection, view);
// user-controlled rotation of the whole scene
viewProjection = m4.rotateX(viewProjection, phi);
viewProjection = m4.rotateY(viewProjection, theta);
targetTimer -= elapsed;
// rebuild every cube's matrices each frame; draw only the live cells
objects.forEach(function(obj) {
var uni = obj.uniforms;
var world = uni.u_world;
m4.identity(world);
m4.translate(world, obj.translation, world);
m4.scale(world, [obj.scale, obj.scale, obj.scale], world);
m4.transpose(m4.inverse(world, uni.u_worldInverseTranspose), uni.u_worldInverseTranspose);
m4.multiply(viewProjection, uni.u_world, uni.u_worldViewProjection);
if (myGame.life[obj.index[0]][obj.index[1]][obj.index[2]] === 1) {
twgl.drawObjectList(gl, obj.drawObject);
}
});
// advance one generation when the timer elapses (unless paused)
if (targetTimer <= 0 && !paused) {
targetTimer = targetChangeInterval / speed;
myGame.nextGen();
setGameStatus();
myGame.resetStatus();
}
requestAnimationFrame(render);
}
Thanks in advance.
125k cubes is quite a lot. Typical AAA games generally make 1000 to 5000 draw calls total. There are breakdowns on the web of various game engines and how many draw calls they take the generate a frame.
Here's a talk with several methods. It includes putting all the cubes in one giant mesh and moving them around in JavaScript so they're is effectively one draw call.
If it was me I'd do that, and I'd make a texture with one pixel per cube. For 125k cubes that texture could be as small as 356x356, though I'd probably choose something that better fits the cube layout, like 500x300 (since each slice is 50x50). For each vertex of each cube I'd add an attribute with a UV pointing to that cube's specific pixel in the texture. In other words, the first cube's 36 vertices would all share one UV, the 2nd cube's 36 vertices would share the next UV, and so on.
attribute vec2 cubeUV;
Then I can use the cubeUV to lookup a pixel in the texture whether or not the cube should be on or off
attribute vec2 cubeUV;
uniform sampler2D lifeTexture;
void main() {
float cubeOn = texture2D(lifeTexture, cubeUV).r;
}
I could clip out the cube pretty easily with
if (cubeOn < 0.5) {
gl_Position = vec4(2, 2, 2, 1); // outside clip space
return;
}
// otherwise do the calcs for a cube
In this case the cubes don't need to move so all JavaScript has to do each frame is compute life in some Uint8Array and then call
gl.bindTexture(gl.TEXTURE_2D, lifeTexture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0,
gl.LUMINANCE, gl.UNSIGNED_BYTE, lifeStatusUint8Array);
every frame and make one draw call.
Note: You can effectively see examples of this type of shader here, except that that shader is not looking at a texture with life running in it; instead it's looking at a texture containing 4 seconds of audio data. It's also generating the cubeId from vertexId and generating the cube vertices and normals from vertexId. That would make it slower than putting that data in attributes, but it is an example of positioning or drawing cubes based on data coming from a texture.
// Vertex shader: reads this cube's on/off state from the life texture;
// "off" cubes get a position outside clip space so they are not rasterized.
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute vec2 cubeUV;
uniform mat4 u_matrix;
uniform sampler2D u_lifeTex;
varying vec3 v_normal;
void main() {
float on = texture2D(u_lifeTex, cubeUV).r;
if (on < .5) {
gl_Position = vec4(20, 20, 20, 1);
return;
}
gl_Position = u_matrix * position;
v_normal = normal;
}
`;
// Fragment shader: colors each face by its normal.
const fs = `
precision mediump float;
varying vec3 v_normal;
void main() {
gl_FragColor = vec4(v_normal * .5 + .5, 1);
}
`;
// two triangles covering one face quad, in 2D face-local coordinates
const oneFace = [
[ -1, -1, ],
[ 1, -1, ],
[ -1, 1, ],
[ -1, 1, ],
[ 1, -1, ],
[ 1, 1, ],
];
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// cubes per side: cubeSize^3 cubes total
const cubeSize = 50;
// CPU-side life buffer, one byte per cube
const texBuf = makeCubeTexBuffer(gl, cubeSize);
// single-channel texture holding the live/dead state of every cube
const tex = twgl.createTexture(gl, {
src: texBuf.buffer,
width: texBuf.width,
format: gl.LUMINANCE,
wrap: gl.CLAMP_TO_EDGE,
minMag: gl.NEAREST,
});
// merged geometry for all cubes: one giant mesh, one draw call
const arrays = makeCubes(cubeSize, texBuf);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
// Per-frame: orbits the camera, randomly toggles some cells (a stand-in
// for a real life step), re-uploads the life texture, and draws the whole
// merged mesh with a single draw call.
function render(time) {
time *= 0.001; // seconds
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
//gl.enable(gl.CULL_FACE);
const fov = Math.PI * .25;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = .01;
const zFar = 1000;
const projection = m4.perspective(fov, aspect, zNear, zFar);
// camera orbits the cube cloud
const radius = cubeSize * 2.5;
const speed = time * .1;
const position = [
Math.sin(speed) * radius,
Math.sin(speed * .7) * radius * .7,
Math.cos(speed) * radius,
];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(position, target, up);
const view = m4.inverse(camera);
const mat = m4.multiply(projection, view);
// do life
// (well, randomly turn on/off cubes)
for (let i = 0; i < 100; ++i) {
texBuf.buffer[Math.random() * texBuf.buffer.length | 0] = Math.random() > .5 ? 255 : 0;
}
// upload the updated life state to the GPU
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, texBuf.width, texBuf.height,
0, gl.LUMINANCE, gl.UNSIGNED_BYTE, texBuf.buffer);
gl.useProgram(programInfo.program)
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_lifeTex: tex,
});
// calls gl.drawArrays or gl.drawElements — one call for all cubes
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
// generate cubes
// Appends one cube (6 faces x 6 verts = 36 vertices) to the flat arrays.
//   vertOffset: index of the next free vertex slot in `arrays`
//   off:        cube center position [x, y, z]
//   uv:         texcoord (identical for all 36 verts) pointing at this
//               cube's pixel in the life texture
// Returns the updated vertOffset.
function makeCube(vertOffset, off, uv, arrays) {
const positions = arrays.position;
const normals = arrays.normal;
const cubeUV = arrays.cubeUV;
for (let f = 0; f < 6; ++f) {
// face f: `axis` is the fixed coordinate, `sign` selects which side
const axis = f / 2 | 0;
const sign = f % 2 ? -1 : 1;
// the other two axes span the face quad
const major = (axis + 1) % 3;
const minor = (axis + 2) % 3;
for (let i = 0; i < 6; ++i) {
const offset2 = vertOffset * 2;
const offset3 = vertOffset * 3;
positions[offset3 + axis ] = off[axis] + sign;
positions[offset3 + major] = off[major] + oneFace[i][0];
positions[offset3 + minor] = off[minor] + oneFace[i][1];
// flat normal pointing along the face's fixed axis
normals[offset3 + axis ] = sign;
normals[offset3 + major] = 0;
normals[offset3 + minor] = 0;
cubeUV[offset2 + 0] = uv[0];
cubeUV[offset2 + 1] = uv[1];
++vertOffset;
}
}
return vertOffset;
}
// Builds the merged geometry for size^3 cubes: positions, normals and a
// per-vertex cubeUV that maps each cube to its own pixel in the life
// texture (z-layers packed as slices across the texture; see
// makeCubeTexBuffer for the layout).
function makeCubes(size, texBuf) {
const numCubes = size * size * size;
const numVertsPerCube = 36;
const numVerts = numCubes * numVertsPerCube;
const slicesAcross = texBuf.width / size | 0;
const arrays = {
position: new Float32Array(numVerts * 3),
normal: new Float32Array(numVerts * 3),
cubeUV: new Float32Array(numVerts * 2),
};
let spacing = size * 1.2;
let vertOffset = 0;
for (let z = 0; z < size; ++z) {
// grid coordinate normalized to [-1, 1], then scaled by spacing
const zoff = (z / (size - 1) * 2 - 1) * spacing;
for (let y = 0; y < size; ++y) {
const yoff = (y / (size - 1) * 2 - 1) * spacing;
for (let x = 0; x < size; ++x) {
const xoff = (x / (size - 1) * 2 - 1) * spacing;
// which texture slice this z-layer lives in
const sx = z % slicesAcross;
const sy = z / slicesAcross | 0;
// +0.5 centers the UV on its texel
const uv = [
(sx * size + x + 0.5) / texBuf.width,
(sy * size + y + 0.5) / texBuf.height,
];
vertOffset = makeCube(vertOffset, [xoff, yoff, zoff], uv, arrays);
}
}
}
// mark cubeUV as a 2-component attribute for twgl
arrays.cubeUV = {
numComponents: 2,
data: arrays.cubeUV,
};
return arrays;
}
// Allocates a 1-byte-per-pixel CPU buffer holding one pixel per cube,
// arranged as cubeSize z-slices of cubeSize x cubeSize pixels, packed
// left-to-right then top-to-bottom. Width is capped by MAX_TEXTURE_SIZE
// (and 2048) so the texture never exceeds the GL limit.
// Returns { buffer, slicesAcross, slicesDown, width, height }.
// Fix: removed the unused local `numCubes`.
function makeCubeTexBuffer(gl, cubeSize) {
  const maxTextureSize = Math.min(gl.getParameter(gl.MAX_TEXTURE_SIZE), 2048);
  const maxSlicesAcross = maxTextureSize / cubeSize | 0;
  const slicesAcross = Math.min(cubeSize, maxSlicesAcross);
  const slicesDown = Math.ceil(cubeSize / slicesAcross);
  const width = slicesAcross * cubeSize;
  const height = slicesDown * cubeSize;
  const buffer = new Uint8Array(width * height);
  return {
    buffer: buffer,
    slicesAcross: slicesAcross,
    slicesDown: slicesDown,
    width: width,
    height: height,
  };
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Hoisted from the comments below: using a big merged mesh appears to be 1.3x faster than using instanced drawing. Here are 3 samples
big mesh using texture uvs (same as above)
instanced using texture uvs (less data, same shader)
instanced no texture (no texture, life data is in buffer/attribute)
For me, on my machine #1 can do 60x60x60 cubes (216000) at 60fps whereas both #2 and #3 only get 56x56x56 cubes (175616) at 60fps. Of course other GPUs/system/browsers might be different.
The fps drop is coming from two things most likely:
The overhead of doing 125k matrix operations each tick.
The overhead of doing 125k drawcalls.
You can look into instancing
http://blog.tojicode.com/2013/07/webgl-instancing-with.html?m=1
And possibly move the matrix stuff into a shader