Adding night lights to a WebGL / Three.js earth

I am using Three.js as a framework for developing a space simulator, and I am trying, but failing, to get night lights working.
The simulator can be accessed here:
orbitingeden.com
and a page running the code snippet below can be found here:
orbitingeden.com/orrery/soloearth.html
The code for the sample page is below. I don't even know where to begin. I tried rendering two globes a few units apart, one closer to the sun (daytime version) and one further away (nighttime version), but there are many problems, not the least of which is that they begin to overlap each other in strange dodecahedron kinds of ways. I adopted the tDiffuse2 idea from this orrery, but couldn't get it working.
<!doctype html>
<html lang="en">
<head>
<title>three.js webgl - earth</title>
<meta charset="utf-8">
<script src="three.js/Detector.js"></script>
<script src="three.js/Three.js"></script>
</head>
<body>
<script>
if ( ! Detector.webgl ) Detector.addGetWebGLMessage();
var radius = 6371;
var tilt = 0.41;
var rotationSpeed = 0.02;
var cloudsScale = 1.005;
var SCREEN_HEIGHT = window.innerHeight;
var SCREEN_WIDTH = window.innerWidth;
var container, camera, scene, renderer;
var meshPlanet, meshClouds, dirLight, ambientLight;
var clock = new THREE.Clock();
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
scene = new THREE.Scene();
scene.fog = new THREE.FogExp2( 0x000000, 0.00000025 );
camera = new THREE.PerspectiveCamera( 25, SCREEN_WIDTH / SCREEN_HEIGHT, 50, 1e7 );
camera.position.z = radius * 5;
scene.add( camera );
dirLight = new THREE.DirectionalLight( 0xffffff );
dirLight.position.set( -20, 0, 2 ).normalize();
scene.add( dirLight );
ambientLight = new THREE.AmbientLight( 0x000000 );
scene.add( ambientLight );
//initialize the earth
var planetTexture = THREE.ImageUtils.loadTexture( "textures/earth-day.jpg" ),
nightTexture = THREE.ImageUtils.loadTexture( "textures/earthNight.gif" ),
cloudsTexture = THREE.ImageUtils.loadTexture( "textures/clouds.gif" ),
normalTexture = THREE.ImageUtils.loadTexture( "textures/earth-map.jpg" ),
specularTexture = THREE.ImageUtils.loadTexture( "textures/earth-specular.jpg" );
var shader = THREE.ShaderUtils.lib[ "normal" ];
var uniforms = THREE.UniformsUtils.clone( shader.uniforms );
uniforms[ "tNormal" ].texture = normalTexture;
uniforms[ "uNormalScale" ].value = 0.85;
uniforms[ "tDiffuse" ].texture = planetTexture;
uniforms[ "tDiffuse2" ].texture = nightTexture;
uniforms[ "tSpecular" ].texture = specularTexture;
uniforms[ "enableAO" ].value = false;
uniforms[ "enableDiffuse" ].value = true;
uniforms[ "enableSpecular" ].value = true;
uniforms[ "uDiffuseColor" ].value.setHex( 0xffffff );
uniforms[ "uSpecularColor" ].value.setHex( 0x333333 );
uniforms[ "uAmbientColor" ].value.setHex( 0x000000 );
uniforms[ "uShininess" ].value = 15;
var parameters = {
fragmentShader: shader.fragmentShader,
vertexShader: shader.vertexShader,
uniforms: uniforms,
lights: true,
fog: true
};
var materialNormalMap = new THREE.ShaderMaterial( parameters );
var geometry = new THREE.SphereGeometry( radius, 100, 50 );
geometry.computeTangents();
meshPlanet = new THREE.Mesh( geometry, materialNormalMap );
meshPlanet.rotation.y = 0;
meshPlanet.rotation.z = tilt;
scene.add( meshPlanet );
// clouds
var materialClouds = new THREE.MeshLambertMaterial( { color: 0xffffff, map: cloudsTexture, transparent: true } );
meshClouds = new THREE.Mesh( geometry, materialClouds );
meshClouds.scale.set( cloudsScale, cloudsScale, cloudsScale );
meshClouds.rotation.z = tilt;
scene.add( meshClouds );
renderer = new THREE.WebGLRenderer( { clearColor: 0x000000, clearAlpha: 1 } );
renderer.setSize( SCREEN_WIDTH, SCREEN_HEIGHT );
renderer.sortObjects = false;
renderer.autoClear = false;
container.appendChild( renderer.domElement );
};
function animate() {
requestAnimationFrame( animate );
render();
};
function render() {
// rotate the planet and clouds
var delta = clock.getDelta();
meshPlanet.rotation.y += rotationSpeed * delta;
meshClouds.rotation.y += 1.25 * rotationSpeed * delta;
//render the scene
renderer.clear();
renderer.render( scene, camera );
};
</script>
</body>
</html>

If I understand your question....
I don't know three.js, but in general I'd do this by having a shader that gets passed both the day and night textures and then selects one or the other in the shader. For example:
uniform sampler2D dayTexture;
uniform sampler2D nightTexture;
varying vec3 v_surfaceToLight; // assumes this gets passed in from vertex shader
varying vec3 v_normal; // assumes this gets passed in from vertex shader
varying vec2 v_texCoord; // assumes this gets passed in from vertex shader
void main () {
vec3 normal = normalize(v_normal);
vec3 surfaceToLight = normalize(v_surfaceToLight);
float angle = dot(normal, surfaceToLight);
vec4 dayColor = texture2D(dayTexture, v_texCoord);
vec4 nightColor = texture2D(nightTexture, v_texCoord);
vec4 color = angle < 0.0 ? nightColor : dayColor;
...
gl_FragColor = color * ...;
}
Basically you take the lighting calculation and, instead of using it for lighting, you use it to select the texture. A lighting calculation usually uses the dot product between the normal of the surface and the direction of the light (the sun) from the surface. That gives you the cosine of the angle between those two vectors. Cosine goes from -1 to 1, so if the value is from -1 to 0 the surface is facing away from the sun; if it's 0 to +1 it's facing toward the sun.
The line
vec4 color = angle < 0.0 ? nightColor : dayColor;
selects day or night. That's going to be a harsh cutoff. You might experiment with something fuzzier, like
// convert from -1 <-> +1 to 0 <-> +1
float lerp0To1 = angle * 0.5 + 0.5;
// mix between night and day
vec4 color = mix(nightColor, dayColor, lerp0To1);
That would give you 100% day on the spot directly facing the sun and 100% night on the spot directly opposite the sun, with a mix in between. Probably not what you want, but you can futz with the numbers. For example
// sharpen the mix
angle = clamp(angle * 10.0, -1.0, 1.0);
// convert from -1 <-> +1 to 0 <-> +1
float lerp0To1 = angle * 0.5 + 0.5;
// mix between night and day
vec4 color = mix(nightColor, dayColor, lerp0To1);
Hopefully that made sense.
So I spent a little time working up a Three.js example, partly to learn Three.js. The sample is here.
const vs = `
varying vec2 vUv;
varying vec3 vNormal;
void main() {
vUv = uv;
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0);
vNormal = normalMatrix * normal;
gl_Position = projectionMatrix * mvPosition;
}
`;
const fs = `
uniform sampler2D dayTexture;
uniform sampler2D nightTexture;
uniform vec3 sunDirection;
varying vec2 vUv;
varying vec3 vNormal;
void main( void ) {
vec3 dayColor = texture2D( dayTexture, vUv ).rgb;
vec3 nightColor = texture2D( nightTexture, vUv ).rgb;
// compute cosine sun to normal so -1 is away from sun and +1 is toward sun.
float cosineAngleSunToNormal = dot(normalize(vNormal), sunDirection);
// sharpen the edge of the transition
cosineAngleSunToNormal = clamp( cosineAngleSunToNormal * 10.0, -1.0, 1.0);
// convert to 0 to 1 for mixing
float mixAmount = cosineAngleSunToNormal * 0.5 + 0.5;
// Select day or night texture based on mix.
vec3 color = mix( nightColor, dayColor, mixAmount );
gl_FragColor = vec4( color, 1.0 );
}
`;
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(40, 1, 1, 3000);
camera.position.z = 4;
scene.add( camera );
const directionalLight = new THREE.DirectionalLight( 0xaaff33, 0 );
directionalLight.position.set(-1, 1, 0.5).normalize();
scene.add( directionalLight );
const textureLoader = new THREE.TextureLoader();
const uniforms = {
sunDirection: {value: new THREE.Vector3(0,1,0) },
dayTexture: { value: textureLoader.load( "https://i.imgur.com/dfLCd19.jpg" ) },
nightTexture: { value: textureLoader.load( "https://i.imgur.com/MeKgLts.jpg" ) }
};
const material = new THREE.ShaderMaterial({
uniforms: uniforms,
vertexShader: vs,
fragmentShader: fs,
});
const mesh = new THREE.Mesh( new THREE.SphereGeometry( 0.75, 32, 16 ), material );
scene.add( mesh );
const renderer = new THREE.WebGLRenderer();
document.body.appendChild(renderer.domElement);
resize(true);
requestAnimationFrame(render);
function resize(force) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
if (force || canvas.width !== width || canvas.height !== height) {
renderer.setSize(width, height, false);
camera.aspect = width / height;
camera.updateProjectionMatrix();
}
}
function render(time) {
time *= 0.001; // seconds
resize();
uniforms.sunDirection.value.x = Math.sin(time);
uniforms.sunDirection.value.y = Math.cos(time);
// Note: Since the earth is at 0,0,0 you can set the normal for the sun
// with
//
// uniforms.sunDirection.value.copy(sunPosition);
// uniforms.sunDirection.value.normalize();
mesh.rotation.y = time * .3;
mesh.rotation.x = time * .7;
renderer.render(scene, camera);
requestAnimationFrame(render);
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/87/three.min.js"></script>
The shader I used is this
uniform sampler2D dayTexture;
uniform sampler2D nightTexture;
uniform vec3 sunDirection;
varying vec2 vUv;
varying vec3 vNormal;
void main( void ) {
vec3 dayColor = texture2D( dayTexture, vUv ).rgb;
vec3 nightColor = texture2D( nightTexture, vUv ).rgb;
// compute cosine sun to normal so -1 is away from sun and +1 is toward sun.
float cosineAngleSunToNormal = dot(normalize(vNormal), sunDirection);
// sharpen the edge of the transition
cosineAngleSunToNormal = clamp( cosineAngleSunToNormal * 10.0, -1.0, 1.0);
// convert to 0 to 1 for mixing
float mixAmount = cosineAngleSunToNormal * 0.5 + 0.5;
// Select day or night texture based on mixAmount.
vec3 color = mix( nightColor, dayColor, mixAmount );
gl_FragColor = vec4( color, 1.0 );
// comment in the next line to see the mixAmount
//gl_FragColor = vec4( mixAmount, mixAmount, mixAmount, 1.0 );
}
The big difference from the one above is that the sun is generally treated as a directional light, since it is so far away, so all you need is its direction. In other words, which way it's pointing relative to the earth.

Thank you for sharing - very useful. Although I am not sure why the shadow does not face away from the sun when the camera rotates (it stays static in relation to the camera). This is the code I am using to set the sunDirection uniform:
this.uniforms.sunDirection.value.copy(this.sunPosition);
this.uniforms.sunDirection.value.normalize();
Not sure why...
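A likely cause, assuming the shaders above are used unchanged: vNormal is multiplied by normalMatrix in the vertex shader, so it is in view space, while sunPosition is in world space. When the camera rotates, the view-space normals change but the uniform does not, so the terminator sticks to the camera. A sketch of one fix, using standard three.js Vector3 calls and assuming this runs in the render loop (where three.js keeps camera.matrixWorldInverse current):
// rotate the world-space sun direction into view space so it matches
// vNormal, which the vertex shader transformed with normalMatrix
this.uniforms.sunDirection.value
  .copy(this.sunPosition)
  .transformDirection(camera.matrixWorldInverse); // also normalizes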

Related

WebGL heightmap by displacement mapping according to brightness of texture

I am new to WebGL and OpenGL ES. The vertex shader below fails and only produces a flat plane. The fragment shader is a typical one, so it is not provided.
uniform mat4 modelview;
uniform mat4 transform;
uniform mat3 normalMatrix;
uniform mat4 texMatrix;
uniform sampler2D texture;
attribute vec4 vertex;
attribute vec4 color;
attribute vec3 normal;
attribute vec2 texCoord;
varying vec4 vertColor;
varying vec4 vertTexCoord;
const float zero_float = 0.0;
const float one_float = 1.0;
const vec3 zero_vec3 = vec3(0);
varying highp float height;
uniform float brightness;
void main() {
//height =texture2D(texture,vec2(vertex.xz));
//height =texture2D(texture,vec2(vertex.xz)).r;
//gl_Position = transform * vertex;
gl_Position = transform *vec4(vertex.x,vertex.y,brightness,1.0);
vec3 ecVertex = vec3(modelview * vertex);
vec3 ecNormal = normalize(normalMatrix * normal);
vertTexCoord = texMatrix * vec4(texCoord, 1.0, 1.0);
}
The above vertex shader fails to show a heightmap using displacement mapping by the brightness of the texture image; it only displays a flat, textured plane.
Please help: how can the vertices be shifted from the surface of a sphere (the original shape) to a higher position according to the brightness of the pixels of the texture? (That is, show hill-like features on the surface of the sphere, where the height of the hills is proportional to the brightness of the texture's pixels.)
You can't just move the position
Imagine you have a 2x2 quad plane:
A--B--C
| /| /|
|/ |/ |
D--E--F
| /| /|
|/ |/ |
G--H--I
Point E has a single normal facing perpendicular to the plane, but if you move point E itself perpendicular to the plane it suddenly needs a different normal for each triangle that uses it: 6 triangles in the diagram above. And of course the normals of the other vertices need to change as well.
You'll need to compute new normals in the fragment shader, either by using standard derivatives:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('OES_standard_derivatives');
if (!ext) {
return alert('need OES_standard_derivatives');
}
const m4 = twgl.m4;
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
uniform sampler2D displacementMap;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
varying vec3 v_worldPosition;
void main() {
float displacementScale = 10.0;
float displacement = texture2D(displacementMap, texcoord).r * displacementScale;
vec4 displacedPosition = position + vec4(0, displacement, 0, 0);
gl_Position = projection * view * model * displacedPosition;
v_worldPosition = (model * displacedPosition).xyz;
}
`;
const fs = `
#extension GL_OES_standard_derivatives : enable
precision highp float;
varying vec3 v_worldPosition;
void main() {
vec3 dx = dFdx(v_worldPosition);
vec3 dy = dFdy(v_worldPosition);
vec3 normal = normalize(cross(dy, dx));
// just hard code lightDir and color
// to make it easy
vec3 lightDir = normalize(vec3(1, -2, 3));
float light = dot(lightDir, normal);
vec3 color = vec3(0.3, 1, 0.1);
gl_FragColor = vec4(color * (light * 0.5 + 0.5), 1);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl,
96, // width
64, // height
96, // quads across
64, // quads down
);
const tex = twgl.createTexture(gl, {
src: 'https://threejsfundamentals.org/threejs/resources/images/heightmap-96x64.png',
minMag: gl.NEAREST,
wrap: gl.CLAMP_TO_EDGE,
});
function render(time) {
time *= 0.001; // seconds
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 200;
const projection = m4.perspective(fov, aspect, near, far);
const eye = [Math.cos(time) * 30, 10, Math.sin(time) * 30];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const model = m4.identity();
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniformsAndBindTextures(programInfo, {
projection,
view,
model,
displacementMap: tex,
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="canvas"></canvas>
or by looking at multiple points on the displacement map:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const m4 = twgl.m4;
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
uniform sampler2D displacementMap;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
varying vec2 v_texcoord;
void main() {
float displacementScale = 10.0;
float displacement = texture2D(displacementMap, texcoord).r * displacementScale;
vec4 displacedPosition = position + vec4(0, displacement, 0, 0);
gl_Position = projection * view * model * displacedPosition;
v_texcoord = texcoord;
}
`;
const fs = `
precision highp float;
varying vec2 v_texcoord;
uniform sampler2D displacementMap;
void main() {
// should make this a uniform so it's shared
float displacementScale = 10.0;
// I'm sure there is a better way to compute
// what this offset should be
float offset = 0.01;
vec2 uv0 = v_texcoord;
vec2 uv1 = v_texcoord + vec2(offset, 0);
vec2 uv2 = v_texcoord + vec2(0, offset);
float h0 = texture2D(displacementMap, uv0).r;
float h1 = texture2D(displacementMap, uv1).r;
float h2 = texture2D(displacementMap, uv2).r;
vec3 p0 = vec3(uv0, h0 * displacementScale);
vec3 p1 = vec3(uv1, h1 * displacementScale);
vec3 p2 = vec3(uv2, h2 * displacementScale);
vec3 v0 = p1 - p0;
vec3 v1 = p2 - p0;
vec3 normal = normalize(cross(v1, v0));
// just hard code lightDir and color
// to make it easy
vec3 lightDir = normalize(vec3(1, -3, 2));
float light = dot(lightDir, normal);
vec3 color = vec3(0.3, 1, 0.1);
gl_FragColor = vec4(color * (light * 0.5 + 0.5), 1);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl,
96, // width
64, // height
96, // quads across
64, // quads down
);
const tex = twgl.createTexture(gl, {
src: 'https://threejsfundamentals.org/threejs/resources/images/heightmap-96x64.png',
minMag: gl.LINEAR,
wrap: gl.CLAMP_TO_EDGE,
});
function render(time) {
time *= 0.001; // seconds
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 200;
const projection = m4.perspective(fov, aspect, near, far);
const eye = [Math.cos(time) * 30, 10, Math.sin(time) * 30];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const model = m4.identity();
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniformsAndBindTextures(programInfo, {
projection,
view,
model,
displacementMap: tex,
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="canvas"></canvas>
Note that rather than compute a normal from 3 samples of the texture you could probably precompute them at init time by going over the height map and generating a normal map. You could supply that as 3 channels of the same texture. Like say RGB = normal and A = height
async function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const m4 = twgl.m4;
const v3 = twgl.v3;
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
uniform sampler2D displacementMap;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
varying vec2 v_texcoord;
void main() {
float displacementScale = 10.0;
float displacement = texture2D(displacementMap, texcoord).a * displacementScale;
vec4 displacedPosition = position + vec4(0, displacement, 0, 0);
gl_Position = projection * view * model * displacedPosition;
v_texcoord = texcoord;
}
`;
const fs = `
precision highp float;
varying vec2 v_texcoord;
uniform sampler2D displacementMap;
void main() {
// should make this a uniform so it's shared
float displacementScale = 10.0;
vec3 data = texture2D(displacementMap, v_texcoord).rgb;
vec3 normal = data * 2. - 1.;
// just hard code lightDir and color
// to make it easy
vec3 lightDir = normalize(vec3(1, -3, 2));
float light = dot(lightDir, normal);
vec3 color = vec3(0.3, 1, 0.1);
gl_FragColor = vec4(color * (light * 0.5 + 0.5), 1);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl,
96, // width
64, // height
96, // quads across
64, // quads down
);
const img = await loadImage('https://threejsfundamentals.org/threejs/resources/images/heightmap-96x64.png');
// get image data
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = img.width;
ctx.canvas.height = img.height;
ctx.drawImage(img, 0, 0);
const imgData = ctx.getImageData(0, 0, img.width, img.height);
// generate normals from height data
const displacementScale = 10;
const data = new Uint8Array(imgData.data.length);
for (let z = 0; z < imgData.height; ++z) {
for (let x = 0; x < imgData.width; ++x) {
const off = (z * img.width + x) * 4;
const h0 = imgData.data[off];
const h1 = imgData.data[off + 4] || 0; // being lazy at edge
const h2 = imgData.data[off + imgData.width * 4] || 0; // being lazy at edge
const p0 = [x , h0 * displacementScale / 255, z ];
const p1 = [x + 1, h1 * displacementScale / 255, z ];
const p2 = [x , h2 * displacementScale / 255, z + 1];
const v0 = v3.normalize(v3.subtract(p1, p0));
const v1 = v3.normalize(v3.subtract(p2, p0));
const normal = v3.normalize(v3.cross(v0, v1));
data[off + 0] = (normal[0] * 0.5 + 0.5) * 255;
data[off + 1] = (normal[1] * 0.5 + 0.5) * 255;
data[off + 2] = (normal[2] * 0.5 + 0.5) * 255;
data[off + 3] = h0;
}
}
const tex = twgl.createTexture(gl, {
src: data,
width: imgData.width,
minMag: gl.LINEAR,
wrap: gl.CLAMP_TO_EDGE,
});
function render(time) {
time *= 0.001; // seconds
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = 60 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 200;
const projection = m4.perspective(fov, aspect, near, far);
const eye = [Math.cos(time) * 30, 10, Math.sin(time) * 30];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const model = m4.identity();
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniformsAndBindTextures(programInfo, {
projection,
view,
model,
displacementMap: tex,
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
function loadImage(url) {
return new Promise((resolve, reject) => {
const img = new Image();
img.onload = _ => resolve(img);
img.onerror = reject;
img.crossOrigin = 'anonymous';
img.src = url;
});
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="canvas"></canvas>

WebGL rendering outside of browser paint time

We are building a WebGL application that has some high render-load objects. Is there a way we can render those objects outside of browser paint time, i.e. in the background? We don't want our FPS going down, and breaking up our rendering process is possible (to split it between frames).
Three ideas come to mind.
You can render to a texture via a framebuffer over many frames; when you're done, you render that texture to the canvas.
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision highp float;
uniform sampler2D tex;
varying vec2 v_texcoord;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
// compile shader, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// gl.createBuffer, gl.bindBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
],
},
texcoord: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
});
// create a framebuffer with a texture and depth buffer
// same size as canvas
// gl.createTexture, gl.texImage2D, gl.createFramebuffer
// gl.framebufferTexture2D
const framebufferInfo = twgl.createFramebufferInfo(gl);
const infoElem = document.querySelector('#info');
const numDrawSteps = 16;
let drawStep = 0;
let time = 0;
// draw over several frames. Return true when ready
function draw() {
// draw to texture
// gl.bindFrambuffer, gl.viewport
twgl.bindFramebufferInfo(gl, framebufferInfo);
if (drawStep == 0) {
// on the first step clear and record time
gl.disable(gl.SCISSOR_TEST);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
time = performance.now() * 0.001;
}
// this represents drawing something.
gl.enable(gl.SCISSOR_TEST);
const halfWidth = framebufferInfo.width / 2;
const halfHeight = framebufferInfo.height / 2;
const a = time * 0.1 + drawStep;
const x = Math.cos(a ) * halfWidth + halfWidth;
const y = Math.sin(a * 1.3) * halfHeight + halfHeight;
gl.scissor(x, y, 16, 16);
gl.clearColor(
drawStep / 16,
drawStep / 6 % 1,
drawStep / 3 % 1,
1);
gl.clear(gl.COLOR_BUFFER_BIT);
drawStep = (drawStep + 1) % numDrawSteps;
return drawStep === 0;
}
let frameCount = 0;
function render() {
++frameCount;
infoElem.textContent = frameCount;
if (draw()) {
// draw to canvas
// gl.bindFramebuffer, gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
gl.disable(gl.SCISSOR_TEST);
gl.useProgram(programInfo.program);
// gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// gl.uniform...
twgl.setUniformsAndBindTextures(programInfo, {
tex: framebufferInfo.attachments[0],
});
// draw the quad
gl.drawArrays(gl.TRIANGLES, 0, 6);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
<canvas></canvas>
<div id="info"></div>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
You can make 2 canvases: a WebGL canvas that is not in the DOM. You render to it over many frames, and when you're done you draw it to a 2D canvas with ctx.drawImage(webglCanvas, ...). This is basically the same as #1 except you're letting the browser do the "render that texture to a canvas" part.
const ctx = document.querySelector('canvas').getContext('2d');
const gl = document.createElement('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const fs = `
precision highp float;
uniform sampler2D tex;
varying vec2 v_texcoord;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
// compile shader, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const infoElem = document.querySelector('#info');
const numDrawSteps = 16;
let drawStep = 0;
let time = 0;
// draw over several frames. Return true when ready
function draw() {
if (drawStep == 0) {
// on the first step clear and record time
gl.disable(gl.SCISSOR_TEST);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
time = performance.now() * 0.001;
}
// this represents drawing something.
gl.enable(gl.SCISSOR_TEST);
const halfWidth = gl.canvas.width / 2;
const halfHeight = gl.canvas.height / 2;
const a = time * 0.1 + drawStep;
const x = Math.cos(a ) * halfWidth + halfWidth;
const y = Math.sin(a * 1.3) * halfHeight + halfHeight;
gl.scissor(x, y, 16, 16);
gl.clearColor(
drawStep / 16,
drawStep / 6 % 1,
drawStep / 3 % 1,
1);
gl.clear(gl.COLOR_BUFFER_BIT);
drawStep = (drawStep + 1) % numDrawSteps;
return drawStep === 0;
}
let frameCount = 0;
function render() {
++frameCount;
infoElem.textContent = frameCount;
if (draw()) {
// draw to canvas
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
ctx.drawImage(gl.canvas, 0, 0);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
<canvas></canvas>
<div id="info"></div>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
You can use OffscreenCanvas and render in a worker. At the time of writing this has only shipped in Chrome, though.
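A minimal sketch of that approach (the worker file name is made up; OffscreenCanvas, and requestAnimationFrame inside workers, only exist where the browser supports them):
// main.js
const canvas = document.querySelector('canvas');
const offscreen = canvas.transferControlToOffscreen();
const worker = new Worker('render-worker.js'); // hypothetical file name
worker.postMessage({ canvas: offscreen }, [offscreen]);
// render-worker.js
self.onmessage = (e) => {
  const gl = e.data.canvas.getContext('webgl');
  function render(time) {
    // ...do the heavy rendering here, off the main thread...
    gl.clearColor(0, 0, 0, 1);
    gl.clear(gl.COLOR_BUFFER_BIT);
    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
};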
Note that if you DOS the GPU (give the GPU too much work) you can still affect the responsiveness of the main thread because most GPUs do not support pre-emptive multitasking. So, if you have a lot of really heavy work then split it up into smaller tasks.
As an example, if you took one of the heaviest shaders from shadertoy.com, one that runs at say 0.5 fps when rendered at 1920x1080, even offscreen it will force the entire machine to run at 0.5 fps. To fix that you'd need to render smaller portions over several frames. A frame at 0.5 fps takes 2 seconds, and a responsive frame takes 1/60th of a second, so that suggests splitting the work into at least 2 / (1/60) = 120 smaller parts, maybe more, to keep the main thread responsive; and at 120 smaller parts you'd only see the results every 2 seconds.
In fact, trying it out shows some issues. Here's Iq's Happy Jumping example drawn over 960 frames. It still can't keep 60fps on my late 2018 MacBook Air even though it's rendering only 2160 pixels a frame (2 columns of a 1920x1080 canvas). The issue is likely that some parts of the scene have to recurse deeply, and there is no way of knowing beforehand which parts of the scene those will be. One reason why shadertoy-style shaders using signed distance fields are more of a toy (hence shaderTOY) and not actually a production-style technique.
Anyway, the point of that is if you give the GPU too much work you'll still get an unresponsive machine.

What am I doing wrong getting the angles from voxels?

Apologies in advance if I don't explain anything clearly; please feel free to ask for clarification. This hobby game project means a lot to me.
I am making a voxel rendering engine using WebGL. It uses gl.POINTS to draw squares for each voxel. I simply use a projection matrix translated by the camera's position, and then rotated by the camera's rotations.
gl_Position =
uMatrix * uModelMatrix * vec4(aPixelPosition[0],-aPixelPosition[2],aPixelPosition[1],1.0);
The model matrix is simply the default mat4.create(); for some reason it would not display anything without one. aPixelPosition is simply the X, Z, Y (in WebGL space) of a voxel.
Using something like this:
gl_PointSize = (uScreenSize[1]*0.7) / (gl_Position[2]);
You can set the size of the voxels based on their distance from the camera. This works pretty well, minus one visual error.
(Picture from inside a large hollow cube)
You can see the back wall displays fine (because they all are pointed directly at you) but the walls that are displayed at an angle to you, need to be increased in size.
So I used the dot product between your facing direction and the position of the voxel minus your camera position to get the angle of each block, and colored them accordingly:
vPosition=acos(dot( normalize(vec2(sin(uYRotate),-cos(uYRotate))) ,
normalize(vec2(aPixelPosition[0],aPixelPosition[1])-
vec2(uCam[0],uCam[1]))));
then color the blocks with what this returns.
(walls go from black to white depending on their angle to you)
This visual demonstration shows the problem: the walls at the back all face you at an angle except for the ones you are directly looking at, and the walls on the side of the same face get more and more angled to you.
If I adjust the pointSize to increase with the angle using this, it will fix the visual glitch, but it introduces a new one.
Everything looks good from here, but if you get really close to a wall of blocks and move left and right
There is a fairly noticeable bubbling effect as you scan left and right, because the ones at the side of your view are slightly more at an angle (even though they should face the same way anyway).
So clearly, my math isn't the best. How could I make it so only the walls on the side return an angle, while the ones on the back wall don't return any angle? Thanks a ton.
I have tried making it so the dot product always checks the voxel's X as if it were the same as the camera's, but this just made it so each voxel was colored the same.
I'm not sure you can actually do what you're trying to do, which is to represent voxels (cubes) with 2D squares (gl.POINTS).
I'm not sure I can demo the issue. Maybe I should write a program to draw this so you can move the camera around but ...
Consider these 6 cubes
Just putting a square at their projected centers won't work
It seems to me there are no squares that will represent those cubes in a generic way that have no gaps and no other issues.
To make sure there are no gaps, every pixel the cube would cover needs to be covered by the square. So, first we can draw the rectangle that covers each cube
Then because gl.POINTS are square we need to expand each area to a square
Given the amount of overlap there are going to be all kinds of issues. At extreme angles the size a particular square needs to be to cover the screen space of the cube it represents will get really large. Then, when Z is the same for a bunch of cubes, you'll get z-fighting issues. For example the blue square will appear in front of the green square where they overlap, making a little notch in the green.
We can see that here
Each green pixel is partially overlapped by the brown pixel that is one column to the right and one voxel down: because that POINT is in front, and large enough to cover the screen space its voxel takes, it ends up covering the green pixel to the left and up one.
Here's a shader that follows the algorithm above. For each point in 3D space it assumes a unit cube. It computes the normalized device coordinates (NDC) of each of the 8 corners of the cube and uses those to get the min and max NDC coordinates. From that it can compute the gl_PointSize needed to cover that large an area. It then places the point in the center of that area.
'use strict';
/* global window, twgl, requestAnimationFrame, document */
const height = 120;
const width = 30;
const position = [];
const color = [];
const normal = [];
for (let z = 0; z < width; ++z) {
for (let x = 0; x < width; ++x) {
position.push(x, 0, z);
color.push(r(0.5), 1, r(0.5));
normal.push(0, 1, 0);
}
}
for (let y = 1; y < height ; ++y) {
for (let x = 0; x < width; ++x) {
position.push(x, -y, 0);
color.push(0.6, 0.6, r(0.5));
normal.push(0, 0, -1);
position.push(x, -y, width - 1);
color.push(0.6, 0.6, r(0.5));
normal.push(0, 0, 1);
position.push(0, -y, x);
color.push(0.6, 0.6, r(0.5));
normal.push(-1, 0, 0);
position.push(width - 1, -y, x);
color.push(0.6, 0.6, r(0.5));
normal.push(1, 0, 0);
}
}
function r(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return Math.random() * (max - min) + min;
}
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute vec3 color;
uniform mat4 projection;
uniform mat4 modelView;
uniform vec2 resolution;
varying vec3 v_normal;
varying vec3 v_color;
vec2 computeNDC(vec4 p, vec4 off) {
vec4 clipspace = projection * modelView * (p + off);
return clipspace.xy / clipspace.w;
}
void main() {
vec2 p0 = computeNDC(position, vec4(-.5, -.5, -.5, 0));
vec2 p1 = computeNDC(position, vec4( .5, -.5, -.5, 0));
vec2 p2 = computeNDC(position, vec4(-.5, .5, -.5, 0));
vec2 p3 = computeNDC(position, vec4( .5, .5, -.5, 0));
vec2 p4 = computeNDC(position, vec4(-.5, -.5, .5, 0));
vec2 p5 = computeNDC(position, vec4( .5, -.5, .5, 0));
vec2 p6 = computeNDC(position, vec4(-.5, .5, .5, 0));
vec2 p7 = computeNDC(position, vec4( .5, .5, .5, 0));
vec2 minNDC =
min(p0, min(p1, min(p2, min(p3, min(p4, min(p5, min(p6, p7)))))));
vec2 maxNDC =
max(p0, max(p1, max(p2, max(p3, max(p4, max(p5, max(p6, p7)))))));
vec2 minScreen = (minNDC * 0.5 + 0.5) * resolution;
vec2 maxScreen = (maxNDC * 0.5 + 0.5) * resolution;
vec2 rangeScreen = ceil(maxScreen) - floor(minScreen);
float sizeScreen = max(rangeScreen.x, rangeScreen.y);
// sizeScreen is now how large the point has to be to touch the
// corners
gl_PointSize = sizeScreen;
vec4 pos = projection * modelView * position;
// clip ourselves
if (pos.x < -pos.w || pos.x > pos.w) {
gl_Position = vec4(0,0,-10,1);
return;
}
// pos is the wrong place to put the point. The correct
// place to put the point is the center of the extents
// of the screen space points
gl_Position = vec4(
(minNDC + (maxNDC - minNDC) * 0.5) * pos.w,
pos.z,
pos.w);
v_normal = mat3(modelView) * normal;
v_color = color;
}
`;
const fs = `
precision highp float;
varying vec3 v_normal;
varying vec3 v_color;
void main() {
vec3 lightDirection = normalize(vec3(1, 2, 3)); // arbitrary light direction
float l = dot(lightDirection, normalize(v_normal)) * .5 + .5;
gl_FragColor = vec4(v_color * l, 1);
gl_FragColor.rgb *= gl_FragColor.a;
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position,
normal,
color: { numComponents: 3, data: color },
});
let camera;
const eye = [10, 10, 55];
const target = [0, 0, 0];
const up = [0, 1, 0];
const speed = 0.5;
const kUp = 38;
const kDown = 40;
const kLeft = 37;
const kRight = 39;
const kForward = 87;
const kBackward = 83;
const kSlideLeft = 65;
const kSlideRight = 68;
const keyMove = new Map();
keyMove.set(kForward, { ndx: 8, eye: 1, target: -1 });
keyMove.set(kBackward, { ndx: 8, eye: 1, target: 1 });
keyMove.set(kSlideLeft, { ndx: 0, eye: 1, target: -1 });
keyMove.set(kSlideRight, { ndx: 0, eye: 1, target: 1 });
keyMove.set(kLeft, { ndx: 0, eye: 0, target: -1 });
keyMove.set(kRight, { ndx: 0, eye: 0, target: 1 });
keyMove.set(kUp, { ndx: 4, eye: 0, target: -1 });
keyMove.set(kDown, { ndx: 4, eye: 0, target: 1 });
function render() {
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = Math.PI * 0.25;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 1000;
const projection = m4.perspective(fov, aspect, near, far);
camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const modelView = m4.translate(view, [width / -2, 0, width / -2]);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
projection,
modelView,
resolution: [gl.canvas.width, gl.canvas.height],
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo, gl.POINTS);
}
render();
window.addEventListener('keydown', (e) => {
e.preventDefault();
const move = keyMove.get(e.keyCode);
if (move) {
const dir = camera.slice(move.ndx, move.ndx + 3);
const delta = v3.mulScalar(dir, speed * move.target);
v3.add(target, delta, target);
if (move.eye) {
v3.add(eye, delta, eye);
}
render();
}
});
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
#i { position: absolute; top: 0; left: 5px; font-family: monospace; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
<div id="i">ASWD ⬆️⬇️⬅️➡️</div>
Even on top of that, you're going to have other issues using POINTS:
the max point size only has to be 1.
The spec says implementations can choose the max point size they support, and it only has to be at least 1. Checking WebGLStats it appears that in reality you might be OK, but still... (you can query the limit at runtime; see the snippet after this list).
some implementations clip POINTS incorrectly and it's unlikely to be fixed
See https://stackoverflow.com/a/56066386/128511
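A quick way to check the first issue on a given machine (ALIASED_POINT_SIZE_RANGE is a standard WebGL query):
const gl = document.createElement('canvas').getContext('webgl');
// returns a Float32Array of [min, max]; the spec only guarantees max >= 1
const [minSize, maxSize] = gl.getParameter(gl.ALIASED_POINT_SIZE_RANGE);
console.log('supported point sizes:', minSize, 'to', maxSize);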

webGL contour color plot on 3D model

I am working on software which visualises engineering data on the surface of a 3D model as color maps. For this I am using WebGL. At the moment I am able to display colors on the surface of the 3D model.
But now I need to improve the visualisation to make sharp transitions between colors (without color interpolation across the surface of the triangles).
I am not sure how to do it efficiently.
smooth contours plot
sharp contours plot
It's not clear what you're trying to do. You have not provided enough information to understand how your colors are chosen/computed in the first place.
I can only guess of a couple of solutions that might fit your description
Post process with a posterization type of technique
You could do a simple
gl_FragColor.rgb = floor(gl_FragColor.rgb * numLevels) / numLevels;
Or you could do it in some color space like
// convert to HSV
vec3 hsv = rgb2hsv(gl_FragColor.rgb);
// quantize hue only
hsv.x = floor(hsv.x * numLevels) / numLevels;
// convert back to RGB
gl_FragColor.rgb = hsv2rgb(hsv);
Or you could also do this in your 3D shader, it doesn't have to be post process.
You can find rgb2hsv and hsv2rgb here but of course you could use some other color space.
Example:
const gl = document.querySelector('canvas').getContext('webgl');
const m4 = twgl.m4;
const v3 = twgl.v3;
// used to generate colors
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 1;
ctx.canvas.height = 1;
const vs = `
attribute vec4 position;
attribute vec3 normal;
// note: there is no reason this has to come from an attribute (per vertex)
// it could just as easily come from a texture used in the fragment shader
// for more resolution
attribute vec4 color;
uniform mat4 projection;
uniform mat4 modelView;
varying vec3 v_normal;
varying vec4 v_color;
void main () {
gl_Position = projection * modelView * position;
v_normal = mat3(modelView) * normal;
v_color = color;
}
`;
const fs = `
precision mediump float;
varying vec3 v_normal;
varying vec4 v_color;
uniform float numLevels;
uniform vec3 lightDirection;
vec3 rgb2hsv(vec3 c) {
vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
float d = q.x - min(q.w, q.y);
float e = 1.0e-10;
return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
vec3 hsv2rgb(vec3 c) {
c = vec3(c.x, clamp(c.yz, 0.0, 1.0));
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
void main() {
vec3 hsv = rgb2hsv(v_color.rgb);
hsv.x = floor(hsv.x * numLevels) / numLevels;
vec3 rgb = hsv2rgb(hsv);
// fake light
float light = dot(normalize(v_normal), lightDirection) * .5 + .5;
gl_FragColor = vec4(rgb * light, v_color.a);
// uncomment next line to see without hue quantization
// gl_FragColor = v_color;
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const radius = 5;
const thickness = 2;
const radialDivisions = 32;
const bodyDivisions = 12;
// creates positions, normals, etc...
const arrays = twgl.primitives.createTorusVertices(
radius, thickness, radialDivisions, bodyDivisions);
// add colors for each vertex
const numVerts = arrays.position.length / 3;
const colors = new Uint8Array(numVerts * 4);
for (let i = 0; i < numVerts; ++i) {
const pos = arrays.position.subarray(i * 3, i * 3 + 3);
const dist = v3.distance([3, 1, 3 + Math.sin(pos[0])], pos);
colors.set(hsla(clamp(dist / 10, 0, 1), 1, .5, 1), i * 4);
}
arrays.color = {
numComponents: 4,
data: colors,
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each
// array in arrays
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const halfHeight = 8;
const halfWidth = halfHeight * aspect;
const projection = m4.ortho(
-halfWidth, halfWidth,
-halfHeight, halfHeight,
-2, 2);
const modelView = m4.identity();
m4.rotateX(modelView, Math.PI * .5, modelView);
gl.useProgram(programInfo.program);
// calls gl.bindbuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
// for each attribute
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
projection,
modelView,
numLevels: 8,
lightDirection: v3.normalize([1, 2, 3]),
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
function hsla(h, s, l, a) {
ctx.fillStyle = `hsla(${h * 360 | 0},${s * 100 | 0}%,${l * 100 | 0}%,${a})`;
ctx.fillRect(0, 0, 1, 1);
return ctx.getImageData(0, 0, 1, 1).data;
}
function clamp(v, min, max) {
return Math.min(max, Math.max(min, v));
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Render in 1 channel, use a lookup table
In this case you'd make an Nx1 texture with your N colors. Then in your shader you'd just compute a gray scale (it's not clear how you're coloring things now) and use that to look up a color from your texture
uniform sampler2D lookupTable; // Nx1 texture set to nearest filtering
float gray = whateverYourDoingNow();
vec4 color = texture2D(lookupTable, vec2(gray, 0.5));
// apply lighting to color
...
Example:
const gl = document.querySelector('canvas').getContext('webgl');
const m4 = twgl.m4;
const v3 = twgl.v3;
const vs = `
attribute vec4 position;
attribute vec3 normal;
// note: there is no reason this has to come from an attribute (per vertex)
// it could just as easily come from a texture used in the fragment shader
// for more resolution
attribute float hotness; // the data value 0 to 1
uniform mat4 projection;
uniform mat4 modelView;
varying vec3 v_normal;
varying float v_hotness;
void main () {
gl_Position = projection * modelView * position;
v_normal = mat3(modelView) * normal;
v_hotness = hotness;
}
`;
const fs = `
precision mediump float;
varying vec3 v_normal;
varying float v_hotness;
uniform float numColors;
uniform sampler2D lookupTable;
uniform vec3 lightDirection;
void main() {
vec4 color = texture2D(lookupTable, vec2(v_hotness, 0.5));
// fake light
float light = dot(normalize(v_normal), lightDirection) * .5 + .5;
gl_FragColor = vec4(color.rgb * light, color.a);
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const radius = 5;
const thickness = 2;
const radialDivisions = 32;
const bodyDivisions = 12;
// creates positions, normals, etc...
const arrays = twgl.primitives.createTorusVertices(
radius, thickness, radialDivisions, bodyDivisions);
// add a hotness value, 0 <-> 1, for each vertex
const numVerts = arrays.position.length / 3;
const hotness = [];
for (let i = 0; i < numVerts; ++i) {
const pos = arrays.position.subarray(i * 3, i * 3 + 3);
const dist = v3.distance([3, 1, 3 + Math.sin(pos[0])], pos);
hotness[i] = clamp(dist / 10, 0, 1);
}
arrays.hotness = {
numComponents: 1,
data: hotness,
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each
// array in arrays
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const colors = [
255, 0, 0, 255, // red
255, 150, 30, 255, // orange
255, 255, 0, 255, // yellow
0, 210, 0, 255, // green
0, 255, 255, 255, // cyan
0, 0, 255, 255, // blue
160, 30, 255, 255, // purple
255, 0, 255, 255, // magenta
];
// calls gl.createTexture, gl.texImage2D, gl.texParameteri
const lookupTableTexture = twgl.createTexture(gl, {
src: colors,
width: colors.length / 4,
wrap: gl.CLAMP_TO_EDGE,
minMag: gl.NEAREST, // comment this line out to see non hard edges
});
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const halfHeight = 8;
const halfWidth = halfHeight * aspect;
const projection = m4.ortho(
-halfWidth, halfWidth,
-halfHeight, halfHeight,
-2, 2);
const modelView = m4.identity();
m4.rotateX(modelView, Math.PI * .5, modelView);
gl.useProgram(programInfo.program);
// calls gl.bindbuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
// for each attribute
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
projection,
modelView,
lookupTable: lookupTableTexture,
lightDirection: v3.normalize([1, 2, 3]),
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
function clamp(v, min, max) {
return Math.min(max, Math.max(min, v));
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
One way of doing this would be to add the flat interpolation qualifier to the color varying, as described in this tutorial. This will prevent color values from being interpolated, so each triangle will end up with only one color (the one specified by the provoking vertex of the triangle).
Sadly I couldn't find anything about its WebGL support, but you might as well try it out to see if it works.
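For reference, a minimal sketch of what the qualifier looks like in a WebGL2 / GLSL ES 3.00 vertex shader (the flat qualifier does not exist in WebGL1 shaders):
#version 300 es
in vec4 position;
in vec3 color;
// flat = no interpolation; the whole triangle gets the value
// from the provoking vertex
flat out vec3 v_color;
void main() {
  v_color = color;
  gl_Position = position;
}
The matching fragment shader has to declare the same input as flat in vec3 v_color;.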
If it doesn't work, or you don't want the individual triangles to be visible, you could also load the color data into a texture and retrieve the color of each pixel in the fragment shader. There would still be some interpolation depending on the texture size, though (similar to how an image becomes blurry when scaled up).

WebGL: Zooming to and stopping at object in a scene in WebGL

We've created a WebGL application which displays a scene containing multiple objects. The entire scene can be rotated in multiple directions. The application requires the user to be able to zoom up to, but NOT through, the object. I know this functionality can be implemented using WebGL frameworks such as Three.js and SceneJS. Unfortunately, our application is not leveraging a framework. Is there a way to implement the zoom functionality described here using WebGL only? Note: I don't believe object picking will work for us since the user is not required to select any object in the scene. Thanks for your help.
Off the top of my head.
First off you need to know the size of each object in world space. For example, if one object is 10 units big and another is 100 units big, you probably want to be a different distance from the 100 unit object than from the 10 unit object. By world space I also mean that if you're scaling the 10 unit object by 9, then in world space it would be 90 units big, and again you'd want to get a different distance away than if it were 10 units.
You generally compute the size of an object in local space by computing the extents of its vertices: just go through all the vertices and keep track of the min and max values in x, y, and z, as in the sketch below. Whether you want to take the biggest value from the object's origin or compute an actual center point is up to you.
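A minimal sketch, assuming positions is a flat [x, y, z, x, y, z, ...] array:
function computeExtents(positions) {
  const min = [Infinity, Infinity, Infinity];
  const max = [-Infinity, -Infinity, -Infinity];
  for (let i = 0; i < positions.length; i += 3) {
    for (let j = 0; j < 3; ++j) {
      min[j] = Math.min(min[j], positions[i + j]);
      max[j] = Math.max(max[j], positions[i + j]);
    }
  }
  return { min, max };
}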
So, given the size we can compute how far away you need to be to see the entire object. For the standard perspective matrix you can just work backward. If you know your object is 10 units big then you need to fit 10 units in your frustum. You'd probably actually pick something like 14 units (say size * 1.4) so there's some space around the object.
We know halfFovY and halfSizeToFitOnScreen; we need to compute distance.
sohcahtoa
tangent = opposite / adjacent
opposite = halfSizeToFitOnScreen
adjacent = distance
tangent = Math.tan(halfFovY)
Therefore
tangent = halfSizeToFitOnScreen / distance
tangent * distance = halfSizeToFitOnScreen
distance = halfSizeToFitOnScreen / tangent
distance = halfSizeToFitOnScreen / Math.tan(halfFovY)
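In code, the derivation above is a one-liner (a sketch; fovY is the vertical field of view in radians):
function distanceToFitOnScreen(sizeToFitOnScreen, fovY) {
  // halve both the size and the field of view, per the triangle above
  return (sizeToFitOnScreen * 0.5) / Math.tan(fovY * 0.5);
}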
So now we know the camera needs to be distance away from the object. There's an entire sphere of positions that are distance away from the object; where on that sphere you pick is up to you. Assuming you go from where the camera currently is, you can compute the direction from the object to the camera:
direction = normalize(cameraPos - objectPos)
Now you can compute a point distance away in that direction:
desiredCameraPosition = objectPos + direction * distance
Now either put the camera there using some lookAt function
matrix = lookAt(desiredCameraPosition, objectPosition, up)
Or lerp between where the camera currently is and its new desired position:
var m4 = twgl.m4;
var v3 = twgl.v3;
twgl.setAttributePrefix("a_");
var gl = twgl.getWebGLContext(document.getElementById("c"));
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
var shapes = [
twgl.primitives.createCubeBufferInfo(gl, 2),
twgl.primitives.createSphereBufferInfo(gl, 1, 24, 12),
twgl.primitives.createTruncatedConeBufferInfo(gl, 1, 0, 2, 24, 1),
];
function rand(min, max) {
return min + Math.random() * (max - min);
}
function easeInOut(t, start, end) {
var c = end - start;
if ((t /= 0.5) < 1) {
return c / 2 * t * t + start;
} else {
return -c / 2 * ((--t) * (t - 2) - 1) + start;
}
}
// Shared values
var lightWorldPosition = [1, 8, -10];
var lightColor = [1, 1, 1, 1];
var camera = m4.identity();
var view = m4.identity();
var viewProjection = m4.identity();
var targetNdx = 0;
var targetTimer = 0;
var zoomTimer = 0;
var eye = v3.copy([1, 4, -60]);
var target = v3.copy([0, 0, 0]);
var up = [0, 1, 0];
var zoomScale = 1.4;
var zoomDuration = 2;
var targetChangeInterval = 3;
var oldEye;
var oldTarget;
var newEye;
var newTarget;
var tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 2, 2, 0, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array([
255,255,255,255,
192,192,192,255,
192,192,192,255,
255,255,255,255]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var objects = [];
var drawObjects = [];
var numObjects = 100;
var baseHue = rand(0, 360);
for (var ii = 0; ii < numObjects; ++ii) {
var uniforms = {
u_lightWorldPos: lightWorldPosition,
u_lightColor: lightColor,
u_diffuseMult: chroma.hsv((baseHue + rand(0, 60)) % 360, 0.4, 0.8).gl(),
u_specular: [1, 1, 1, 1],
u_shininess: 50,
u_specularFactor: 1,
u_diffuse: tex,
u_viewInverse: camera,
u_world: m4.identity(),
u_worldInverseTranspose: m4.identity(),
u_worldViewProjection: m4.identity(),
};
drawObjects.push({
programInfo: programInfo,
bufferInfo: shapes[ii % shapes.length],
uniforms: uniforms,
});
objects.push({
translation: [rand(-50, 50), rand(-50, 50), rand(-50, 50)],
scale: rand(1, 5),
size: 2,
xSpeed: rand(0.2, 0.7),
zSpeed: rand(0.2, 0.7),
uniforms: uniforms,
});
}
var then = Date.now() * 0.001;
function render() {
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
var time = Date.now() * 0.001;
var elapsed = time - then;
then = time;
var radius = 6;
var fovy = 30 * Math.PI / 180;
var projection = m4.perspective(fovy, gl.canvas.clientWidth / gl.canvas.clientHeight, 0.5, 200);
targetTimer -= elapsed;
if (targetTimer <= 0) {
targetTimer = targetChangeInterval;
zoomTimer = 0;
targetNdx = (targetNdx + 1) % objects.length;
oldEye = v3.copy(eye);
oldTarget = v3.copy(target);
var targetObj = objects[targetNdx];
newTarget = targetObj.translation;
var halfSize = targetObj.size * targetObj.scale * zoomScale * 0.5;
var distance = halfSize / Math.tan(fovy * 0.5);
var direction = v3.normalize(v3.subtract(eye, newTarget));
newEye = v3.add(newTarget, v3.mulScalar(direction, distance));
}
zoomTimer += elapsed;
var lerp = easeInOut(Math.min(1, zoomTimer / zoomDuration), 0, 1);
eye = v3.lerp(oldEye, newEye, lerp);
target = v3.lerp(oldTarget, newTarget, lerp);
m4.lookAt(eye, target, up, camera);
m4.inverse(camera, view);
m4.multiply(projection, view, viewProjection);
objects.forEach(function(obj, ndx) {
var uni = obj.uniforms;
var world = uni.u_world;
m4.identity(world);
m4.translate(world, obj.translation, world);
m4.rotateX(world, time * obj.xSpeed, world);
m4.rotateZ(world, time * obj.zSpeed, world);
m4.scale(world, [obj.scale, obj.scale, obj.scale], world);
m4.transpose(m4.inverse(world, uni.u_worldInverseTranspose), uni.u_worldInverseTranspose);
m4.multiply(viewProjection, uni.u_world, uni.u_worldViewProjection);
});
twgl.drawObjectList(gl, drawObjects);
requestAnimationFrame(render);
}
render();
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<script src="//twgljs.org/dist/4.x/twgl-full.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/chroma-js/0.6.3/chroma.min.js"></script>
<canvas id="c"></canvas>
<script id="vs" type="notjs">
uniform mat4 u_worldViewProjection;
uniform vec3 u_lightWorldPos;
uniform mat4 u_world;
uniform mat4 u_viewInverse;
uniform mat4 u_worldInverseTranspose;
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec2 a_texcoord;
varying vec4 v_position;
varying vec2 v_texCoord;
varying vec3 v_normal;
varying vec3 v_surfaceToLight;
varying vec3 v_surfaceToView;
void main() {
v_texCoord = a_texcoord;
v_position = (u_worldViewProjection * a_position);
v_normal = (u_worldInverseTranspose * vec4(a_normal, 0)).xyz;
v_surfaceToLight = u_lightWorldPos - (u_world * a_position).xyz;
v_surfaceToView = (u_viewInverse[3] - (u_world * a_position)).xyz;
gl_Position = v_position;
}
</script>
<script id="fs" type="notjs">
precision mediump float;
varying vec4 v_position;
varying vec2 v_texCoord;
varying vec3 v_normal;
varying vec3 v_surfaceToLight;
varying vec3 v_surfaceToView;
uniform vec4 u_lightColor;
uniform vec4 u_diffuseMult;
uniform sampler2D u_diffuse;
uniform vec4 u_specular;
uniform float u_shininess;
uniform float u_specularFactor;
vec4 lit(float l ,float h, float m) {
return vec4(1.0,
abs(l),//max(l, 0.0),
(l > 0.0) ? pow(max(0.0, h), m) : 0.0,
1.0);
}
void main() {
vec4 diffuseColor = texture2D(u_diffuse, v_texCoord) * u_diffuseMult;
vec3 a_normal = normalize(v_normal);
vec3 surfaceToLight = normalize(v_surfaceToLight);
vec3 surfaceToView = normalize(v_surfaceToView);
vec3 halfVector = normalize(surfaceToLight + surfaceToView);
vec4 litR = lit(dot(a_normal, surfaceToLight),
dot(a_normal, halfVector), u_shininess);
vec4 outColor = vec4((
u_lightColor * (diffuseColor * litR.y +
u_specular * litR.z * u_specularFactor)).rgb,
diffuseColor.a);
gl_FragColor = outColor;
}
</script>
