I want to get the pixel-data from a Three.js demo.
As far as I know, there are 2 ways to proceed:
1) draw the WebGL canvas into a 2D canvas and use Context2D.getImageData, like this:
var canvas = document.createElement("canvas");
var ctx = canvas.getContext("2d");
ctx.drawImage(renderer.domElement,0,0);
var data = ctx.getImageData(0,0,w,h).data;
2) use the WebGL context directly with readPixels, like this:
var ctx = renderer.domElement.getContext("webgl");
var data = new Uint8Array(w * h * 4);
ctx.readPixels(0, 0, w, h, ctx.RGBA, ctx.UNSIGNED_BYTE, data);
Both approaches work and give the same results, but the second one is almost 2 times slower than the one using context2d.getImageData.
That sounds very weird to me. How can drawing the 3D content into a 2D canvas be faster than using the 3D context directly? I don't understand, and I'm almost sure I'm not using gl.readPixels correctly.
So my question is: how do I use gl.readPixels so that it is faster than context2d.drawImage + context2d.getImageData?
I tried to use a Float32Array, like this:
var ctx = renderer.domElement.getContext("webgl");
var data = new Float32Array(w*h*4);
ctx.readPixels(0, 0, w,h, ctx.RGBA, ctx.FLOAT, data);
I thought it would be faster since there is no conversion from float to Uint8, but it doesn't seem to work that way: my 'data' array stays empty after the call to ctx.readPixels.
Thank you for your help!
(Please excuse me if my English is not perfect; it's not my native language.)
On my machine readPixels is 2x to 20x faster than drawImage/getImageData. I tested on macOS Chrome and Firefox, as well as Windows 10 Chrome and Firefox. Safari was the one where readPixels came out slower. That sounds like a bug in Safari, and in fact, checking Safari Technology Preview Release 46, readPixels is, as expected, 1.2x to 3x faster than drawImage/getImageData.
const gl = document.createElement("canvas").getContext("webgl");
const ctx = document.createElement("canvas").getContext("2d");
const w = 512;
const h = 512;
gl.canvas.width = w;
gl.canvas.height = h;
ctx.canvas.width = w;
ctx.canvas.height = h;

const readPixelBuffer = new Uint8Array(w * h * 4);

const tests = [
  { fn: withReadPixelsPreAlloc, msg: "readPixelsPreAlloc", },
  { fn: withReadPixels, msg: "readPixels", },
  { fn: withDrawImageGetImageData, msg: "drawImageGetPixels", },
];

let ndx = 0;
runNextTest();

function runNextTest() {
  if (ndx >= tests.length) {
    return;
  }
  const test = tests[ndx++];
  // use setTimeout to give the browser a chance to
  // do something between tests
  setTimeout(function() {
    log(test.msg, "iterations in 5 seconds:", runTest(test.fn));
    runNextTest();
  }, 0);
}

function runTest(fn) {
  const start = performance.now();
  let count = 0;
  for (;;) {
    const elapsed = performance.now() - start;
    if (elapsed > 5000) {
      break;
    }
    fn();
    ++count;
  }
  return count;
}

function withReadPixelsPreAlloc() {
  gl.readPixels(0, 0, w, h, gl.RGBA, gl.UNSIGNED_BYTE, readPixelBuffer);
}

function withReadPixels() {
  const readPixelBuffer = new Uint8Array(w * h * 4);
  gl.readPixels(0, 0, w, h, gl.RGBA, gl.UNSIGNED_BYTE, readPixelBuffer);
}

function withDrawImageGetImageData() {
  ctx.drawImage(gl.canvas, 0, 0);
  ctx.getImageData(0, 0, w, h);
}

function log(...args) {
  const elem = document.createElement("pre");
  elem.textContent = [...args].join(' ');
  document.body.appendChild(elem);
}
As for converting to float: the canvas itself is stored in bytes. There is no conversion to float, and you likely got a GL error:
const gl = document.createElement("canvas").getContext("webgl");
const buf = new Float32Array(4);
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.FLOAT, buf);
log("ERROR:", glEnumToString(gl, gl.getError()));

function log(...args) {
  const elem = document.createElement("pre");
  elem.textContent = [...args].join(' ');
  document.body.appendChild(elem);
}

function glEnumToString(gl, val) {
  if (val === 0) { return 'NONE'; }
  for (const key in gl) {
    if (gl[key] === val) {
      return key;
    }
  }
  return `0x${val.toString(16)}`;
}
Checking the console, I see the error is:
WebGL: INVALID_ENUM: readPixels: invalid type
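As a side note (my own addition, not from the answer above): for a normal canvas framebuffer, readPixels is only guaranteed to accept RGBA/UNSIGNED_BYTE; RGBA/FLOAT is only valid when reading from a floating-point render target, which in WebGL1 also needs the relevant float extensions. A small sketch to query the one extra format/type pair an implementation may support for the currently bound framebuffer:
// Sketch: RGBA/UNSIGNED_BYTE is always allowed; this asks which additional
// format/type pair readPixels supports for the current framebuffer.
const gl = document.createElement("canvas").getContext("webgl");
const readFormat = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_FORMAT);
const readType = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_TYPE);
console.log("extra readPixels format: 0x" + readFormat.toString(16) + ", type: 0x" + readType.toString(16));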
I have some sample code for a transform feedback object that I've been trying to simplify down to its most minimal form, and I've found that my code works the same if I omit createTransformFeedback() and bindTransformFeedback(). The only "transform feedback" code left is just gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, buffer). I can only test this on two devices (Intel Iris and Qualcomm Adreno 610), but both run identically with or without binding any TFOs.
From all the documentation I've read, this doesn't make any sense to me. Is it actually required to call bindTransformFeedback() to get access to TFOs? Or is my code too "minimal"?
The code below does not write to the canvas but only logs the buffer contents after two feedback passes. The relevant code begins about half-way down. I've commented out the code calling createTransformFeedback() and bindTransformFeedback().
// Shaders
const transformVertexShader = `#version 300 es
layout(location=0) in vec2 shaderInput;
out vec2 shaderOutput;
void main() {
shaderOutput = shaderInput * vec2(1.0, 10.0);
}`;
const transformFragmentShader = `#version 300 es
void main()
{
discard;
}`;
// Create program
const canvas = document.querySelector('canvas');
const gl = canvas.getContext('webgl2');
const program = gl.createProgram();
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, transformVertexShader);
gl.compileShader(vertexShader);
gl.attachShader(program, vertexShader);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, transformFragmentShader);
gl.compileShader(fragmentShader);
gl.attachShader(program, fragmentShader);
gl.transformFeedbackVaryings(program, ['shaderOutput'], gl.INTERLEAVED_ATTRIBS);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
  console.log(gl.getProgramInfoLog(program));
  console.log(gl.getShaderInfoLog(vertexShader));
  console.log(gl.getShaderInfoLog(fragmentShader));
}
gl.useProgram(program);
// Initialize VAOs, buffers and TFOs
const COUNT = 2000;
const A = 0;
const B = 1;
const sourceData = new Float32Array(COUNT).map((v,i) => i);
const buffer = [];
const vao = [];
// const tf = [];
for (const i of [A,B]) {
  vao[i] = gl.createVertexArray();
  buffer[i] = gl.createBuffer();
  // tf[i] = gl.createTransformFeedback();
  gl.bindVertexArray(vao[i]);
  gl.bindBuffer(gl.ARRAY_BUFFER, buffer[i]);
  gl.bufferData(gl.ARRAY_BUFFER, COUNT * 4, gl.STATIC_DRAW);
  gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
  gl.enableVertexAttribArray(0);
}
// Populate initial source data buffer
gl.bindBuffer(gl.ARRAY_BUFFER, buffer[A]);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, sourceData);
// Unbind everything (for no reason)
gl.bindVertexArray(null);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, null);
// gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, null);
// First pass through the transform feedback A->B
gl.bindVertexArray(vao[A]);
// gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf[A]);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, buffer[B]);
gl.enable(gl.RASTERIZER_DISCARD);
gl.beginTransformFeedback(gl.TRIANGLES);
gl.drawArrays(gl.TRIANGLES, 0, COUNT / 2);
gl.endTransformFeedback();
// Second pass through the transform feedback B->A
gl.bindVertexArray(vao[B]);
// gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf[B]);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, buffer[A]);
gl.beginTransformFeedback(gl.TRIANGLES);
gl.drawArrays(gl.TRIANGLES, 0, COUNT / 2);
gl.endTransformFeedback();
gl.flush();
// Check the final results
const fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
const startTime = performance.now();
const check = () => {
  const status = gl.clientWaitSync(fence, 0 & gl.SYNC_FLUSH_COMMANDS_BIT, 0);
  if (status === gl.CONDITION_SATISFIED) {
    console.log(`sync after ${ performance.now() - startTime }ms`);
    const output = new Float32Array(COUNT);
    gl.bindBuffer(gl.ARRAY_BUFFER, buffer[B]);
    gl.getBufferSubData(gl.ARRAY_BUFFER, 0, output);
    console.log(`data finished fetching at ${ performance.now() - startTime }`);
    console.log(output);
    return;
  } else console.log('not finished, skipping');
  setTimeout(check);
};
check();
I completely missed that OpenGL provides a default transform feedback object that always exists, is bound until you unbind it (or bind to a different TFO) and cannot be deleted. If you are only pushing data back and forth between two buffers then you only need a single TFO, so it makes sense to use the default one.
I think the reason you would want to make two or more TFOs is if you were doing multiple draw calls in a single animation frame and wanted to retain info from both calls.
So for single transform-feedback programs, you only need to call bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER,...), beginTransformFeedback(...) and endTransformFeedback() since the default transform feedback object is already bound and ready for work.
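To make that concrete, here is a minimal sketch (mine, not code from the question) of a single feedback pass that relies only on the default transform feedback object; gl, program, outputBuffer and vertexCount are assumed to already exist:
// Minimal single-pass transform feedback using only the default TFO.
gl.useProgram(program);
gl.enable(gl.RASTERIZER_DISCARD);                          // no fragments needed
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, outputBuffer);
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, 0, vertexCount);
gl.endTransformFeedback();
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, null);  // unbind before reading the buffer back
gl.disable(gl.RASTERIZER_DISCARD);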
When I read vertex data into a Float32Array from a transform feedback buffer in Chrome using getBufferSubData, I get the warning "performance warning: READ-usage buffer was read back without waiting on a fence. This caused a graphics pipeline stall.". My understanding is that the GPU is trying to write vertex data back to the CPU as soon as getBufferSubData is called, which may be before the shaders have finished. I figured that if I can prevent this I may be able to speed up my application, and I thought the best way to do this would be with a callback. To clarify, the data returned is correct; I'm looking to speed up my application and better understand what's going on.
I have tried to implement a callback using fenceSync, similar to this answer. This should check whether the GPU has finished executing the current commands (including the transform feedback) before executing getBufferSubData. Here is my code.
(function () {
'use strict';
const createRandomF32Array = (arrSize) => {
return Float32Array.from({length: arrSize}, () => Math.floor(Math.random() * 1000));
};
const createGlContext = () => {
const canvas = document.createElement("canvas");
const gl = canvas.getContext("webgl2");
canvas.id = 'webgl_canvas';
document.body.appendChild(canvas);
if (gl === null) {
alert("Unable to initialize WebGL. Your browser or machine may not support it.");
return;
}
return gl;
};
// creates a single set of linked shaders containing a vertex and a fragment shader
class shaderProgram {
constructor(gl, rawVertex, rawFragment, transformFeedbackAttribs=false) {
this.gl = gl;
const compiledVertex = this.compileShader(gl.VERTEX_SHADER, rawVertex);
const compiledFragment = this.compileShader(gl.FRAGMENT_SHADER, rawFragment);
this.program = this.createProgram(compiledVertex, compiledFragment, transformFeedbackAttribs);
this.attributeLocations = {};
this.uniformLocations = {};
}
// run on init
compileShader(shaderType, shaderSource) {
const gl = this.gl;
var shader = gl.createShader(shaderType);
gl.shaderSource(shader, shaderSource);
gl.compileShader(shader);
var success = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
if (success) {
return shader;
}
console.log(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
}
// run on init
createProgram = (rawVertex, rawFragment, transformFeedbackAttribs) => {
const gl = this.gl;
var program = gl.createProgram();
gl.attachShader(program, rawVertex);
gl.attachShader(program, rawFragment);
if (!(transformFeedbackAttribs === false)) {
gl.transformFeedbackVaryings(program, [transformFeedbackAttribs], gl.INTERLEAVED_ATTRIBS);
}
gl.linkProgram(program);
var success = gl.getProgramParameter(program, gl.LINK_STATUS);
if (success) {
return program;
}
console.log(gl.getProgramInfoLog(program));
gl.deleteProgram(program);
}
logAttributeLocations = (attributeName) => {
const gl = this.gl;
const attributeLocation = gl.getAttribLocation(this.program, attributeName);
if (!(attributeName in this.attributeLocations)) {
this.attributeLocations[attributeName] = attributeLocation;
}
return attributeLocation;
}
logUniformLocations = (uniformName) => {
const gl = this.gl;
const uniformLocation = gl.getUniformLocation(this.program, uniformName);
if (!(uniformName in this.uniformLocations)) {
this.uniformLocations[uniformName] = uniformLocation;
}
return uniformLocation;
}
activate = () => {
const gl = this.gl;
gl.useProgram(this.program);
}
deactivate = () => {
const gl = this.gl;
gl.useProgram(0);
}
}
// the aim of this class is to build a buffer to be sent to the gpu
class renderObject {
constructor(gl) {
this.gl = gl;
this.vao = this.gl.createVertexArray();
this.buffers = {};
}
addDataToShaderAttribute = (dataset, dataDimension, attributeLocation) => {
const gl = this.gl;
var attributeVboNumber = this.addDataToBuffer(dataset);
gl.bindVertexArray(this.vao);
gl.enableVertexAttribArray(attributeLocation);
gl.vertexAttribPointer(attributeLocation, dataDimension, gl.FLOAT, false, 0, 0);
return attributeVboNumber;
}
prepareDataForShaderUniform = (dataset) => {
const gl = this.gl;
var uniformVboNumber = this.addDataToBuffer(dataset);
return uniformVboNumber;
}
addDataToBuffer = (dataset) => {
const gl = this.gl;
var vertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, dataset, gl.STATIC_DRAW);
var bufferNumber = Object.keys(this.buffers).length;
this.buffers[bufferNumber] = vertexBuffer;
return bufferNumber;
}
draw = (drawType, offset, dataLength) => {
const gl = this.gl;
gl.drawArrays(drawType, offset, dataLength);
}
calculateAndRetreive = (drawType, offset, dataLength) => {
const gl = this.gl;
var transformBuffer = gl.createBuffer();
var emptyDataArray = new Float32Array(dataLength);
gl.enable(gl.RASTERIZER_DISCARD);
gl.bindBuffer(gl.TRANSFORM_FEEDBACK_BUFFER, transformBuffer);
gl.bufferData(gl.TRANSFORM_FEEDBACK_BUFFER, emptyDataArray, gl.STATIC_READ);
var bufferNumber = Object.keys(this.buffers).length;
this.buffers[bufferNumber] = transformBuffer;
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, transformBuffer);
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, offset, dataLength);
gl.endTransformFeedback();
var arrBuffer = emptyDataArray;
gl.getBufferSubData(gl.TRANSFORM_FEEDBACK_BUFFER, 0, arrBuffer);
this.callbackOnSync(this.returnBufferData, emptyDataArray);
}
callbackOnSync = (callback, param) => {
const gl = this.gl;
var fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
gl.flush();
setTimeout(checkSync);
function checkSync() {
console.log(fence);
const status = gl.clientWaitSync(fence, 0, 0);
console.log(status);
if (status == gl.CONDITION_SATISFIED) {
gl.deleteSync(fence);
return callback(param);
} else {
return(setTimeout(checkSync));
}
}
}
returnBufferData = (arrBuffer) => {
const gl = this.gl;
gl.getBufferSubData(gl.TRANSFORM_FEEDBACK_BUFFER, 0, arrBuffer);
console.log(arrBuffer);
return arrBuffer;
}
}
var testVertex = "#version 300 es\r\n\r\nin float a_position;\r\nout float o_position;\r\n\r\nvoid main() {\r\n o_position = float(a_position + 5.0);\r\n}";
var testFragment = "#version 300 es\r\nprecision mediump float;\r\n\r\nout vec4 o_FragColor;\r\n\r\nvoid main() {\r\n o_FragColor = vec4(0.0);\r\n}";
const gl = createGlContext();
var positions = createRandomF32Array(1000);
var t0 = performance.now();
var testShader = new shaderProgram(gl, testVertex, testFragment, "o_position");
var aPositionAttribute = testShader.logAttributeLocations("a_position");
var uResolutionUniform = testShader.logUniformLocations("u_resolution");
var pointsBuffer = new renderObject(gl);
var dataBuffer = pointsBuffer.addDataToShaderAttribute(positions, 1, aPositionAttribute);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
testShader.activate();
var output = pointsBuffer.calculateAndRetreive(gl.TRIANGLES, 0, positions.length, testShader);
var t1 = performance.now();
console.log("GPU function took " + (t1 - t0) + " milliseconds.");
console.log(output);
}());
<!DOCTYPE html>
<html lang="en">
<meta charset="utf-8">
<head>
<title>Rollup Example</title>
</head>
<body>
</body>
<script src="../build/bundle.min.js"></script>
</html>
This gives the warning "GL_INVALID_OPERATION: Buffer is bound for transform feedback." and every value in the returned array is 0. The line causing the issue seems to be:
var fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0)
which seems to be interfering with the transform feedback. The checkSync function seems to work fine. My questions are: 1) Where am I going wrong with this? 2) Is this a technique that could work for my use case with some tweaking, or do I need to try something different entirely?
So I think this might be a bug in Chrome. Your code works on Mac Chrome but fails on Windows Chrome.
There is one bug where the code waits only for CONDITION_SATISFIED, but it is also possible for the status to be ALREADY_SIGNALED.
A few notes:
The code, at the time I wrote this answer, calls getBufferSubData twice.
The correct thing to do is to call it after the fence passes, not before. The warning is related to calling it before, AFAICT.
The timing code makes no sense.
At the bottom the code does
var t0 = performance.now();
...
var output = pointsBuffer.calculateAndRetreive(...);
var t1 = performance.now();
console.log("GPU function took " + (t1 - t0) + " milliseconds.");
console.log(output);
pointsBuffer.calculateAndRetreive will always return immediately, and output will always be undefined.
This is subjective, but passing in a callback and a param to be used with it later looks like a C programmer using JavaScript. JavaScript has closures, so there is arguably never a reason to pass in a parameter to be passed to a callback: the callback itself can always "close over" whatever variables it needs (see the short sketch after these notes). Like I said though, it's a style issue, so feel free to continue doing it the way you're doing it; I'm just pointing out that it stuck out to me.
The code passes a drawType to calculateAndRetreive but it's never used.
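Here is a short sketch (mine, with hypothetical names) of what the closure-based version of that callback might look like:
// The callback simply closes over whatever it needs; no extra `param` argument.
function callbackOnSync(gl, callback) {
  const fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
  gl.flush();
  function checkSync() {
    const status = gl.clientWaitSync(fence, 0, 0);
    if (status === gl.CONDITION_SATISFIED || status === gl.ALREADY_SIGNALED) {
      gl.deleteSync(fence);
      callback();
    } else {
      setTimeout(checkSync);
    }
  }
  setTimeout(checkSync);
}

// Usage: `output` and `transformBuffer` are captured by the arrow function.
// callbackOnSync(gl, () => {
//   gl.bindBuffer(gl.ARRAY_BUFFER, transformBuffer);
//   gl.getBufferSubData(gl.ARRAY_BUFFER, 0, output);
//   console.log(output);
// });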
As an example for the future, here is a minimal repro.
'use strict';
/* global document, setTimeout */
const canvas = document.createElement("canvas");
const gl = canvas.getContext("webgl2");
function compileShader(gl, shaderType, shaderSource) {
  const shader = gl.createShader(shaderType);
  gl.shaderSource(shader, shaderSource);
  gl.compileShader(shader);
  const success = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
  if (success) {
    return shader;
  }
  throw new Error(gl.getShaderInfoLog(shader));
}

function createProgram(gl, rawVertex, rawFragment, transformFeedbackAttribs) {
  const program = gl.createProgram();
  gl.attachShader(program, compileShader(gl, gl.VERTEX_SHADER, rawVertex));
  gl.attachShader(program, compileShader(gl, gl.FRAGMENT_SHADER, rawFragment));
  if (transformFeedbackAttribs) {
    gl.transformFeedbackVaryings(program, [transformFeedbackAttribs], gl.INTERLEAVED_ATTRIBS);
  }
  gl.linkProgram(program);
  const success = gl.getProgramParameter(program, gl.LINK_STATUS);
  if (success) {
    return program;
  }
  throw new Error(gl.getProgramInfoLog(program));
}
const vertexShader = `#version 300 es
in float inputValue;
out float outputValue;
void main() {
outputValue = inputValue * 2.0;
}`;
const fragmentShader = `#version 300 es
precision mediump float;
out vec4 dummy;
void main() {
dummy = vec4(0.0);
}`;
const program = createProgram(gl, vertexShader, fragmentShader, ['outputValue']);
gl.useProgram(program);
const input = new Float32Array([11, 22, 33, 44]);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
const vertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, input, gl.STATIC_DRAW);
const inputLoc = gl.getAttribLocation(program, 'inputValue');
gl.enableVertexAttribArray(inputLoc);
gl.vertexAttribPointer(inputLoc, 1, gl.FLOAT, false, 0, 0);
const transformBuffer = gl.createBuffer();
gl.enable(gl.RASTERIZER_DISCARD);
gl.bindBuffer(gl.TRANSFORM_FEEDBACK_BUFFER, transformBuffer);
gl.bufferData(gl.TRANSFORM_FEEDBACK_BUFFER, input.length * 4, gl.STATIC_READ);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, transformBuffer);
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, 0, input.length);
gl.endTransformFeedback();
const fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
gl.flush();
log('waiting...');
setTimeout(waitForResult);
function waitForResult() {
  const status = gl.clientWaitSync(fence, 0, 0);
  if (status === gl.CONDITION_SATISFIED || status === gl.ALREADY_SIGNALED) {
    gl.deleteSync(fence);
    const output = new Float32Array(input.length);
    gl.getBufferSubData(gl.TRANSFORM_FEEDBACK_BUFFER, 0, output);
    log(output);
  } else {
    setTimeout(waitForResult);
  }
}

function log(...args) {
  const elem = document.createElement('pre');
  elem.textContent = args.join(' ');
  document.body.appendChild(elem);
}
Update
If you want the code to work, I suggest you use a transform feedback object. A transform feedback object is just like a vertex array object, except for outputs instead of inputs. A vertex array object contains all the attribute settings (the settings set with gl.vertexAttribPointer, gl.enableVertexAttribArray, etc.). A transform feedback object contains all the varying output settings (the settings set with gl.bindBufferBase and gl.bindBufferRange).
The current issue comes from ambiguous language in the spec about using buffers when they are bound for transform feedback.
You can unbind them; in your case, call gl.bindBufferBase with null on index 0. Or you can store them in a transform feedback object and then unbind that object. The reason using a transform feedback object is recommended is that it holds more state. If you had 4 buffers bound, you could unbind them all just by unbinding the transform feedback object they are bound to (1 call), whereas binding null with gl.bindBufferBase/gl.bindBufferRange would take 4 calls.
'use strict';
/* global document, setTimeout */
const canvas = document.createElement("canvas");
const gl = canvas.getContext("webgl2");
function compileShader(gl, shaderType, shaderSource) {
  const shader = gl.createShader(shaderType);
  gl.shaderSource(shader, shaderSource);
  gl.compileShader(shader);
  const success = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
  if (success) {
    return shader;
  }
  throw new Error(gl.getShaderInfoLog(shader));
}

function createProgram(gl, rawVertex, rawFragment, transformFeedbackAttribs) {
  const program = gl.createProgram();
  gl.attachShader(program, compileShader(gl, gl.VERTEX_SHADER, rawVertex));
  gl.attachShader(program, compileShader(gl, gl.FRAGMENT_SHADER, rawFragment));
  if (transformFeedbackAttribs) {
    gl.transformFeedbackVaryings(program, [transformFeedbackAttribs], gl.INTERLEAVED_ATTRIBS);
  }
  gl.linkProgram(program);
  const success = gl.getProgramParameter(program, gl.LINK_STATUS);
  if (success) {
    return program;
  }
  throw new Error(gl.getProgramInfoLog(program));
}
const vertexShader = `#version 300 es
in float inputValue;
out float outputValue;
void main() {
outputValue = inputValue * 2.0;
}`;
const fragmentShader = `#version 300 es
precision mediump float;
out vec4 dummy;
void main() {
dummy = vec4(0.0);
}`;
const program = createProgram(gl, vertexShader, fragmentShader, ['outputValue']);
gl.useProgram(program);
const input = new Float32Array([11, 22, 33, 44]);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
const vertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, input, gl.STATIC_DRAW);
const inputLoc = gl.getAttribLocation(program, 'inputValue');
gl.enableVertexAttribArray(inputLoc);
gl.vertexAttribPointer(inputLoc, 1, gl.FLOAT, false, 0, 0);
const transformBuffer = gl.createBuffer();
gl.enable(gl.RASTERIZER_DISCARD);
const tf = gl.createTransformFeedback();
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf);
gl.bindBuffer(gl.TRANSFORM_FEEDBACK_BUFFER, transformBuffer);
gl.bufferData(gl.TRANSFORM_FEEDBACK_BUFFER, input.length * 4, gl.STATIC_READ);
gl.bindBuffer(gl.TRANSFORM_FEEDBACK_BUFFER, null);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, transformBuffer);
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, 0, input.length);
gl.endTransformFeedback();
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, null);
const fence = gl.fenceSync(gl.SYNC_GPU_COMMANDS_COMPLETE, 0);
gl.flush();
log('waiting...');
setTimeout(waitForResult);
function waitForResult() {
  const status = gl.clientWaitSync(fence, 0, 0);
  if (status === gl.CONDITION_SATISFIED || status === gl.ALREADY_SIGNALED) {
    gl.deleteSync(fence);
    const output = new Float32Array(input.length);
    gl.bindBuffer(gl.ARRAY_BUFFER, transformBuffer);
    gl.getBufferSubData(gl.ARRAY_BUFFER, 0, output);
    log(output);
  } else {
    setTimeout(waitForResult);
  }
}

function log(...args) {
  const elem = document.createElement('pre');
  elem.textContent = args.join(' ');
  document.body.appendChild(elem);
}
Note that just as there is a default vertex array object, the one that's bound originally and re-bound by calling gl.bindVertexArray(null), so too is there a default transform feedback object.
You might find this helpful for seeing the various objects and their state.
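If it helps, here is a small sketch (my addition, not part of the answer) of how those bindings can be queried directly in WebGL2:
// null means the default transform feedback object / default VAO is bound.
const boundTf = gl.getParameter(gl.TRANSFORM_FEEDBACK_BINDING);
const boundVao = gl.getParameter(gl.VERTEX_ARRAY_BINDING);
// Buffer currently attached to transform feedback binding point 0.
const tfBuffer0 = gl.getIndexedParameter(gl.TRANSFORM_FEEDBACK_BUFFER_BINDING, 0);
console.log({ boundTf, boundVao, tfBuffer0 });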
I am using the following code to print what is inside the clipboard (bitmap).
On the printed paper, I see the black line drawn with MoveTo/LineTo; however, the bitmap is not drawn.
Using the same drawing code in a CView works perfectly fine.
keybd_event(VK_SNAPSHOT, 1, 0, NULL);
HBITMAP handle = NULL;
if (::OpenClipboard(NULL))
{
handle = (HBITMAP)GetClipboardData(CF_BITMAP);
if (handle)
{
CBitmap* pBmp = CBitmap::FromHandle(handle);
BITMAP bm;
pBmp->GetBitmap(&bm);
int iBmpWidth = bm.bmWidth;
int iBmpHeight = bm.bmHeight;
CPrintDialog* pDlg = new CPrintDialog(FALSE);
CString csText;
CString cTitle;
if (pDlg->GetDefaults() == FALSE)
{
delete pDlg;
return;
}
pDlg->m_pd.Flags &= ~PD_RETURNDEFAULT;
LPDEVMODE pDevMode = pDlg->GetDevMode();
::GlobalUnlock(pDlg->m_pd.hDevMode);
DOCINFO di;
di.cbSize = sizeof(DOCINFO);
pDlg->m_pd.hwndOwner = this->GetSafeHwnd();
if (pDlg->DoModal() == IDOK)
{
HDC hdcPrinter = pDlg->GetPrinterDC();
if (hdcPrinter != NULL)
{
pDevMode = (LPDEVMODE)GlobalLock(pDlg->m_pd.hDevMode);
pDevMode->dmPaperSize = DMPAPER_A4;
pDevMode->dmOrientation = DMORIENT_LANDSCAPE;
ResetDCW(hdcPrinter, pDevMode);
GlobalUnlock(pDlg->m_pd.hDevMode);
// create a CDC and attach it to the default printer
CDC dcPrinter;
dcPrinter.Attach(hdcPrinter);
// call StartDoc() to begin printing
DOCINFO docinfo;
memset(&docinfo, 0, sizeof(docinfo));
docinfo.cbSize = sizeof(docinfo);
docinfo.lpszDocName = _T("CDC::StartDoc() Code Fragment");
// if it fails, complain and exit gracefully
if (dcPrinter.StartDoc(&docinfo) < 0)
{
MessageBox(_T("Printer wouldn't initalize"));
}
else
{
// start a page
if (dcPrinter.StartPage() < 0)
{
MessageBox(_T("Could not start page"));
dcPrinter.AbortDoc();
}
else
{
int PaperWidth = dcPrinter.GetDeviceCaps(HORZRES);
int PaperHeight = dcPrinter.GetDeviceCaps(VERTRES);
CDC memDC;
memDC.CreateCompatibleDC(&dcPrinter);
CBitmap* pOldBit = memDC.SelectObject(pBmp);
dcPrinter.MoveTo(1000, 1000);
dcPrinter.LineTo(PaperWidth - 1000, PaperHeight - 1000);
dcPrinter.StretchBlt(100,
100,
PaperWidth - 100,
PaperHeight - 100,
&memDC,
0,
0,
iBmpWidth,
iBmpHeight,
SRCCOPY);
memDC.SelectObject(pOldBit);
memDC.DeleteDC();
dcPrinter.EndPage();
dcPrinter.EndDoc();
}
}
}
}
delete pDlg;
}
::EmptyClipboard();
::CloseClipboard();
}
Using your code without any changes works for me; I tested using a real printer and CutePDF, and both printed the bitmap. It might be an issue with your source DC when you create memDC: either it does not support the correct color space or it does not support raster operations. Try the following code instead:
CDC* pDC = GetDesktopWindow()->GetDC();
memDC.CreateCompatibleDC(pDC);
GetDesktopWindow()->ReleaseDC(pDC);
I need to get a device context (DC) from Direct3D 9. Here are some code snippets.
1. CREATE DEVICE:
int windowWidth = 640;
int windowHeight = 480;
IDirect3D9* direct3D9 = Direct3DCreate9(D3D_SDK_VERSION);
if (direct3D9 == NULL)
{
    return FALSE;
}
D3DDISPLAYMODE *d3ddisplayMode =(D3DDISPLAYMODE *)calloc(1,sizeof(D3DDISPLAYMODE));
hr = direct3D9->GetAdapterDisplayMode(D3DADAPTER_DEFAULT,d3ddisplayMode);
if (hr != D3D_OK)
{
    free(d3ddisplayMode);
    direct3D9->Release();
    return FALSE;
}
D3DPRESENT_PARAMETERS *d3dpresentParam =(D3DPRESENT_PARAMETERS*)calloc(1,sizeof(D3DPRESENT_PARAMETERS));
d3dpresentParam->Windowed = TRUE;
d3dpresentParam->hDeviceWindow = NULL;
d3dpresentParam->SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpresentParam->BackBufferFormat = d3ddisplayMode->Format;
d3dpresentParam->BackBufferWidth = windowWidth;
d3dpresentParam->BackBufferHeight = windowHeight;
d3dpresentParam->BackBufferCount = 1;
free(d3ddisplayMode);
hr = direct3D9->CreateDevice(D3DADAPTER_DEFAULT,D3DDEVTYPE_HAL,NULL,D3DCREATE_SOFTWARE_VERTEXPROCESSING,d3dpresentParam,&direct3D9Device);
2. CREATE TEXTURE:
hr = D3DXCreateTexture(direct3D9Device,bmpWidth,bmpHeight,1,0,D3DFMT_X8R8G8B8,D3DPOOL_MANAGED,&pTexture);
3. DISPLAY IMAGE:
float left = 0,top =0,width =640,height=480;
direct3D9Device->BeginScene();
D3DXMATRIX mat;
D3DXVECTOR3 pos;
pos.x = (bmpWidth * left) / width;
pos.y = (bmpHeight * top) / height;
pos.z = 0;
d3dxSprite->Begin(D3DXSPRITE_ALPHABLEND);
D3DXVECTOR2 scaling((width/bmpWidth),(height/bmpHeight));
if (pTexture == direct3DTextureRemote)
{
    D3DXVECTOR2 spriteCentre((width/2), (height/2));
    D3DXMatrixTransformation2D(&mat, NULL, 0.0, &scaling, &spriteCentre, NULL, NULL);
}
else
{
    D3DXMatrixTransformation2D(&mat, NULL, 0.0, &scaling, NULL, NULL, NULL);
}
d3dxSprite->SetTransform(&mat);
d3dxSprite->Draw(pTexture,NULL,NULL,&pos,0xFFFFFFFF);
d3dxSprite->End();
direct3D9Device->EndScene();
direct3D9Device->Present( NULL, NULL, NULL, NULL );
This is now working properly. I can get a DC from a window like HDC hdc = ::GetDC(hwnd), but in my case there is no window (i.e. it is windowless), so how can I get a DC from DirectX? Please give some piece of code to get a DC from the DirectX device.
Call GetDC with NULL as the argument:
HDC hdc = ::GetDC(0);
Quote from MSDN:
Parameters
hWnd [in]
A handle to the window whose DC is to be retrieved.
If this value is NULL, GetDC retrieves the DC for the entire screen.
Edit:
Now that we know you are using NPAPI, here is a possible solution:
Call the NPAPI function NPN_GetValue() with the NPNVnetscapeWindow parameter. The returned HWND is a handle to the plug-in's drawing surface. Use it when creating the DirectX device and to retrieve the HDC.
Alternatively, you could try to retrieve the back buffer (IDirect3DSurface9) via the IDirect3DDevice9::GetRenderTarget() method and then retrieve its HDC via the IDirect3DSurface9::GetDC() method.
I have a WebGL application in which some attributes are bound to a program via getAttribLocation and some attributes are bound to a program via bindAttribLocation.
Is there a way in which I can get a mapping of all string names to attrib indices/values for a program? Also, at which point can I do this? I think getAttribLocation can be called after a program is linked, right?
Yes, you can do this. Here's an excerpt of code from my Cubes, which both binds some attributes and looks up the indices of others:
for (var attribName in boundAttribLocations) {
  var index = boundAttribLocations[attribName];
  if (typeof index === "number") {
    gl.bindAttribLocation(program, index, attribName);
  } else {
    if (typeof console !== "undefined") {
      console.warn("Enumerable non-number", attribName, "in boundAttribLocations object", boundAttribLocations);
    }
  }
}
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
  throw new Error(gl.getProgramInfoLog(program));
}

var i, name;
var attribs = Object.create(boundAttribLocations);
for (i = gl.getProgramParameter(program, gl.ACTIVE_ATTRIBUTES) - 1; i >= 0; i--) {
  name = gl.getActiveAttrib(program, i).name;
  attribs[name] = gl.getAttribLocation(program, name);
}
If I recall correctly, the inheritance of boundAttribLocations (Object.create) is so that attribs will contain valid locations for all bound attributes, including those not used by the current shader, which GL will not enumerate with getActiveAttrib.
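A tiny sketch (mine, with made-up attribute names) of why that Object.create inheritance gives you both sets of locations:
// Locations bound ahead of time live on the prototype; active attributes are
// added as own properties, so both stay visible on `attribs`.
const boundAttribLocations = { aOnlyUsedByOtherShaders: 3 };
const attribs = Object.create(boundAttribLocations);
attribs.aPosition = 0;  // e.g. from getAttribLocation on the linked program
console.log(attribs.aPosition);               // 0 (own property)
console.log(attribs.aOnlyUsedByOtherShaders); // 3 (inherited; not enumerated by getActiveAttrib)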
You can query all the attributes like this. Assuming program is a shader program that was successfully linked with gl.linkProgram:
const numAttribs = gl.getProgramParameter(program, gl.ACTIVE_ATTRIBUTES);
for (let ii = 0; ii < numAttribs; ++ii) {
  const attribInfo = gl.getActiveAttrib(program, ii);
  const index = gl.getAttribLocation(program, attribInfo.name);
  console.log(index, attribInfo.name);
}
Example:
const gl = document.createElement("canvas").getContext("webgl");
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute vec4 color;
attribute vec2 texcoord;
attribute float extra;
void main() {
// it's only important we use all the attributes so they don't get optimized
// out. It's not important that this shader makes no sense since that's
// not the point of this example.
gl_Position = position + vec4(normal, 0) + color + vec4(texcoord, extra, 0);
}
`;
const fs = `
precision mediump float;
void main() {
gl_FragColor = vec4(1);
}
`;
const prg = createProgram(gl, vs, fs);
showAttributes(gl, prg);
function showAttributes(gl, program) {
  const numAttribs = gl.getProgramParameter(program, gl.ACTIVE_ATTRIBUTES);
  for (let ii = 0; ii < numAttribs; ++ii) {
    const attribInfo = gl.getActiveAttrib(program, ii);
    const index = gl.getAttribLocation(program, attribInfo.name);
    log("index:", index, "size:", attribInfo.size, "type:", glEnumToString(gl, attribInfo.type).padEnd(10), "name:", attribInfo.name);
  }
}

function createProgram(gl, vSrc, fSrc) {
  const vs = createShader(gl, gl.VERTEX_SHADER, vSrc);
  const fs = createShader(gl, gl.FRAGMENT_SHADER, fSrc);
  const p = gl.createProgram();
  gl.attachShader(p, vs);
  gl.attachShader(p, fs);
  gl.linkProgram(p);
  return p;
}

function createShader(gl, type, src) {
  const s = gl.createShader(type);
  gl.shaderSource(s, src);
  gl.compileShader(s);
  return s;
}

function glEnumToString(gl, value) {
  for (let key in gl) {
    if (gl[key] === value) {
      return key;
    }
  }
  return "0x" + value.toString(16);
}

function log(...args) {
  const elem = document.createElement("pre");
  elem.textContent = [...args].join(' ');
  document.body.appendChild(elem);
}
pre { margin: 0; }