I have some questions about drawing shadows of a .obj model in a WebGL scene.
For example, if I want to draw shadows with the shadow volumes method, how should I develop this? I'm trying to implement it, but I have failed so far. Are there more efficient methods (i.e., less code) to do this?
Below is the code:
function createShadowBuilder(item) {
var that = function() {};
that.init = function(item) {
this.item = item;
this.glPositionBuffer = null;
this.glVertexIndexBuffer = null;
};
that.setupData = function() {
if (this.glPositionBuffer !== null) {
gl.deleteBuffer(this.glPositionBuffer);
}
if (this.glVertexIndexBuffer !== null) {
gl.deleteBuffer(this.glVertexIndexBuffer);
}
this.glVertices = [];
this.glIndices = [];
};
that.addGLVertex = function(vector) {
this.glVertices.push(vector[0]);
this.glVertices.push(vector[1]);
this.glVertices.push(vector[2]);
this.glIndices.push(this.glIndices.length);
};
that.addShadowSide = function(vector1, vector2, vector3, vector4) {
this.addGLVertex(vector1);
this.addGLVertex(vector2);
this.addGLVertex(vector3);
this.addGLVertex(vector4);
this.addGLVertex(vector3);
this.addGLVertex(vector2);
};
/**
* Check which triangles face the light source...
**/
that.checkDirection = function(lightLocation) {
var triangles = this.item.triangles,
triangle,
vector,
i = triangles.length;
while (i) {
i--;
// Create a normalized vector based on the vector from
// the center of the triangle to the lights position...
triangle = triangles[i];
vector = vec3.create(triangle.center);
vector = vec3.normalize(vec3.subtract(vector, lightLocation));
// Compare the vector with the normal of the triangle...
triangle.visible = (vec3.dot(vector, triangle.normal) < 0);
}
};
/**
* Find the edge of the object...
**/
that.findEdge = function() {
var triangles = this.item.triangles,
triangle,
a, b,
lines = this.item.lines,
line,
lineSidesHash = {},
i, j, k;
this.lineSides = [];
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) {
j = 3;
while (j) {
j--;
// Check whether this side has already been seen...
k = triangle.lines[j];
line = lines[k];
a = line.v1 + '_' + line.v2;
b = line.v2 + '_' + line.v1;
if (lineSidesHash[a] !== undefined) { // Check the v1 -> v2 direction...
// The side already exists, remove it...
delete(lineSidesHash[a]);
}
else if (lineSidesHash[b] !== undefined) { // Check the v2 -> v1 direction...
// The side already exists, remove it...
delete(lineSidesHash[b]);
}
else {
// It's a new side, add it to the list...
lineSidesHash[a] = k;
}
}
}
}
// Convert the hash map to an array...
for (i in lineSidesHash) {
line = lines[lineSidesHash[i]];
this.lineSides.push(line);
}
};
that.rotateVectorX = function(vector, angle) {
var y, z,
sin, cos;
if (angle === 0) {
return;
}
y = vector[1];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[1] = y * cos - z * sin;
vector[2] = y * sin + z * cos;
};
that.rotateVectorY = function(vector, angle) {
var x, z,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = z * sin + x * cos;
vector[2] = z * cos - x * sin;
};
that.rotateVectorZ = function(vector, angle) {
var x, y,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
y = vector[1];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = x * cos - y * sin;
vector[1] = x * sin + y * cos;
};
/**
* Update the shadow...
**/
that.update = function(lightLocation, lightAngle, matrix, zoom) {
// Get the position of the light from the matrix, remove the zoom value...
var vector = vec3.subtract(vec3.create(lightLocation), [matrix[12], matrix[13], matrix[14] + zoom]);
// Instead of rotating the object to face the light at the
// right angle it's a lot faster to rotate the light in the
// reverse direction...
this.rotateVectorX(vector, -lightAngle[0]);
this.rotateVectorY(vector, -lightAngle[1]);
this.rotateVectorZ(vector, -lightAngle[2]);
// Store the location for later use...
this.lightLocation = vector;
this.setupData(); // Reset all lists and buffers...
this.checkDirection(vector); // Check which triangles face the light source...
this.findEdge(); // Find the edge...
};
/**
* Create the buffers for the shadow volume...
**/
that.createVolume = function(lightLocation) {
var vertices = this.item.vertices,
triangles = this.item.triangles,
triangle,
lineSides = this.lineSides,
line,
vector1, vector2, vector3, vector4,
i = lineSides.length,
j;
while (i) { // For all edge lines...
i--;
line = lineSides[i];
vector1 = vertices[line.v1];
vector2 = vertices[line.v2];
// Extrude the line away from the light...
// Get the vector from the light position to the vertex...
vector3 = vec3.subtract(vector1, lightLocation, vec3.create());
// Add the normalized vector scaled with the volume
// depth to the vertex which gives a point on the other
// side of the object than the light source...
vector3 = vec3.add(vec3.scale(vec3.normalize(vector3), 30), vector1);
// And again for the second point on the line...
vector4 = vec3.subtract(vector2, lightLocation, vec3.create());
vector4 = vec3.add(vec3.scale(vec3.normalize(vector4), 30), vector2);
this.addShadowSide(vector1, vector2, vector3, vector4);
}
// Add the end caps to the volume...
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) { // Only add polygons facing the light...
// Add the top...
j = 3;
while (j) {
j--;
this.addGLVertex(vertices[triangle.vertices[j]]);
}
// Add the bottom...
j = 0;
while (j < 3) {
vector1 = vertices[triangle.vertices[j]];
vector2 = vec3.subtract(vector1, lightLocation, vec3.create());
this.addGLVertex(vec3.add(vec3.scale(vec3.normalize(vector2), 30), vector1));
j++;
}
}
}
// Create the vertex position buffer...
this.glPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(this.glVertices), gl.STATIC_DRAW);
this.glPositionBuffer.itemSize = 3;
// Create the vertex index buffer...
this.glVertexIndexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(this.glIndices), gl.STATIC_DRAW);
this.glVertexIndexBuffer.numItems = this.glIndices.length;
};
that.render = function() {
// Create the volume for the light...
this.createVolume(this.lightLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, this.glPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
setMatrixUniforms();
// Disable the texture coord attribute...
gl.disableVertexAttribArray(shaderProgram.textureCoordAttribute);
// Disable the normal attribute...
gl.disableVertexAttribArray(shaderProgram.vertexNormalAttribute);
// Disable the color attribute...
gl.disableVertexAttribArray(shaderProgram.vertexColorAttribute);
// Render both front and back facing polygons with different stencil operations...
gl.disable(gl.CULL_FACE);
gl.enable(gl.STENCIL_TEST);
gl.depthFunc(gl.LESS);
// Disable rendering to the color buffer...
gl.colorMask(false, false, false, false);
// Disable z buffer updating...
gl.depthMask(false);
// Allow all bits in the stencil buffer...
gl.stencilMask(255);
// Increase the stencil value for back-facing polygons (z-pass operator)...
gl.stencilOpSeparate(gl.BACK, gl.KEEP, gl.KEEP, gl.INCR);
// Decrease the stencil value for front-facing polygons (z-pass operator)...
gl.stencilOpSeparate(gl.FRONT, gl.KEEP, gl.KEEP, gl.DECR);
// Always pass...
gl.stencilFunc(gl.ALWAYS, 0, 255);
gl.drawElements(gl.TRIANGLES, this.glVertexIndexBuffer.numItems, gl.UNSIGNED_SHORT, 0);
// Enable rendering to the color and depth buffer again...
gl.colorMask(true, true, true, true);
gl.depthMask(true);
gl.disable(gl.STENCIL_TEST);
};
that.init(item);
return that;
}
This code is taken from an example on the web; I'm trying to adapt it.
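For reference, the render() function above only fills the stencil buffer; nothing in the snippet draws the darkened shadow itself. A typical final pass over a fullscreen quad would look roughly like the sketch below (this is not part of the original example: shadowProgram and quadBuffer are assumed names for a shader that outputs a translucent black and a buffer holding a fullscreen quad).
// Darken every pixel whose stencil value is non-zero (i.e. inside a shadow volume)...
gl.enable(gl.STENCIL_TEST);
gl.stencilFunc(gl.NOTEQUAL, 0, 255); // pass only where the volume counters are non-zero
gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP); // leave the stencil buffer untouched
gl.depthFunc(gl.ALWAYS); // the quad covers the whole screen
gl.enable(gl.BLEND);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(shadowProgram); // assumed: a shader outputting e.g. vec4(0.0, 0.0, 0.0, 0.5)
gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer); // assumed: four 2D corner vertices
gl.vertexAttribPointer(shadowProgram.vertexPositionAttribute, 2, gl.FLOAT, false, 0, 0);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
gl.disable(gl.BLEND);
gl.disable(gl.STENCIL_TEST);
gl.depthFunc(gl.LESS);
Also note that the INCR/DECR pair in render() is the z-pass variant of the algorithm; it breaks when the camera itself sits inside a shadow volume, which is why many implementations switch to the z-fail ("Carmack's reverse") operator order.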
How can I calculate distance between a fixed parameter and a target image/pixel?
The following code does color recognition, finds the average position of the tracked color, and draws a circle there. It can tell whether the target (avgX, avgY) is within a threshold distance (leftPd, centerPd, rightPd) of leftP, centerP, or rightP. I want to turn this code into a lane tracker that can at least find the distance between the leftP parameter point and the left lane, or between the rightP parameter point and the right lane.
import processing.video.*;
Capture video;
float threshold = 210;
color trackColor;
PVector leftP, centerP, rightP, target;
void setup() {
size(640, 480); // size() must run first, otherwise width/height are not set yet
leftP = new PVector (80,420);
centerP = new PVector (width/2, 380);
rightP = new PVector (560,420);
video = new Capture(this, width, height);
video.start();
trackColor = color(160,0,0); // Start off tracking for red
}
void captureEvent(Capture video) {
// Read image from the camera
video.read();
}
void draw() {
loadPixels();
video.loadPixels();
image(video, 0, 0);
float avgX = 0;
float avgY = 0;
int count = 0;
for (int x = 0; x < video.width; x ++ ) {
for (int y = 0; y < video.height; y ++ ) {
int loc = x + y*video.width;
color currentColor = video.pixels[loc];
float r1 = red(currentColor);
float g1 = green(currentColor);
float b1 = blue(currentColor);
float r2 = red(trackColor);
float g2 = green(trackColor);
float b2 = blue(trackColor);
// Using squared Euclidean distance to compare colors
// (threshold is therefore compared against the squared distance)
float d = distSq(r1, g1, b1, r2, g2, b2);
if (d < threshold) {
stroke(255);
strokeWeight(1);
point(x,y);
avgX += x;
avgY += y;
count++;
}
}
}
if (count > 0) {
avgX = avgX / count;
avgY = avgY / count;
// Draw a circle at the tracked pixel
fill(trackColor);
strokeWeight(4.0);
stroke(0);
ellipse(avgX, avgY, 20, 20);
text("brightnesslevel: " + trackColor, 20, 60);
text("FPS: " + frameRate, 20, 80);
}
target = new PVector (avgX, avgY);
color c = color(255, 204, 0);
fill(c);
noStroke();
ellipse(leftP.x,leftP.y,16,16); // left param
ellipse(centerP.x,centerP.y,16,16); // center param
ellipse(rightP.x,rightP.y,16,16); // right param
float leftPd = leftP.dist(target);
float centerPd = centerP.dist(target);
float rightPd = rightP.dist(target);
if ( leftPd <= 85 ){
text("Too close: left " , 20, 250);
}
if ( centerPd <= 85 ){
text("Too close: turn center " , 20, 275);
}
if ( rightPd <= 85 ){
text("Too close: turn right " , 20, 300);
}
}
float distSq(float x1,float y1, float z1, float x2, float y2, float z2){
float d = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);
return d;
}
void mousePressed() {
// Save color where the mouse is clicked in trackColor variable
int loc = mouseX + mouseY*video.width;
trackColor = video.pixels[loc];
}
I'm trying to implement a WebGL app following the documentation in the Mozilla Docs.
My code generates a sphere that is shaped by a scale factor, and the colors are generated according to that scale factor. The shape is OK, but the colors are wrong, and I have no clue what is going wrong. The same code works on Android in Java. I'm using the latest Chrome browser.
Here is the code:
export function createHcm3dObject(gl, diagram3D, deltaTheta, deltaPhi) {
let positions = [];
let colors = [];
let alpha = 1.0;
for (let theta = 0; theta < 360; theta += deltaTheta) {
for (let phi = 0; phi < 180; phi += deltaPhi) {
//r is scalefactor between 0 and 1 which shapes the sphere
let r = diagram3D[theta][phi];
//Color is generated according to the radius (alpha is currently set to 1.0)
let x1Color = generateColorArray(r, alpha);
let x1 = r * Math.sin(math3d.toRadians(phi)) * Math.cos(math3d.toRadians(theta));
let y1 = r * Math.sin(math3d.toRadians(phi)) * Math.sin(math3d.toRadians(theta));
let z1 = r * Math.cos(math3d.toRadians(phi));
r = diagram3D[theta + deltaTheta][phi];
let x2Color = generateColorArray(r, alpha);
let x2 = r * Math.sin(math3d.toRadians(phi)) * Math.cos(math3d.toRadians(theta + deltaTheta));
let y2 = r * Math.sin(math3d.toRadians(phi)) * Math.sin(math3d.toRadians(theta + deltaTheta));
let z2 = r * Math.cos(math3d.toRadians(phi));
r = diagram3D[theta][phi + deltaPhi];
let x3Color = generateColorArray(r, alpha);
let x3 = r * Math.sin(math3d.toRadians(phi + deltaPhi)) * Math.cos(math3d.toRadians(theta));
let y3 = r * Math.sin(math3d.toRadians(phi + deltaPhi)) * Math.sin(math3d.toRadians(theta));
let z3 = r * Math.cos(math3d.toRadians(phi + deltaPhi));
r = diagram3D[theta + deltaTheta][phi + deltaPhi];
let x4Color = generateColorArray(r, alpha);
let x4 = r * Math.sin(math3d.toRadians(phi + deltaPhi)) * Math.cos(math3d.toRadians(theta + deltaTheta));
let y4 = r * Math.sin(math3d.toRadians(phi + deltaPhi)) * Math.sin(math3d.toRadians(theta + deltaTheta));
let z4 = r * Math.cos(math3d.toRadians(phi + deltaPhi));
//1. Triangle
positions.push(x1, y1, z1);
positions.push(x3, y3, z3);
positions.push(x4, y4, z4);
//2. Triangle
positions.push(x2, y2, z2);
positions.push(x1, y1, z1);
positions.push(x4, y4, z4);
//Colors for 1. Triangle (red,green,blue,alpha=1.0)
colors.push(x1Color[0], x1Color[1], x1Color[2], x1Color[3]);
colors.push(x3Color[0], x3Color[1], x3Color[2], x3Color[3]);
colors.push(x4Color[0], x4Color[1], x4Color[2], x4Color[3]);
//Colors for 2. Triangle
colors.push(x2Color[0], x2Color[1], x2Color[2], x2Color[3]);
colors.push(x1Color[0], x1Color[1], x1Color[2], x1Color[3]);
colors.push(x4Color[0], x4Color[1], x4Color[2], x4Color[3]);
}
//console.log(positions);
//console.log(colors);
}
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
const positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
const colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
return {
position: positionBuffer,
color: colorBuffer,
positionSize: positions.length / 3, // drawArrays expects a vertex count (three floats per vertex)
deltaTheta,
deltaPhi
};
};
function generateColorArray(r, alpha) {
let colorQuad = [];
let green = Math.abs(Math.sin(2 * r * Math.PI));
let blue = Math.abs(Math.cos(2 * r * Math.PI));
colorQuad[0] = 0.0;
colorQuad[1] = green;
colorQuad[2] = blue;
colorQuad[3] = alpha;
if (r >= 0.5 / 2) {
let red = Math.abs(Math.cos(2 * r * Math.PI));
green = Math.abs(Math.sin(2 * r * Math.PI));
if (r < 0.5) {
green = 1.0;
}
colorQuad[0] = red;
colorQuad[1] = green;
colorQuad[2] = 0.0;
colorQuad[3] = alpha;
}
if (r >= 0.5) {
let red = Math.abs(Math.cos(2 * r * Math.PI));
green = Math.abs(Math.cos(2 * r * Math.PI));
if (r < 0.75) {
red = 1.0;
}
colorQuad[0] = red;
colorQuad[1] = green;
colorQuad[2] = 0.0;
colorQuad[3] = alpha;
}
if (r >= 0.75) {
let red = 1.0;
blue = Math.abs(Math.cos(2 * r * Math.PI));
colorQuad[0] = red;
colorQuad[1] = 0.0;
colorQuad[2] = blue;
colorQuad[3] = alpha;
}
return colorQuad;
}
React Class:
// Module-level drag state shared by the mouse handlers below
let mouseDown = false;
let mouseX = 0;
let mouseY = 0;
export class Viewer3d extends Component {
state = {
rotX: 0,
rotY: 0,
gl: null,
buffers: null,
programInfo: null,
};
componentDidMount() {
this.init();
}
init = () => {
console.log("Comp did mount");
const canvas = document.querySelector("#glCanvas");
/** @type {WebGLRenderingContext} */
const gl = canvas.getContext("webgl");
if (!gl) {
alert(
"Unable to initialize WebGL. Your browser or machine may not support it."
);
return;
}
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
let vs = document.getElementById("vshader").textContent;
let fs = document.getElementById("fshader").textContent;
//console.log(vs+" "+fs);
const shaderProgram = shader.initShaderProgram(gl, vs, fs);
let diagram3D = [];
let deltaTheta = 10;
let deltaPhi = 10;
for (let theta = 0; theta <= 360; theta += deltaTheta) {
let phiArray = [];
for (let phi = 0; phi <= 180; phi += deltaPhi) {
let eleCorr = 90 - phi;
let thetaCorr = 360 - theta;
let out = engine.antenna_correction(
thetaCorr,
0,
eleCorr,
0,
"012EA34",
"012EA34"
);
let att = out.a;
let logarithmic = false;
if (logarithmic) {
att = 1.0 - (-20.0 * Math.log10(att)) / 40.0;
}
phiArray[phi] = att;
}
diagram3D[theta] = phiArray;
}
//console.log(diagram3D);
const buffers = hcm3d.createHcm3dObject(
gl,
diagram3D,
deltaTheta,
deltaPhi
);
const programInfo = {
program: shaderProgram,
attribLocations: {
vertexPosition: gl.getAttribLocation(shaderProgram, "aVertexPosition"),
vertexColor: gl.getAttribLocation(shaderProgram,"aVertexColor"),
},
uniformLocations: {
projectionMatrix: gl.getUniformLocation(shaderProgram,"uProjectionMatrix"),
modelViewMatrix: gl.getUniformLocation(shaderProgram,"uModelViewMatrix"),
},
};
this.setState({ gl, buffers, programInfo });
this.drawScene(gl, programInfo, buffers);
};
drawScene = (gl, programInfo, buffers) => {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clearDepth(1.0); // Clear everything
gl.enable(gl.DEPTH_TEST); // Enable depth testing
gl.depthFunc(gl.LEQUAL); // Near things obscure far things
// Clear the canvas before we start drawing on it.
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Create a perspective matrix, a special matrix that is
// used to simulate the distortion of perspective in a camera.
// Our field of view is 45 degrees, with a width/height
// ratio that matches the display size of the canvas
// and we only want to see objects between 0.1 units
// and 100 units away from the camera.
const fieldOfView = (45 * Math.PI) / 180; // in radians
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 100.0;
const projectionMatrix = mat4.create();
// note: glmatrix.js always has the first argument
// as the destination to receive the result.
mat4.perspective(projectionMatrix, fieldOfView, aspect, zNear, zFar);
// Set the drawing position to the "identity" point, which is
// the center of the scene.
const modelViewMatrix = mat4.create();
// Now move the drawing position a bit to where we want to
// start drawing the square.
mat4.translate(
modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to translate
[0, 0, -2.5]
);
mat4.rotate(modelViewMatrix, modelViewMatrix, this.state.rotY, [1, 0, 0]);
mat4.rotate(modelViewMatrix, modelViewMatrix, this.state.rotX, [0, 1, 0]);
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.color);
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.position);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexPosition,
3,
gl.FLOAT,
false,
0,
0
);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexColor,
4,
gl.FLOAT,
false,
0,
0
);
gl.enableVertexAttribArray(programInfo.attribLocations.vertexPosition);
gl.enableVertexAttribArray(programInfo.attribLocations.vertexColor);
gl.useProgram(programInfo.program);
gl.uniformMatrix4fv(
programInfo.uniformLocations.projectionMatrix,
false,
projectionMatrix
);
gl.uniformMatrix4fv(
programInfo.uniformLocations.modelViewMatrix,
false,
modelViewMatrix
);
gl.drawArrays(gl.TRIANGLES, 0, buffers.positionSize);
};
onMouseMove = (evt) => {
if (!mouseDown) {
return;
}
evt.preventDefault();
let deltaX = evt.clientX - mouseX;
let deltaY = evt.clientY - mouseY;
mouseX = evt.clientX;
mouseY = evt.clientY;
this.rotateScene(deltaX, deltaY);
};
onMouseDown = (evt) => {
evt.preventDefault();
mouseDown = true;
mouseX = evt.clientX;
mouseY = evt.clientY;
};
onMouseUp = (evt) => {
evt.preventDefault();
mouseDown = false;
};
rotateScene = (deltaX, deltaY) => {
this.setState({
rotX: this.state.rotX + deltaX / 100,
rotY: this.state.rotY + deltaY / 100,
});
this.drawScene(this.state.gl, this.state.programInfo, this.state.buffers);
};
render() {
return (
<div className="w3-container w3-padding-16">
<canvas
id="glCanvas"
width={1280}
height={720}
onMouseMove={this.onMouseMove}
onMouseDown={this.onMouseDown}
onMouseUp={this.onMouseUp}
></canvas>
</div>
);
}
}
export default Viewer3d;
Fragment Shader:
<script id="fshader" type="x-shader/x-fragment">
precision mediump float;
varying vec4 vColor;
void main(void) {
gl_FragColor = vColor;
}
</script>
Vertex Shader:
<script id="vshader" type="x-shader/x-vertex">
attribute vec4 aVertexPosition;
attribute vec4 aVertexColor;
uniform mat4 uModelViewMatrix;
uniform mat4 uProjectionMatrix;
varying vec4 vColor;
void main(void) {
gl_Position = uProjectionMatrix * uModelViewMatrix * aVertexPosition;
vColor = aVertexColor;
}
</script>
You need to bind one buffer (say the color one), then use vertexAttribPointer to attach the currently bound buffer to the color attribute. Then bind the vertex position buffer and call vertexAttribPointer again for the vertex position attribute. vertexAttribPointer captures whichever buffer is bound to ARRAY_BUFFER at the moment it is called, so the two back-to-back bindBuffer calls in your drawScene leave both attributes reading from the position buffer. Filled in with the arguments from your code:
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.color);
gl.vertexAttribPointer(programInfo.attribLocations.vertexColor, 4, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.position);
gl.vertexAttribPointer(programInfo.attribLocations.vertexPosition, 3, gl.FLOAT, false, 0, 0);
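If you want to keep the pairing explicit, a small helper can wrap the calls per attribute (a sketch using the names from your programInfo):
function bindAttribute(gl, buffer, location, size) {
// vertexAttribPointer reads from whatever buffer is bound at this moment
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.vertexAttribPointer(location, size, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(location);
}
bindAttribute(gl, buffers.color, programInfo.attribLocations.vertexColor, 4);
bindAttribute(gl, buffers.position, programInfo.attribLocations.vertexPosition, 3);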
I have created a subclass in Fabric.js 4.3.0 extending fabric.Image; this lets me change the render function so that the image always fits in the bounding box.
I have also created a custom filter for Fabric with which, by giving four corner coordinates, I can distort the image, similar to Photoshop's Free Transform -> Distort tool.
While my code works, the issue is that when I drag the corner controls, the image always resizes from the center, moving the other control points as well.
I am trying to follow the instructions on resizing objects in Fabric using custom control points; they work on polygons and other shapes, but they do not yield the required result with images.
The result I want to achieve is that when dragging one of the green control points, the image should distort, but the image and the other control points must stay in place without moving, similar to what you see here: https://youtu.be/Pn-9qFNM6Zg?t=274
Here is a JSFiddle for the demo: https://jsfiddle.net/human_a/p6d71skm/
fabric.textureSize = 4096;
// Set default filter backend
fabric.filterBackend = new fabric.WebglFilterBackend();
fabric.isWebglSupported(fabric.textureSize);
fabric.Image.filters.Perspective = class extends fabric.Image.filters.BaseFilter {
/**
* Constructor
* @param {Object} [options] Options object
*/
constructor(options) {
super();
if (options) this.setOptions(options);
this.applyPixelRatio();
}
type = 'Perspective';
pixelRatio = fabric.devicePixelRatio;
bounds = {width: 0, height: 0, minX: 0, maxX: 0, minY: 0, maxY: 0};
hasRelativeCoordinates = true;
/**
* Array of attributes to send with buffers. Do not modify.
* @private
*//** @ts-ignore */
vertexSource = `
precision mediump float;
attribute vec2 aPosition;
attribute vec2 aUvs;
uniform float uStepW;
uniform float uStepH;
varying vec2 vUvs;
vec2 uResolution;
void main() {
vUvs = aUvs;
uResolution = vec2(uStepW, uStepH);
gl_Position = vec4(uResolution * aPosition * 2.0 - 1.0, 0.0, 1.0);
}
`;
fragmentSource = `
precision mediump float;
varying vec2 vUvs;
uniform sampler2D uSampler;
void main() {
gl_FragColor = texture2D(uSampler, vUvs);
}
`;
/**
* Return a map of attribute names to WebGLAttributeLocation objects.
*
* @param {WebGLRenderingContext} gl The canvas context used to compile the shader program.
* @param {WebGLShaderProgram} program The shader program from which to take attribute locations.
* @returns {Object} A map of attribute names to attribute locations.
*/
getAttributeLocations(gl, program) {
return {
aPosition: gl.getAttribLocation(program, 'aPosition'),
aUvs: gl.getAttribLocation(program, 'aUvs'),
};
}
/**
* Send attribute data from this filter to its shader program on the GPU.
*
* @param {WebGLRenderingContext} gl The canvas context used to compile the shader program.
* @param {Object} attributeLocations A map of shader attribute names to their locations.
*/
sendAttributeData(gl, attributeLocations, data, type = 'aPosition') {
const attributeLocation = attributeLocations[type];
if (gl[type + 'vertexBuffer'] == null) {
gl[type + 'vertexBuffer'] = gl.createBuffer();
}
gl.bindBuffer(gl.ARRAY_BUFFER, gl[type+'vertexBuffer']);
gl.enableVertexAttribArray(attributeLocation);
gl.vertexAttribPointer(attributeLocation, 2, gl.FLOAT, false, 0, 0);
gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW);
}
generateSurface() {
const corners = this.perspectiveCoords;
const surface = verb.geom.NurbsSurface.byCorners(...corners);
const tess = surface.tessellate();
return tess;
}
/**
* Apply the perspective filter to the image.
* Determines whether to use WebGL or Canvas2D based on the options.webgl flag.
*
* @param {Object} options
* @param {Number} options.passes The number of filters remaining to be executed
* @param {Boolean} options.webgl Whether to use webgl to render the filter.
* @param {WebGLTexture} options.sourceTexture The texture setup as the source to be filtered.
* @param {WebGLTexture} options.targetTexture The texture where filtered output should be drawn.
* @param {WebGLRenderingContext} options.context The GL context used for rendering.
* @param {Object} options.programCache A map of compiled shader programs, keyed by filter type.
*/
applyTo(options) {
if (options.webgl) {
const { width, height } = this.getPerspectiveBounds();
options.context.canvas.width = width;
options.context.canvas.height = height;
options.destinationWidth = width;
options.destinationHeight = height;
this.hasRelativeCoordinates && this.calculateCoordsByCorners();
this._setupFrameBuffer(options);
this.applyToWebGL(options);
this._swapTextures(options);
}
}
applyPixelRatio(coords = this.perspectiveCoords) {
for(let i = 0; i < coords.length; i++) {
coords[i][0] *= this.pixelRatio;
coords[i][1] *= this.pixelRatio;
}
return coords;
}
getPerspectiveBounds(coords = this.perspectiveCoords) {
coords = this.perspectiveCoords.slice().map(c => (
{
x: c[0],
y: c[1],
}
));
this.bounds.minX = fabric.util.array.min(coords, 'x') || 0;
this.bounds.minY = fabric.util.array.min(coords, 'y') || 0;
this.bounds.maxX = fabric.util.array.max(coords, 'x') || 0;
this.bounds.maxY = fabric.util.array.max(coords, 'y') || 0;
this.bounds.width = Math.abs(this.bounds.maxX - this.bounds.minX);
this.bounds.height = Math.abs(this.bounds.maxY - this.bounds.minY);
return {
width: this.bounds.width,
height: this.bounds.height,
minX: this.bounds.minX,
maxX: this.bounds.maxX,
minY: this.bounds.minY,
maxY: this.bounds.maxY,
};
}
/**
* @description Coordinates come in relative to the mockup item sections;
* the following function normalizes them based on the canvas corners.
*
* @param {number[]} coords
*/
calculateCoordsByCorners(coords = this.perspectiveCoords) {
for(let i = 0; i < coords.length; i++) {
coords[i][0] -= this.bounds.minX;
coords[i][1] -= this.bounds.minY;
}
}
/**
* Apply this filter using webgl.
*
* @param {Object} options
* @param {Number} options.passes The number of filters remaining to be executed
* @param {Boolean} options.webgl Whether to use webgl to render the filter.
* @param {WebGLTexture} options.originalTexture The texture of the original input image.
* @param {WebGLTexture} options.sourceTexture The texture setup as the source to be filtered.
* @param {WebGLTexture} options.targetTexture The texture where filtered output should be drawn.
* @param {WebGLRenderingContext} options.context The GL context used for rendering.
* @param {Object} options.programCache A map of compiled shader programs, keyed by filter type.
*/
applyToWebGL(options) {
const gl = options.context;
const shader = this.retrieveShader(options);
const tess = this.generateSurface(options.sourceWidth, options.sourceHeight);
const indices = new Uint16Array(_.flatten(tess.faces));
// Clear the canvas first
this.clear(gl); // !important
// bind texture buffer
this.bindTexture(gl, options);
gl.useProgram(shader.program);
// create the buffer
this.indexBuffer(gl, indices);
this.sendAttributeData(gl, shader.attributeLocations, new Float32Array(_.flatten(tess.points)), 'aPosition');
this.sendAttributeData(gl, shader.attributeLocations, new Float32Array(_.flatten(tess.uvs)), 'aUvs');
gl.uniform1f(shader.uniformLocations.uStepW, 1 / gl.canvas.width);
gl.uniform1f(shader.uniformLocations.uStepH, 1 / gl.canvas.height);
this.sendUniformData(gl, shader.uniformLocations);
gl.viewport(0, 0, options.destinationWidth, options.destinationHeight);
// enable indices up to 4294967296 for webGL 1.0
gl.getExtension('OES_element_index_uint');
gl.drawElements(gl.TRIANGLES, indices.length, gl.UNSIGNED_SHORT, 0);
}
clear(gl) {
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
}
bindTexture(gl, options) {
if (options.pass === 0 && options.originalTexture) {
gl.bindTexture(gl.TEXTURE_2D, options.originalTexture);
} else {
gl.bindTexture(gl.TEXTURE_2D, options.sourceTexture);
}
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
}
indexBuffer(gl, data) {
const indexBuffer = gl.createBuffer();
// make this buffer the current 'ELEMENT_ARRAY_BUFFER'
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
// Fill the current element array buffer with data
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, data, gl.STATIC_DRAW);
}
};
/**
* Returns filter instance from an object representation
* @static
* @param {Object} object Object to create an instance from
* @param {function} [callback] to be invoked after filter creation
* @return {fabric.Image.filters.Perspective} Instance of fabric.Image.filters.Perspective
*/
fabric.Image.filters.Perspective.fromObject = fabric.Image.filters.BaseFilter.fromObject;
/**
* Photo subclass
* @class fabric.Photo
* @extends fabric.Image
* @return {fabric.Photo} thisArg
*
*/
fabric.Photo = class extends fabric.Image {
type = 'photo';
repeat = 'no-repeat';
fill = 'transparent';
initPerspective = true;
cacheProperties = fabric.Image.prototype.cacheProperties.concat('perspectiveCoords');
constructor(src, options) {
super(options);
if (options) this.setOptions(options);
this.on('added', () => {
const image = new Image();
image.setAttribute('crossorigin', 'anonymous');
image.onload = () => {
this._initElement(image, options);
this.width = image.width / 2;
this.height = image.height / 2;
this.loaded = true;
this.setCoords();
this.fire('image:loaded');
};
image.src = src;
this.on('image:loaded', () => {
!this.perspectiveCoords && this.getInitialPerspective();
this.togglePerspective();
this.canvas.requestRenderAll();
});
});
}
/**
* @private
* @param {CanvasRenderingContext2D} ctx Context to render on
*//** @ts-ignore */
_render(ctx) {
fabric.util.setImageSmoothing(ctx, this.imageSmoothing);
if (this.isMoving !== true && this.resizeFilter && this._needsResize()) {
this.applyResizeFilters();
}
this._stroke(ctx);
this._renderPaintInOrder(ctx);
}
/**
* @private
* @param {CanvasRenderingContext2D} ctx Context to render on
*//** @ts-ignore */
_renderFill(ctx) {
var elementToDraw = this._element;
if (!elementToDraw) return;
ctx.save();
const elWidth = elementToDraw.naturalWidth || elementToDraw.width;
const elHeight = elementToDraw.naturalHeight || elementToDraw.height;
const width = this.width;
const height = this.height;
ctx.translate(-width / 2, -height / 2);
// get the scale
const scale = Math.min(width / elWidth, height / elHeight);
// get the top left position of the image
const x = (width / 2) - (elWidth / 2) * scale;
const y = (height / 2) - (elHeight / 2) * scale;
ctx.drawImage(elementToDraw, x, y, elWidth * scale, elHeight * scale);
ctx.restore();
}
togglePerspective(mode = true) {
this.set('perspectiveMode', mode);
// this.set('hasBorders', !mode);
if (mode === true) {
this.set('layout', 'fit');
var lastControl = this.perspectiveCoords.length - 1;
this.controls = this.perspectiveCoords.reduce((acc, coord, index) => {
const anchorIndex = index > 0 ? index - 1 : lastControl;
let name = `prs${index + 1}`;
acc[name] = new fabric.Control({
name,
x: -0.5,
y: -0.5,
actionHandler: this._actionWrapper(anchorIndex, (_, transform, x, y) => {
const target = transform.target;
const localPoint = target.toLocalPoint(new fabric.Point(x, y), 'left', 'top');
coord[0] = localPoint.x / target.scaleX * fabric.devicePixelRatio;
coord[1] = localPoint.y / target.scaleY * fabric.devicePixelRatio;
target.setCoords();
target.applyFilters();
return true;
}),
positionHandler: function (dim, finalMatrix, fabricObject) {
const zoom = fabricObject.canvas.getZoom();
const scalarX = fabricObject.scaleX * zoom / fabric.devicePixelRatio;
const scalarY = fabricObject.scaleY * zoom / fabric.devicePixelRatio;
var point = fabric.util.transformPoint({
x: this.x * dim.x + this.offsetX + coord[0] * scalarX,
y: this.y * dim.y + this.offsetY + coord[1] * scalarY,
}, finalMatrix
);
return point;
},
cursorStyleHandler: () => 'cell',
render: function(ctx, left, top, _, fabricObject) {
const zoom = fabricObject.canvas.getZoom();
const scalarX = fabricObject.scaleX * zoom / fabric.devicePixelRatio;
const scalarY = fabricObject.scaleY * zoom / fabric.devicePixelRatio;
ctx.save();
ctx.translate(left, top);
ctx.rotate(fabric.util.degreesToRadians(fabricObject.angle));
ctx.beginPath();
ctx.moveTo(0, 0);
ctx.strokeStyle = 'green';
if (fabricObject.perspectiveCoords[index + 1]) {
ctx.strokeStyle = 'green';
ctx.lineTo(
(fabricObject.perspectiveCoords[index + 1][0] - coord[0]) * scalarX,
(fabricObject.perspectiveCoords[index + 1][1] - coord[1]) * scalarY,
);
} else {
ctx.lineTo(
(fabricObject.perspectiveCoords[0][0] - coord[0]) * scalarX,
(fabricObject.perspectiveCoords[0][1] - coord[1]) * scalarY,
);
}
ctx.stroke();
ctx.beginPath();
ctx.arc(0, 0, 4, 0, Math.PI * 2);
ctx.closePath();
ctx.fillStyle = 'green';
ctx.fill();
ctx.stroke();
ctx.restore();
},
offsetX: 0,
offsetY: 0,
actionName: 'perspective-coords',
});
return acc;
}, {});
} else {
this.controls = fabric.Photo.prototype.controls;
}
this.canvas.requestRenderAll();
}
_actionWrapper(anchorIndex, fn) {
return function(eventData, transform, x, y) {
if (!transform || !eventData) return;
const { target } = transform;
target._resetSizeAndPosition(anchorIndex);
const actionPerformed = fn(eventData, transform, x, y);
return actionPerformed;
};
}
/**
* @description Manually reset the bounding box after the points update.
*
* @see http://fabricjs.com/custom-controls-polygon
* @param {number} index
*/
_resetSizeAndPosition = (index, apply = true) => {
const absolutePoint = fabric.util.transformPoint({
x: this.perspectiveCoords[index][0],
y: this.perspectiveCoords[index][1],
}, this.calcTransformMatrix());
this._setPositionDimensions({});
const penBaseSize = this._getNonTransformedDimensions();
const newX = (this.perspectiveCoords[index][0]) / penBaseSize.x;
const newY = (this.perspectiveCoords[index][1]) / penBaseSize.y;
this.setPositionByOrigin(absolutePoint, newX + 0.5, newY + 0.5);
apply && this._applyPointsOffset();
}
/**
* This is a modified version of the internal fabric function;
* it helps determine the size and the location of the path.
*
* @param {object} options
*/
_setPositionDimensions(options) {
const { left, top, width, height } = this._calcDimensions(options);
this.width = width;
this.height = height;
var correctLeftTop = this.translateToGivenOrigin(
{
x: left,
y: top,
},
'left',
'top',
this.originX,
this.originY
);
if (typeof options.left === 'undefined') {
this.left = correctLeftTop.x;
}
if (typeof options.top === 'undefined') {
this.top = correctLeftTop.y;
}
this.pathOffset = {
x: left,
y: top,
};
return { left, top, width, height };
}
/**
* @description This is based on fabric.Path._calcDimensions.
*
* @private
*/
_calcDimensions() {
const coords = this.perspectiveCoords.slice().map(c => (
{
x: c[0] / fabric.devicePixelRatio,
y: c[1] / fabric.devicePixelRatio,
}
));
const minX = fabric.util.array.min(coords, 'x') || 0;
const minY = fabric.util.array.min(coords, 'y') || 0;
const maxX = fabric.util.array.max(coords, 'x') || 0;
const maxY = fabric.util.array.max(coords, 'y') || 0;
const width = Math.abs(maxX - minX);
const height = Math.abs(maxY - minY);
return {
left: minX,
top: minY,
width: width,
height: height,
};
}
/**
* @description This is a modified version of the internal fabric function;
* it subtracts the path offset from each path point.
*/
_applyPointsOffset() {
for (let i = 0; i < this.perspectiveCoords.length; i++) {
const coord = this.perspectiveCoords[i];
coord[0] -= this.pathOffset.x;
coord[1] -= this.pathOffset.y;
}
}
/**
* @description Generate the initial coordinates for warping, based on image dimensions.
*
*/
getInitialPerspective() {
let w = this.getScaledWidth();
let h = this.getScaledHeight();
const perspectiveCoords = [
[0, 0], // top left
[w, 0], // top right
[w, h], // bottom right
[0, h], // bottom left
];
this.perspectiveCoords = perspectiveCoords;
const perspectiveFilter = new fabric.Image.filters.Perspective({
hasRelativeCoordinates: false,
pixelRatio: fabric.devicePixelRatio, // the Photo is already retina ready
perspectiveCoords
});
this.filters.push(perspectiveFilter);
this.applyFilters();
return perspectiveCoords;
}
};
/**
* Creates an instance of fabric.Photo from its object representation
* @static
* @param {Object} object Object to create an instance from
* @param {Function} callback Callback to invoke when an image instance is created
*/
fabric.Photo.fromObject = function(_object, callback) {
const object = fabric.util.object.clone(_object);
object.layout = _object.layout;
fabric.util.loadImage(object.src, function(img, isError) {
if (isError) {
callback && callback(null, true);
return;
}
fabric.Photo.prototype._initFilters.call(object, object.filters, function(filters) {
object.filters = filters || [];
fabric.Photo.prototype._initFilters.call(object, [object.resizeFilter], function(resizeFilters) {
object.resizeFilter = resizeFilters[0];
fabric.util.enlivenObjects([object.clipPath], function(enlivedProps) {
object.clipPath = enlivedProps[0];
var image = new fabric.Photo(img, object);
callback(image, false);
});
});
});
}, null, object.crossOrigin || 'anonymous');
};
const canvas = new fabric.Canvas(document.getElementById('canvas'), {
backgroundColor: 'white',
enableRetinaScaling: true,
});
function resizeCanvas() {
canvas.setWidth(window.innerWidth);
canvas.setHeight(window.innerHeight);
}
resizeCanvas();
window.addEventListener('resize', () => resizeCanvas(), false);
const photo = new fabric.Photo('https://cdn.artboard.studio/private/5cb9c751-5f17-4062-adb7-6ec2c137a65d/user_uploads/5bafe170-1580-4d6b-a3be-f5cdce22d17d-asdasdasd.jpg', {
left: canvas.getWidth() / 2,
top: canvas.getHeight() / 2,
originX: 'center',
originY: 'center',
});
canvas.add(photo);
canvas.setActiveObject(photo);
body {
margin: 0;
}
<script src="https://cdn.jsdelivr.net/npm/lodash#4.17.20/lodash.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/verb-nurbs-web#2.1.3/build/js/verb.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/fabric#4.3.0/dist/fabric.min.js"></script>
<canvas id="canvas"></canvas>
I suspect that the reference to absolutePoint in _resetSizeAndPosition needs to take the image's origin into account, and that there is a simple fix for this issue. However, I didn't find a good way to do that and resorted to manually "correcting" the position in _resetSizeAndPosition.
The modified version of _resetSizeAndPosition looks like this:
_resetSizeAndPosition = (index, apply = true) => {
const absolutePoint = fabric.util.transformPoint({
x: this.perspectiveCoords[index][0],
y: this.perspectiveCoords[index][1],
}, this.calcTransformMatrix());
let { height, width, left, top } = this._calcDimensions({});
const widthDiff = (width - this.width) / 2;
if ((left < 0 && widthDiff > 0) || (left > 0 && widthDiff < 0)) {
absolutePoint.x -= widthDiff;
} else {
absolutePoint.x += widthDiff;
}
const heightDiff = (height - this.height) / 2;
if ((top < 0 && heightDiff > 0) || (top > 0 && heightDiff < 0)) {
absolutePoint.y -= heightDiff;
} else {
absolutePoint.y += heightDiff;
}
this._setPositionDimensions({});
const penBaseSize = this._getNonTransformedDimensions();
const newX = (this.perspectiveCoords[index][0]) / penBaseSize.x;
const newY = (this.perspectiveCoords[index][1]) / penBaseSize.y;
this.setPositionByOrigin(absolutePoint, newX + 0.5, newY + 0.5);
apply && this._applyPointsOffset();
}
The basic principle of this approach is that the left and top properties of the object are never updated. You can see this in your example by modifying the image and checking the properties on the image in the console. Therefore, we need to apply a correction to the position properties based on the changing width and height. This keeps the other points fixed in place, since we compensate for the changing size of the image in its position.
By comparing the values of width and this.width it's possible to determine whether the image is growing or shrinking. The value of left indicates whether the stretch is occurring on the left or the right side of the image. If the user is stretching the image to the left, or shrinking it from the right, we need to subtract the width difference from the position; otherwise we add it. The same approach used for the horizontal values is then applied to the vertical values.
JSFiddle: https://jsfiddle.net/0x8caow6/
I need to change the size of the bubbles (points) by supplying a 4th value in the data points of Highcharts' 3D scatter chart. I couldn't find any way to do this. Can anyone help?
It seems that this is not supported out of the box. However, in this thread in the Highcharts forum a wrapper is shown that allows a 4th value w to be used as the size of the bubble (see http://jsfiddle.net/uqLfm1k6/1/):
(function (H) {
H.wrap(H.seriesTypes.bubble.prototype, 'getRadii', function (proceed, zMin, zMax, minSize, maxSize) {
var math = Math,
len,
i,
pos,
zData = this.zData,
wData = this.userOptions.data.map( function(e){ return e.w }), // ADDED
radii = [],
options = this.options,
sizeByArea = options.sizeBy !== 'width',
zThreshold = options.zThreshold,
zRange = zMax - zMin,
value,
radius;
// Set the shape type and arguments to be picked up in drawPoints
for (i = 0, len = zData.length; i < len; i++) {
// value = zData[i]; // DELETED
value = this.chart.is3d()? wData[i] : zData[i]; // ADDED
// When sizing by threshold, the absolute value of z determines the size
// of the bubble.
if (options.sizeByAbsoluteValue && value !== null) {
value = Math.abs(value - zThreshold);
zMax = Math.max(zMax - zThreshold, Math.abs(zMin - zThreshold));
zMin = 0;
}
if (value === null) {
radius = null;
// Issue #4419 - if value is less than zMin, push a radius that's always smaller than the minimum size
} else if (value < zMin) {
radius = minSize / 2 - 1;
} else {
// Relative size, a number between 0 and 1
pos = zRange > 0 ? (value - zMin) / zRange : 0.5;
if (sizeByArea && pos >= 0) {
pos = Math.sqrt(pos);
}
radius = math.ceil(minSize + pos * (maxSize - minSize)) / 2;
}
radii.push(radius);
}
this.radii = radii;
});
}(Highcharts));
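With the wrapper installed, each data point can then carry the extra w value next to x, y, and z. A minimal usage sketch (the container id and the data values are made up; options3d is the standard Highcharts 3D chart setup):
Highcharts.chart('container', {
chart: {
type: 'bubble',
options3d: { enabled: true, alpha: 10, beta: 30, depth: 250 }
},
series: [{
data: [
{ x: 1, y: 2, z: 3, w: 5 }, // w drives the rendered radius in 3D
{ x: 4, y: 1, z: 6, w: 12 }
]
}]
});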
Can anyone please help me display a world map in Processing and then plot tweets at the corresponding locations?
This isn't a complete answer, but it should show how to plot the locations on a world map. Note the getPoint() method, which converts longitude/latitude to Cartesian coordinates (you could do the same with the map() function in Processing). Also note that the constructor does the calculations to fit the image of the earth to the sketch window...
WorldMap world;
Point[] cities = new Point[6];
void setup() {
size(800, 480);
background(255);
stroke(0);
strokeWeight(1.5);
ellipseMode(CENTER);
smooth();
// World
world = new WorldMap(0, 0, width, height);
// Cities
cities[0] = world.getPoint(45.24, -75.43); // Ottawa
cities[1] = world.getPoint(38.53, -77.02); // Washington
cities[2] = world.getPoint(51.32, 0.50); // London
cities[3] = world.getPoint(48.48, 2.20); // Paris
cities[4] = world.getPoint(39.55, 116.25); // Beijing
cities[5] = world.getPoint(35.40, 139.45); // Tokyo
}
void draw() {
world.drawBackground();
for (int i = 0; i < cities.length; i++) {
ellipse(cities[i].x, cities[i].y, 10, 10);
}
}
//-------------------------------
class WorldMap {
int x, y, w, h;
PImage raster;
WorldMap(int x, int y, int w, int h) {
// Scale Image
if (h >= w/2) {
this.w = w;
this.h = w/2;
this.x = x;
this.y = (h - this.h)/2;
}
else {
this.h = h;
this.w = 2*h;
this.x = (w - this.w)/2;
this.y = y;
}
// Load Image
raster = loadImage("world_longlatwgs3.png");
}
void drawBackground() {
image(raster, x, y, w, h);
}
Point getPoint(float phi, float lambda) {
Point pt = new Point(x + ((180 + lambda) / 360) * w, y + h - ((90 + phi) / 180) * h);
return pt;
}
}
//-------------------------------
class Point extends Point2D {
Point(float x, float y) {
super(x, y);
}
}
class Point2D {
float x, y;
Point2D(float x, float y) {
this.x = x; this.y = y;
}
}
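In case you need the same conversion outside Processing (for example, to precompute tweet positions in JavaScript before sending them to the sketch), getPoint() is a plain equirectangular projection and ports directly (the function name and example values below are illustrative):
// Equirectangular projection: map (lat, lon) in degrees onto a w-by-h image
// anchored at (x0, y0). Mirrors WorldMap.getPoint() above.
function lonLatToPixel(lat, lon, x0, y0, w, h) {
var x = x0 + ((180 + lon) / 360) * w;
var y = y0 + h - ((90 + lat) / 180) * h;
return { x: x, y: y };
}
// Example: Tokyo (35.40 N, 139.45 E) on an 800x400 map -> roughly (709.9, 121.3)
var tokyo = lonLatToPixel(35.40, 139.45, 0, 0, 800, 400);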