Fixing the iOS 7 squished image in canvas, with rotation and scaling - ios

I am having problems with images being squished on iOS 7 when using canvas. I found the following post, which seems to be headed in the right direction:
HTML5 Canvas drawImage ratio bug iOS
However, I am beyond the simple case of drawing an image: I am also rotating and scaling the context (for thumbnails with EXIF orientation data) before drawing the image. The code runs, but there is no image data in my thumbnails. I'm guessing this has to do with the canvas rotation and scaling. However, I'm having a hard time understanding why the thumbnail is not created properly even when my squish factor is 1 (on an iOS device that does not have the bug).
Here is my full "onload()" code:
reader.onloadend = function(evt) {
console.log('read file data!');
var tempImg = new Image();
console.log('created new Image');
tempImg.src = evt.target.result;
console.log('set canvas to file');
// alert(this);
tempImg.onload = function() {
console.log('loaded tempImg');
var MAX_WIDTH = 450;
var MAX_HEIGHT = 450;
var tempW = tempImg.width;
var tempH = tempImg.height;
if (tempW > tempH) {
if (tempW > MAX_WIDTH) {
tempH *= MAX_WIDTH / tempW;
tempW = MAX_WIDTH;
}
} else {
if (tempH > MAX_HEIGHT) {
tempW *= MAX_HEIGHT / tempH;
tempH = MAX_HEIGHT;
}
}
var canvas = document.createElement('canvas');
canvas.width = tempW;
canvas.height = tempH;
var ctx = canvas.getContext("2d");
// save the current co-ordinate system
// before we screw with it
ctx.save();
// move to the middle of where we want to draw our image
ctx.translate(tempW/2, tempH/2);
if (exifTags.hasOwnProperty('Orientation')) {
// EXIF FORMAT: 0x0112 Orientation int16u IFD0
// 1 = Horizontal (normal)
// 2 = Mirror horizontal
// 3 = Rotate 180
// 4 = Mirror vertical
// 5 = Mirror horizontal and rotate 270 CW
// 6 = Rotate 90 CW
// 7 = Mirror horizontal and rotate 90 CW
// 8 = Rotate 270 CW
// Working. See: http://creativejs.com/2012/01/day-10-drawing-rotated-images-into-canvas/
if (exifTags.Orientation == 2) {
console.log('orientation: 2 = Mirror horizontal')
// flip context horizontally
// ctx.translate
ctx.scale(-1, 1);
} else if (exifTags.Orientation == 3) {
console.log('orientation: 3 = Rotate 180')
ctx.rotate(180*Math.PI/180);
} else if (exifTags.Orientation == 4) {
console.log('orientation: 4 = Mirror vertical')
// flip context vertically
ctx.scale(1, -1);
} else if (exifTags.Orientation == 5) {
console.log('orientation: Mirror horizontal and rotate 270 CW')
// flip context horizontally
ctx.rotate(270*Math.PI/180);
ctx.scale(-1, 1);
} else if (exifTags.Orientation == 6) {
console.log('orientation: Rotate 90 CW')
ctx.rotate(90*Math.PI/180);
} else if (exifTags.Orientation == 7) {
console.log('orientation: Mirror horizontal and rotate 90 CW')
// flip context horizontally
ctx.rotate(90*Math.PI/180);
ctx.scale(-1, 1);
} else if (exifTags.Orientation == 8) {
console.log('orientation: Rotate 270 CW')
ctx.rotate(270*Math.PI/180);
} else {
console.log('unknown orientation: ' + exifTags.Orientation);
}
}
var myImage = this;
if ($scope.platform == "iOS") {
/* Detect vertical squash in the loaded image.
* Fixes a bug which squashes some images vertically when drawing them into a canvas.
* This is a bug on iOS 6 devices. This function is from https://github.com/stomita/ios-imagefile-megapixel
*
*/
function detectVerticalSquash(img) {
var iw = img.naturalWidth, ih = img.naturalHeight;
var canvas = document.createElement('canvas');
canvas.width = 1;
canvas.height = ih;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var data = ctx.getImageData(0, 0, 1, ih).data;
// search image edge pixel position in case it is squashed vertically.
var sy = 0;
var ey = ih;
var py = ih;
while (py > sy) {
var alpha = data[(py - 1) * 4 + 3];
if (alpha === 0) {
ey = py;
} else {
sy = py;
}
py = (ey + sy) >> 1;
}
var ratio = (py / ih);
return (ratio===0)?1:ratio;
}
/**
* A replacement for context.drawImage
* (args are for source and destination).
*/
function drawImageIOSFix(ctx, img, sx, sy, sw, sh, dx, dy, dw, dh) {
var vertSquashRatio = detectVerticalSquash(img);
console.log('ratio: ' + vertSquashRatio);
// Works only if whole image is displayed:
// ctx.drawImage(img, sx, sy, sw, sh, dx, dy, dw, dh / vertSquashRatio);
// The following also works correctly when only a part of the image is displayed:
ctx.drawImage(img, sx * vertSquashRatio, sy * vertSquashRatio,
sw * vertSquashRatio, sh * vertSquashRatio,
dx, dy, dw, dh );
}
console.log('image to unsquish', myImage);
// draw it up and to the left by half the width
// and height of the image
// NOTE: drawImageIOSFix takes a full source rectangle AND a destination
// rectangle (ten arguments); passing only six leaves dx..dh undefined,
// which is likely why no image data was drawn. Pass both rectangles:
drawImageIOSFix(ctx, myImage,
0, 0, myImage.naturalWidth, myImage.naturalHeight,
-tempW/2, -tempH/2, tempW, tempH);
} else {
// draw it up and to the left by half the width
// and height of the image
ctx.drawImage(myImage, -tempW/2, -tempH/2, tempW, tempH);
}
// and restore the co-ords to how they were when we began
ctx.restore();
var dataURL = canvas.toDataURL();
// alert('created image!');
var fileName = undefined;
if ($scope.platform == "iOS") {
// Store only the name for iOS, hard paths are unreliable
var timestamp = new Date().getTime();
fileName = timestamp.toString().concat('t.jpg');
var thumbPath = fileSystem.root.toURL() + "/STL/" + fileName;
var thumbName = "/STL/" + fileName;
} else {
var name = file.name;
var position = name.length - 4;
fileName = name.substr(0, position) + 't.jpg';
var thumbName = fileName;
// thumbName must be assigned before it is used to build the path:
var thumbPath = fileSystem.root.toURL() + "/.STL/" + thumbName;
}
$scope.mediaCollection.thumbNames.push(thumbName);
$scope.mediaCollection.thumbPaths.push(thumbPath);
$scope.mediaCollection.exifData.push(exifTags);
$scope.mediaCollection.Orientation.push(exifTags.Orientation);
canvas.toBlob(function(blob){
console.log(blob.size + ':' + blob.type);
function newFile(fileEntry){
console.log('created new fileEntry');
fileEntry.createWriter(gotFileWriter, fail);
}
function gotFileWriter(writer) {
console.log('got fileWriter');
writer.seek(0);
// window.location = blobUrl;
writer.write(blob);
console.log('wrote blob!');
writeIfReady();
}
console.log('about to get Directory');
console.log('fileSystem root: ', fileSystem.root, $scope.iOS_FS);
console.log('platform: ', $scope.platform);
// can replace if/else with single request using $scope.STL_dir
if ($scope.platform == "iOS") { // '==' (comparison), not '=' (assignment)
// May need maintenance...
fileSystem.root.getDirectory('STL', {create: true}, function(dirEntry) {
console.log('got directory, about to create thumbnail file: ' + fileName);
dirEntry.getFile(fileName, {create: true, exclusive: true}, newFile, fail);
}, fail);
} else {
fileSystem.root.getDirectory('.STL', {create: true}, function(dirEntry) {
dirEntry.getFile(fileName, {create: true, exclusive: true}, newFile, fail);
}, fail);
}
}, "image/jpg");
}
}
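A quick way to sanity-check the iOS fix in isolation is to call the helper on a plain, untransformed canvas with all ten arguments spelled out. A minimal sketch, assuming drawImageIOSFix and detectVerticalSquash have been lifted out of the onload callback, and img is any loaded Image:
// Isolation test: full source rect -> full destination rect, no rotation/scaling.
// If this renders, any remaining problem is in the transform code, not the squish fix.
var testCanvas = document.createElement('canvas');
testCanvas.width = 100;
testCanvas.height = 100;
var testCtx = testCanvas.getContext('2d');
drawImageIOSFix(testCtx, img,
                0, 0, img.naturalWidth, img.naturalHeight, // source rectangle
                0, 0, testCanvas.width, testCanvas.height); // destination rectangle
document.body.appendChild(testCanvas); // inspect visually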

Related

Distorting images using FabricJS filters and custom controls, by dragging the corner control points image resizes from center

I have created a subclass in Fabric.js 4.3.0 extending fabric.Image; it changes the render function so that the image always fits in the bounding box.
I have also created a custom filter for Fabric with which, given 4 corner coordinates, I can distort the image, similar to Photoshop's free transform -> distort tool.
While my code works, the issue is that when I drag the corner controls, the image always resizes from its center, moving the other control points as well.
I have tried to follow the instructions on resizing fabric objects with custom control points; the instructions cover polygons and other shapes, but they do not yield the required result with images.
The result I want to achieve is that, when dragging one of the green control points, the image should distort, but the image and the other control points must stay in their own positions without moving, similar to what you see here: https://youtu.be/Pn-9qFNM6Zg?t=274
Here is a JSFIDDLE for the demo: https://jsfiddle.net/human_a/p6d71skm/
fabric.textureSize = 4096;
// Set default filter backend
fabric.filterBackend = new fabric.WebglFilterBackend();
fabric.isWebglSupported(fabric.textureSize);
fabric.Image.filters.Perspective = class extends fabric.Image.filters.BaseFilter {
/**
* Constructor
* @param {Object} [options] Options object
*/
constructor(options) {
super();
if (options) this.setOptions(options);
this.applyPixelRatio();
}
type = 'Perspective';
pixelRatio = fabric.devicePixelRatio;
bounds = {width: 0, height: 0, minX: 0, maxX: 0, minY: 0, maxY: 0};
hasRelativeCoordinates = true;
/**
* Array of attributes to send with buffers. Do not modify.
* @private
*//** @ts-ignore */
vertexSource = `
precision mediump float;
attribute vec2 aPosition;
attribute vec2 aUvs;
uniform float uStepW;
uniform float uStepH;
varying vec2 vUvs;
vec2 uResolution;
void main() {
vUvs = aUvs;
uResolution = vec2(uStepW, uStepH);
gl_Position = vec4(uResolution * aPosition * 2.0 - 1.0, 0.0, 1.0);
}
`;
fragmentSource = `
precision mediump float;
varying vec2 vUvs;
uniform sampler2D uSampler;
void main() {
gl_FragColor = texture2D(uSampler, vUvs);
}
`;
/**
* Return a map of attribute names to WebGLAttributeLocation objects.
*
* @param {WebGLRenderingContext} gl The canvas context used to compile the shader program.
* @param {WebGLShaderProgram} program The shader program from which to take attribute locations.
* @returns {Object} A map of attribute names to attribute locations.
*/
getAttributeLocations(gl, program) {
return {
aPosition: gl.getAttribLocation(program, 'aPosition'),
aUvs: gl.getAttribLocation(program, 'aUvs'),
};
}
/**
* Send attribute data from this filter to its shader program on the GPU.
*
* @param {WebGLRenderingContext} gl The canvas context used to compile the shader program.
* @param {Object} attributeLocations A map of shader attribute names to their locations.
*/
sendAttributeData(gl, attributeLocations, data, type = 'aPosition') {
const attributeLocation = attributeLocations[type];
if (gl[type + 'vertexBuffer'] == null) {
gl[type + 'vertexBuffer'] = gl.createBuffer();
}
gl.bindBuffer(gl.ARRAY_BUFFER, gl[type+'vertexBuffer']);
gl.enableVertexAttribArray(attributeLocation);
gl.vertexAttribPointer(attributeLocation, 2, gl.FLOAT, false, 0, 0);
gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW);
}
generateSurface() {
const corners = this.perspectiveCoords;
const surface = verb.geom.NurbsSurface.byCorners(...corners);
const tess = surface.tessellate();
return tess;
}
/**
* Apply the resize filter to the image
* Determines whether to use WebGL or Canvas2D based on the options.webgl flag.
*
* @param {Object} options
* @param {Number} options.passes The number of filters remaining to be executed
* @param {Boolean} options.webgl Whether to use webgl to render the filter.
* @param {WebGLTexture} options.sourceTexture The texture setup as the source to be filtered.
* @param {WebGLTexture} options.targetTexture The texture where filtered output should be drawn.
* @param {WebGLRenderingContext} options.context The GL context used for rendering.
* @param {Object} options.programCache A map of compiled shader programs, keyed by filter type.
*/
applyTo(options) {
if (options.webgl) {
const { width, height } = this.getPerspectiveBounds();
options.context.canvas.width = width;
options.context.canvas.height = height;
options.destinationWidth = width;
options.destinationHeight = height;
this.hasRelativeCoordinates && this.calculateCoordsByCorners();
this._setupFrameBuffer(options);
this.applyToWebGL(options);
this._swapTextures(options);
}
}
applyPixelRatio(coords = this.perspectiveCoords) {
for(let i = 0; i < coords.length; i++) {
coords[i][0] *= this.pixelRatio;
coords[i][1] *= this.pixelRatio;
}
return coords;
}
getPerspectiveBounds(coords = this.perspectiveCoords) {
coords = this.perspectiveCoords.slice().map(c => (
{
x: c[0],
y: c[1],
}
));
this.bounds.minX = fabric.util.array.min(coords, 'x') || 0;
this.bounds.minY = fabric.util.array.min(coords, 'y') || 0;
this.bounds.maxX = fabric.util.array.max(coords, 'x') || 0;
this.bounds.maxY = fabric.util.array.max(coords, 'y') || 0;
this.bounds.width = Math.abs(this.bounds.maxX - this.bounds.minX);
this.bounds.height = Math.abs(this.bounds.maxY - this.bounds.minY);
return {
width: this.bounds.width,
height: this.bounds.height,
minX: this.bounds.minX,
maxX: this.bounds.maxX,
minY: this.bounds.minY,
maxY: this.bounds.maxY,
};
}
/**
* @description Coordinates come in relative to the mockup item sections;
* the following function normalizes them based on the canvas corners.
*
* @param {number[]} coords
*/
calculateCoordsByCorners(coords = this.perspectiveCoords) {
for(let i = 0; i < coords.length; i++) {
coords[i][0] -= this.bounds.minX;
coords[i][1] -= this.bounds.minY;
}
}
/**
* Apply this filter using webgl.
*
* @param {Object} options
* @param {Number} options.passes The number of filters remaining to be executed
* @param {Boolean} options.webgl Whether to use webgl to render the filter.
* @param {WebGLTexture} options.originalTexture The texture of the original input image.
* @param {WebGLTexture} options.sourceTexture The texture setup as the source to be filtered.
* @param {WebGLTexture} options.targetTexture The texture where filtered output should be drawn.
* @param {WebGLRenderingContext} options.context The GL context used for rendering.
* @param {Object} options.programCache A map of compiled shader programs, keyed by filter type.
*/
applyToWebGL(options) {
const gl = options.context;
const shader = this.retrieveShader(options);
const tess = this.generateSurface(options.sourceWidth, options.sourceHeight);
const indices = new Uint16Array(_.flatten(tess.faces));
// Clear the canvas first
this.clear(gl); // !important
// bind texture buffer
this.bindTexture(gl, options);
gl.useProgram(shader.program);
// create the buffer
this.indexBuffer(gl, indices);
this.sendAttributeData(gl, shader.attributeLocations, new Float32Array(_.flatten(tess.points)), 'aPosition');
this.sendAttributeData(gl, shader.attributeLocations, new Float32Array(_.flatten(tess.uvs)), 'aUvs');
gl.uniform1f(shader.uniformLocations.uStepW, 1 / gl.canvas.width);
gl.uniform1f(shader.uniformLocations.uStepH, 1 / gl.canvas.height);
this.sendUniformData(gl, shader.uniformLocations);
gl.viewport(0, 0, options.destinationWidth, options.destinationHeight);
// enable indices up to 4294967296 for webGL 1.0
gl.getExtension('OES_element_index_uint');
gl.drawElements(gl.TRIANGLES, indices.length, gl.UNSIGNED_SHORT, 0);
}
clear(gl) {
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
}
bindTexture(gl, options) {
if (options.pass === 0 && options.originalTexture) {
gl.bindTexture(gl.TEXTURE_2D, options.originalTexture);
} else {
gl.bindTexture(gl.TEXTURE_2D, options.sourceTexture);
}
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
}
indexBuffer(gl, data) {
const indexBuffer = gl.createBuffer();
// make this buffer the current 'ELEMENT_ARRAY_BUFFER'
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
// Fill the current element array buffer with data
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, data, gl.STATIC_DRAW);
}
};
/**
* Returns filter instance from an object representation
* @static
* @param {Object} object Object to create an instance from
* @param {function} [callback] to be invoked after filter creation
* @return {fabric.Image.filters.Perspective} Instance of fabric.Image.filters.Perspective
*/
fabric.Image.filters.Perspective.fromObject = fabric.Image.filters.BaseFilter.fromObject;
/**
* Photo subclass
* @class fabric.Photo
* @extends fabric.Image
* @return {fabric.Photo} thisArg
*
*/
fabric.Photo = class extends fabric.Image {
type = 'photo';
repeat = 'no-repeat';
fill = 'transparent';
initPerspective = true;
cacheProperties = fabric.Image.prototype.cacheProperties.concat('perspectiveCoords');
constructor(src, options) {
super(options);
if (options) this.setOptions(options);
this.on('added', () => {
const image = new Image();
image.setAttribute('crossorigin', 'anonymous');
image.onload = () => {
this._initElement(image, options);
this.width = image.width / 2;
this.height = image.height / 2;
this.loaded = true;
this.setCoords();
this.fire('image:loaded');
};
image.src = src;
this.on('image:loaded', () => {
!this.perspectiveCoords && this.getInitialPerspective();
this.togglePerspective();
this.canvas.requestRenderAll();
});
});
}
/**
* @private
* @param {CanvasRenderingContext2D} ctx Context to render on
*//** @ts-ignore */
_render(ctx) {
fabric.util.setImageSmoothing(ctx, this.imageSmoothing);
if (this.isMoving !== true && this.resizeFilter && this._needsResize()) {
this.applyResizeFilters();
}
this._stroke(ctx);
this._renderPaintInOrder(ctx);
}
/**
* @private
* @param {CanvasRenderingContext2D} ctx Context to render on
*//** @ts-ignore */
_renderFill(ctx) {
var elementToDraw = this._element;
if (!elementToDraw) return;
ctx.save();
const elWidth = elementToDraw.naturalWidth || elementToDraw.width;
const elHeight = elementToDraw.naturalHeight || elementToDraw.height;
const width = this.width;
const height = this.height;
ctx.translate(-width / 2, -height / 2);
// get the scale
const scale = Math.min(width / elWidth, height / elHeight);
// get the top left position of the image
const x = (width / 2) - (elWidth / 2) * scale;
const y = (height / 2) - (elHeight / 2) * scale;
ctx.drawImage(elementToDraw, x, y, elWidth * scale, elHeight * scale);
ctx.restore();
}
togglePerspective(mode = true) {
this.set('perspectiveMode', mode);
// this.set('hasBorders', !mode);
if (mode === true) {
this.set('layout', 'fit');
var lastControl = this.perspectiveCoords.length - 1;
this.controls = this.perspectiveCoords.reduce((acc, coord, index) => {
const anchorIndex = index > 0 ? index - 1 : lastControl;
let name = `prs${index + 1}`;
acc[name] = new fabric.Control({
name,
x: -0.5,
y: -0.5,
actionHandler: this._actionWrapper(anchorIndex, (_, transform, x, y) => {
const target = transform.target;
const localPoint = target.toLocalPoint(new fabric.Point(x, y), 'left', 'top');
coord[0] = localPoint.x / target.scaleX * fabric.devicePixelRatio;
coord[1] = localPoint.y / target.scaleY * fabric.devicePixelRatio;
target.setCoords();
target.applyFilters();
return true;
}),
positionHandler: function (dim, finalMatrix, fabricObject) {
const zoom = fabricObject.canvas.getZoom();
const scalarX = fabricObject.scaleX * zoom / fabric.devicePixelRatio;
const scalarY = fabricObject.scaleY * zoom / fabric.devicePixelRatio;
var point = fabric.util.transformPoint({
x: this.x * dim.x + this.offsetX + coord[0] * scalarX,
y: this.y * dim.y + this.offsetY + coord[1] * scalarY,
}, finalMatrix
);
return point;
},
cursorStyleHandler: () => 'cell',
render: function(ctx, left, top, _, fabricObject) {
const zoom = fabricObject.canvas.getZoom();
const scalarX = fabricObject.scaleX * zoom / fabric.devicePixelRatio;
const scalarY = fabricObject.scaleY * zoom / fabric.devicePixelRatio;
ctx.save();
ctx.translate(left, top);
ctx.rotate(fabric.util.degreesToRadians(fabricObject.angle));
ctx.beginPath();
ctx.moveTo(0, 0);
ctx.strokeStyle = 'green';
if (fabricObject.perspectiveCoords[index + 1]) {
ctx.strokeStyle = 'green';
ctx.lineTo(
(fabricObject.perspectiveCoords[index + 1][0] - coord[0]) * scalarX,
(fabricObject.perspectiveCoords[index + 1][1] - coord[1]) * scalarY,
);
} else {
ctx.lineTo(
(fabricObject.perspectiveCoords[0][0] - coord[0]) * scalarX,
(fabricObject.perspectiveCoords[0][1] - coord[1]) * scalarY,
);
}
ctx.stroke();
ctx.beginPath();
ctx.arc(0, 0, 4, 0, Math.PI * 2);
ctx.closePath();
ctx.fillStyle = 'green';
ctx.fill();
ctx.stroke();
ctx.restore();
},
offsetX: 0,
offsetY: 0,
actionName: 'perspective-coords',
});
return acc;
}, {});
} else {
this.controls = fabric.Photo.prototype.controls;
}
this.canvas.requestRenderAll();
}
_actionWrapper(anchorIndex, fn) {
return function(eventData, transform, x, y) {
if (!transform || !eventData) return;
const { target } = transform;
target._resetSizeAndPosition(anchorIndex);
const actionPerformed = fn(eventData, transform, x, y);
return actionPerformed;
};
}
/**
* @description Manually reset the bounding box after the points update.
*
* @see http://fabricjs.com/custom-controls-polygon
* @param {number} index
*/
_resetSizeAndPosition = (index, apply = true) => {
const absolutePoint = fabric.util.transformPoint({
x: this.perspectiveCoords[index][0],
y: this.perspectiveCoords[index][1],
}, this.calcTransformMatrix());
this._setPositionDimensions({});
const penBaseSize = this._getNonTransformedDimensions();
const newX = (this.perspectiveCoords[index][0]) / penBaseSize.x;
const newY = (this.perspectiveCoords[index][1]) / penBaseSize.y;
this.setPositionByOrigin(absolutePoint, newX + 0.5, newY + 0.5);
apply && this._applyPointsOffset();
}
/**
* This is a modified version of the internal fabric function;
* it helps determine the size and location of the path.
*
* @param {object} options
*/
_setPositionDimensions(options) {
const { left, top, width, height } = this._calcDimensions(options);
this.width = width;
this.height = height;
var correctLeftTop = this.translateToGivenOrigin(
{
x: left,
y: top,
},
'left',
'top',
this.originX,
this.originY
);
if (typeof options.left === 'undefined') {
this.left = correctLeftTop.x;
}
if (typeof options.top === 'undefined') {
this.top = correctLeftTop.y;
}
this.pathOffset = {
x: left,
y: top,
};
return { left, top, width, height };
}
/**
* @description This is based on fabric.Path._calcDimensions.
*
* @private
*/
_calcDimensions() {
const coords = this.perspectiveCoords.slice().map(c => (
{
x: c[0] / fabric.devicePixelRatio,
y: c[1] / fabric.devicePixelRatio,
}
));
const minX = fabric.util.array.min(coords, 'x') || 0;
const minY = fabric.util.array.min(coords, 'y') || 0;
const maxX = fabric.util.array.max(coords, 'x') || 0;
const maxY = fabric.util.array.max(coords, 'y') || 0;
const width = Math.abs(maxX - minX);
const height = Math.abs(maxY - minY);
return {
left: minX,
top: minY,
width: width,
height: height,
};
}
/**
* @description This is a modified version of the internal fabric function;
* it subtracts the path offset from each path point.
*/
_applyPointsOffset() {
for (let i = 0; i < this.perspectiveCoords.length; i++) {
const coord = this.perspectiveCoords[i];
coord[0] -= this.pathOffset.x;
coord[1] -= this.pathOffset.y;
}
}
/**
* @description Generate the initial coordinates for warping, based on image dimensions.
*
*/
getInitialPerspective() {
let w = this.getScaledWidth();
let h = this.getScaledHeight();
const perspectiveCoords = [
[0, 0], // top left
[w, 0], // top right
[w, h], // bottom right
[0, h], // bottom left
];
this.perspectiveCoords = perspectiveCoords;
const perspectiveFilter = new fabric.Image.filters.Perspective({
hasRelativeCoordinates: false,
pixelRatio: fabric.devicePixelRatio, // the Photo is already retina ready
perspectiveCoords
});
this.filters.push(perspectiveFilter);
this.applyFilters();
return perspectiveCoords;
}
};
/**
* Creates an instance of fabric.Photo from its object representation
* @static
* @param {Object} object Object to create an instance from
* @param {Function} callback Callback to invoke when an image instance is created
*/
fabric.Photo.fromObject = function(_object, callback) {
const object = fabric.util.object.clone(_object);
object.layout = _object.layout;
fabric.util.loadImage(object.src, function(img, isError) {
if (isError) {
callback && callback(null, true);
return;
}
fabric.Photo.prototype._initFilters.call(object, object.filters, function(filters) {
object.filters = filters || [];
fabric.Photo.prototype._initFilters.call(object, [object.resizeFilter], function(resizeFilters) {
object.resizeFilter = resizeFilters[0];
fabric.util.enlivenObjects([object.clipPath], function(enlivedProps) {
object.clipPath = enlivedProps[0];
var image = new fabric.Photo(img, object);
callback(image, false);
});
});
});
}, null, object.crossOrigin || 'anonymous');
};
const canvas = new fabric.Canvas(document.getElementById('canvas'), {
backgroundColor: 'white',
enableRetinaScaling: true,
});
function resizeCanvas() {
canvas.setWidth(window.innerWidth);
canvas.setHeight(window.innerHeight);
}
resizeCanvas();
window.addEventListener('resize', () => resizeCanvas(), false);
const photo = new fabric.Photo('https://cdn.artboard.studio/private/5cb9c751-5f17-4062-adb7-6ec2c137a65d/user_uploads/5bafe170-1580-4d6b-a3be-f5cdce22d17d-asdasdasd.jpg', {
left: canvas.getWidth() / 2,
top: canvas.getHeight() / 2,
originX: 'center',
originY: 'center',
});
canvas.add(photo);
canvas.setActiveObject(photo);
body {
margin: 0;
}
<script src="https://cdn.jsdelivr.net/npm/lodash#4.17.20/lodash.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/verb-nurbs-web#2.1.3/build/js/verb.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/fabric#4.3.0/dist/fabric.min.js"></script>
<canvas id="canvas"></canvas>
I suspect that the reference to absolutePoint in _resetSizeAndPosition needs to take into account the origin for the image and that there is a simple fix to this issue. However, I didn't find a good way to do this and resorted to manually "correcting" this issue in _resetSizeAndPosition.
The modified version of _resetSizeAndPosition looks like so:
_resetSizeAndPosition = (index, apply = true) => {
const absolutePoint = fabric.util.transformPoint({
x: this.perspectiveCoords[index][0],
y: this.perspectiveCoords[index][1],
}, this.calcTransformMatrix());
let { height, width, left, top } = this._calcDimensions({});
const widthDiff = (width - this.width) / 2;
if ((left < 0 && widthDiff > 0) || (left > 0 && widthDiff < 0)) {
absolutePoint.x -= widthDiff;
} else {
absolutePoint.x += widthDiff;
}
const heightDiff = (height - this.height) / 2;
if ((top < 0 && heightDiff > 0) || (top > 0 && heightDiff < 0)) {
absolutePoint.y -= heightDiff;
} else {
absolutePoint.y += heightDiff;
}
this._setPositionDimensions({});
const penBaseSize = this._getNonTransformedDimensions();
const newX = (this.perspectiveCoords[index][0]) / penBaseSize.x;
const newY = (this.perspectiveCoords[index][1]) / penBaseSize.y;
this.setPositionByOrigin(absolutePoint, newX + 0.5, newY + 0.5);
apply && this._applyPointsOffset();
}
The basic principle for this approach is that the left and top properties of the object are never being updated. This can be seen in your example through the console by modifying the image and checking the properties on the image. Therefore, we need to apply a correction to the position properties based on the changing width and height. This ensures that other points stay fixed in place, since we compensate for the changing height and width of the image in its position.
By comparing the values of width and this.width it's possible to determine whether the image is increasing or decreasing in size. The value of left indicates whether the stretch is occurring on the left or right side of the image. If the user is stretching the image to the left or shrinking it from the right, then we need to subtract the width difference from the horizontal position. By combining these conditions, we can tell how to modify the position of the image to compensate. The same approach used for the horizontal values is also applied to the vertical values.
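As a concrete illustration of that rule (a sketch with made-up numbers, not part of the fiddle):
// The image was 200px wide (this.width); dragging a left-side control point
// outward makes the new bounds 240px wide with left = -40.
var oldWidth = 200, newWidth = 240, left = -40;
var widthDiff = (newWidth - oldWidth) / 2; // +20: the image grew
var correction;
if ((left < 0 && widthDiff > 0) || (left > 0 && widthDiff < 0)) {
    correction = -widthDiff; // growth happened on the left: shift the anchor left
} else {
    correction = widthDiff;
}
console.log(correction); // -20, keeping the right edge and its control points fixed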
JSFiddle: https://jsfiddle.net/0x8caow6/

Zoom and pan two images simultaneously in opencv

I have two images of similar size that show similar scenes. How can I show the two images in two frames so that panning or zooming in the left image pans and zooms the right one as well? I don't want to concatenate the images, though.
Is there a solution to do this? Both python or c++ OpenCV are fine.
About zoom in/out:
The basic idea is to decide how much the scale changes on every mouse-wheel event. Once you have the current scale (relative to the original image) and the region of the image you want to show on screen, you can compute the position and size of that region's rectangle on the scaled image and draw it.
In my GitHub repo, checking OnMouseWheel() and RefreshSrcView() in Fastest_Image_Pattern_Matching/ELCVMatchTool/ELCVMatchToolDlg.cpp may give you what you want.
About showing two images simultaneously with the same region:
use two picture boxes with the MFC framework or another UI builder,
or use two cv::namedWindow() calls without a framework.
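The heart of the wheel handler below is the scroll compensation that keeps the pixel under the cursor fixed while the scale changes. A minimal sketch of that math (JavaScript here for brevity; the names mirror m_hScrollBar.GetScrollPos(), iMouseOffsetX and m_dNewScale in the C++ code, and the MFC version additionally tracks a sub-pixel compensation term):
// Keep the source-image pixel under the cursor stationary across a zoom step.
function zoomAtCursor(scrollX, mouseOffsetX, scale, newScale) {
    // Which source pixel is currently under the cursor:
    var pixelX = (scrollX + mouseOffsetX) / scale;
    // Scroll position that puts that same pixel back under the cursor:
    return Math.round(pixelX * newScale - mouseOffsetX);
}
// Applying the same scale and scroll position to the second image's view
// keeps both windows showing the same region.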
Part of the code:
BOOL CELCVMatchToolDlg::OnMouseWheel (UINT nFlags, short zDelta, CPoint pt)
{
POINT pointCursor;
GetCursorPos (&pointCursor);
ScreenToClient (&pointCursor);
// TODO: add your message handler code here and/or call the default
if (zDelta > 0)
{
if (m_iScaleTimes == MAX_SCALE_TIMES)
return TRUE;
else
m_iScaleTimes++;
}
if (zDelta < 0)
{
if (m_iScaleTimes == MIN_SCALE_TIMES)
return TRUE;
else
m_iScaleTimes--;
}
CRect rect;
//GetWindowRect (rect);
GetDlgItem (IDC_STATIC_SRC_VIEW)->GetWindowRect (rect); // important
if (m_iScaleTimes == 0)
g_dCompensationX = g_dCompensationY = 0;
int iMouseOffsetX = pt.x - (rect.left + 1);
int iMouseOffsetY = pt.y - (rect.top + 1);
double dPixelX = (m_hScrollBar.GetScrollPos () + iMouseOffsetX + g_dCompensationX) / m_dNewScale;
double dPixelY = (m_vScrollBar.GetScrollPos () + iMouseOffsetY + g_dCompensationY) / m_dNewScale;
m_dNewScale = m_dSrcScale * pow (SCALE_RATIO, m_iScaleTimes);
if (m_iScaleTimes != 0)
{
int iWidth = m_matSrc.cols;
int iHeight = m_matSrc.rows;
m_hScrollBar.SetScrollRange (0, int (m_dNewScale * iWidth - m_dSrcScale * iWidth) - 1 + BAR_SIZE);
m_vScrollBar.SetScrollRange (0, int (m_dNewScale * iHeight - m_dSrcScale * iHeight) - 1 + BAR_SIZE);
int iBarPosX = int (dPixelX * m_dNewScale - iMouseOffsetX + 0.5);
m_hScrollBar.SetScrollPos (iBarPosX);
m_hScrollBar.ShowWindow (SW_SHOW);
g_dCompensationX = -iBarPosX + (dPixelX * m_dNewScale - iMouseOffsetX);
int iBarPosY = int (dPixelY * m_dNewScale - iMouseOffsetY + 0.5);
m_vScrollBar.SetScrollPos (iBarPosY);
m_vScrollBar.ShowWindow (SW_SHOW);
g_dCompensationY = -iBarPosY + (dPixelY * m_dNewScale - iMouseOffsetY);
// scroll-bar thumb size
SCROLLINFO infoH;
infoH.cbSize = sizeof (SCROLLINFO);
infoH.fMask = SIF_PAGE;
infoH.nPage = BAR_SIZE;
m_hScrollBar.SetScrollInfo (&infoH);
SCROLLINFO infoV;
infoV.cbSize = sizeof (SCROLLINFO);
infoV.fMask = SIF_PAGE;
infoV.nPage = BAR_SIZE;
m_vScrollBar.SetScrollInfo (&infoV);
// scroll-bar thumb size
}
else
{
m_hScrollBar.SetScrollPos (0);
m_hScrollBar.ShowWindow (SW_HIDE);
m_vScrollBar.SetScrollPos (0);
m_vScrollBar.ShowWindow (SW_HIDE);
}
RefreshSrcView ();
return CDialogEx::OnMouseWheel (nFlags, zDelta, pt);
}

Konvajs filter fill transparent areas within image

Is there a filter in Konva that can fill closed transparent areas of an image with white?
This is the original image
This is a target image
I'm currently using a Konva custom filter for the border: https://konvajs.github.io/docs/filters/Custom_Filter.html
var canvas, tempCanvas
function initBorderCanvas () {
canvas = document.createElement('canvas')
tempCanvas = document.createElement('canvas')
}
// make all pixels 100% opaque (except pixels that are 100% transparent)
function removeTransparency (canvas) {
var ctx = canvas.getContext('2d')
var imageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
var nPixels = imageData.data.length
for (var i = 3; i < nPixels; i += 4) {
if (imageData.data[i] > 0) {
imageData.data[i] = 255
}
}
ctx.clearRect(0, 0, canvas.width, canvas.height)
ctx.putImageData(imageData, 0, 0)
return canvas
}
function Border (imageData) {
var nPixels = imageData.data.length
var ratio = this._cache.canvas.scene.pixelRatio
var size = (this.getAttr('borderSize') || 0) * ratio
if (size === 0) {
return imageData
}
// - first set correct dimensions for canvases
canvas.width = imageData.width
canvas.height = imageData.height
tempCanvas.width = imageData.width
tempCanvas.height = imageData.height
// - then draw the original shape into the temp canvas
tempCanvas.getContext('2d').putImageData(imageData, 0, 0)
// - then we need to remove the alpha channel, because it affects the shadow (transparent shapes have a smaller shadow)
removeTransparency(tempCanvas)
var ctx = canvas.getContext('2d')
var color = this.getAttr('borderColor') || 'black'
// - we will use the shadow as the border,
// so we just need to apply a shadow to the original image
ctx.save()
ctx.shadowColor = color
ctx.shadowBlur = size
ctx.drawImage(tempCanvas, 0, 0)
ctx.restore()
// - then we will dive into the image data of [original image + shadow]
// and remove transparency from the shadow
var tempImageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
var SMOOTH_MIN_THRESHOLD = 0
var SMOOTH_MAX_THRESHOLD = 0
let val, hasValue
var offset = 3
for (var i = 3; i < nPixels; i += 4) {
// skip opaque pixels
if (imageData.data[i] === 255) {
continue
}
val = tempImageData.data[i]
hasValue = val !== 0
if (!hasValue) {
continue
}
if (val > SMOOTH_MAX_THRESHOLD) {
val = 255
} else if (val < SMOOTH_MIN_THRESHOLD) {
val = 0
} else {
val =
((val - SMOOTH_MIN_THRESHOLD) /
(SMOOTH_MAX_THRESHOLD - SMOOTH_MIN_THRESHOLD)) *
255
}
tempImageData.data[i] = val
}
// draw the resulting image (original + shadow without opacity) into the canvas
ctx.putImageData(tempImageData, 0, 0)
// then fill the whole image with color (after that, the shadow is colored)
ctx.save()
ctx.globalCompositeOperation = 'source-in'
ctx.fillStyle = color
ctx.fillRect(0, 0, canvas.width, canvas.height)
ctx.restore()
// then we need to copy colored shadow into original imageData
var newImageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
var indexesToProcess = []
for (var i = 3; i < nPixels; i += 4) {
var hasTransparentOnTop =
imageData.data[i - imageData.width * 4 * offset] === 0
var hasTransparentOnTopRight =
imageData.data[i - (imageData.width * 4 + 4) * offset] === 0
var hasTransparentOnTopLeft =
imageData.data[i - (imageData.width * 4 - 4) * offset] === 0
var hasTransparentOnRight = imageData.data[i + 4 * offset] === 0
var hasTransparentOnLeft = imageData.data[i - 4 * offset] === 0
var hasTransparentOnBottom =
imageData.data[i + imageData.width * 4 * offset] === 0
var hasTransparentOnBottomRight =
imageData.data[i + (imageData.width * 4 + 4) * offset] === 0
var hasTransparentOnBottomLeft =
imageData.data[i + (imageData.width * 4 - 4) * offset] === 0
var hasTransparentAround =
hasTransparentOnTop ||
hasTransparentOnRight ||
hasTransparentOnLeft ||
hasTransparentOnBottom ||
hasTransparentOnTopRight ||
hasTransparentOnTopLeft ||
hasTransparentOnBottomRight ||
hasTransparentOnBottomLeft
// if the pixel is present in the original image, skip it,
// because we only need to change the shadow area
if (
imageData.data[i] === 255 ||
(imageData.data[i] && !hasTransparentAround)
) {
continue
}
if (!newImageData.data[i]) {
// skip transparent pixels
continue
}
indexesToProcess.push(i)
}
for (var index = 0; index < indexesToProcess.length; index += 1) {
var i = indexesToProcess[index]
var alpha = imageData.data[i] / 255
imageData.data[i] = newImageData.data[i]
imageData.data[i - 1] =
newImageData.data[i - 1] * (1 - alpha) + imageData.data[i - 1] * alpha
imageData.data[i - 2] =
newImageData.data[i - 2] * (1 - alpha) + imageData.data[i - 2] * alpha
imageData.data[i - 3] =
newImageData.data[i - 3] * (1 - alpha) + imageData.data[i - 3] * alpha
}
}
export { initBorderCanvas, Border }
It does a good job of drawing a border around the image.
Konva custom filter applied
Is there a way to fill the circles as on target image?
Here is the fiddle: http://jsfiddle.net/ecsy6hb4/34/
In the end I used https://github.com/sakri/MarchingSquaresJS. It is an algorithm for finding outline points. Once I had these, I drew a closed polygon and filled its interior. That solved my problem.
Outline points example
These red points are the outline points returned by the marching squares algorithm.
fiddle: jsfiddle.net/ecsy6hb4/42/
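For reference, a minimal sketch of that fill-by-outline approach. getOutlinePoints is a stand-in for the library call that returns the outline (the actual MarchingSquaresJS entry point may be named differently):
// Paint the blob's outline polygon white underneath the original image,
// which fills the closed transparent areas inside it.
function fillInsideOutline (image, getOutlinePoints) {
  var canvas = document.createElement('canvas')
  canvas.width = image.width
  canvas.height = image.height
  var ctx = canvas.getContext('2d')
  ctx.drawImage(image, 0, 0)
  // Hypothetical helper: returns the outer outline as [{x, y}, ...]
  var points = getOutlinePoints(ctx.getImageData(0, 0, canvas.width, canvas.height))
  ctx.save()
  ctx.globalCompositeOperation = 'destination-over' // draw behind the image
  ctx.beginPath()
  ctx.moveTo(points[0].x, points[0].y)
  for (var i = 1; i < points.length; i++) {
    ctx.lineTo(points[i].x, points[i].y)
  }
  ctx.closePath()
  ctx.fillStyle = 'white'
  ctx.fill()
  ctx.restore()
  return canvas
}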

How can I implement Shadow Volumes in WebGL

I have some questions about drawing shadows of a .obj model in a WebGL scene.
For example, if I want to draw shadows with the Shadow Volumes method, how should I develop this? I have tried to implement it but failed. Are there more efficient methods to do this (requiring less code)?
Below is the code:
function createShadowBuilder(item) {
var that = function() {};
that.init = function(item) {
this.item = item;
this.glPositionBuffer = null;
this.glVertexIndexBuffer = null;
};
that.setupData = function() {
if (this.glPositionBuffer !== null) {
gl.deleteBuffer(this.glPositionBuffer);
}
if (this.glVertexIndexBuffer !== null) {
gl.deleteBuffer(this.glVertexIndexBuffer);
}
this.glVertices = [];
this.glIndices = [];
};
that.addGLVertex = function(vector) {
this.glVertices.push(vector[0]);
this.glVertices.push(vector[1]);
this.glVertices.push(vector[2]);
this.glIndices.push(this.glIndices.length);
};
that.addShadowSide = function(vector1, vector2, vector3, vector4) {
this.addGLVertex(vector1);
this.addGLVertex(vector2);
this.addGLVertex(vector3);
this.addGLVertex(vector4);
this.addGLVertex(vector3);
this.addGLVertex(vector2);
};
/**
* Check which triangles face the light source...
**/
that.checkDirection = function(lightLocation) {
var triangles = this.item.triangles,
triangle,
vector,
i = triangles.length;
while (i) {
i--;
// Create a normalized vector based on the vector from
// the center of the triangle to the lights position...
triangle = triangles[i];
vector = vec3.create(triangle.center);
vector = vec3.normalize(vec3.subtract(vector, lightLocation));
// Compare the vector with the normal of the triangle...
triangle.visible = (vec3.dot(vector, triangle.normal) < 0);
}
}
/**
* Find the edge of the object...
**/
that.findEdge = function() {
var triangles = this.item.triangles,
triangle,
a, b,
lines = this.item.lines,
line,
lineSidesHash = {},
i, j, k;
this.lineSides = [];
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) {
j = 3;
while (j) {
j--;
// Check if the side...
k = triangle.lines[j];
line = lines[k];
a = line.v1 + '_' + line.v2;
b = line.v2 + '_' + line.v1;
if (lineSidesHash[a] !== undefined) { // Check the v1 -> v2 direction...
// The side already exists, remove it...
delete(lineSidesHash[a]);
}
else if (lineSidesHash[b] !== undefined) { // Check the v2 -> v1 direction...
// The side already exists, remove it...
delete(lineSidesHash[b]);
}
else {
// It's a new side, add it to the list...
lineSidesHash[a] = k;
}
}
}
}
// Convert the hash map to an array...
for (i in lineSidesHash) {
line = lines[lineSidesHash[i]];
this.lineSides.push(line);
}
};
that.rotateVectorX = function(vector, angle) {
var y, z, // bug fix: 'z' was previously undeclared (the original read 'var x, y')
sin, cos;
if (angle === 0) {
return;
}
y = vector[1];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[1] = y * cos - z * sin;
vector[2] = y * sin + z * cos;
};
that.rotateVectorY = function(vector, angle) {
var x, z,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = z * sin + x * cos;
vector[2] = z * cos - x * sin;
};
that.rotateVectorZ = function(vector, angle) {
var x, y,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
y = vector[1];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = x * cos - y * sin;
vector[1] = x * sin + y * cos;
};
/**
* Update the shadow...
**/
that.update = function(lightLocation, lightAngle, matrix, zoom) {
// Get the position of the light from the matrix, remove the zoom value...
var vector = vec3.subtract(vec3.create(lightLocation), [matrix[12], matrix[13], matrix[14] + zoom]),
sin, cos,
x, y, z;
// Instead of rotating the object to face the light at the
// right angle it's a lot faster to rotate the light in the
// reverse direction...
this.rotateVectorX(vector, -lightAngle[0]);
this.rotateVectorY(vector, -lightAngle[1]);
this.rotateVectorZ(vector, -lightAngle[2]);
// Store the location for later use...
this.lightLocation = vector;
this.setupData(); // Reset all lists and buffers...
this.checkDirection(vector); // Check which triangles face the light source...
this.findEdge(); // Find the edge...
};
/**
* Create the buffers for the shadow volume...
**/
that.createVolume = function(lightLocation) {
var vertices = this.item.vertices,
triangles = this.item.triangles,
triangle,
lineSides = this.lineSides,
line,
vector1, vector2, vector3, vector4,
i = lineSides.length,
j;
while (i) { // For all edge lines...
i--;
line = lineSides[i];
vector1 = vertices[line.v1];
vector2 = vertices[line.v2];
// Extrude the line away from the light...
// Get the vector from the light position to the vertex...
vector3 = vec3.subtract(vector1, lightLocation, vec3.create());
// Add the normalized vector scaled with the volume
// depth to the vertex which gives a point on the other
// side of the object than the light source...
vector3 = vec3.add(vec3.scale(vec3.normalize(vector3), 30), vector1);
// And again for the second point on the line...
vector4 = vec3.subtract(vector2, lightLocation, vec3.create());
vector4 = vec3.add(vec3.scale(vec3.normalize(vector4), 30), vector2);
this.addShadowSide(vector1, vector2, vector3, vector4);
}
// Add the end caps to the volume...
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) { // Only add polygons facing the light...
// Add the top...
j = 3;
while (j) {
j--;
this.addGLVertex(vertices[triangle.vertices[j]]);
}
// Add the bottom...
j = 0;
while (j < 3) {
vector1 = vertices[triangle.vertices[j]];
vector2 = vec3.subtract(vector1, lightLocation, vec3.create());
this.addGLVertex(vec3.add(vec3.scale(vec3.normalize(vector2), 30), vector1));
j++;
}
}
}
// Create the vertex position buffer...
this.glPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(this.glVertices), gl.STATIC_DRAW);
this.glPositionBuffer.itemSize = 3;
// Create the vertex index buffer...
this.glVertexIndexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(this.glIndices), gl.STATIC_DRAW);
this.glVertexIndexBuffer.numItems = this.glIndices.length;
};
that.render = function() {
// Create the volume for the light...
this.createVolume(this.lightLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, this.glPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
setMatrixUniforms();
// Disable the texture coord attribute...
gl.disableVertexAttribArray(shaderProgram.textureCoordAttribute);
// Disable the normal attribute...
gl.disableVertexAttribArray(shaderProgram.vertexNormalAttribute);
// Disable the color attribute...
gl.disableVertexAttribArray(shaderProgram.vertexColorAttribute);
// Render both front and back facing polygons with different stencil operations...
gl.disable(gl.CULL_FACE);
gl.enable(gl.STENCIL_TEST);
gl.depthFunc(gl.LESS);
// Disable rendering to the color buffer...
gl.colorMask(false, false, false, false);
// Disable z buffer updating...
gl.depthMask(false);
// Allow all bits in the stencil buffer...
gl.stencilMask(255);
// Increase the stencil buffer for back-facing polygons (z-pass operator)
gl.stencilOpSeparate(gl.BACK, gl.KEEP, gl.KEEP, gl.INCR);
// Decrease the stencil buffer for front-facing polygons (z-pass operator)
gl.stencilOpSeparate(gl.FRONT, gl.KEEP, gl.KEEP, gl.DECR);
// Always pass...
gl.stencilFunc(gl.ALWAYS, 0, 255);
gl.drawElements(gl.TRIANGLES, this.glVertexIndexBuffer.numItems, gl.UNSIGNED_SHORT, 0);
// Enable rendering to the color and depth buffers again...
gl.colorMask(true, true, true, true);
gl.depthMask(true);
gl.disable(gl.STENCIL_TEST);
};
that.init(item);
return that;
}
This code is taken from an example on the web; I'm trying to adapt it.
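One detail worth noting: the render pass above only writes the stencil buffer, and it assumes the WebGL context was created with stencil enabled (canvas.getContext('webgl', { stencil: true })). The usual final step of the shadow volume technique, which the code does not show, is to darken every pixel whose stencil value is non-zero, typically with a blended full-screen quad. A rough sketch, where shadowQuadBuffer (a full-screen quad) and colorShader (a shader drawing a uniform color) are assumed helpers:
// Final shadow pass: darken the pixels the volume pass marked in the stencil.
function renderShadowOverlay() {
    gl.enable(gl.STENCIL_TEST);
    gl.stencilFunc(gl.NOTEQUAL, 0, 255); // pass only inside shadow volumes
    gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP); // leave the stencil untouched
    gl.disable(gl.DEPTH_TEST);
    gl.enable(gl.BLEND);
    gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
    gl.useProgram(colorShader.program);
    gl.uniform4f(colorShader.colorUniform, 0.0, 0.0, 0.0, 0.5); // translucent black
    gl.bindBuffer(gl.ARRAY_BUFFER, shadowQuadBuffer);
    gl.vertexAttribPointer(colorShader.positionAttribute, 2, gl.FLOAT, false, 0, 0);
    gl.enableVertexAttribArray(colorShader.positionAttribute);
    gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
    gl.disable(gl.BLEND);
    gl.enable(gl.DEPTH_TEST);
    gl.disable(gl.STENCIL_TEST);
}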

Is it possible to have a title (on an axis) that rotates when the draggable box (3D) rotates?

I'm trying to change some things in the "3D scatter chart with draggable box" demo:
http://www.highcharts.com/demo/3d-scatter-draggable/grid-light
It is possible to add a title to an axis, but can the title rotate when I turn the box?
Now, if I add a title this way:
yAxis: {
title: {
text: "Latitude"
}
},
When I rotate the box, the title doesn't follow the yAxis.
This is the code to rotate the chart:
$(chart.container).bind('mousedown.hc touchstart.hc', function (e) {
e = chart.pointer.normalize(e);
var posX = e.pageX,
posY = e.pageY,
alpha = chart.options.chart.options3d.alpha,
beta = chart.options.chart.options3d.beta,
newAlpha,
newBeta,
sensitivity = 5; // lower is more sensitive
$(document).bind({
'mousemove.hc touchdrag.hc': function (e) {
// Run beta
newBeta = beta + (posX - e.pageX) / sensitivity;
newBeta = Math.min(100, Math.max(-100, newBeta));
chart.options.chart.options3d.beta = newBeta;
// Run alpha
newAlpha = alpha + (e.pageY - posY) / sensitivity;
newAlpha = Math.min(100, Math.max(-100, newAlpha));
chart.options.chart.options3d.alpha = newAlpha;
window.alphaOn3dGraph = newAlpha;
window.betaOn3dGraph = newBeta;
chart.redraw(false);
},
'mouseup touchend': function () {
$(document).unbind('.hc');
}
});
});
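Highcharts (as of this demo's version) doesn't rotate axis titles with the 3D box, so a workaround is to reposition the title manually on every redraw. A rough sketch; Highcharts.perspective and yAxis.axisTitle are undocumented internals of the 3D module, so treat their availability and exact behavior as assumptions to verify against your version:
// Sketch: move the yAxis title toward the projected middle of the axis edge.
Highcharts.addEvent(chart, 'redraw', function () {
    var yAxis = chart.yAxis[0];
    if (!yAxis.axisTitle) return; // internal SVG element holding the title text
    // 3D midpoint of the front-left vertical edge of the box (plot coordinates):
    var mid = [{ x: 0, y: chart.plotHeight / 2, z: 0 }];
    // Project through the current alpha/beta rotation (undocumented helper):
    var p = Highcharts.perspective(mid, chart, true)[0];
    yAxis.axisTitle.attr({
        x: chart.plotLeft + p.x - 40, // offsets are guesses to tune visually
        y: chart.plotTop + p.y
    });
});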
