Can I make a bookmarklet put some text into the clipboard? - copy-paste

Say I have a bit of text (actually 4 different addresses) that I'd like to be able to easily (and frequently) paste. Is there a way I can make a bookmarklet that will put those addresses into the clipboard?
I'd like to be able to click the appropriate one, then right click + Paste.

Yes, it's possible: have a look at ZeroClipboard (note: it requires Flash). Also see this previous question.

Try building a Firefox extension instead of a bookmarklet. Mozilla XUL (extension language) lets you do copy-paste. Another option is a Java Applet.
http://brooknovak.wordpress.com/2009/07/28/accessing-the-system-clipboard-with-javascript/
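For the simple use case in the question (copying one of a few fixed addresses), a modern alternative to the Flash- and extension-based approaches above is the asynchronous Clipboard API. Here is a minimal sketch of such a bookmarklet, assuming the page is served over HTTPS (the API requires a secure context) and that clicking the bookmark counts as a user gesture; the address string is just a placeholder:
javascript:(function () {
  // Placeholder address; make one bookmarklet per address you want to copy.
  var address = '123 Example Street, Springfield';
  if (navigator.clipboard && window.isSecureContext) {
    navigator.clipboard.writeText(address)
      .then(function () { alert('Address copied to clipboard'); })
      .catch(function (err) { alert('Copy failed: ' + err); });
  } else {
    alert('Clipboard API not available on this page');
  }
})();
You could create one such bookmarklet per address and then right click + Paste wherever you need it.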

Method with no third-party libraries
While zeroclipboard could potentially work, this method will allow you to visually select an element and automatically copy the inner text to your clipboard without having to download any third-party libraries. It is based on this function by Arne Hartherz and modified to work both in HTTPS and HTTP contexts.
Readable version:
var overlay = document.createElement('div');
Object.assign(overlay.style, {
position: 'fixed',
top: 0,
left: 0,
width: '100vw',
height: '100vh',
zIndex: 99999999,
background: 'transparent',
cursor: 'crosshair'
});
document.body.append(overlay);
function copyToClipboard(textToCopy) {
// navigator clipboard api needs a secure context (https)
if (navigator.clipboard && window.isSecureContext) {
// navigator clipboard api method
return navigator.clipboard.writeText(textToCopy);
} else {
// text area method
let textArea = document.createElement("textarea");
textArea.value = textToCopy;
// make the textarea out of viewport
textArea.style.position = "fixed";
textArea.style.left = "-999999px";
textArea.style.top = "-999999px";
document.body.appendChild(textArea);
textArea.focus();
textArea.select();
return new Promise((res, rej) => {
// here the magic happens
document.execCommand('copy') ? res() : rej();
textArea.remove();
});
}
};
function getElement(event) {
overlay.style.pointerEvents = 'none';
var element = document.elementFromPoint(event.clientX, event.clientY);
overlay.style.pointerEvents = 'auto';
return element;
}
document.addEventListener('mousemove', function(event) {
var element = getElement(event);
if (!element) return;
var position = element.getBoundingClientRect();
Object.assign(overlay.style, {
background: 'rgba(0, 128, 255, 0.25)',
outline: '1px solid rgba(0, 128, 255, 0.5)',
top: '' + position.top + 'px',
left: '' + position.left + 'px',
width: '' + position.width + 'px',
height: '' + position.height + 'px'
});
});
overlay.addEventListener('click', function(event) {
var element = getElement(event);
var text = element.textContent || element.value;
text = text.replace(/\n[ \n]+\n/g, "\n").replace(/\n\n+/g, "\n\n").replace(/^\n+|\n+$/g, '');
if (!text.match("\n")) text = text.replace(/^ +| +$/g, '');
copyToClipboard(text);
document.body.removeChild(overlay);
});
Minified version for use in bookmarklet:
javascript:void function(){function a(a){if(navigator.clipboard&&window.isSecureContext)return navigator.clipboard.writeText(a);else{let b=document.createElement("textarea");return b.value=a,b.style.position="fixed",b.style.left="-999999px",b.style.top="-999999px",document.body.appendChild(b),b.focus(),b.select(),new Promise((a,c)=>{document.execCommand("copy")?a():c(),b.remove()})}}function b(a){c.style.pointerEvents="none";var b=document.elementFromPoint(a.clientX,a.clientY);return c.style.pointerEvents="auto",b}var c=document.createElement("div");Object.assign(c.style,{position:"fixed",top:0,left:0,width:"100vw",height:"100vh",zIndex:99999999,background:"transparent",cursor:"crosshair"}),document.body.append(c);document.addEventListener("mousemove",function(a){var d=b(a);if(d){var e=d.getBoundingClientRect();Object.assign(c.style,{background:"rgba(0, 128, 255, 0.25)",outline:"1px solid rgba(0, 128, 255, 0.5)",top:""+e.top+"px",left:""+e.left+"px",width:""+e.width+"px",height:""+e.height+"px"})}}),c.addEventListener("click",function(d){var e=b(d),f=e.textContent||e.value;f=f.replace(/\n[ \n]+\n/g,"\n").replace(/\n\n+/g,"\n\n").replace(/^\n+|\n+$/g,""),f.match("\n")||(f=f.replace(/^ +| +$/,"")),a(f),document.body.removeChild(c)})}();

Related

How to make a GUI to visually add Mattertags into a Matterport scene?

There are 2 examples in the Matterport SDK for Embeds documentation to show how to place Mattertags in a scene:
The Intersection Inspector, which only lets you see the coordinates for placing a Mattertag where the cursor is, provided you wait a little bit ... Not very user friendly: you have to copy the coordinates into your program manually.
The Transient Tags Editor, which lets you interactively place multiple Mattertags visually, edit them, and then extract them easily as a JSON file ...
I was wondering how to reproduce the Transient Tags Editor visual UX as I would like to use it in an application.
Insert Mattertags into the model visually
The source code of the Transient Tags Editor app is privately hosted on GitHub (maybe because it doesn't run perfectly on Firefox?), unlike the source code of the Intersection Inspector, which is publicly hosted on JSFiddle.
But the user-friendliness of the Transient Tags Editor intrigued me, and I wanted to understand the difference between the two tools the Matterport SDK provides for finding Mattertag coordinates.
How the Intersection Inspector works
The Intersection Inspector uses a timer to display a button at the position of the pointer when the user has not moved the pointer for more than one second. The user can then click the button to see the Mattertag coordinates and copy them manually ...
To achieve that, it needs the current Camera position, which it obtains by observing the camera's pose property:
var poseCache;
mpSdk.Camera.pose.subscribe(function(pose) {
poseCache = pose;
});
Also, it needs the current Pointer position, which it obtains by observing the pointer's intersection property:
var intersectionCache;
mpSdk.Pointer.intersection.subscribe(function(intersection) {
intersectionCache = intersection;
intersectionCache.time = new Date().getTime();
button.style.display = 'none';
buttonDisplayed = false;
});
※ An intersection event is triggered whenever the user moves the pointer, so we hide the button to make sure it is not displayed before the one-second delay is over.
Then, a timer is set up using setInterval() to display the button at the right time:
setInterval(() => {
// ...
}, 16);
In the timer callback, we check whether all the conditions to display the button are met ...
First, check we have the information we need:
setInterval(() => {
if (!intersectionCache || !poseCache) {
return;
}
// ...
}, 16);
Then, check that one second has elapsed since the last intersection event was received; otherwise we wait for the next tick and check again:
var delayBeforeShow = 1000;
setInterval(() => {
if (!intersectionCache || !poseCache) {
return;
}
const nextShow = intersectionCache.time + delayBeforeShow;
if (new Date().getTime() > nextShow) {
// ...
}
}, 16);
Finally, we check the button is not already being displayed:
var delayBeforeShow = 1000;
var buttonDisplayed = false;
setInterval(() => {
if (!intersectionCache || !poseCache) {
return;
}
const nextShow = intersectionCache.time + delayBeforeShow;
if (new Date().getTime() > nextShow) {
if (buttonDisplayed) {
return;
}
// ...
}
}, 16);
Once the conditions are met, we can display the button, using Conversion.worldToScreen() to get the screen coordinates of the pointer:
// ...
setInterval(() => {
// ...
if (new Date().getTime() > nextShow) {
// ...
var size = {
w: iframe.clientWidth,
h: iframe.clientHeight,
};
var coord = mpSdk.Conversion.worldToScreen(intersectionCache.position, poseCache, size);
button.style.left = `${coord.x - 25}px`;
button.style.top = `${coord.y - 22}px`;
button.style.display = 'block';
buttonDisplayed = true;
}
}, 16);
The button is a simple HTML button hidden by default using display: none; and positioned relative to the body with position: absolute;.
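For reference, a minimal sketch of what such a button could look like (the id, label and inline styles are illustrative, not taken from the original tool):
<button id="show-coordinates" style="display: none; position: absolute; z-index: 10;">Show coordinates</button>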
When the user clicks the button, the current coordinates of the pointer are displayed in a <div> tag above the <iframe> and the button is hidden:
button.addEventListener('click', function() {
text.innerHTML = `position: ${pointToString(intersectionCache.position)}\nnormal: ${pointToString(intersectionCache.normal)}\nfloorId: ${intersectionCache.floorId}`;
button.style.display = 'none';
iframe.focus();
});
The coordinates are formatted using the following function:
function pointToString(point) {
var x = point.x.toFixed(3);
var y = point.y.toFixed(3);
var z = point.z.toFixed(3);
return `{ x: ${x}, y: ${y}, z: ${z} }`;
}
Now, let's see how the easier-to-use, more user-friendly Transient Tags Editor interface works ...
How the Transient Tag Editor works
The Intersection Inspector is enough if you just have a few Mattertags to set permanently in a few models in your application. But if you need your users to set tags interactively in models, something like the Transient Tags Editor's GUI is a better starting point.
The main advantage of the Transient Tags Editor is that you can see how the Mattertag will be displayed before creating it. That allows you to place the tag precisely, without trial and error ...
To add a tag, you must click on the "Place New Tag" button to enter the "add tag" mode, then you can place one new tag anywhere you want.
We will only focus on that aspect of the editor and produce a simplified code sample that only adds tags:
As the user moves a tag along with the pointer when in "add tag" mode, the first step is to create a new tag and place it. Let's create a function for that using Mattertag.add():
function addTag() {
if(!tag){
mpSdk.Mattertag.add([{
label: "Matterport Tag",
description: "",
anchorPosition: {x: 0, y: 0, z: 0},
stemVector: {x:0, y: 0, z: 0},
color: {r: 1, g: 0, b: 0},
}])
.then((sid) => {
tag = sid[0];
})
.catch( (e) => {
console.error(e);
})
}
}
Then we will have to place the tag at a position near the pointer, and update its position as the user moves the pointer, so let's create a function for that using Mattertag.editPosition():
function updateTagPos(newPos, newNorm=undefined, scale=undefined){
if(!newPos) return;
if(!scale) scale = .33;
if(!newNorm) newNorm = {x: 0, y: 1, z: 0};
mpSdk.Mattertag.editPosition(tag, {
anchorPosition: newPos,
stemVector: {
x: scale * newNorm.x,
y: scale * newNorm.y,
z: scale * newNorm.z,
}
})
.catch(e =>{
console.error(e);
tag = null;
});
}
As you can see the updateTagPos() function takes 3 parameters:
newPos: the new anchor position for the Mattertag.
newNorm: an optional new stem vector for the Mattertag.
scale: an optional new scale for the stem of the Mattertag.
To update the tag position as the user moves the pointer, let's observe the pointer's intersection property to call updateTagPos():
mpSdk.Pointer.intersection.subscribe(intersectionData => {
if(tag){
if(intersectionData.object === 'intersectedobject.model' || intersectionData.object === 'intersectedobject.sweep'){
updateTagPos(intersectionData.position, intersectionData.normal);
}
}
});
To place the new tag, the user simply clicks the mouse button. The Transient Tags Editor provides its own document.activeElement-based trick for intercepting clicks on the <iframe> (but it does not work with Firefox, so the editor uses a fairly complex workaround):
function focusDetect(){
const eventListener = window.addEventListener('blur', function() {
if (document.activeElement === iframe) {
placeTag(); //function you want to call on click
setTimeout(function(){ window.focus(); }, 0);
}
window.removeEventListener('blur', eventListener );
});
}
But we will use our own version, which works better with Firefox (though it still stops working after the first click in Firefox, for whatever reason):
window.addEventListener('blur',function(){
window.setTimeout(function () {
if (document.activeElement === iframe) {
placeTag(); //function you want to call on click
window.focus()
addTag();
}
}, 0);
});
Finally, let's write the function that navigates to the new tag and opens its billboard, using Mattertag.navigateToTag():
function placeTag(){
if(tag) mpSdk.Mattertag.navigateToTag(tag, mpSdk.Mattertag.Transition.INSTANT);
tag = null;
}
Simple Editor Code Sample
First, the complete JavaScript source code:
"use strict";
const sdkKey = "aaaaXaaaXaaaXaaaXaaaXaaa"
const modelSid = "iL4RdJqi2yK";
let iframe;
let tag;
document.addEventListener("DOMContentLoaded", () => {
iframe = document.querySelector('#showcase');
iframe.setAttribute('src', `https://my.matterport.com/show/?m=${modelSid}&help=0&play=1&qs=1&gt=0&hr=0`);
iframe.addEventListener('load', () => showcaseLoader(iframe));
});
function showcaseLoader(iframe){
try{
window.MP_SDK.connect(iframe, sdkKey, '3.10')
.then(loadedShowcaseHandler)
.catch(console.error);
} catch(e){
console.error(e);
}
}
function loadedShowcaseHandler(mpSdk){
addTag()
function placeTag(){
if(tag) mpSdk.Mattertag.navigateToTag(tag, mpSdk.Mattertag.Transition.INSTANT);
tag = null;
}
window.addEventListener('blur',function(){
window.setTimeout(function () {
if (document.activeElement === iframe) {
placeTag(); //function you want to call on click
window.focus()
addTag();
}
}, 0);
});
function updateTagPos(newPos, newNorm=undefined, scale=undefined){
if(!newPos) return;
if(!scale) scale = .33;
if(!newNorm) newNorm = {x: 0, y: 1, z: 0};
mpSdk.Mattertag.editPosition(tag, {
anchorPosition: newPos,
stemVector: {
x: scale * newNorm.x,
y: scale * newNorm.y,
z: scale * newNorm.z,
}
})
.catch(e =>{
console.error(e);
tag = null;
});
}
mpSdk.Pointer.intersection.subscribe(intersectionData => {
if(tag){
if(intersectionData.object === 'intersectedobject.model' || intersectionData.object === 'intersectedobject.sweep'){
updateTagPos(intersectionData.position, intersectionData.normal);
}
}
});
function addTag() {
if(!tag){
mpSdk.Mattertag.add([{
label: "Matterport Tag",
description: "",
anchorPosition: {x: 0, y: 0, z: 0},
stemVector: {x:0, y: 0, z: 0},
color: {r: 1, g: 0, b: 0},
}])
.then((sid) => {
tag = sid[0];
})
.catch( (e) => {
console.error(e);
})
}
}
} // loadedShowcaseHandler
The HTML source code:
<!DOCTYPE html>
<html>
<head>
<title>Transient Tag Editor</title>
<style>
#showcase {
width: 100%;
height: 100vh;
}
</style>
<script src="https://static.matterport.com/showcase-sdk/2.0.1-0-g64e7e88/sdk.js"></script>
<script src="/js/app-editor.js" type="text/javascript" defer></script>
</head>
<body>
<iframe id="showcase" frameborder="0" allowFullScreen allow="xr-spatial-tracking"></iframe>
</body>
</html>
It works!
Complete Code
The complete code for this sample and others is available on github:
github.com/loic-meister-guild/pj-matterport-sdk-2021-tutorial
See Also
Matterport SDK 2021 Tutorial
Node.js + Express Tutorial for 2021
How to detect a click event on a cross domain iframe

How to display a huge GeoJSON file to the MapBox?

I'm new to Mapbox GL JS and I want to fetch a big GeoJSON file over HTTPS and display it on the map.
I think that using vector tiles is the best way to do that. I found some tutorials that show how to convert GeoJSON data to vector tiles on the server side, or by uploading it to a Mapbox style, but my GeoJSON file changes frequently. Then I found a JavaScript library called geojson-vt that converts huge GeoJSON files to vector tiles on the fly (client side) and is very fast. It seems like what I'm looking for, BUT!! How can I integrate it with Mapbox GL JS to add the layer?
I'm stuck on how to add a layer with Mapbox GL JS from the following result:
var tileIndex = geojsonvt(MyGeoJSON);
var tile = tileIndex.getTile(z, x, y);
... Or maybe I just didn't get it! Can somebody help, or propose some other solution for my problem?
You don't need to worry about geojson-vt. Mapbox-GL-JS does that internally. So you can just follow the standard documentation for loading a GeoJSON layer.
If your GeoJSON is really huge, then probably the limiting factor will be network transfer, which means you really need to be serving server-side vector tiles.
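For completeness, here is a minimal sketch of that standard approach, assuming a map object already exists and that the URL points at your GeoJSON file (the source/layer names and the URL are placeholders):
map.on('load', function () {
  // Mapbox GL JS tiles GeoJSON sources internally (using geojson-vt under the hood),
  // so a plain geojson source is usually all you need.
  map.addSource('my-data', {
    type: 'geojson',
    data: 'https://example.com/path/to/big.geojson' // placeholder URL
  });
  map.addLayer({
    id: 'my-data-fill',
    type: 'fill',
    source: 'my-data',
    paint: { 'fill-color': '#00ffff', 'fill-opacity': 0.5 }
  });
});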
I'd recommend using Deck.gl GeoJSON Layer. Here's an example:
<html>
<head>
<title>deck.gl GeoJsonLayer (Polygon) Example</title>
<script src="https://unpkg.com/deck.gl#^8.0.0/dist.min.js"></script>
<script src="https://api.tiles.mapbox.com/mapbox-gl-js/v1.4.0/mapbox-gl.js"></script>
<style type="text/css">
body {
width: 100vw;
height: 100vh;
margin: 0;
overflow: hidden;
}
.deck-tooltip {
font-family: Helvetica, Arial, sans-serif;
padding: 6px !important;
margin: 8px;
max-width: 300px;
font-size: 10px;
}
</style>
</head>
<body>
</body>
<script type="text/javascript">
const {DeckGL, GeoJsonLayer} = deck;
const COLOR_SCALE = [
// negative
[65, 182, 196],
[127, 205, 187],
[199, 233, 180],
[237, 248, 177],
// positive
[255, 255, 204],
[255, 237, 160],
[254, 217, 118],
[254, 178, 76],
[253, 141, 60],
[252, 78, 42],
[227, 26, 28],
[189, 0, 38],
[128, 0, 38]
];
const geojsonLayer = new GeoJsonLayer({
data: 'https://raw.githubusercontent.com/uber-common/deck.gl-data/master/examples/geojson/vancouver-blocks.json',
opacity: 0.8,
stroked: false,
filled: true,
extruded: true,
wireframe: true,
getElevation: f => Math.sqrt(f.properties.valuePerSqm) * 10,
getFillColor: f => colorScale(f.properties.growth),
getLineColor: [255, 255, 255],
pickable: true
});
new DeckGL({
mapboxApiAccessToken: '<mapbox-access-token>',
mapStyle: 'mapbox://styles/mapbox/light-v9',
initialViewState: {
latitude: 49.254,
longitude: -123.13,
zoom: 11,
maxZoom: 16,
pitch: 45
},
controller: true,
layers: [geojsonLayer],
getTooltip
});
function colorScale(x) {
const i = Math.round(x * 7) + 4;
if (x < 0) {
return COLOR_SCALE[i] || COLOR_SCALE[0];
}
return COLOR_SCALE[i] || COLOR_SCALE[COLOR_SCALE.length - 1];
}
function getTooltip({object}) {
return object && `Average Property Value
${object.properties.valuePerSqm}
Growth
${Math.round(object.properties.growth * 100)}`;
}
</script>
</html>
Attribution here.
Oh man, I've spent 8 days researching this.
The solution is:
var fs = require('fs'); // needed for readFileSync
var vtpbf = require('vt-pbf');
var geojsonVt = require('geojson-vt');
var orig = JSON.parse(fs.readFileSync(__dirname + '/myjson.json'));
var tileindex = geojsonVt(orig);
var tile = tileindex.getTile(z, x, y); // geojson-vt expects (z, x, y); Mapbox requests each tile from the server with these values
// pass in an object mapping layername -> tile object
var buff = vtpbf.fromGeojsonVt({ 'geojsonLayer': tile });
I've sent the result to the frontend, and it works like the Mapbox API. For the details check: https://github.com/mapbox/vt-pbf
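To make the server side concrete, here is a minimal sketch of how that buffer could be sent to the frontend, assuming an Express app behind the /county?z={z}&x={x}&y={y} URL used below (the route, file name and layer name are illustrative only):
var express = require('express');
var fs = require('fs');
var geojsonVt = require('geojson-vt');
var vtpbf = require('vt-pbf');

var app = express();
// Build the tile index once at startup from the (frequently regenerated) GeoJSON file.
var tileindex = geojsonVt(JSON.parse(fs.readFileSync(__dirname + '/myjson.json')));

app.get('/county', function (req, res) {
  // z, x and y arrive in the query string, filled in by Mapbox GL JS from the {z}/{x}/{y} placeholders.
  var tile = tileindex.getTile(+req.query.z, +req.query.x, +req.query.y);
  if (!tile) return res.status(204).end(); // no features in this tile
  var buff = vtpbf.fromGeojsonVt({ 'tiles-sequences': tile });
  res.set('Content-Type', 'application/x-protobuf');
  res.send(Buffer.from(buff));
});

app.listen(1234);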
And from the Mapbox side:
const source = {
type : 'vector',
'tiles' : [ 'http://localhost:1234/county?z={z}&x={x}&y={y}' ],
minzoom : 0,
maxzoom : 14
};
map.addSource('source', source );
map.addLayer({
'id' : 'source',
'type' : 'fill',
'source' : 'source',
'source-layer' : 'tiles-sequences',
'fill-color' : '#00ffff'
});

Why is the zIndex sequence of my objects not what I expected?

How can I get a list of all objects with all params (x, y, width, etc.), including the zIndex param, on the stage after resizing is complete? And how can I set a zIndex for each object when creating a stage?
I have this code, but setZIndex is not working correctly: the images are not stacked in the order I expect.
const oKonvaStage = new Konva.Stage({
container: 'dropzone'
});
const oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);
const oKonvaImage1 = new Konva.Image({
x: 624,
y: 433,
width: 1920,
height: 1280
});
const oImage1 = new Image();
oImage1.onload = function() {
oKonvaImage1.image(oImage1);
oKonvaLayer.add(oKonvaImage1);
oKonvaImage1.setZIndex(2);
oKonvaLayer.draw();
};
oImage1.src = 'image1.jpg';
oKonvaImage1.on('transformend', function(e) {
UpdateAttrs();
});
const oKonvaImage2 = new Konva.Image({
x: 9,
y: 254,
width: 1024,
height: 1024
});
const oImage2 = new Image();
oImage2.onload = function() {
oKonvaImage2.image(oImage2);
oKonvaLayer.add(oKonvaImage2);
oKonvaImage2.setZIndex(0);
oKonvaLayer.draw();
};
oImage2.src = 'image2.jpg';
oKonvaImage2.on('transformend', function(e) {
UpdateAttrs();
});
const oKonvaImage3 = new Konva.Image({
x: -586,
y: -315,
width: 1920,
height: 1199
});
const oImage3 = new Image();
oImage3.onload = function() {
oKonvaImage3.image(oImage3);
oKonvaLayer.add(oKonvaImage3);
oKonvaImage3.setZIndex(1);
oKonvaLayer.draw();
};
oImage3.src = 'image3.jpg';
Image3 has index = 1 but is over Image2 which has index = 2.
First off, prompted by #lavrton's comment, you should add the Konva.Image objects to the canvas as soon as you have instantiated them - not in the image onload event. The image objects add no overhead to the canvas, and you can then be sure of the initial z-index sequence. You may change the sequence after that, but at least you start with a known layout.
And as a general rule, you need to take care when using any commands inside the onload event of an image. Image loading is asynchronous - meaning it does not happen in the sequence you might anticipate when you write the code. A large image coming from a slow server will load more slowly than a small image from a quick server, but you cannot make any assumptions about the sequence. The ONLY way you can ensure the sequence is to have the load of the second image initiated from the onload event of the first, but this is generally going to give bad UX.
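A minimal sketch of that pattern (instantiate and add the Konva.Image nodes up front, and only set their pixel content in the onload handlers; this assumes a stage variable already exists, and the URLs are placeholders):
var layer = new Konva.Layer();
stage.add(layer);

// Instantiate and add every node up front: the add order fixes the initial z-index sequence.
var kImage1 = new Konva.Image({ x: 20, y: 20, width: 300, height: 100 });
var kImage2 = new Konva.Image({ x: 10, y: 100, width: 300, height: 100 });
layer.add(kImage1); // zIndex 0 (bottom)
layer.add(kImage2); // zIndex 1 (top)

// Only fill in the pixels once each image has actually loaded; the stacking order is already settled.
[kImage1, kImage2].forEach(function (kImage, i) {
  var img = new Image();
  img.onload = function () {
    kImage.image(img);
    layer.draw();
  };
  img.src = 'https://dummyimage.com/300x100/666/fff.png?text=Image-' + (i + 1); // placeholder images
});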
Back to the code you posted, the code in my snippet below would appear to work as you intended. I switched the ES2015 const declarations to plain old vars, and removed the unnecessary on-transform handlers.
I also added some code to analyse the results, showing the hoped-for zIndex value and the achieved zIndex values.
Note that the zIndex value in Konva is relative to the parent container and not absolute.
So, for example, when I set zIndex=999 I actually get a value of 4.
Summary:
avoid calling code for which sequence is critical in onload events.
do not expect to get exactly the zindex you ask for.
var div = $('#dropzone');
var oKonvaStage = new Konva.Stage({container: 'dropzone', width: div.width(), height: div.height()});
var indexWanted = [];
var oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);
var oKonvaImage1 = new Konva.Image({
name: 'image-1',
x: 20,
y: 20,
width: 300,
height: 100
});
var oImage1 = new Image();
oImage1.onload = function() {
oKonvaLayer.add(oKonvaImage1);
oKonvaImage1.image(oImage1);
oKonvaImage1.setZIndex(2);
indexWanted[0] = 2;
oKonvaLayer.draw();
sayPos();
};
oImage1.src = 'https://dummyimage.com/300x100/666/fff.png?text=Image-1'
var oKonvaImage2 = new Konva.Image({
name: 'image-2',
x: 10,
y: 100,
width: 300,
height: 100
});
var oImage2 = new Image();
oImage2.onload = function() {
oKonvaImage2.image(oImage2);
oKonvaLayer.add(oKonvaImage2);
oKonvaImage2.setZIndex(0);
indexWanted[1] = 0;
oKonvaLayer.draw();
sayPos();
};
oImage2.src = 'https://dummyimage.com/300x100/333/fff.png?text=Image-2';
var oKonvaImage3 = new Konva.Image({
name: 'image-3',
x: 280,
y: 80,
width: 300,
height: 100
});
var oImage3 = new Image();
oImage3.onload = function() {
oKonvaImage3.image(oImage3);
oKonvaLayer.add(oKonvaImage3);
oKonvaImage3.setZIndex(999); // <<<< notice this is set to 999. Compare to console output !!
indexWanted[2] = 999;
oKonvaLayer.draw();
sayPos();
};
oImage3.src = 'https://dummyimage.com/300x100/ccc/fff.png?text=Image-3';
oKonvaLayer.draw();
oKonvaStage.draw();
var picCnt = 0, s= '', imgNo = 0;
function sayPos(){
picCnt = picCnt + 1;
if (picCnt === 3){
for (var i = 0; i < indexWanted.length; i = i + 1){
imgNo = i + 1;
s = s + '<br/>Image-' + imgNo + ' zindex wanted = ' + indexWanted[i] + ' actual zIndex=' + oKonvaLayer.findOne('.image-' + imgNo).getAbsoluteZIndex();
}
$('#info').html(s)
}
}
#info {
font-size: 10pt;
height: 100px;
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/konva/2.5.1/konva.min.js"></script>
<p id='info' >
</p>
<div id='dropzone' style="position: absolute; top: 90px; z-index: -1; display: inline-block; left: 0px; width: 600px; height: 400px; background-color: silver;"></div>

Post image data instead of image url in fabric js

I have implemented drag-and-drop of images. The issue is that when I convert the canvas data with toSVG() and send it to the server, it includes the image URL instead of the image data.
When the user uploads a file, I use the method below:
//Add photo in canvas
document.getElementById('add_image').addEventListener('change', function (e) {
var file = e.target.files[0];
var reader = new FileReader();
reader.onload = function (f) {
var data = f.target.result;
fabric.Image.fromURL(data, function (img) {
var oImg = img.set({
left: 0,
top: 0,
angle: 0,
border: '#000',
stroke: '#F0F0F0', //<-- set this
strokeWidth: 0, //<-- set this
fill: 'rgba(0,0,0,0)'
}).scale(0.2);
canvas.add(oImg).renderAll();
canvas.moveTo(oImg, z_index);
z_index = z_index + 1;
//var a = canvas.setActiveObject(oImg);
var dataURL = canvas.toDataURL({
format: 'png',
quality: 1
});
});
};
reader.readAsDataURL(file);
$(this).val('');
});
It then sends the data as below:
<image xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="data:image/gif;base64,..." x="-100" y="-100" style="stroke: rgb(240,240,240); stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-opacity: 0; fill-rule: nonzero; opacity: 1;" width="200" height="200">
Here ... contains base64 data.
If the image is uploaded using drag-and-drop, I use the method below:
var new_image = new fabric.Image(obj, {
width: obj.naturalWidth,
height: obj.naturalHeight,
scaleX: setImageWidth/obj.naturalWidth,
scaleY: setImageHeight/obj.naturalHeight,
// Set the center of the new object based on the event coordinates relative
// to the canvas container.
left: e.layerX,
top: e.layerY,
id: 'verified_image'
});
canvas.add(new_image);
canvas.renderAll();
which sends the data as below:
<image id="verified_image" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://localhost/lynkus/uploads/userprofile/verified_image.png" x="-256" y="-256" style="stroke: none; stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-rule: nonzero; opacity: 1;" width="512" height="512">
So both ways of uploading an image work fine, but I am trying to generate a PNG file from the SVG above. The system can create the PNG for the first option but not for the second one, because it contains a URL instead of the image data.
So is there a way to send the data as base64 instead of an image URL for the drag-and-drop option?
http://jsfiddle.net/durga598/w8kkc/414/
function handleDrop(e) {
// this / e.target is current target element.
e.preventDefault();
if (e.stopPropagation) {
e.stopPropagation(); // stops the browser from redirecting.
}
var img = document.querySelector('#images img.img_dragging');
var setImageWidth = 100,
setImageHeight = 100;
var imgObj = new Image();
imgObj.crossOrigin = 'Anonymous';
imgObj.onload = function(oImg) {
var tempCanvas = document.createElement('CANVAS');
var tempCtx = tempCanvas.getContext('2d');
var height = tempCanvas.height = this.naturalHeight;
var width = tempCanvas.width = this.naturalWidth;
tempCtx.drawImage(this, 0, 0);
var dataURL = tempCanvas.toDataURL();
fabric.Image.fromURL(dataURL, function(img) {
img.set({
width: width,
height: height,
scaleX: setImageWidth / width,
scaleY: setImageHeight / height,
left: e.layerX,
top: e.layerY,
})
canvas.add(img);
})
}
imgObj.src = img.src;
return false;
}
You need to create an image object and convert it to base64 data using the canvas element's toDataURL(). Then use fabric.Image.fromURL() to add that image data to the Fabric canvas. Here is the updated fiddle.
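Once the image has been added from a data URL this way, exporting embeds the pixels rather than a file URL. A quick way to check (a sketch; canvas is your fabric.Canvas instance):
// After handleDrop() has added the image from a data URL:
var svg = canvas.toSVG();
// svg now contains <image ... xlink:href="data:image/png;base64,..." /> instead of a file URL,
// so the server can rasterize it to PNG without fetching anything.
console.log(svg.indexOf('xlink:href="data:') !== -1); // true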

Styling D3 graphic in Rails view

I am trying to render the following D3 graph in a rails view:
https://gist.github.com/mbostock/4063570
It shows up OK, except it's completely black and the lines don't show up well either (screenshot permalink):
https://www.evernote.com/shard/s116/sh/5d2b40c6-2bd0-49a7-8ead-c29713cc5ed7/2ca5e19814e84f05d5709232b3edec6f/deep/0/Screenshot%207/5/13%207:07%20PM.png
Here's the code in my view (I was having trouble getting it to render at all when the js was in the assets pipeline):
/app/views/steps/mindmap.html.erb
<%= javascript_tag do %>
var width = 960,
height = 2200;
var cluster = d3.layout.cluster()
.size([height, width - 160]);
var diagonal = d3.svg.diagonal()
.projection(function(d) { return [d.y, d.x]; });
var svg = d3.select("body").append("svg")
.attr("width", width)
.attr("height", height)
.append("g")
.attr("transform", "translate(40,0)");
d3.json("/assets/flare.json", function(root) {
var nodes = cluster.nodes(root),
links = cluster.links(nodes);
var link = svg.selectAll(".link")
.data(links)
.enter().append("path")
.attr("class", "link")
.attr("d", diagonal);
var node = svg.selectAll(".node")
.data(nodes)
.enter().append("g")
.attr("class", "node")
.attr("transform", function(d) { return "translate(" + d.y + "," + d.x + ")"; })
node.append("circle")
.attr("r", 4.5);
node.append("text")
.attr("dx", function(d) { return d.children ? -8 : 8; })
.attr("dy", 3)
.style("text-anchor", function(d) { return d.children ? "end" : "start"; })
.text(function(d) { return d.name; });
});
d3.select(self.frameElement).style("height", height + "px");
<% end %>
and app/assets/mindmap.css:
.node circle {
fill: #7A8B8B;
stroke: #7A8B8B;
stroke-width: 1.5px;
}
.node {
font: 10px sans-serif;
}
.link {
fill: #7A8B8B;
stroke: #7A8B8B;
stroke-width: 1.5px;
}
I've tried changing the color and other attributes to no effect. The CSS settings don't seem to have any impact on the way D3 renders the graphic. Is this a Rails/asset pipeline problem, a javascript problem, or perhaps something special about how D3 renders SVGs?
Thanks for any insights you may have!
If your JavaScript was failing with the asset pipeline, then it's likely that your CSS is failing too. To verify this, put the contents of mindmap.css into your main app CSS and it will render fine.
The reason why the assets pipeline is not including your files might be due to configuration or some other reason unrelated to your code.
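Once that check passes, you can move the styles back out. A minimal sketch of including the stylesheet through the pipeline, assuming mindmap.css is placed under app/assets/stylesheets/ and you use a standard Sprockets manifest (adjust names to your setup):
/* app/assets/stylesheets/application.css */
/*
 *= require mindmap
 *= require_self
 */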
