How to display a huge GeoJSON file in Mapbox GL JS? - geojson

I'm new to Mapbox GL JS and I want to fetch a big GeoJSON file over HTTPS and display it on the map.
I think serving vector tiles is the best way to do that. I found tutorials that show how to convert GeoJSON data to vector tiles, but only on the server side or by uploading it to a Mapbox style, and my GeoJSON file changes frequently. Then I found geojson-vt, a JavaScript library that slices huge GeoJSON files into vector tiles on the fly (client side) and is extremely fast. It seems like exactly what I'm looking for, BUT how can I integrate it with Mapbox GL JS to add the layer?
I'm stuck on how to add a layer with Mapbox GL JS given the following result:
var tileIndex = geojsonvt(MyGeoJSON);
var tile = tileIndex.getTile(z, x, y);
...or maybe I just didn't get it! Could somebody help, or propose another solution to my problem?

You don't need to worry about geojson-vt: Mapbox GL JS already uses it internally, so you can just follow the standard documentation for loading a GeoJSON source.
If your GeoJSON really is huge, the limiting factor will probably be network transfer, in which case you really do need to serve server-side vector tiles.
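For reference, a minimal sketch of that standard approach (the GeoJSON URL and the source/layer ids are placeholders, and map is assumed to be an existing mapboxgl.Map instance):
map.on('load', function () {
  // Mapbox GL JS tiles this source internally with geojson-vt.
  map.addSource('my-data', {
    type: 'geojson',
    data: 'https://example.com/data.geojson' // placeholder URL; call setData() on the source to refresh it
  });
  map.addLayer({
    id: 'my-data-fill',
    type: 'fill',
    source: 'my-data',
    paint: { 'fill-color': '#00ffff', 'fill-opacity': 0.5 }
  });
});
When the file changes, map.getSource('my-data').setData(url) re-fetches it without recreating the layer.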

I'd recommend using deck.gl's GeoJsonLayer. Here's an example:
<html>
  <head>
    <title>deck.gl GeoJsonLayer (Polygon) Example</title>
    <script src="https://unpkg.com/deck.gl@^8.0.0/dist.min.js"></script>
    <script src="https://api.tiles.mapbox.com/mapbox-gl-js/v1.4.0/mapbox-gl.js"></script>
    <style type="text/css">
      body {
        width: 100vw;
        height: 100vh;
        margin: 0;
        overflow: hidden;
      }
      .deck-tooltip {
        font-family: Helvetica, Arial, sans-serif;
        padding: 6px !important;
        margin: 8px;
        max-width: 300px;
        font-size: 10px;
      }
    </style>
  </head>
  <body>
  </body>
  <script type="text/javascript">
    const {DeckGL, GeoJsonLayer} = deck;

    const COLOR_SCALE = [
      // negative
      [65, 182, 196],
      [127, 205, 187],
      [199, 233, 180],
      [237, 248, 177],
      // positive
      [255, 255, 204],
      [255, 237, 160],
      [254, 217, 118],
      [254, 178, 76],
      [253, 141, 60],
      [252, 78, 42],
      [227, 26, 28],
      [189, 0, 38],
      [128, 0, 38]
    ];

    const geojsonLayer = new GeoJsonLayer({
      data: 'https://raw.githubusercontent.com/uber-common/deck.gl-data/master/examples/geojson/vancouver-blocks.json',
      opacity: 0.8,
      stroked: false,
      filled: true,
      extruded: true,
      wireframe: true,
      getElevation: f => Math.sqrt(f.properties.valuePerSqm) * 10,
      getFillColor: f => colorScale(f.properties.growth),
      getLineColor: [255, 255, 255],
      pickable: true
    });

    new DeckGL({
      mapboxApiAccessToken: '<mapbox-access-token>',
      mapStyle: 'mapbox://styles/mapbox/light-v9',
      initialViewState: {
        latitude: 49.254,
        longitude: -123.13,
        zoom: 11,
        maxZoom: 16,
        pitch: 45
      },
      controller: true,
      layers: [geojsonLayer],
      getTooltip
    });

    function colorScale(x) {
      const i = Math.round(x * 7) + 4;
      if (x < 0) {
        return COLOR_SCALE[i] || COLOR_SCALE[0];
      }
      return COLOR_SCALE[i] || COLOR_SCALE[COLOR_SCALE.length - 1];
    }

    function getTooltip({object}) {
      return object && `Average Property Value
${object.properties.valuePerSqm}
Growth
${Math.round(object.properties.growth * 100)}`;
    }
  </script>
</html>
Attribution here.

Oh man, I've spent 8 days researching this.
The solution is:
var fs = require('fs');
var vtpbf = require('vt-pbf');
var geojsonVt = require('geojson-vt');

var orig = JSON.parse(fs.readFileSync(__dirname + '/myjson.json'));
var tileIndex = geojsonVt(orig);
var tile = tileIndex.getTile(z, x, y); // Mapbox requests tiles from the server automatically and supplies z, x, y
// pass in an object mapping layername -> tile object
var buff = vtpbf.fromGeojsonVt({ 'geojsonLayer': tile });
I send the resulting buffer to the frontend, so it works just like the Mapbox API. For the details check: https://github.com/mapbox/vt-pbf
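For completeness, a rough sketch of how that buffer could be served over HTTP (assuming an Express server; the /county route and port 1234 are chosen only to match the Mapbox source in the next snippet):
// Hypothetical Express endpoint; tileIndex and vtpbf come from the snippet above.
var express = require('express');
var app = express();

app.get('/county', function (req, res) {
  var z = parseInt(req.query.z, 10);
  var x = parseInt(req.query.x, 10);
  var y = parseInt(req.query.y, 10);
  var tile = tileIndex.getTile(z, x, y);
  if (!tile) {
    return res.status(204).end(); // no data for this tile
  }
  var buff = vtpbf.fromGeojsonVt({ 'geojsonLayer': tile });
  res.set('Content-Type', 'application/x-protobuf');
  res.send(Buffer.from(buff));
});

app.listen(1234);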
And from the Mapbox side:
const source = {
  type: 'vector',
  tiles: ['http://localhost:1234/county?z={z}&x={x}&y={y}'],
  minzoom: 0,
  maxzoom: 14
};
map.addSource('source', source);
map.addLayer({
  'id': 'source',
  'type': 'fill',
  'source': 'source',
  'source-layer': 'geojsonLayer', // must match the layer name passed to vtpbf.fromGeojsonVt above
  'fill-color': '#00ffff'
});

Related

Bullet points and alphabets are missing when converting html to pdf using jsPDF

I am trying to convert HTML to PDF using jsPDF, but I cannot get the PDF to match the original HTML: the bullet points and numbering of ordered and unordered lists are missing in the PDF file.
(Screenshots: ordered list in HTML vs. in PDF; unordered list in HTML vs. in PDF.)
function BlazorDownloadFile(filename, text) {
  let parser = new DOMParser();
  let doc = parser.parseFromString(text, "text/html");
  console.log(doc.body);
  const element = doc.body;
  var opt = {
    margin: 1,
    filename: filename,
    html2canvas: { scale: 2 },
    jsPDF: { unit: "cm", format: "a4", orientation: "portrait" },
  };
  // Choose the element that our invoice is rendered in.
  html2pdf().set(opt).from(element).save();
}
Please help me to fix this issue.
Here is an SCSS workaround for the bullet points that you can use to overcome this issue:
ul li {
  list-style-type: none;
  &:before {
    content: '• ';
    margin-left: -1em;
  }
}
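If the project has no SCSS build step, the same workaround compiles to plain CSS as:
ul li {
  list-style-type: none;
}
ul li:before {
  content: '• ';     /* fake bullet rendered as text, so html2canvas picks it up */
  margin-left: -1em;
}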

Why is the zIndex sequence of my objects not what I expected?

How can I get a list of all objects with all params (x, y, width, etc.), including the zIndex param, on the stage after resizing is complete? And how can I set a zIndex for each object when creating a stage?
I have this code, but setZIndex is not working correctly; the images are not stacked as expected.
const oKonvaStage = new Konva.Stage({
  container: 'dropzone'
});
const oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);

const oKonvaImage1 = new Konva.Image({
  x: 624,
  y: 433,
  width: 1920,
  height: 1280
});
const oImage1 = new Image();
oImage1.onload = function() {
  oKonvaImage1.image(oImage1);
  oKonvaLayer.add(oKonvaImage1);
  oKonvaImage1.setZIndex(2);
  oKonvaLayer.draw();
};
oImage1.src = 'image1.jpg';
oKonvaImage1.on('transformend', function(e) {
  UpdateAttrs();
});

const oKonvaImage2 = new Konva.Image({
  x: 9,
  y: 254,
  width: 1024,
  height: 1024
});
const oImage2 = new Image();
oImage2.onload = function() {
  oKonvaImage2.image(oImage2);
  oKonvaLayer.add(oKonvaImage2);
  oKonvaImage2.setZIndex(0);
  oKonvaLayer.draw();
};
oImage2.src = 'image2.jpg';
oKonvaImage2.on('transformend', function(e) {
  UpdateAttrs();
});

const oKonvaImage3 = new Konva.Image({
  x: -586,
  y: -315,
  width: 1920,
  height: 1199
});
const oImage3 = new Image();
oImage3.onload = function() {
  oKonvaImage3.image(oImage3);
  oKonvaLayer.add(oKonvaImage3);
  oKonvaImage3.setZIndex(1);
  oKonvaLayer.draw();
};
oImage3.src = 'image3.jpg';
Image3 has index = 1 but is over Image2 which has index = 2.
First off, prompted by #lavrton's comment, you should add the Konva.Image objects to the layer as soon as you have instantiated them, not in the image onload event. The image objects add no overhead to the canvas, and you can then be sure of the initial z-index sequence. You may change the sequence after that, but at least you start with a known layout.
As a general rule, you need to take care when running any code inside the onload event of an image. Image loading is asynchronous, meaning it does not happen in the sequence you might anticipate when you write the code. A large image coming from a slow server will load more slowly than a small image from a quick server, but you cannot make any assumptions about the order. The ONLY way to guarantee the order is to initiate the load of the second image from the onload event of the first, but that generally gives bad UX.
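To illustrate that point, here is a minimal sketch of the pattern (reusing the oKonvaLayer and image1.jpg names from the question): the node is created and added synchronously, and only the bitmap assignment happens in onload.
// Add the Konva.Image node to the layer right away so its place in the
// z-order is fixed at creation time, independent of download speed.
var oKonvaImage = new Konva.Image({ x: 20, y: 20, width: 300, height: 100 });
oKonvaLayer.add(oKonvaImage);   // z-order decided here, synchronously
                                // (re-order later with setZIndex() once all nodes exist)
var oImage = new Image();
oImage.onload = function () {
  oKonvaImage.image(oImage);    // only the bitmap assignment is asynchronous
  oKonvaLayer.draw();
};
oImage.src = 'image1.jpg';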
Back to the code you posted: the snippet below appears to work as you intended. I switched the ES2015 const declarations to plain old vars and removed the unnecessary transformend handlers.
I also added some code to analyse the results, showing the requested zIndex values and the achieved zIndex values.
Note that the zIndex value in Konva is relative to the parent container and not absolute.
So, for example, when I set zIndex = 999 I actually get a value of 4.
Summary:
- avoid calling code for which sequence is critical in onload events.
- do not expect to get exactly the zIndex you ask for.
var div = $('#dropzone');
var oKonvaStage = new Konva.Stage({container: 'dropzone', width: div.width(), height: div.height()});
var indexWanted = [];
var oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);

var oKonvaImage1 = new Konva.Image({
  name: 'image-1',
  x: 20,
  y: 20,
  width: 300,
  height: 100
});
var oImage1 = new Image();
oImage1.onload = function() {
  oKonvaLayer.add(oKonvaImage1);
  oKonvaImage1.image(oImage1);
  oKonvaImage1.setZIndex(2);
  indexWanted[0] = 2;
  oKonvaLayer.draw();
  sayPos();
};
oImage1.src = 'https://dummyimage.com/300x100/666/fff.png?text=Image-1';

var oKonvaImage2 = new Konva.Image({
  name: 'image-2',
  x: 10,
  y: 100,
  width: 300,
  height: 100
});
var oImage2 = new Image();
oImage2.onload = function() {
  oKonvaImage2.image(oImage2);
  oKonvaLayer.add(oKonvaImage2);
  oKonvaImage2.setZIndex(0);
  indexWanted[1] = 0;
  oKonvaLayer.draw();
  sayPos();
};
oImage2.src = 'https://dummyimage.com/300x100/333/fff.png?text=Image-2';

var oKonvaImage3 = new Konva.Image({
  name: 'image-3',
  x: 280,
  y: 80,
  width: 300,
  height: 100
});
var oImage3 = new Image();
oImage3.onload = function() {
  oKonvaImage3.image(oImage3);
  oKonvaLayer.add(oKonvaImage3);
  oKonvaImage3.setZIndex(999); // <<<< notice this is set to 999. Compare to the reported output !!
  indexWanted[2] = 999;
  oKonvaLayer.draw();
  sayPos();
};
oImage3.src = 'https://dummyimage.com/300x100/ccc/fff.png?text=Image-3';

oKonvaLayer.draw();
oKonvaStage.draw();

var picCnt = 0, s = '', imgNo = 0;
function sayPos() {
  picCnt = picCnt + 1;
  if (picCnt === 3) {
    for (var i = 0; i < indexWanted.length; i = i + 1) {
      imgNo = i + 1;
      s = s + '<br/>Image-' + imgNo + ' zIndex wanted = ' + indexWanted[i] + ' actual zIndex = ' + oKonvaLayer.findOne('.image-' + imgNo).getAbsoluteZIndex();
    }
    $('#info').html(s);
  }
}
#info {
  font-size: 10pt;
  height: 100px;
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/konva/2.5.1/konva.min.js"></script>
<p id='info'>
</p>
<div id='dropzone' style="position: absolute; top: 90px; z-index: -1; display: inline-block; left: 0px; width: 600px; height: 400px; background-color: silver;"></div>

Post image data instead of image url in fabric js

I have implemented drag-and-drop images. The issue is that when I convert the canvas with toSVG and send it to the server, the SVG includes the image URL instead of the image data.
When the user uploads a file, I use the method below:
// Add photo to canvas
document.getElementById('add_image').addEventListener('change', function (e) {
  var file = e.target.files[0];
  var reader = new FileReader();
  reader.onload = function (f) {
    var data = f.target.result;
    fabric.Image.fromURL(data, function (img) {
      var oImg = img.set({
        left: 0,
        top: 0,
        angle: 0,
        border: '#000',
        stroke: '#F0F0F0', //<-- set this
        strokeWidth: 0, //<-- set this
        fill: 'rgba(0,0,0,0)'
      }).scale(0.2);
      canvas.add(oImg).renderAll();
      canvas.moveTo(oImg, z_index);
      z_index = z_index + 1;
      //var a = canvas.setActiveObject(oImg);
      var dataURL = canvas.toDataURL({
        format: 'png',
        quality: 1
      });
    });
  };
  reader.readAsDataURL(file);
  $(this).val('');
});
It then sends the data as below:
< image xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="data:image/gif;base64,..." x="-100" y="-100" style="stroke: rgb(240,240,240); stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-opacity: 0; fill-rule: nonzero; opacity: 1;" width="200" height="200">
Here ... contains base64 data.
If the image is uploaded using drag-and-drop, I use the method below:
var new_image = new fabric.Image(obj, {
  width: obj.naturalWidth,
  height: obj.naturalHeight,
  scaleX: setImageWidth / obj.naturalWidth,
  scaleY: setImageHeight / obj.naturalHeight,
  // Set the center of the new object based on the event coordinates relative
  // to the canvas container.
  left: e.layerX,
  top: e.layerY,
  id: 'verified_image'
});
canvas.add(new_image);
canvas.renderAll();
which sends the data as below:
< image id="verified_image" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://localhost/lynkus/uploads/userprofile/verified_image.png" x="-256" y="-256" style="stroke: none; stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-rule: nonzero; opacity: 1;" width="512" height="512">
Both types of image upload work fine, but I am trying to generate a PNG file from the SVG above. The system can create the PNG for the first option but not for the second, because it contains a URL instead of image data.
So is there a way to send the data as base64 instead of an image URL in the drag-and-drop option?
http://jsfiddle.net/durga598/w8kkc/414/
function handleDrop(e) {
  // this / e.target is the current target element.
  e.preventDefault();
  if (e.stopPropagation) {
    e.stopPropagation(); // stops the browser from redirecting.
  }
  var img = document.querySelector('#images img.img_dragging');
  var setImageWidth = 100,
      setImageHeight = 100;
  var imgObj = new Image();
  imgObj.crossOrigin = 'Anonymous';
  imgObj.onload = function(oImg) {
    var tempCanvas = document.createElement('CANVAS');
    var tempCtx = tempCanvas.getContext('2d');
    var height = tempCanvas.height = this.naturalHeight;
    var width = tempCanvas.width = this.naturalWidth;
    tempCtx.drawImage(this, 0, 0);
    var dataURL = tempCanvas.toDataURL();
    fabric.Image.fromURL(dataURL, function(img) {
      img.set({
        width: width,
        height: height,
        scaleX: setImageWidth / width,
        scaleY: setImageHeight / height,
        left: e.layerX,
        top: e.layerY,
      });
      canvas.add(img);
    });
  };
  imgObj.src = img.src;
  return false;
}
You need to create an image object and convert it to base64 data using the canvas element's toDataURL. Then use fabric.Image.fromURL to add that image data to the Fabric canvas. Here is the updated fiddle.

Openlayers 3: Drawing grid lines (graticule) with predefined units on the custom static image

I am trying to draw custom x-y axis grid lines on top of a static image, i.e. in image pixels rather than latitudes and longitudes. Ideally, the grid lines should be redrawn dynamically when I drag/zoom/scroll the image, just like the x-y ruler bars in Photoshop.
I came across the following code example, which provides a custom projection function to directly map image pixel coordinates to map coordinates.
http://openlayers.org/en/latest/examples/static-image.html
// Map views always need a projection. Here we just want to map image
// coordinates directly to map coordinates, so we create a projection that uses
// the image extent in pixels.
var extent = [0, 0, 1024, 968];
var projection = new ol.proj.Projection({
  code: 'xkcd-image',
  units: 'pixels',
  extent: extent
});
I tried to append the following code to the script. However, the ol.Graticule class seems to be incompatible with the custom ol.proj.Projection definition.
http://openlayers.org/en/latest/examples/graticule.html
// Create the graticule component
var graticule = new ol.Graticule({
  // the style to use for the lines, optional.
  strokeStyle: new ol.style.Stroke({
    color: 'rgba(255,120,0,0.9)',
    width: 2,
    lineDash: [0.5, 4]
  })
});
graticule.setMap(map);
What's wrong with the above code?
P.S. I am aware of the Openseadragon API which provides a dynamic scalebar. However, I wish to stick to Openlayers API because I also have an extra map layer of anchor points at predefined locations on the static image.
I had the same problem. To make this work I created a vector layer where the axes are drawn.
To draw the axes, I listen to view changes: whenever the view changes, I calculate the current extent of the view.
With the extent information and the [width, height] of the image, you can then draw the axes.
let listenerAxis = null,
    w = 0,
    h = 0

const xAxisStyle = new ol.style.Style({
  stroke: new ol.style.Stroke({
    color: 'red',
    width: 2
  })
})
const yAxisStyle = new ol.style.Style({
  stroke: new ol.style.Stroke({
    color: 'green',
    width: 2
  })
})

const ImageLayer = new ol.layer.Image()
const AxisLayer = new ol.layer.Vector({ source: new ol.source.Vector() })
AxisLayer.setStyle((feature, resolution) => {
  if (feature.getProperties().axis == 'x') {
    return xAxisStyle
  }
  return yAxisStyle
})

const renderer = new ol.Map({
  target: 'map',
  layers: [ImageLayer]
})
AxisLayer.setMap(renderer)

processFile('https://i2.wp.com/beebom.com/wp-content/uploads/2016/01/Reverse-Image-Search-Engines-Apps-And-Its-Uses-2016.jpg?resize=640%2C426')

function removeAxis() {
  AxisLayer.getSource().clear()
  ol.Observable.unByKey(listenerAxis)
  listenerAxis = null
}

function drawAxis() {
  function draw() {
    AxisLayer.getSource().clear()
    const extent = renderer.getView().calculateExtent()
    const [xmin, ymin, xmax, ymax] = extent
    // X axis
    const axisX = new ol.geom.LineString([ [xmin, h / 2], [xmax, h / 2] ])
    // Y axis
    const axisY = new ol.geom.LineString([ [w / 2, ymin], [w / 2, ymax] ])
    const featureX = new ol.Feature({ geometry: axisX, axis: 'x' })
    const featureY = new ol.Feature({ geometry: axisY, axis: 'y' })
    AxisLayer.getSource().addFeatures([featureX, featureY])
  }
  listenerAxis = renderer.getView().on('change', draw)
  draw()
}

async function processFile(path) {
  ImageLayer.setSource()
  removeAxis()
  if (!path) {
    return
  }
  const [wi, hi] = await readImage(path)
  w = wi
  h = hi
  const source = getImageStatic(path, w, h)
  const view = getViewForImage(w, h)
  ImageLayer.setSource(source)
  renderer.setView(view)
  drawAxis()
}

// Some helpers
function readImage(localPath) {
  const img = document.createElement('img')
  return new Promise((res, rej) => {
    img.src = localPath
    img.addEventListener('load', (event) => {
      const { naturalWidth, naturalHeight } = img
      console.log('img', naturalWidth, naturalHeight)
      res([naturalWidth, naturalHeight])
    })
  })
}

function getViewForImage(w, h) {
  return new ol.View({
    center: [w / 2, h / 2],
    zoom: 2,
    projection: new ol.proj.Projection({
      extent: [0, 0, w, h],
      units: 'pixels'
    }),
    extent: [0, 0, w, h]
  })
}

function getImageStatic(path, w, h) {
  return new ol.source.ImageStatic({
    url: path,
    imageExtent: [0, 0, w, h]
  })
}
#map {
  width: 100%;
  height: 100%;
  background: grey;
}
<link href="https://openlayers.org/en/v4.6.5/css/ol.css" rel="stylesheet"/>
<script src="https://openlayers.org/en/v4.6.5/build/ol.js"></script>
<div id="map"></div>

Can I make a bookmarklet put some text into the clipboard?

Say I have a bit of text (actually 4 different addresses) that I'd like to be able to easily (and frequently) paste. Is there a way I can make a bookmarklet that will put those addresses into the clipboard?
I'd like to be able to click the appropriate one, then right click + Paste.
Yes, it's possible; have a look at zeroclipboard (note: it requires Flash). Also see this previous question.
Try building a Firefox extension instead of a bookmarklet. Mozilla XUL (extension language) lets you do copy-paste. Another option is a Java Applet.
http://brooknovak.wordpress.com/2009/07/28/accessing-the-system-clipboard-with-javascript/
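On modern browsers (secure contexts only, i.e. HTTPS pages), a bookmarklet can call the asynchronous Clipboard API directly. A minimal sketch with placeholder addresses:
javascript:void (function () {
  // Placeholder text; replace with the four addresses you actually want to paste.
  var text = '123 Example St\n456 Sample Ave\n789 Demo Rd\n12 Placeholder Ln';
  navigator.clipboard.writeText(text).then(
    function () { alert('Addresses copied to clipboard'); },
    function (err) { alert('Copy failed: ' + err); }
  );
})();
Collapse it onto a single line before saving it as the bookmark URL; clicking the bookmarklet counts as the user gesture the Clipboard API expects.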
Method with no third-party libraries
While zeroclipboard could potentially work, this method will allow you to visually select an element and automatically copy the inner text to your clipboard without having to download any third-party libraries. It is based on this function by Arne Hartherz and modified to work both in HTTPS and HTTP contexts.
Readable version:
var overlay = document.createElement('div');
Object.assign(overlay.style, {
  position: 'fixed',
  top: 0,
  left: 0,
  width: '100vw',
  height: '100vh',
  zIndex: 99999999,
  background: 'transparent',
  cursor: 'crosshair'
});
document.body.append(overlay);

function copyToClipboard(textToCopy) {
  // navigator clipboard api needs a secure context (https)
  if (navigator.clipboard && window.isSecureContext) {
    // navigator clipboard api method
    return navigator.clipboard.writeText(textToCopy);
  } else {
    // text area method
    let textArea = document.createElement("textarea");
    textArea.value = textToCopy;
    // make the textarea out of viewport
    textArea.style.position = "fixed";
    textArea.style.left = "-999999px";
    textArea.style.top = "-999999px";
    document.body.appendChild(textArea);
    textArea.focus();
    textArea.select();
    return new Promise((res, rej) => {
      // here the magic happens
      document.execCommand('copy') ? res() : rej();
      textArea.remove();
    });
  }
}

function getElement(event) {
  overlay.style.pointerEvents = 'none';
  var element = document.elementFromPoint(event.clientX, event.clientY);
  overlay.style.pointerEvents = 'auto';
  return element;
}

document.addEventListener('mousemove', function(event) {
  var element = getElement(event);
  if (!element) return;
  var position = element.getBoundingClientRect();
  Object.assign(overlay.style, {
    background: 'rgba(0, 128, 255, 0.25)',
    outline: '1px solid rgba(0, 128, 255, 0.5)',
    top: '' + position.top + 'px',
    left: '' + position.left + 'px',
    width: '' + position.width + 'px',
    height: '' + position.height + 'px'
  });
});

overlay.addEventListener('click', function(event) {
  var element = getElement(event);
  var text = element.textContent || element.value;
  text = text.replace(/\n[ \n]+\n/g, "\n").replace(/\n\n+/g, "\n\n").replace(/^\n+|\n+$/g, '');
  if (!text.match("\n")) text = text.replace(/^ +| +$/, '');
  copyToClipboard(text);
  document.body.removeChild(overlay);
});
Minified version for use in a bookmarklet:
javascript:void function(){function a(a){if(navigator.clipboard&&window.isSecureContext)return navigator.clipboard.writeText(a);else{let b=document.createElement("textarea");return b.value=a,b.style.position="fixed",b.style.left="-999999px",b.style.top="-999999px",document.body.appendChild(b),b.focus(),b.select(),new Promise((a,c)=>{document.execCommand("copy")?a():c(),b.remove()})}}function b(a){c.style.pointerEvents="none";var b=document.elementFromPoint(a.clientX,a.clientY);return c.style.pointerEvents="auto",b}var c=document.createElement("div");Object.assign(c.style,{position:"fixed",top:0,left:0,width:"100vw",height:"100vh",zIndex:99999999,background:"transparent",cursor:"crosshair"}),document.body.append(c);document.addEventListener("mousemove",function(a){var d=b(a);if(d){var e=d.getBoundingClientRect();Object.assign(c.style,{background:"rgba(0, 128, 255, 0.25)",outline:"1px solid rgba(0, 128, 255, 0.5)",top:""+e.top+"px",left:""+e.left+"px",width:""+e.width+"px",height:""+e.height+"px"})}}),c.addEventListener("click",function(d){var e=b(d),f=e.textContent||e.value;f=f.replace(/\n[ \n]+\n/g,"\n").replace(/\n\n+/g,"\n\n").replace(/^\n+|\n+$/g,""),f.match("\n")||(f=f.replace(/^ +| +$/,"")),a(f),document.body.removeChild(c)})}();
