I want cubeTextureLoader cube map background but it doesn't work, Why? - developer-tools

Three js
I want a CubeTextureLoader cube map background, but it doesn't work: it shows nothing except a white background and my object, even though I want a 3D background. It gives me an error like this one -
"Access to image at 'file:///C:/Users/Mili%20Murmu/Documents/Arena_web/Mili/ThreeJs/Bts_music-player/images/posx.jpg' from origin 'null' has been blocked by CORS policy: Cross origin requests are only supported for protocol schemes: http, data, chrome, chrome-extension, chrome-untrusted, https.".
second error -
"Failed to load resource: net::ERR_FAILED".
I don't understand why it isn't showing any cube map background. Please help me — how can I solve this?
// Scene setup for the music-player planet viewer.
// NOTE: the cube-map faces are loaded from relative paths ('images/...').
// Opening this page via file:// triggers exactly the CORS error quoted
// above — cross-origin requests are only allowed for http/https schemes.
// Serve the folder over HTTP (e.g. a local dev server) and the background
// will load.
const scene = new THREE.Scene();

// Lights: a bright point light plus a dim ambient fill.
const light = new THREE.PointLight(0xffffff, 1);
light.position.set(2000, 2000, 1500);
scene.add(light);
const ambientlight = new THREE.AmbientLight(0xffffff, 0.2);
scene.add(ambientlight);

// Camera.
const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.3, 10000);
camera.position.set(1300, 0, 0);

// Keep renderer and camera in sync with the window size.
function onWindowResize() {
  const width = window.innerWidth;
  const height = window.innerHeight;
  renderer.setSize(width, height);
  camera.aspect = width / height;
  camera.updateProjectionMatrix();
}

// Renderer.
const renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true });
renderer.setSize(window.innerWidth - 8, window.innerHeight - 8);
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setClearColor("#803855");
// FIX: `shadowMapEnabled` is the pre-r70 name; on r126 use shadowMap.enabled.
renderer.shadowMap.enabled = true;

// FIX: attach the canvas exactly once, inside #world. The original code
// appended it to the container AND then to document.body — appendChild
// *moves* an existing node, so the second call pulled the canvas back out
// of the container.
const container = document.getElementById('world');
container.appendChild(renderer.domElement);

// FIX: the resize handler above was defined but never registered.
window.addEventListener('resize', onWindowResize, false);

const controls = new THREE.OrbitControls(camera, renderer.domElement);

const group = new THREE.Group();
scene.add(group);

const imgLoc = "https://eyes.nasa.gov/apps/exo/assets/image/exoplanet/";

// Textured planet sphere.
const geometry = new THREE.SphereGeometry(500, 32, 32);
const material = new THREE.MeshPhongMaterial();
const planet = new THREE.Mesh(geometry, material);
group.add(planet);

// Small decorative cube.
const starGeometry = new THREE.BoxGeometry(20, 20, 20);
const starMaterial = new THREE.MeshBasicMaterial();
const starsphere = new THREE.Mesh(starGeometry, starMaterial);
scene.add(starsphere);

// Planet textures — the remote host must send CORS headers for these to load.
let loader = new THREE.TextureLoader();
material.map = loader.load(imgLoc + 'GJ_504_b.jpg');
material.bumpMap = loader.load(imgLoc + 'GJ_504_b.jpg');
material.bumpScale = 8;
material.specular = new THREE.Color('#000000');

// Cube-map background. Face order is +x, -x, +y, -y, +z, -z.
var textureCubeLoader = new THREE.CubeTextureLoader();
var path = 'images/';
var textureCube = textureCubeLoader.load([
  path + 'posx.jpg', path + 'negx.jpg',
  path + 'posy.jpg', path + 'negy.jpg',
  path + 'posz.jpg', path + 'negz.jpg',
]);
scene.background = textureCube;

// Render loop.
const animate = () => {
  requestAnimationFrame(animate);
  planet.rotation.y += 0.001;
  renderer.render(scene, camera);
};
animate();
/* Global reset: include borders and padding in element dimensions. */
*{
box-sizing: border-box;
}
/* Remove default page margins so the canvas fills the viewport. */
body{
margin: 0;
padding: 0;
}
/* Container that receives the WebGL canvas (see js/main.js). */
#world{
margin: 0;
padding: 0;
}
<html>
<head>
<meta charset="utf-8">
<title>BTS_Music Player</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" type="text/css" href="css/style.css">
<!-- three.js r126 plus an OrbitControls build loaded from CDNs -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r126/three.min.js"></script>
<script src="https://s3-us-west-2.amazonaws.com/s.cdpn.io/264161/OrbitControls.js"></script>
</head>
<body>
<!-- js/main.js appends the WebGL canvas into this container -->
<div id="world"></div>
<script src="js/main.js"></script>
</body>
</html>

Related

How i can extract svg element with text pdf js

I need to extract an SVG element with text from a PDF. But it does not work when I do it like this:
PDF:
Pdf link https://dropfiles.org/2dTlpxTN
// PDF source and render settings.
const PDF_PATH = "https://dropfiles.org/2dTlpxTN";
const PAGE_NUMBER = 1;
const PAGE_SCALE = 1.5;
// XML namespace used when creating SVG nodes via createElementNS.
const SVG_NS = "http://www.w3.org/2000/svg";
// Path to the pdf.js worker script, relative to this file.
pdfjsLib.GlobalWorkerOptions.workerSrc =
"../../node_modules/pdfjs-dist/build/pdf.worker.js";
// Build an SVG element sized to the viewport and add one <text> node per
// text item, positioned via the combined viewport/item transform.
function buildSVG(viewport, textContent) {
  const svg = document.createElementNS(SVG_NS, "svg:svg");
  svg.setAttribute("width", viewport.width + "px");
  svg.setAttribute("height", viewport.height + "px");
  // Items are transformed to have a 1px font size.
  svg.setAttribute("font-size", 1);
  for (const item of textContent.items) {
    // Account for the viewport transform (scale, rotation, Y-axis flip),
    // then flip once more so the text itself reads upright.
    const flip = [1, 0, 0, -1, 0, 0];
    const matrix = pdfjsLib.Util.transform(
      pdfjsLib.Util.transform(viewport.transform, item.transform),
      flip
    );
    const fontStyle = textContent.styles[item.fontName];
    // One positioned <text> element per item.
    const textEl = document.createElementNS(SVG_NS, "svg:text");
    textEl.setAttribute("transform", "matrix(" + matrix.join(" ") + ")");
    textEl.setAttribute("font-family", fontStyle.fontFamily);
    textEl.textContent = item.str;
    svg.append(textEl);
  }
  return svg;
}
// Load the document, fetch one page's text content, build the SVG, and
// inject it into #pageContainer.
async function pageLoaded() {
  const task = pdfjsLib.getDocument({ url: PDF_PATH });
  const doc = await task.promise;
  const pdfPage = await doc.getPage(PAGE_NUMBER);
  const pageViewport = pdfPage.getViewport({ scale: PAGE_SCALE });
  const content = await pdfPage.getTextContent();
  // Build the SVG and attach it to the DOM.
  const svgNode = buildSVG(pageViewport, content);
  document.getElementById("pageContainer").append(svgNode);
  // Free page resources once the SVG is in the DOM.
  pdfPage.cleanup();
}
// Defer startup until the DOM is ready; bail out with a build hint when
// the pdf.js bundle is not available.
document.addEventListener("DOMContentLoaded", () => {
  if (typeof pdfjsLib !== "undefined") {
    pageLoaded();
    return;
  }
  // eslint-disable-next-line no-alert
  alert("Please build the pdfjs-dist library using\n `gulp dist-install`");
});
This extracted the text without its font, and it extracted all of the text. I need to extract the text as an image.
If i do this:
// Render one page onto an off-screen canvas and resolve with a PNG data URL.
// NOTE(review): `resolve` comes from an enclosing Promise executor that is
// not shown here; `pdfDoc_` and `imagesSetting_reader` are outer globals.
pdfDoc_.getPage(pageNumber).then(function (page) {
// Off-screen canvas sized to the scaled viewport.
var canvasEl = document.createElement('canvas');
var context = canvasEl.getContext('2d');
var viewport = page.getViewport({scale: imagesSetting_reader.scaleText});
canvasEl.height = viewport.height;
canvasEl.width = viewport.width;
// `back` is never assigned, so the render uses the default background.
var back;
var renderContext = {
canvasContext: context,
background:back,
viewport: viewport
};
page.render(renderContext).promise.then(function () {
resolve(canvasEl.toDataURL("image/png"));
})
});
It extracted text and background image.
if i do like this :
// Convert the page's operator list into an SVG DOM via pdf.js SVGGraphics.
// NOTE(review): `page` and `viewport` come from the surrounding scope (not
// shown). Serializing the result can fail with "Namespace prefix svg on svg
// is not defined" (the error quoted below) because the nodes carry an
// 'svg:' prefix that standalone XML parsers require to be declared.
page.getOperatorList().then(opList => {
var svgGfx = new pdfjsLib.SVGGraphics(page.commonObjs, page.objs);
return svgGfx.getSVG(opList, viewport);
}).then(svg => {
console.log(svg)
return svg;
});
And extracted text from svg, i got error :
error on line 1 at column 101: Namespace prefix svg on svg is not defined
How i can extract only svg text with font and convert it to image ?
Like this:

Why is the zIndex sequence of my objects not what I expected?

How can I get a list of all objects with all params (x, y, width, etc.), including zIndex param on the stage after completing resizing? And how can I set an zIndex for each object when creating a stage?
I have this code, but setZIndex is not working correctly: the images are not stacked in the order I set.
// Stage without explicit width/height.
// NOTE(review): confirm the 'dropzone' container supplies dimensions.
const oKonvaStage = new Konva.Stage({
container: 'dropzone'
});
const oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);
// Image 1 — intended zIndex 2.
const oKonvaImage1 = new Konva.Image({
x: 624,
y: 433,
width: 1920,
height: 1280
});
const oImage1 = new Image();
// Image loads are asynchronous: these onload handlers fire in download
// order, not source order, so the add/setZIndex calls race each other —
// which is why the final stacking differs from the requested zIndex values.
oImage1.onload = function() {
oKonvaImage1.image(oImage1);
oKonvaLayer.add(oKonvaImage1);
oKonvaImage1.setZIndex(2);
oKonvaLayer.draw();
};
oImage1.src = 'image1.jpg';
// NOTE(review): UpdateAttrs is defined elsewhere (not shown here).
oKonvaImage1.on('transformend', function(e) {
UpdateAttrs();
});
// Image 2 — intended zIndex 0.
const oKonvaImage2 = new Konva.Image({
x: 9,
y: 254,
width: 1024,
height: 1024
});
const oImage2 = new Image();
oImage2.onload = function() {
oKonvaImage2.image(oImage2);
oKonvaLayer.add(oKonvaImage2);
oKonvaImage2.setZIndex(0);
oKonvaLayer.draw();
};
oImage2.src = 'image2.jpg';
oKonvaImage2.on('transformend', function(e) {
UpdateAttrs();
});
// Image 3 — intended zIndex 1.
const oKonvaImage3 = new Konva.Image({
x: -586,
y: -315,
width: 1920,
height: 1199
});
const oImage3 = new Image();
oImage3.onload = function() {
oKonvaImage3.image(oImage3);
oKonvaLayer.add(oKonvaImage3);
oKonvaImage3.setZIndex(1);
oKonvaLayer.draw();
};
oImage3.src = 'image3.jpg';
Image3 has index = 1 but is over Image2 which has index = 2.
First off, prompted by #lavrton's comment, you should add the konva.Images to the canvas as soon as you have instantiated them - not in the image onload event. The image objects are no overhead to the canvas, and you can then be sure of the initial z-index sequence. You may change the sequence after that, but at least you start with a known layout.
And as a general rule, you need to take care when using any commands inside the onload event of an image. Image loading is asynchronous - meaning it does not happen in the sequence you might anticipate when you write the code. A large image coming from a slow server will load more slowly than a small image from a quick server, but you cannot make any assumptions about the sequence. The ONLY way you can ensure the sequence is to have the load of the second image initiated from the onload event of the first, but this is generally going to give bad UX.
Back to the code you posted, the code in my snippet below would appear to work as you intended. I switched the ECMA2015 const to plain old vars, and removed the unnecessary on-transforms.
I also added some code to analyse the results, showing the hoped-for zIndex value and the achieved zIndex values.
Note that the zIndex value in Konva is relative to the parent container and not absolute.
So, for example, when I set zIndex=999 I actually get a value of 4.
Summary:
avoid calling code for which sequence is critical in onload events.
do not expect to get exactly the zindex you ask for.
// jQuery lookup of the stage container; its size drives the stage size.
var div = $('#dropzone');
var oKonvaStage = new Konva.Stage({container: 'dropzone', width: div.width(), height: div.height()});
// Records the zIndex each image *asked* for; sayPos() later compares it
// with the zIndex actually achieved.
var indexWanted = [];
var oKonvaLayer = new Konva.Layer();
oKonvaStage.add(oKonvaLayer);
// Image 1 — requests zIndex 2.
var oKonvaImage1 = new Konva.Image({
name: 'image-1',
x: 20,
y: 20,
width: 300,
height: 100
});
var oImage1 = new Image();
// The three onload handlers below fire in download order, not source
// order — deliberately kept that way to demonstrate the resulting stacking.
oImage1.onload = function() {
oKonvaLayer.add(oKonvaImage1);
oKonvaImage1.image(oImage1);
oKonvaImage1.setZIndex(2);
indexWanted[0] = 2;
oKonvaLayer.draw();
sayPos();
};
oImage1.src = 'https://dummyimage.com/300x100/666/fff.png?text=Image-1'
// Image 2 — requests zIndex 0.
var oKonvaImage2 = new Konva.Image({
name: 'image-2',
x: 10,
y: 100,
width: 300,
height: 100
});
var oImage2 = new Image();
oImage2.onload = function() {
oKonvaImage2.image(oImage2);
oKonvaLayer.add(oKonvaImage2);
oKonvaImage2.setZIndex(0);
indexWanted[1] = 0;
oKonvaLayer.draw();
sayPos();
};
oImage2.src = 'https://dummyimage.com/300x100/333/fff.png?text=Image-2';
// Image 3 — requests an out-of-range zIndex on purpose.
var oKonvaImage3 = new Konva.Image({
name: 'image-3',
x: 280,
y: 80,
width: 300,
height: 100
});
var oImage3 = new Image();
oImage3.onload = function() {
oKonvaImage3.image(oImage3);
oKonvaLayer.add(oKonvaImage3);
oKonvaImage3.setZIndex(999); // <<<< notice this is set to 999. Compare to console output !!
indexWanted[2] = 999;
oKonvaLayer.draw();
sayPos();
};
oImage3.src = 'https://dummyimage.com/300x100/ccc/fff.png?text=Image-3';
oKonvaLayer.draw();
oKonvaStage.draw();
// Shared counters mutated across the three async image-load callbacks.
var picCnt = 0, s= '', imgNo = 0;
// Called from each onload handler; once all three images have landed,
// report the wanted vs. achieved zIndex for every image into #info.
function sayPos(){
picCnt = picCnt + 1;
if (picCnt === 3){
for (var i = 0; i < indexWanted.length; i = i + 1){
imgNo = i + 1;
s = s + '<br/>Image-' + imgNo + ' zindex wanted = ' + indexWanted[i] + ' actual zIndex=' + oKonvaLayer.findOne('.image-' + imgNo).getAbsoluteZIndex();
}
$('#info').html(s)
}
}
/* Result read-out area populated by sayPos(). */
#info {
font-size: 10pt;
height: 100px;
}
<!-- jQuery 2.x and Konva 2.5.1 from CDNs -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/konva/2.5.1/konva.min.js"></script>
<!-- sayPos() writes the zIndex comparison here -->
<p id='info' >
</p>
<!-- Stage container; its inline width/height size the Konva stage -->
<div id='dropzone' style="position: absolute; top: 90px; z-index: -1; display: inline-block; left: 0px; width: 600px; height: 400px; background-color: silver;"></div>

Post image data instead of image url in fabric js

I have implemented drag-and-drop images. The issue is that when I convert the canvas data with toSVG and send it to the server, it includes the image URL instead of the image data.
When user upload the file then i am using below method:
//Add photo in canvas
// Reads the chosen file as a data URL (base64) so the fabric image keeps
// its pixel data — a later toSVG() then embeds base64 instead of a URL.
document.getElementById('add_image').addEventListener('change', function (e) {
var file = e.target.files[0];
var reader = new FileReader();
// FileReader is asynchronous; everything below runs once the file is read.
reader.onload = function (f) {
var data = f.target.result;
fabric.Image.fromURL(data, function (img) {
var oImg = img.set({
left: 0,
top: 0,
angle: 0,
border: '#000',
stroke: '#F0F0F0', //<-- set this
strokeWidth: 0, //<-- set this
fill: 'rgba(0,0,0,0)'
}).scale(0.2);
canvas.add(oImg).renderAll();
// NOTE(review): `canvas` and `z_index` are globals maintained elsewhere.
canvas.moveTo(oImg, z_index);
z_index = z_index + 1;
//var a = canvas.setActiveObject(oImg);
// PNG snapshot of the whole canvas (currently unused).
var dataURL = canvas.toDataURL({
format: 'png',
quality: 1
});
});
};
reader.readAsDataURL(file);
// Clear the input so selecting the same file again re-fires 'change'.
$(this).val('');
});
then it sends the data as below:
< image xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="data:image/gif;base64,..." x="-100" y="-100" style="stroke: rgb(240,240,240); stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-opacity: 0; fill-rule: nonzero; opacity: 1;" width="200" height="200">
Here ... contains base64 data.
If image is uploaded using Drag n Drop then i am using below method:
// Fragment from the drag-and-drop path: wraps an already-loaded <img>
// element (`obj`) in a fabric.Image.
// NOTE(review): `obj`, `e`, `canvas`, `setImageWidth`, and `setImageHeight`
// come from the enclosing drop handler, which is not shown here. Because
// the image is referenced by element/URL, toSVG() serializes its URL
// rather than base64 pixel data — the behavior the question complains about.
var new_image = new fabric.Image(obj, {
width: obj.naturalWidth,
height: obj.naturalHeight,
scaleX: setImageWidth/obj.naturalWidth,
scaleY: setImageHeight/obj.naturalHeight,
// Set the center of the new object based on the event coordinates relative
// to the canvas container.
left: e.layerX,
top: e.layerY,
id: 'verified_image'
});
canvas.add(new_image);
canvas.renderAll();
which sends the data as below:
< image id="verified_image" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="http://localhost/lynkus/uploads/userprofile/verified_image.png" x="-256" y="-256" style="stroke: none; stroke-width: 0; stroke-dasharray: none; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 10; fill: rgb(0,0,0); fill-rule: nonzero; opacity: 1;" width="512" height="512">
So both type of image uploading is working fine but the issue is i am trying to generate the png file using above svg. So system is able to create png for 1st option but not for 2nd option as its have a URL.
So is there a way to send data as base 64 instead of image url in drag n drop option?
http://jsfiddle.net/durga598/w8kkc/414/
// Drop handler: redraws the dragged <img> onto a temporary canvas to obtain
// base64 data, then adds that data (not the original URL) to the fabric
// canvas so a later toSVG() embeds the pixels instead of a URL.
// NOTE(review): `canvas` is the fabric canvas, a global defined elsewhere.
function handleDrop(e) {
  // this / e.target is current target element.
  e.preventDefault();
  if (e.stopPropagation) {
    e.stopPropagation(); // stops the browser from redirecting.
  }
  var img = document.querySelector('#images img.img_dragging');
  var setImageWidth = 100,
    setImageHeight = 100;
  var imgObj = new Image();
  // crossOrigin must be set before src, otherwise toDataURL() below throws
  // a tainted-canvas SecurityError for cross-origin images.
  imgObj.crossOrigin = 'Anonymous';
  // FIX: the onload callback receives an Event, not an image — the old
  // `oImg` parameter name was misleading and unused, so it is dropped.
  imgObj.onload = function() {
    var tempCanvas = document.createElement('CANVAS');
    var tempCtx = tempCanvas.getContext('2d');
    // Size the scratch canvas to the image's natural dimensions.
    var height = tempCanvas.height = this.naturalHeight;
    var width = tempCanvas.width = this.naturalWidth;
    tempCtx.drawImage(this, 0, 0);
    var dataURL = tempCanvas.toDataURL();
    // Re-create the image from base64 data so toSVG() embeds the pixels.
    fabric.Image.fromURL(dataURL, function(img) {
      img.set({
        width: width,
        height: height,
        scaleX: setImageWidth / width,
        scaleY: setImageHeight / height,
        left: e.layerX,
        top: e.layerY,
      });
      canvas.add(img);
    });
  };
  imgObj.src = img.src;
  return false;
}
You need to create an image object and convert that to base64 data using toDataURL of canvas element. Then use fabric.Image.fromURL to add that image data to fabric canvas. Here is updated fiddle.

Three.js iPad performance

before I totally give up on this idea I wanted to check with the three.js community to make sure that I'm not doing something wrong?
Essentially I have taken Mr doob's canvas geometry cube example http://mrdoob.github.io/three.js/examples/canvas_geometry_cube.html and applied an image to the cube faces. When testing on the iPad I get a frame rate of 1fps (as opposed to 60fps with the original example). The image also looks really broken up during rotation.
Are there any tricks on getting this to work well on the iPad? I'm aware that WebGL isn't supported but I thought the canvas renderer would perform better than it is?
Code below
Many thanks
// Build the whole scene: info overlay, camera, textured cube, ground plane,
// canvas renderer, stats widget, and input/resize listeners.
// NOTE(review): container/camera/scene/cube/plane/renderer/stats and the
// event handlers are globals defined elsewhere in this file.
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
// Instruction overlay centered at the top of the page.
var info = document.createElement( 'div' );
info.style.position = 'absolute';
info.style.top = '10px';
info.style.width = '100%';
info.style.textAlign = 'center';
info.innerHTML = 'Drag to spin the cube';
container.appendChild( info );
camera = new THREE.PerspectiveCamera( 70, window.innerWidth / window.innerHeight, 1, 1000 );
camera.position.y = 150;
camera.position.z = 500;
scene = new THREE.Scene();
// Cube
var loader = new THREE.TextureLoader();
// Texture loading is asynchronous: the cube is added and the render loop
// (animate) started only after the image arrives.
loader.load( 'test-texture.png', function ( texture ) {
var materials = [];
var material = new THREE.MeshBasicMaterial( { map: texture, overdraw: 0.5 } );
material.transparent = true;
// Same textured material on all six faces.
for ( var i = 0; i < 6; i ++ ) {
materials.push( material );
}
// then the cube definitions
cube = new THREE.Mesh( new THREE.CubeGeometry( 200, 200, 200,4,4,4), new THREE.MeshFaceMaterial(materials));
cube.position.y = 150;
scene.add( cube );
animate();
});
// Plane
var geometry = new THREE.PlaneGeometry( 200, 200 );
// Rotate the plane to lie flat under the cube.
geometry.applyMatrix( new THREE.Matrix4().makeRotationX( - Math.PI / 2 ) );
var material = new THREE.MeshBasicMaterial( { color: 0xe0e0e0, overdraw: 0.5 } );
plane = new THREE.Mesh( geometry, material );
scene.add( plane );
// Software canvas renderer (no WebGL) — relevant to the iPad question above.
renderer = new THREE.CanvasRenderer();
renderer.setClearColor( 0xf0f0f0 );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
// FPS counter overlay.
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
container.appendChild( stats.domElement );
// Mouse/touch interaction handlers (defined elsewhere).
document.addEventListener( 'mousedown', onDocumentMouseDown, false );
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
document.addEventListener( 'touchmove', onDocumentTouchMove, false );
//
window.addEventListener( 'resize', onWindowResize, false );
}
I would suggest to wait for iOS8 which should bring WebGL support.

openstreetmap geolocation

I'm newbie, I'm reading in internet the possibilities of openstreetmap, and also I read about openlayers...What I need, for begin, is obtain the location and create the corresponding map...I found a good example with openlayers, this is the code:
<html>
<head>
<title>HTML5 geolocation with Openstreetmap and OpenLayers</title>
<style type="text/css">
/* NOTE(review): 240/320/10 lack units — browsers may ignore unitless
   non-zero lengths; confirm 'px' was intended. */
html, body, #basicMap {
width: 240;
height: 320;
margin: 10;
}
</style>
<script src="http://openlayers.org/api/OpenLayers.js"></script>
<script>
// Create the OSM map, then center it on the browser-reported position and
// drop a marker there.
function init() {
map = new OpenLayers.Map("basicMap");
var mapnik = new OpenLayers.Layer.OSM();
map.addLayer(mapnik);
// Asynchronous: this callback runs after the rest of init() has finished,
// so the `markers` layer declared below already exists by then
// (var hoisting makes the name visible here).
navigator.geolocation.getCurrentPosition(function(position) {
document.getElementById('anzeige').innerHTML="Latitude: " + position.coords.latitude + " Longitude: " +
position.coords.longitude + "<p>";
var lonLat = new OpenLayers.LonLat(position.coords.longitude,
position.coords.latitude)
.transform(
new OpenLayers.Projection("EPSG:4326"), //transform from WGS 1984
map.getProjectionObject() //to Spherical Mercator Projection
);
markers.addMarker(new OpenLayers.Marker(lonLat));
map.setCenter(lonLat, 14 // Zoom level
);
});
//map = new OpenLayers.Map("basicMap");
//var mapnik = new OpenLayers.Layer.OSM();
//map.addLayer(mapnik);
// Initial center until the geolocation callback recenters the map.
map.setCenter(new
OpenLayers.LonLat(3,3) // Center of the map
.transform(
new OpenLayers.Projection("EPSG:4326"), // transform from WGS 1984
new OpenLayers.Projection("EPSG:900913") // to Spherical Mercator Projection
), 15 // Zoom level
);
var markers = new OpenLayers.Layer.Markers( "Markers" );
map.addLayer(markers);
}
</script>
</head>
<body onload="init();">
<center>
HTML5 geolocation:
<br>
<!-- OpenLayers renders the map into this div -->
<div id="basicMap"></div>
<br>HTML5 geolocation<br>
<br>with Openstreetmap and OpenLayers<br>
For Android Froyo,iPhone,iPAD,iPod
<br>
Your position estimated by browser geolocation API:<p>
<div id="anzeige">(will be displayed here)<p></div>
Andreas Bischoff
<br>(view source to see how it works
</center>
</body>
</html>
You can see a live example here: http://www.pediaphon.org/~bischoff/location_based_services/
Next step: I need to draw a sketch line which displays the route followed. Here is a live example of drawing lines (by pressing shift and clicking the mouse): http://openlayers.org/dev/examples/draw-feature.html
But I'm new and I'm lost on how to call the OpenLayers API in order to draw a line from origin to destination... any help is welcome.
Best regards.
EDIT:
I just copied this piece of code ( Drawing a path with a line in OpenLayers using JavaScript ) just before the closing tag, but I don't see the line drawn:
// Vector layer that will hold the drawn route.
var lineLayer = new OpenLayers.Layer.Vector("Line Layer");
map.addLayer(lineLayer);
map.addControl(new OpenLayers.Control.DrawFeature(lineLayer, OpenLayers.Handler.Path));
// FIX: the raw coordinates are lon/lat (EPSG:4326) but the map uses
// Spherical Mercator (EPSG:900913, see the setCenter call above). Without
// transforming them — exactly as the marker code does for lonLat — the
// line is placed near (0, 0) in Mercator meters and is never visible at
// the map's center/zoom. Transform each point into the map projection.
var fromProjection = new OpenLayers.Projection("EPSG:4326");
var toProjection = map.getProjectionObject();
var points = new Array(
/*I put these coords of my city*/
new OpenLayers.Geometry.Point(-3.7904085, 37.76225609999999).transform(fromProjection, toProjection),
new OpenLayers.Geometry.Point(-4.7904085, 39.76225609999999).transform(fromProjection, toProjection)
);
var line = new OpenLayers.Geometry.LineString(points);
// Simple semi-transparent blue stroke for the route.
var style = {
strokeColor: '#0000ff',
strokeOpacity: 0.5,
strokeWidth: 5
};
var lineFeature = new OpenLayers.Feature.Vector(line, null, style);
lineLayer.addFeatures([lineFeature]);

Resources