We have an app, created with React Native, where the user can take a picture and save it to their account, so we send the photo to our server. The problem is that this takes a really long time (about 20 to 30 seconds) on iOS. With the Android build it is much faster (about 2 seconds).
We have tried reducing the quality of the pictures, but that did not have much of an effect either.
takePicture = async function(camera) {
  const options = {
    quality: 0.5,
    fixOrientation: true,
    forceUpOrientation: true
  };
  const data = await camera.takePictureAsync(options);
  this.props.onCapture(data);
};
We would like to achieve the same upload time as on Android. Can someone help?
I've written the following function. After taking an image, it returns both the original and the resized image, which comes out to around 500 KB on iOS.
It uses the expo-image-picker package (plus expo-permissions and expo-image-manipulator).
// Imports must live at module scope, not inside the function.
import * as ImagePicker from 'expo-image-picker';
import * as ImageManipulator from 'expo-image-manipulator';
import * as Permissions from 'expo-permissions';

const pickImage = async (index) => {
  const { status: cameraPerm } = await Permissions.askAsync(Permissions.CAMERA);
  const { status: cameraRollPerm } = await Permissions.askAsync(Permissions.CAMERA_ROLL);

  if (cameraPerm === 'granted' && cameraRollPerm === 'granted') {
    let pickerResult;
    // index 0 (or undefined): take a new photo; index 1: pick from the library.
    if (index === 0 || index === undefined) {
      pickerResult = await ImagePicker.launchCameraAsync({ allowsEditing: false, aspect: [4, 3], quality: 1 });
    } else if (index === 1) {
      pickerResult = await ImagePicker.launchImageLibraryAsync({ allowsEditing: false, aspect: [4, 3], quality: 1 });
    }

    if (pickerResult && !pickerResult.cancelled) {
      // Downscale to 1200px wide; note the format name is 'jpeg', not 'jpg'.
      const resizedImage = await ImageManipulator.manipulateAsync(
        pickerResult.uri, [{ resize: { width: 1200 } }],
        { compress: 1, format: 'jpeg', base64: false });
      return [resizedImage.uri, pickerResult.uri];
    }
    return;
  }

  // Messages is an app-level strings object.
  alert(Messages.userManagement.cameraPermissions);
};
Then you can call the above method like this:
let [resizedImage, originalImage] = await pickImage();
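For the original question about upload time, here is a minimal sketch of sending the resized image as multipart form data; the endpoint URL and field name are assumptions for illustration only:

// Hypothetical endpoint and field name, for illustration only.
const uploadImage = async (uri) => {
  const formData = new FormData();
  formData.append('photo', {
    uri,
    type: 'image/jpeg',
    name: 'photo.jpg',
  });
  const response = await fetch('https://example.com/upload', {
    method: 'POST',
    body: formData,
  });
  return response.json();
};

Uploading the resized URI rather than the original is what keeps the iOS payload small.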
I have been following this tutorial and it successfully renders video onto a cube using WebGL.
Instead of using a video file, is it possible to use a live feed from a webcam, without using frameworks like Three.js?
The code below reads from the webcam into an HTMLVideoElement. How do I convert that into a texture so I can map it onto my vertices in raw WebGL?
// Set by checkReady() once the video has data to upload to the texture.
var copyVideo = false;

function setupVideo() {
  const video = document.createElement('video');

  var playing = false;
  var timeupdate = false;

  video.autoplay = true;
  video.muted = true; // muted autoplay is allowed by browsers
  video.loop = true;

  // Waiting for both events ensures there is data in the video.
  video.addEventListener('playing', function() {
    playing = true;
    checkReady();
  }, true);
  video.addEventListener('timeupdate', function() {
    timeupdate = true;
    checkReady();
  }, true);

  // Request a camera stream near 640x360.
  var constraints = { audio: true, video: { width: 640, height: 360 } };
  navigator.mediaDevices.getUserMedia(constraints)
    .then(function(mediaStream) {
      video.srcObject = mediaStream;
      video.onloadedmetadata = function() {
        // Start playback only once the stream is attached.
        video.play();
      };
    })
    .catch(function(err) { console.log(err.name + ': ' + err.message); }); // always check for errors at the end.

  function checkReady() {
    if (playing && timeupdate) {
      copyVideo = true;
    }
  }

  return video;
}
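To answer the texture question: the standard raw-WebGL pattern (the same one the video-cube tutorial uses) is to create the texture once and then re-upload the current video frame with texImage2D on every render. A minimal sketch, assuming gl is your WebGLRenderingContext:

// Create the texture once, with a 1x1 placeholder until the first frame.
function initTexture(gl) {
  const texture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA,
                gl.UNSIGNED_BYTE, new Uint8Array([0, 0, 255, 255]));
  // Webcam dimensions are usually not powers of two, so disable mipmaps
  // and clamp to the edge.
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  return texture;
}

// Call once per rendered frame, but only after copyVideo is true:
// texImage2D accepts the video element directly as a source.
function updateTexture(gl, texture, video) {
  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA,
                gl.UNSIGNED_BYTE, video);
}

In your render loop, call updateTexture(gl, texture, video) each frame once copyVideo is true, then draw your cube exactly as before.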
I want to rotate an image in react-native and get the base64 of the rotated image.
I have tried several libraries:
react-native-image-rotate: It works well on Android, but on iOS I get rct-image-store://1 as the URL, so I tried getting base64 using rn-fetch-blob, but it throws an error that it can't recognize that URL.
react-native-image-resizer: I used this, but the result is not right on iOS: if I set -90 it rotates -180, and if I set -180 it rotates -270.
Please help me with this problem: how can I rotate the image on iOS?
I need to rotate the image by -90, -180, -270, and -360 (back to the original).
Finally, I found the answer:
import ImageRotate from 'react-native-image-rotate';
import ImageResizer from 'react-native-image-resizer';
import RNFetchBlob from 'rn-fetch-blob';

ImageRotate.rotateImage(
  this.state.image.uri,
  rotateDegree,
  uri => {
    if (Platform.OS === 'android') {
      // On Android the rotated URI is a real file path, so it can be read directly.
      console.log('rotate', uri);
      RNFetchBlob.fs.readFile(uri, 'base64').then(data => {
        const object = {};
        object.base64 = data;
        object.width = this.state.image.height;
        object.height = this.state.image.width;
        object.uri = uri;
        this.setState({image: object, spinner: false});
      });
    } else {
      // On iOS the result is an rct-image-store:// URL, so first write it to disk
      // via ImageResizer (width/height are swapped because of the rotation).
      console.log(uri);
      const outputPath = `${RNFetchBlob.fs.dirs.DocumentDir}`;
      ImageResizer.createResizedImage(
        uri,
        this.state.image.height,
        this.state.image.width,
        'JPEG',
        100,
        0,
        outputPath,
      ).then(response => {
        console.log(response.uri, response.size);
        let imageUri = response.uri;
        if (Platform.OS === 'ios') {
          imageUri = imageUri.split('file://')[1];
        }
        RNFetchBlob.fs.readFile(imageUri, 'base64').then(resData => {
          const object = {};
          object.base64 = resData;
          object.width = this.state.image.height;
          object.height = this.state.image.width;
          object.uri = response.uri;
          this.setState({image: object, spinner: false});
        });
      });
    }
  },
  error => {
    console.error(error);
  },
);
This is my working code so far:
rotateImage = (angle) => {
  const { currentImage } = this.state; // the original image; pass it in via params if you prefer
  ImageRotate.rotateImage( // using 'react-native-image-rotate'
    currentImage.uri,
    angle,
    (rotatedUri) => {
      if (Platform.OS === 'ios') {
        ImageStore.getBase64ForTag( // import from react-native
          rotatedUri,
          (base64Image) => {
            const imagePath = `${RNFS.DocumentDirectoryPath}/${new Date().getTime()}.jpg`;
            RNFS.writeFile(imagePath, `${base64Image}`, 'base64') // using 'react-native-fs'
              .then(() => {
                // now your file path is imagePath (which is a real path)
                this.updateCurrentImage(imagePath, currentImage.height, currentImage.width);
                ImageStore.removeImageForTag(rotatedUri);
              })
              .catch(() => {});
          },
          () => {},
        );
      } else {
        this.updateCurrentImage(rotatedUri, currentImage.height, currentImage.width);
      }
    },
    (error) => {
      console.error(error);
    },
  );
};
At that point you already have rotatedUri.
You can then get its base64 via ImageStore from react-native,
write it to a local file with react-native-fs,
and after that imagePath is the path to the local image.
Try using Expo's ImageManipulator:
https://docs.expo.io/versions/latest/sdk/imagemanipulator/
const rotated = await ImageManipulator.manipulateAsync(
  image.uri,
  [{ rotate: -90 }],
  { base64: true }
);
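Since the question is about getting base64, note that with base64: true the result exposes it directly (rotated here is the result of the call above):

const base64Image = rotated.base64; // populated because base64: true was passed
// rotated.uri additionally points to the rotated image file.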
I have Dropzone to upload files.
I have set parallelUploads to 10. When I drop 125 files, I get just one request to the server with only 20 files in it. I have also set parallelUploads to 9999, but again I receive just 20 files on the server.
Here is the code:
Dropzone.autoDiscover = false;

var myDropzone = new Dropzone("#myDropzoneForm", {
  addRemoveLinks: true,
  method: 'post',
  uploadMultiple: true,
  parallelUploads: 10, // also tried 9999, but still only 20 files arrive
  maxFilesize: 4,      // in MB (the Dropzone option name is maxFilesize)
  autoProcessQueue: false
});

myDropzone.on('addedfiles', function () {
  setTimeout(function () {
    sendFiles();
  }, 1);
});

// Remove duplicate files (same name) from the queue, keeping one copy.
myDropzone.on('addedfile', function (file) {
  var iFilesLength = myDropzone.files.length;
  if (iFilesLength > 0) {
    var bFileFound = false;
    for (var iiFileLength = iFilesLength - 1; iiFileLength >= 0; iiFileLength--) {
      if (file.name == myDropzone.files[iiFileLength].name) {
        if (bFileFound)
          myDropzone.removeFile(myDropzone.files[iiFileLength]);
        else bFileFound = true;
      }
    }
  }
});

function sendFiles() {
  if (myDropzone.files.length > 0) {
    myDropzone.processFiles(myDropzone.files);
  }
}

myDropzone.on('successmultiple', function (file, responseText) {
  $("#myResponse").html(responseText);
});
It was an issue with php.ini. PHP ships with default configuration settings in php.ini, and the default there is max_file_uploads = 20, which caps the number of files accepted per request. Change it to something like max_file_uploads = 99999, restart your server, and it will work.
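If you can't raise the server limit, a possible client-side workaround is to keep each request under the cap by processing the queue in batches. A sketch using Dropzone's public getQueuedFiles/processFiles methods; the batch size of 20 matches PHP's default limit:

// Send queued files in batches so each multi-file request stays
// within PHP's max_file_uploads limit.
function sendFilesInBatches(dropzone, batchSize) {
  var queued = dropzone.getQueuedFiles();
  for (var i = 0; i < queued.length; i += batchSize) {
    dropzone.processFiles(queued.slice(i, i + batchSize));
  }
}

sendFilesInBatches(myDropzone, 20);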
I have an Electron app that has very simple desktop capturing functionality:
const {desktopCapturer} = require('electron');
const fs = require('fs');

var recorder;
var chunks = [];
var WINDOW_TITLE = "App Title";

function startRecording() {
  desktopCapturer.getSources({ types: ['window', 'screen'] }, function(error, sources) {
    if (error) throw error;
    for (let i = 0; i < sources.length; i++) {
      let src = sources[i];
      if (src.name === WINDOW_TITLE) {
        navigator.webkitGetUserMedia({
          audio: false,
          video: {
            mandatory: {
              chromeMediaSource: 'desktop',
              chromeMediaSourceId: src.id,
              minWidth: 800,
              maxWidth: 1280,
              minHeight: 600,
              maxHeight: 720
            }
          }
        }, handleStream, handleUserMediaError);
        return;
      }
    }
  });
}

function handleStream(stream) {
  recorder = new MediaRecorder(stream);
  chunks = [];
  recorder.ondataavailable = function(event) {
    chunks.push(event.data);
  };
  recorder.start();
}

function stopRecording() {
  recorder.stop();
  toArrayBuffer(new Blob(chunks, {type: 'video/webm'}), function(ab) {
    var buffer = toBuffer(ab);
    var file = `./test.webm`;
    fs.writeFile(file, buffer, function(err) {
      if (err) {
        console.error('Failed to save video ' + err);
      } else {
        console.log('Saved video: ' + file);
      }
    });
  });
}

function handleUserMediaError(e) {
  console.error('handleUserMediaError', e);
}

function toArrayBuffer(blob, cb) {
  let fileReader = new FileReader();
  fileReader.onload = function() {
    let arrayBuffer = this.result;
    cb(arrayBuffer);
  };
  fileReader.readAsArrayBuffer(blob);
}

function toBuffer(ab) {
  let buffer = new Buffer(ab.byteLength);
  let arr = new Uint8Array(ab);
  for (let i = 0; i < arr.byteLength; i++) {
    buffer[i] = arr[i];
  }
  return buffer;
}

// Record for 3.5 seconds and save to disk
startRecording();
setTimeout(function() { stopRecording() }, 3500);
I know that to save the MediaRecorder Blob sources, I need to read them into an ArrayBuffer, then copy that into a normal Buffer for the file to be saved.
However, where this seems to be failing for me is combining the chunks into a single Blob. When the chunks are added into one Blob, it's like they just disappear: the new Blob is empty, and every data structure they are copied into afterwards is also completely empty.
Before creating the Blob, I know I have valid Blobs in the chunks array.
Here's the debug info for chunks, logged just before executing the new Blob(chunks, { ... }) part:
console.log(chunks)
And here's the debug info for the new Blob(chunks, {type: 'video/webm'}) object:
console.log(ab)
I'm completely stumped. All the reference tutorials and other SO answers I can find basically follow this flow. What am I missing?
Electron version: 1.6.2
That can't work as written: you aren't waiting for the data to arrive in stopRecording. recorder.stop() is asynchronous, and the final dataavailable event fires after it, so the Blob is built before the last chunks exist. Move the saving logic into the recorder's onstop handler. Change your stopRecording function to the following:
function stopRecording() {
  var save = function() {
    console.log(chunks);
    toArrayBuffer(new Blob(chunks, {type: 'video/webm'}), function(ab) {
      console.log(ab);
      var buffer = toBuffer(ab);
      var file = `./videos/example.webm`;
      fs.writeFile(file, buffer, function(err) {
        if (err) {
          console.error('Failed to save video ' + err);
        } else {
          console.log('Saved video: ' + file);
        }
      });
    });
  };
  // onstop fires after the final dataavailable event, so the
  // chunks array is complete by the time save() runs.
  recorder.onstop = save;
  recorder.stop();
}
This problem literally just fixed itself today without me changing anything. I'm not sure what about my system changed (other than a reboot) but it's now working exactly as it should.
I'm using Appcelerator to provide geo tracking. This works very well on Android, but on the iOS platform I am not getting speed or heading information from the onLocation event.
I initialise my GPS:
permissions.requestLocationPermissions(Ti.Geolocation.AUTHORIZATION_ALWAYS, function (e) {
  if (e.success && !configuredMonitoring) {
    if (OS_IOS) {
      Ti.Geolocation.accuracy = Ti.Geolocation.ACCURACY_BEST;
      Ti.Geolocation.distanceFilter = 5;
      Ti.Geolocation.preferredProvider = Ti.Geolocation.PROVIDER_GPS;
      Ti.Geolocation.pauseLocationUpdateAutomatically = true;
      Ti.Geolocation.activityType = Ti.Geolocation.ACTIVITYTYPE_OTHER_NAVIGATION;
    }
    if (OS_ANDROID) {
      Ti.Geolocation.Android.addLocationProvider(Ti.Geolocation.Android.createLocationProvider({
        name: Ti.Geolocation.PROVIDER_GPS,
        minUpdateDistance: 10,
        minUpdateTime: 1
      }));
      Ti.Geolocation.Android.addLocationRule(Ti.Geolocation.Android.createLocationRule({
        provider: Ti.Geolocation.PROVIDER_GPS,
        accuracy: 10,
        maxAge: 5000,
        minAge: 1000
      }));
      Ti.Geolocation.Android.addLocationProvider(Ti.Geolocation.Android.createLocationProvider({
        name: Ti.Geolocation.PROVIDER_NETWORK,
        minUpdateDistance: 10,
        minUpdateTime: 1
      }));
      Ti.Geolocation.Android.addLocationRule(Ti.Geolocation.Android.createLocationRule({
        provider: Ti.Geolocation.PROVIDER_NETWORK,
        accuracy: 10,
        maxAge: 5000,
        minAge: 1000
      }));
      Ti.Geolocation.Android.manualMode = true;
    }
  }
});
Then I add event listeners, including iOS-specific ones for location updates being paused and resumed:
if (Ti.Geolocation.locationServicesEnabled) {
  Ti.Geolocation.addEventListener('location', onLocation);
  if (OS_IOS) {
    Ti.Geolocation.addEventListener('locationupdatepaused', onLocationupdate);
    Ti.Geolocation.addEventListener('locationupdateresumed', onLocationupdate);
  }
}
Here is my onLocation function:
function onLocation(e) {
  if (!e.error) {
    var coords = e.coords;
    console.log(JSON.stringify(e));
  } else {
    console.log('Error! ' + JSON.stringify(e));
  }
}
In my returned data I see:
{
  "success": true,
  "coords": {
    "timestamp": 1488757841662,
    "speed": -1,
    "longitude": 173.2793426513672,
    "floor": {"level": 0},
    "latitude": -41.274879455566406,
    "accuracy": 65,
    "heading": -1,
    "altitude": 3.9126133918762207,
    "altitudeAccuracy": 10
  },
  "code": 0,
  "bubbles": true,
  "type": "location",
  "source": {
    "preferredProvider": null
  },
  "cancelBubble": false
}
I never see anything other than -1 for speed or heading, which, according to the documentation, means: "On iOS, a negative value indicates that the heading data is not valid."
What am I doing wrong here?
We had this same issue and were pulling our hair out trying to figure it out. Try changing Ti.Geolocation.ACCURACY_BEST to Ti.Geolocation.ACCURACY_BEST_FOR_NAVIGATION. This fixed it for us.
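In other words, the fix is a one-line change in the iOS setup block above:

// ACCURACY_BEST_FOR_NAVIGATION makes iOS report speed and heading,
// where ACCURACY_BEST left them at -1.
Ti.Geolocation.accuracy = Ti.Geolocation.ACCURACY_BEST_FOR_NAVIGATION;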