tensorflow.js model does not learn - machine-learning

My model doesn't learn. It is supposed to do a softmax calculation at the end, and I want a classification (quit or no-quit) as the result. The model should predict whether the customer will quit. I am passing the quit column as the label and have 196 input features.
My visor says there is no learning at all. But then I am not certain how the visor gets the information whether my model learns. I am very new to JavaScript and would appreciate any help.
ngOnInit() {
  this.train();
}

async train(): Promise<any> {
  const csvUrl = '/assets/little.csv';
  const csvDataset = tf.data.csv(
    csvUrl,
    {
      columnConfigs: {
        quit: {
          isLabel: true
        }
      },
      delimiter: ','
    });

  const numOfFeatures = (await csvDataset.columnNames()).length - 1;
  console.log(numOfFeatures);

  const flattenedDataset =
    csvDataset
      .map(({xs, ys}: any) => {
        // Convert xs (features) and ys (labels) from object form (keyed by
        // column name) to array form.
        return {xs: Object.values(xs), ys: Object.values(ys)};
      }).batch(10);

  console.log(flattenedDataset.toArray());

  const model = tf.sequential({
    layers: [
      tf.layers.dense({inputShape: [196], units: 100, activation: 'relu'}),
      tf.layers.dense({units: 100, activation: 'relu'}),
      tf.layers.dense({units: 100, activation: 'relu'}),
      tf.layers.dense({units: 1, activation: 'softmax'}),
    ]
  });

  await trainModel(model, flattenedDataset);
  const surface = { name: 'Model Summary', tab: 'Model Inspection' };
  tfvis.show.modelSummary(surface, model);
  console.log('Done Training');
}

async function trainModel(model, flattenedDataset) {
  // Prepare the model for training.
  model.compile({
    optimizer: tf.train.adam(),
    loss: tf.losses.sigmoidCrossEntropy,
    metrics: ['accuracy']
  });

  const batchSize = 32;
  const epochs = 50;

  return await model.fitDataset(flattenedDataset, {
    batchSize,
    epochs,
    shuffle: true,
    callbacks: tfvis.show.fitCallbacks(
      { name: 'Training Performance' },
      ['loss'],
      { height: 200, callbacks: ['onEpochEnd'] }
    )
  });
}

The number of units in the last layer should equal the number of categories. There are two categories: quit and no-quit. Additionally, your labels should be one-hot encoded. More general answers on why a model is not learning can be found here.
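A minimal sketch of what that change could look like, assuming the quit column holds a 0/1 value (your CSV preprocessing may differ, so treat this as an illustration rather than a drop-in fix):

const model = tf.sequential({
  layers: [
    tf.layers.dense({inputShape: [196], units: 100, activation: 'relu'}),
    tf.layers.dense({units: 100, activation: 'relu'}),
    tf.layers.dense({units: 100, activation: 'relu'}),
    tf.layers.dense({units: 2, activation: 'softmax'}),  // one unit per category
  ]
});

// One-hot encode the 0/1 label while flattening the dataset:
// 0 -> [1, 0] (no-quit), 1 -> [0, 1] (quit).
const flattenedDataset = csvDataset
  .map(({xs, ys}: any) => ({
    xs: Object.values(xs),
    ys: tf.oneHot(tf.tensor1d(Object.values(ys) as number[], 'int32'), 2)
         .squeeze()
         .toFloat()
  }))
  .batch(10);

// Pair the softmax output with a matching loss.
model.compile({
  optimizer: tf.train.adam(),
  loss: 'categoricalCrossentropy',
  metrics: ['accuracy']
});

Note that with a single-unit softmax output, the activation is always 1.0 regardless of the weights, which is why the loss curve in the visor never moves.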

Related

How to patch values in reactive form array in Angular, while editing input?

How to patch values for multiple inputs in a formArray, while editing input values that are coming from the DB? By the way, there are dynamic inputs implemented.
This is my formGroup with the formArray:
this.accountsForm = new FormGroup({
  'type_name': new FormControl('', Validators.required),
  'state': new FormControl(true, Validators.required),
  'masks': this.MaskArray,
});
This is the FormArray:
onInit() {
  this.MaskArray = new FormArray([]);
  this.MaskArray.push(
    new FormGroup({
      mask: new FormControl()
    })
  );
}
This is the dynamic inputs creation:
createItem() {
  (<FormArray>this.accountsForm.get('masks')).push(
    new FormGroup({
      mask: new FormControl()
    })
  );
}
Here a problem appears. I tried to get values from the DB and pass them to the inputs; it worked only for inputs with one value in the array coming from data.rows.
How can I pass multiple values from data.rows to the mask control of the formArray?
this.subs.add(this.sql.query(`Select t.Mask From Mid_s_Account_Type_Masks t Where Type_Id=${this.type_id} `).subscribe(data => {
  this.maskObj = data.rows;
  console.log(this.maskObj);
  for (var i = 0; i < this.maskObj.length; i++) {
    var j = 0;
    for (let row of this.maskObj) {
      (<FormArray>this.accountsForm.controls['masks']).at(j).patchValue({
        mask: row.mask,
      });
      j++;
      console.log(row.mask);
    }
    console.log(j);
  }
}));
I have patched the formArray value in my application through the patchModelValue method; this can be achieved with the help of the @rxweb/reactive-form-validators package.
patchModelValue will update the value of the FormControl inside the FormGroup based on the provided server JSON object or model object. For this you need to import RxFormBuilder into your component:
import { FormArray } from '@angular/forms';
import { RxFormBuilder, RxFormGroup } from '@rxweb/reactive-form-validators';

export class AccountComponent implements OnInit {
  accountsForm: RxFormGroup;

  constructor(private formBuilder: RxFormBuilder) { }

  ngOnInit() {
    this.accountsForm = <RxFormGroup>this.formBuilder.group({
      masks: [
        {
          mask: ''
        }
      ]
    });
  }

  getFormArray() {
    let formarray = this.accountsForm.controls.masks as FormArray;
    return formarray.controls;
  }

  patchvalue() {
    this.accountsForm.patchModelValue({
      masks: [{ mask: "xyz" }]
    });
  }
}
Please refer to this example: stackblitz

How to upload images faster on iOS when using takePictureAsync?

We have an app, created with React Native, where the user can take a picture and save it to their account. So we are sending the photo to our server. The problem is that this takes a really long time (about 20 to 30 seconds) on iOS. With the Android build it is much faster (about 2 seconds).
We have tried reducing the quality of the pictures, but that did not have a big effect either.
takePicture = async function(camera) {
  const options = {
    quality: 0.5,
    fixOrientation: true,
    forceUpOrientation: true
  };
  const data = await camera.takePictureAsync(options);
  this.props.onCapture(data);
};
We would like to achieve the same upload time as on Android. Can someone help?
I've written the following function. After taking an image, it returns the original and a resized image, which is around 500 KB on iOS.
It uses the ImagePicker package.
// Imports hoisted to module scope (an import cannot appear inside a function).
import * as ImagePicker from 'expo-image-picker';
import * as Permissions from 'expo-permissions';
import * as ImageManipulator from 'expo-image-manipulator';

const pickImage = async (index) => {
  const { status: cameraPerm } = await Permissions.askAsync(Permissions.CAMERA);
  const { status: cameraRollPerm } = await Permissions.askAsync(Permissions.CAMERA_ROLL);

  if (cameraPerm === "granted" && cameraRollPerm === "granted") {
    let pickerResult;
    if (index == 0 || index == undefined) {
      pickerResult = await ImagePicker.launchCameraAsync({ allowsEditing: false, aspect: [4, 3], quality: 1 });
    }
    else if (index == 1) {
      pickerResult = await ImagePicker.launchImageLibraryAsync({ allowsEditing: false, aspect: [4, 3], quality: 1 });
    }
    if (!pickerResult.cancelled) {
      let resizedImage = await ImageManipulator.manipulateAsync(
        pickerResult.uri, [{ resize: { width: 1200 } }],
        { compress: 1, format: "jpg", base64: false });
      return [resizedImage.uri, pickerResult.uri];
    } else {
      return
    }
  } else {
    alert(Messages.userManagement.cameraPermissions);
    return
  }
}
Then you can call the above method like this:
let [resizedImage, originalImage] = await pickImage();
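Sending the smaller resized file is what actually saves the upload time. A rough sketch of that upload step follows; the endpoint URL, field name, and helper name are placeholders I made up, not part of the original answer:

// Hypothetical helper: send the resized image as multipart/form-data.
async function uploadResizedImage(uri) {
  const formData = new FormData();
  formData.append('photo', {
    uri,                // file:// URI returned by ImageManipulator
    name: 'photo.jpg',
    type: 'image/jpeg'
  });
  // Replace the placeholder URL with your own server endpoint.
  const response = await fetch('https://example.com/upload', {
    method: 'POST',
    body: formData
  });
  return response.json();
}

const [resizedImage] = await pickImage();
await uploadResizedImage(resizedImage);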

ol3 / ol-ext: how can I make clusters for different layers?

I'm using ol3/ol4 with ol-ext.
I create two layers:
clusterSource = new ol.source.Cluster({
  distance: distanceFt,
  source: new ol.source.Vector()
});

// Animated cluster layer
clusterLayer = new ol.layer.AnimatedCluster({
  name: 'Cluster',
  source: clusterSource,
  animationDuration: 700, //$("#animatecluster").prop('checked') ? 700 : 0,
  // Cluster style
  style: getStyle
});
layersArray.push(clusterLayer); // adding to array

sourceReclamos_Eventos = new ol.source.Cluster({
  distance: distanceFt,
  source: new ol.source.Vector()
});
capaReclamos_Eventos = new ol.layer.AnimatedCluster({
  name: "Reclamos_Eventos",
  source: sourceReclamos_Eventos,
  animationDuration: 700,
  style: getStyle
});
layersArray.push(capaReclamos_Eventos);
Later, I add those layers in:
selectCluster = new ol.interaction.SelectCluster({
  layers: arraySelectCLuster,
  // Point radius: to calculate distance between the features
  pointRadius: 20,
  animate: true, //$("#animatesel").prop('checked'),
  // Feature style when it springs apart
  featureStyle: featureStyle,
  selectCluster: false, // disable cluster selection
});
After loading the layers, only the features in the first layer persist; in the second layer the features are removed (cleared) after the zoom changes... why?
Please help.
EDIT
I'm adding features using clusterLayer.getSource().addFeatures() and capaReclamos_Eventos.getSource().addFeatures().
function addFeatures_Reclamos_Eventos(ffs, centrar) {
  var transform = ol.proj.getTransform('EPSG:4326', 'EPSG:3857');
  var features = [];
  for (var i = 0; i < ffs.length; i++) {
    features[i] = new ol.Feature();
    features[i].setProperties(ffs[i]);
    var geometry = new ol.geom.Point(transform([parseFloat(ffs[i].lon), parseFloat(ffs[i].lat)]));
    features[i].setGeometry(geometry);
  }
  qweFeature = features;
  capaReclamos_Eventos.getSource().addFeatures(features);
  removeloading('mapLoading');
  if (document.getElementById('botonFiltrar')) {
    document.getElementById('botonFiltrar').disabled = false;
  }
  if (centrar) {
    window.setTimeout(function () {
      var extent = capaReclamos_Eventos.getSource().getExtent();
      map.getView().fit(extent, map.getSize());
    }, 700); // 0.7 sec
  }
}

Getting the class label using DynamicTimeWarping with Accord.NET

I'm developing a project in which I need to do gesture recognition.
After searching for a way to do this, I came across dynamic time warping.
To try this idea, and since my project is in C#, I decided to try Accord.NET.
Before trying this out in my project, I created a clean project and modified the example in Accord.NET's documentation, which can be found here:
http://accord-framework.net/docs/html/T_Accord_Statistics_Kernels_DynamicTimeWarping.htm
So, what I'm trying right now is to train my algorithm with a set of learning data (composed of the gestures swipe right, swipe left and double tap) and then use the same examples from the learning data to see if the algorithm identifies the correct gesture. The values are just an example, not the real deal.
The documentation is not very clear on how to do this, since the Decide method used in the example only returns a boolean.
I've searched the documentation for a way to identify the correct class, but to no avail.
The only thing I've found was the following line, but what it returns is an int value of 0 or 1, instead of a boolean:
var res1 = ((IClassifier)svm.ToMulticlass()).Decide(sequences[0]);
Can anyone point me in the right direction on how to correctly identify the gesture?
This is my first attempt at machine learning and Accord.NET, so this is all absolutely new to me.
The example code can be found below.
// Using directives for the Accord.NET types referenced below.
using Accord.MachineLearning;
using Accord.MachineLearning.VectorMachines.Learning;
using Accord.Math.Optimization.Losses;
using Accord.Statistics.Kernels;

namespace DynamicTimeWarpingExample
{
    public class Program
    {
        public static void Main(string[] args)
        {
            double[][][] sequences =
            {
                new double[][] // Swipe left
                {
                    new double[] { 1, 1, 1 },
                    new double[] { 1, 2, 1 },
                    new double[] { 1, 2, 2 },
                    new double[] { 2, 2, 2 },
                },
                new double[][] // Swipe right
                {
                    new double[] { 1, 10, 6 },
                    new double[] { 1, 5, 6 },
                    new double[] { 6, 7, 1 },
                },
                new double[][] // Double tap
                {
                    new double[] { 8, 2, 5 },
                    new double[] { 1, 50, 4 },
                }
            };

            int[] outputs =
            {
                0, // Swipe left
                1, // Swipe right
                2  // Double tap
            };

            var smo = new SequentialMinimalOptimization<DynamicTimeWarping, double[][]>()
            {
                Complexity = 1.5,
                Kernel = new DynamicTimeWarping(alpha: 1, degree: 1)
            };

            var svm = smo.Learn(sequences, outputs);

            bool[] predicted = svm.Decide(sequences);
            double error = new ZeroOneLoss(outputs).Loss(predicted); // error will be 0.0

            var res1 = ((IClassifier<double[][], int>)svm.ToMulticlass()).Decide(sequences[0]); // returns 0
            var res2 = ((IClassifier<double[][], int>)svm.ToMulticlass()).Decide(sequences[1]); // returns 1
            var res3 = ((IClassifier<double[][], int>)svm.ToMulticlass()).Decide(sequences[2]); // returns 1
        }
    }
}
***************** New Version *****************
public static void Main(string[] args)
{
    double[][][] sequences =
    {
        new double[][] // Swipe left
        {
            new double[] { 1, 1, 1 },
            new double[] { 1, 2, 1 },
            new double[] { 1, 2, 2 },
            new double[] { 2, 2, 2 },
        },
        new double[][] // Swipe right
        {
            new double[] { 1, 10, 6 },
            new double[] { 1, 5, 6 },
            new double[] { 6, 7, 1 },
        },
        new double[][] // Double tap
        {
            new double[] { 8, 2, 5 },
            new double[] { 1, 50, 4 },
        }
    };

    int[] outputs =
    {
        0, // Swipe left
        1, // Swipe right
        2  // Double tap
    };

    var teacher = new MulticlassSupportVectorLearning<DynamicTimeWarping, double[][]>()
    {
        // Configure the learning algorithm to use SMO to train the
        // underlying SVMs in each of the binary class subproblems.
        Learner = (param) => new SequentialMinimalOptimization<DynamicTimeWarping, double[][]>
        {
            Complexity = 1.5,
            Kernel = new DynamicTimeWarping(alpha: 1, degree: 1),
            //UseKernelEstimation = true
        }
    };

    // Learn a machine
    var machine = teacher.Learn(sequences, outputs);

    // Create the multi-class learning algorithm for the machine
    var calibration = new MulticlassSupportVectorLearning<DynamicTimeWarping, double[][]>()
    {
        Model = machine, // We will start with an existing machine

        // Configure the learning algorithm to use Platt's calibration
        Learner = (param) => new ProbabilisticOutputCalibration<DynamicTimeWarping, double[][]>()
        {
            Model = param.Model // Start with an existing machine
        }
    };

    // Configure parallel execution options
    calibration.ParallelOptions.MaxDegreeOfParallelism = 1;

    // Learn a machine
    calibration.Learn(sequences, outputs);

    double decision1, decision2, decision3, decision4, decision5, decision6;
    var res1 = machine.Probability(sequences[0], out decision1); // decision 0 - Probability 0.78698604216159851 - Score 1
    var res2 = machine.Probability(sequences[1], out decision2); // decision 1 - Probability 0.67246889837875257 - Score 1
    var res3 = machine.Probability(sequences[2], out decision3); // decision 2 - Probability 0.78698604216159851 - Score 1

    var newGesture1 = new double[][]
    {
        new double[] { 1, 1, 1 },
        new double[] { 1, 2, 1 },
        new double[] { 1, 2, 2 },
        new double[] { 2, 2, 2 },
    };
    var newGesture2 = new double[][]
    {
        new double[] { 1, 10, 6 },
        new double[] { 1, 5, 6 },
        new double[] { 6, 7, 1 },
    };
    var newGesture3 = new double[][]
    {
        new double[] { 8, 2, 5 },
        new double[] { 1, 50, 4 },
    };

    var res5 = machine.Score(newGesture1, out decision5); // decision 0 - Probability 0.35577588944247057 - Score 0.051251948605637254
    var res6 = machine.Score(newGesture2, out decision6); // decision 1 - Probability 0.40733908994050544 - Score 0.19912250476931792
    var res4 = machine.Score(newGesture3, out decision4); // decision 2 - Probability 0.71853321355842836 - Score 0.816934034911964
}
The problem is that you are creating a binary classifier for a problem that actually involves multiple classes.
In your case, instead of doing:
var smo = new SequentialMinimalOptimization<DynamicTimeWarping, double[][]>()
{
    Complexity = 1.5,
    Kernel = new DynamicTimeWarping(alpha: 1, degree: 1)
};

var svm = smo.Learn(sequences, outputs);
you would want to wrap this binary learning problem into a multi-class learning problem using:
// Create the multi-class learning algorithm for the machine
var teacher = new MulticlassSupportVectorLearning<DynamicTimeWarping, double[][]>()
{
    // Configure the learning algorithm to use SMO to train the
    // underlying SVMs in each of the binary class subproblems.
    Learner = (param) => new SequentialMinimalOptimization<DynamicTimeWarping, double[][]>
    {
        Complexity = 1.5,
        Kernel = new DynamicTimeWarping(alpha: 1, degree: 1)
    }
};

// Learn a machine
var svm = teacher.Learn(sequences, outputs);

Openlayers 3: Drawing grid lines (graticule) with predefined units on the custom static image

I am trying to draw custom x-y axis grid lines on top of a static image, i.e. using image pixels rather than latitudes and longitudes. Ideally, the grid lines should be redrawn dynamically when I drag/zoom/scroll the image, just like the x-y ruler bars in Photoshop.
I came across the following code example, which provides a custom projection to directly map image pixel coordinates to map coordinates:
http://openlayers.org/en/latest/examples/static-image.html
// Map views always need a projection. Here we just want to map image
// coordinates directly to map coordinates, so we create a projection that uses
// the image extent in pixels.
var extent = [0, 0, 1024, 968];
var projection = new ol.proj.Projection({
  code: 'xkcd-image',
  units: 'pixels',
  extent: extent
});
I tried to append the following code to the script. However, the ol.Graticule class seems to be incompatible with the custom ol.proj.Projection definition.
http://openlayers.org/en/latest/examples/graticule.html
// Create the graticule component
var graticule = new ol.Graticule({
  // the style to use for the lines, optional.
  strokeStyle: new ol.style.Stroke({
    color: 'rgba(255,120,0,0.9)',
    width: 2,
    lineDash: [0.5, 4]
  })
});
graticule.setMap(map);
What's wrong with the above code?
P.S. I am aware of the Openseadragon API which provides a dynamic scalebar. However, I wish to stick to Openlayers API because I also have an extra map layer of anchor points at predefined locations on the static image.
I had the same problem. To make this work I created a Vector layer (where the axes are drawn).
To draw the axes, I need to listen to view changes.
Whenever the view changes, I calculate the actual extent for the view.
With the extent information and the [width, height] of the image, you can then draw the axes.
let listenerAxis = null,
    w = 0,
    h = 0

const xAxisStyle = new ol.style.Style({
  stroke: new ol.style.Stroke({
    color: 'red',
    width: 2
  })
})
const yAxisStyle = new ol.style.Style({
  stroke: new ol.style.Stroke({
    color: 'green',
    width: 2
  })
})

const ImageLayer = new ol.layer.Image()
const AxisLayer = new ol.layer.Vector({ source: new ol.source.Vector() })
AxisLayer.setStyle((feature, resolution) => {
  if (feature.getProperties().axis == 'x') {
    return xAxisStyle
  }
  return yAxisStyle
})

const renderer = new ol.Map({
  target: 'map',
  layers: [ImageLayer]
})
AxisLayer.setMap(renderer)

processFile('https://i2.wp.com/beebom.com/wp-content/uploads/2016/01/Reverse-Image-Search-Engines-Apps-And-Its-Uses-2016.jpg?resize=640%2C426')

function removeAxis() {
  AxisLayer.getSource().clear()
  ol.Observable.unByKey(listenerAxis)
  listenerAxis = null
}

function drawAxis() {
  function draw() {
    AxisLayer.getSource().clear()
    const extent = renderer.getView().calculateExtent()
    const [xmin, ymin, xmax, ymax] = extent
    // X axis
    const axisX = new ol.geom.LineString([ [xmin, h / 2], [xmax, h / 2] ])
    // Y axis
    const axisY = new ol.geom.LineString([ [w / 2, ymin], [w / 2, ymax] ])
    const featureX = new ol.Feature({ geometry: axisX, axis: 'x' })
    const featureY = new ol.Feature({ geometry: axisY, axis: 'y' })
    AxisLayer.getSource().addFeatures([featureX, featureY])
  }
  listenerAxis = renderer.getView().on('change', draw)
  draw()
}

async function processFile(path) {
  ImageLayer.setSource()
  removeAxis()
  if (!path) {
    return
  }
  const [wi, hi] = await readImage(path)
  w = wi
  h = hi
  const source = getImageStatic(path, w, h)
  const view = getViewForImage(w, h)
  ImageLayer.setSource(source)
  renderer.setView(view)
  drawAxis()
}

// Some helpers
function readImage(localPath) {
  const img = document.createElement('img')
  return new Promise((res, rej) => {
    img.src = localPath
    img.addEventListener('load', (event) => {
      const { naturalWidth, naturalHeight } = img
      console.log('img', naturalWidth, naturalHeight)
      res([naturalWidth, naturalHeight])
    })
  })
}

function getViewForImage(w, h) {
  return new ol.View({
    center: [w / 2, h / 2],
    zoom: 2,
    projection: new ol.proj.Projection({
      extent: [0, 0, w, h],
      units: 'pixels'
    }),
    extent: [0, 0, w, h]
  })
}

function getImageStatic(path, w, h) {
  return new ol.source.ImageStatic({
    url: path,
    imageExtent: [0, 0, w, h]
  })
}
#map {
  width: 100%;
  height: 100%;
  background: grey;
}
<link href="https://openlayers.org/en/v4.6.5/css/ol.css" rel="stylesheet"/>
<script src="https://openlayers.org/en/v4.6.5/build/ol.js"></script>
<div id="map"></div>