I have a small problem with gradients in a sankey diagram.
My sankey diagram currently looks like this:
[screenshot: sankey diagram without gradients]
I don't know how to make each link's gradient flow from its first node to its second node, and so on. I'd like to create something like this:
[screenshot: sankey diagram with gradient links]
So far I have managed something like this: JSFiddle, but it isn't what I want.
My JS code, which contains only the sankeyChart class, looks like this:
function sankeyChart() {
var sankey = Highcharts.chart('container', {
title: {
text: 'Sankey Diagram'
},
series: [{
keys: ['from', 'to', 'weight'],
data: [],
type: 'sankey',
name: ''
}],
plotOptions: {
series: {
colorByPoint: true
}
},
credits: {
enabled: false
}
});
function setData(data) {
sankey.series[0].update({
data: data,
}, true);
}
return {
'chart': sankey,
'setData': setData
}
}
I'd be grateful for any ideas.
You can override the pointAttribs method and set the gradient the way you want:
var H = Highcharts;

H.seriesTypes.sankey.prototype.pointAttribs = function(point, state) {
    var opacity = this.options.linkOpacity,
        color = point.color;

    if (state) {
        opacity = this.options.states[state].linkOpacity || opacity;
        color = this.options.states[state].color || point.color;
    }

    return {
        fill: point.isNode ?
            // nodes keep a solid fill
            color : {
                // links get a horizontal gradient from the link color
                // (inherited from the from-node) to the to-node color
                linearGradient: {
                    x1: 0,
                    x2: 1,
                    y1: 0,
                    y2: 0
                },
                stops: [
                    [0, H.color(color).setOpacity(opacity).get()],
                    [1, H.color(point.toNode.color).setOpacity(opacity).get()]
                ]
            }
    };
};
Live demo: https://jsfiddle.net/BlackLabel/w3qgu497/
Docs: https://www.highcharts.com/docs/extending-highcharts/extending-highcharts
I'm working on a project about image tagging and I'm new to Caffe. I have Caffe code from one of the papers I read about image tagging, and the dataset used with it is CIFAR-100.
According to the code and the layer structure, I have to feed image features in LMDB format and labels in HDF5 format into the network separately. I converted the features to LMDB and the labels to HDF5, but I still can't run the code. I think the labels need some pre-processing before being fed into the network, but I couldn't work out what. I wrote an email to the paper's authors, but unfortunately they haven't responded yet.
As the paper's author told me, the dataset should have a hierarchical structure, so I'm trying to use the CIFAR-100 dataset with its two-level hierarchy: every image has two labels, a coarse label and a fine label.
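For illustration, here is roughly how I'm building one HDF5 label chunk with h5py for the cont_sentence/input_sentence/target_sentence tops used in the network below. The time-major (T, N) shapes, the token ids, and the file names are my own assumptions, not the paper's actual pre-processing:

import h5py
import numpy as np

# Each image's (coarse, fine) label pair is encoded as a 2-step "sentence":
# the LSTM should predict the coarse label at t=0 and the fine label at t=1.
T, N = 2, 10                              # sequence length, stream count (assumed)
coarse = np.random.randint(0, 20, N)      # CIFAR-100 coarse labels: 0..19
fine = np.random.randint(0, 100, N)       # CIFAR-100 fine labels: 0..99

input_sentence = np.zeros((T, N), dtype=np.float32)
target_sentence = np.zeros((T, N), dtype=np.float32)
cont_sentence = np.zeros((T, N), dtype=np.float32)

input_sentence[0, :] = 0                  # assumed <BOS> token id
input_sentence[1, :] = 1 + coarse         # the coarse token is fed back at t=1
target_sentence[0, :] = 1 + coarse        # predict coarse first...
target_sentence[1, :] = 1 + 20 + fine     # ...then fine
cont_sentence[0, :] = 0                   # 0 marks the start of a sequence
cont_sentence[1, :] = 1                   # 1 marks its continuation

# Dataset names must match the HDF5Data layer's tops exactly.
with h5py.File('labels_chunk0.h5', 'w') as f:
    f.create_dataset('cont_sentence', data=cont_sentence)
    f.create_dataset('input_sentence', data=input_sentence)
    f.create_dataset('target_sentence', data=target_sentence)

# hdf5_data_param's source is a text file listing such chunks, one per line.
with open('hdf5_chunk_list.txt', 'w') as f:
    f.write('labels_chunk0.h5\n')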
I've put the network definition below; if anybody can run it with the CIFAR-100 dataset, please show me how.
This is the prototxt file:
name: "res_to_lstm"
layer {
name: "image_feature"
type: "Data"
top: "global_pool"
include { phase: TRAIN }
data_param {
source: "./examples/coarse-to-fine/Imagenet/ResNet/feature/global_pool_train"
batch_size: 100
backend: LMDB
}
}
layer {
name: "data"
type: "HDF5Data"
top: "cont_sentence"
top: "input_sentence"
top: "target_sentence"
include { phase: TRAIN }
hdf5_data_param {
source: "/home/destiny/Datasets/Imagenet/train_h5_caffe/hdf5_chunk_list_shuffle.txt"
batch_size: 10
}
}
layer {
name: "image_feature"
type: "Data"
top: "global_pool"
include { phase: TEST }
data_param {
source: "./examples/coarse-to-fine/Imagenet/ResNet/feature/global_pool_val"
batch_size: 100
backend: LMDB
}
}
layer {
name: "data"
type: "HDF5Data"
top: "cont_sentence"
top: "input_sentence"
top: "target_sentence"
include { phase: TEST }
hdf5_data_param {
source: "/home/destiny/Datasets/Imagenet/val_h5_caffe/hdf5_chunk_list.txt"
batch_size: 10
}
}
layer {
name: "embedding"
type: "Embed"
bottom: "input_sentence"
top: "embedded_input_sentence"
param {
lr_mult: 1
}
embed_param {
bias_term: false
input_dim: 1861
num_output: 1000
weight_filler {
type: "uniform"
min: -0.08
max: 0.08
}
}
}
layer {
name: "lstm1"
type: "LSTM"
bottom: "embedded_input_sentence"
bottom: "cont_sentence"
bottom: "global_pool"
top: "lstm1"
recurrent_param {
num_output: 1000
weight_filler {
type: "uniform"
min: -0.08
max: 0.08
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "predict"
type: "InnerProduct"
bottom: "lstm1"
top: "predict"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1861
weight_filler {
type: "uniform"
min: -0.08
max: 0.08
}
bias_filler {
type: "constant"
value: 0
}
axis: 2
}
}
layer {
name: "cross_entropy_loss"
type: "SoftmaxWithLoss"
bottom: "predict"
bottom: "target_sentence"
top: "cross_entropy_loss"
loss_weight: 10
loss_param {
ignore_label: -1
}
softmax_param {
axis: 2
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "predict"
bottom: "target_sentence"
top: "accuracy"
include { phase: TEST }
accuracy_param {
axis: 2
ignore_label: -1
}
}
And this is the solver.prototxt file:
net: "./examples/coarse-to-fine/Imagenet/ResNet-BN/ResNet_train_lstm.prototxt"
test_iter: 500
test_interval: 1000
test_initialization: true
base_lr: 0.1
lr_policy: "step"
gamma: 0.5
stepsize: 60000
display: 200
max_iter: 260000
momentum: 0.9
weight_decay: 0.0000
snapshot: 10000
snapshot_prefix: "./examples/coarse-to-fine/Imagenet/ResNet/models/global_pool_lstm"
solver_mode: GPU
random_seed: 1701
average_loss: 100
clip_gradients: 10
I get a wrong accuracy from Caffe's accuracy layer when I revise the accuracy computation for my regression project.
Here is the accuracy layer code I modified:
for (int i = 0; i < outer_num_; ++i) {
  for (int j = 0; j < inner_num_; ++j) {
    // Euclidean distance between the prediction and the label;
    // for scalars sqrt(x * x) is just the absolute difference
    Distance = sqrt((bottom_data[i * dim + j] - bottom_label[i * inner_num_ + j]) *
                    (bottom_data[i * dim + j] - bottom_label[i * inner_num_ + j]));
    if (Distance <= 10) {
      ++accuracy;  // count predictions within the tolerance of 10
    }
  }
}
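What I'm trying to compute is a tolerance-based accuracy, i.e. the fraction of predictions that fall within Euclidean distance 10 of their labels. As a NumPy sketch with made-up numbers:

import numpy as np

predictions = np.array([3.0, 25.0, 118.0])   # hypothetical network outputs
labels = np.array([5.0, 90.0, 110.0])        # hypothetical regression targets

# Fraction of predictions within Euclidean distance 10 of the label;
# for scalars the distance is just the absolute difference
accuracy = np.mean(np.abs(predictions - labels) <= 10)
print(accuracy)  # 2 of 3 are within tolerance -> 0.666...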
but the result is:
I1008 22:14:37.701171 102764 caffe.cpp:286] Loss: 70993.9
I1008 22:14:37.701171 102764 caffe.cpp:298] accuracy = -1.#IND
here is my net.prototxt:
layer {
name: "framert"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "G:/lab-zhang/caffe-windows/data/csv/train_data_list.txt"
batch_size: 10
}
}
layer {
name: "inner1"
type: "InnerProduct"
bottom: "data"
top: "inner1"
param {
lr_mult: 1
decay_mult: 1.5
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 50
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "inner2"
type: "InnerProduct"
bottom: "inner1"
top: "inner2"
param {
lr_mult: 1
decay_mult: 1.0
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "inner2"
top: "inner2"
relu_param {
engine: CAFFE
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "inner2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "inner2"
bottom: "label"
top: "loss"
}
What is the reason for the wrong result, accuracy = -1.#IND?
The accuracy you get, -1.#IND, means the value your code computes is not a number (NaN).
Why you get NaN is unclear from the code you posted. I suspect you changed too much of the accuracy layer code and introduced a bug that leads to NaN.
Make sure you do not forget to update count, and that you write the computed accuracy to top[0]->mutable_cpu_data()[0].
In general, it is best not to override existing layers, but rather write new ones with the desired functionality.
When writing a new layer, please follow the guidelines in the Caffe wiki and in this git issue. Specifically, write a test for your layer!
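If writing a new C++ layer feels heavy, a "Python" layer is a convenient way to prototype this kind of metric. Below is a minimal sketch; the module name, class name, and hard-coded tolerance are my own choices, and it requires Caffe built with WITH_PYTHON_LAYER=1 plus a prototxt entry like layer { type: "Python" python_param { module: "tolerance_accuracy" layer: "ToleranceAccuracyLayer" } }:

# tolerance_accuracy.py (hypothetical module name)
import caffe
import numpy as np

class ToleranceAccuracyLayer(caffe.Layer):
    """Fraction of predictions within a fixed distance of their labels."""

    def setup(self, bottom, top):
        if len(bottom) != 2:
            raise Exception('Need two bottoms: predictions and labels.')
        self.tolerance = 10.0  # hard-coded; could be parsed from self.param_str

    def reshape(self, bottom, top):
        top[0].reshape(1)  # the accuracy is a single scalar

    def forward(self, bottom, top):
        diff = bottom[0].data.flatten() - bottom[1].data.flatten()
        top[0].data[0] = np.mean(np.abs(diff) <= self.tolerance)

    def backward(self, top, propagate_down, bottom):
        pass  # accuracy is a metric, no gradient needed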
I'm experimenting with the LeNet network as a binary classifier (yes/no).
The first and the last few layers of the testing configuration file are the following:
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
image_data_param {
source: "examples/my_example/test_images_labels.txt"
batch_size: 1
new_height: 128
new_width: 128
}
}
...
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
For testing I've set batch_size: 1, so I run testing with the following command:
./build/tools/caffe test -model examples/my_example/lenet_test.prototxt -weights=examples/my_example/lenet_iter_528.caffemodel -iterations 200
My intent is to be able to analyze the result for each test image separately.
Currently I get the following info for each iteration:
I0310 18:30:21.889688 5952 caffe.cpp:264] Batch 41, accuracy = 1
I0310 18:30:21.889739 5952 caffe.cpp:264] Batch 41, loss = 0.578524
However, since my network has two outputs, during testing I want to see two separate values, one for each output: one for class "0" ("no") and one for class "1" ("yes"). It should look something like this:
Batch 41, class 0 output: 0.755
Batch 41, class 1 output: 0.201
How should I modify the testing configuration file to make it happen?
You want to see the "Softmax" probability output (not just the loss).
To this end you might try using "SoftmaxWithLoss" with two "top"s (I'm not 100% sure this option is fully functional/supported):
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
top: "prob" # add class probability output
}
Alternatively, if the former solution does not work, explicitly add a "Softmax" layer:
layer {
name: "prob"
type: "Softmax"
bottom: "ip2"
top: "prob"
}
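Either way, once a "prob" top exists you can also read the per-image probabilities directly from Python instead of parsing the log. A sketch using pycaffe, with the paths taken from the question:

import caffe

# Assumes the "prob" top from one of the snippets above was added to the prototxt
net = caffe.Net('examples/my_example/lenet_test.prototxt',
                'examples/my_example/lenet_iter_528.caffemodel',
                caffe.TEST)

for i in range(200):                   # mirrors -iterations 200
    net.forward()                      # one image per forward pass (batch_size: 1)
    prob = net.blobs['prob'].data[0]   # softmax output, shape (2,)
    print('Batch %d, class 0 output: %.3f, class 1 output: %.3f'
          % (i, prob[0], prob[1]))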
Hi, can I tweak the treemap to do this?
I can pass the whole data hierarchy.
It's already implemented in D3.
Can I use or tweak any Highcharts properties to render an icicle chart?
You could use a heatmap - see this SO question: Creating ICICLE Chart using Highcharts Library
Another way would be to use a treemap. Unfortunately, there is no built-in layout algorithm that produces an icicle chart, so one has to be created.
How to create a custom layout algorithm: http://www.highcharts.com/docs/chart-and-series-types/treemap
For an icicle chart it is better if the children passed to the layout algorithm are unsorted. This behavior can be changed by wrapping the setTreeValues function.
Example: http://jsfiddle.net/c6bo2asn/
$(function () {
//start wrapper
(function (H) {
H.wrap(H.seriesTypes.treemap.prototype, 'setTreeValues', function (proceed) {
var tree = arguments[1];
//setTreeValues: function (tree) {
var series = this,
childrenTotal = 0,
sorted = [],
val,
point = series.points[tree.i];
// First give the children some values
H.each(tree.children, function (child) {
child = series.setTreeValues(child);
series.insertElementSorted(sorted, child, function (el, el2) {
return 0;//do not sort
});
if (!child.ignore) {
childrenTotal += child.val;
} else {
// #todo Add predicate to avoid looping already ignored children
series.eachChildren(child, function (node) {
H.extend(node, {
ignore: true,
isLeaf: false,
visible: false
});
});
}
});
// Set the values
val = H.pick(point && point.value, childrenTotal);
H.extend(tree, {
children: sorted,
childrenTotal: childrenTotal,
// Ignore this node if point is not visible
ignore: !(H.pick(point && point.visible, true) && (val > 0)),
isLeaf: tree.visible && !childrenTotal,
name: H.pick(point && point.name, ""),
val: val
});
return tree;
//},
});
}(Highcharts));
//end wrapper
//start layoutAlgorithm logic
function myFunction(parent, children) {
var childrenAreas = [],
widthSoFar = 0,
w;
Highcharts.each(children, function (child,i) {
if (child.level == 1) { // level-1 nodes become full-width rows
childrenAreas.push({
x: parent.x,
y: parent.y + parent.height*(i/children.length),
width: parent.width,
height: parent.height/children.length
});
} else {
w = parent.width * child.val/parent.val;
childrenAreas.push({
x: parent.x + widthSoFar,
y: parent.y,
width: child.name === '_empty' ? 0 : w,
height: parent.height
});
widthSoFar += w;
}
});
return childrenAreas;
}
//end layoutAlgorithm logic
//assign new layoutAlgorithm logic
Highcharts.seriesTypes.treemap.prototype.icicle = myFunction;
//create chart
$('#container').highcharts({
series: [{
type: "treemap",
layoutAlgorithm: 'icicle',
dataLabels: {
formatter: function(){
//hide _empty
return this.key === '_empty' ? '' : this.key;
},
rotation: -90
},
borderWidth: 0,
levels: [{
level: 2,
borderWidth: 1
}],
/*
level 1 data points are lines
*/
data: [{
id: 'top',
color: "#EC2500"
}, {
name: 'a Anne',
parent: 'top',
value: 50
}, {
name: 'a Rick',
parent: 'top',
value: 30
}, {
name: 'a Peter',
parent: 'top',
value: 20
}, {
id: 'second'
}, {
name: 'b Anne',
parent: 'second',
value: 30,
color: "#ECE100"
}, {
name: '_empty',
parent: 'second',
value: 20
}, {
name: 'b Peter',
parent: 'second',
value: 30,
color: "#EC9800"
}, {
name: '_empty',
parent: 'second',
value: 20
}, {
id: 'third',
color: '#EC9800'
}, {
name: 'o Anne',
parent: 'third',
value: 20
}, {
name: 'o Rick',
parent: 'third',
value: 10
}, {
name: '_empty',
parent: 'third',
value: 70
}, {
id: 'last',
color: '#669866'
}, {
name: '_empty',
parent: 'last',
value: 20
}, {
name: 'z Anne',
parent: 'last',
value: 10
}, {
name: '_empty',
parent: 'last',
value: 70
}]
}],
title: {
text: 'Fruit consumption'
}
});
});