Implement MLP in tensorflow

I want to implement the MLP model taught in https://www.coursera.org/learn/machine-learning, using tensorflow. Here's my implementation.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# one hidden layer MLP
x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])

W_h1 = tf.Variable(tf.random_normal([784, 512]))
h1 = tf.nn.sigmoid(tf.matmul(x, W_h1))

W_out = tf.Variable(tf.random_normal([512, 10]))
y_ = tf.matmul(h1, W_out)

# cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(y_, y)
cross_entropy = tf.reduce_sum(- y * tf.log(y_) - (1 - y) * tf.log(1 - y_), 1)
loss = tf.reduce_mean(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# train
with tf.Session() as s:
    s.run(tf.initialize_all_variables())
    for i in range(10000):
        batch_x, batch_y = mnist.train.next_batch(100)
        s.run(train_step, feed_dict={x: batch_x, y: batch_y})
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch_x, y: batch_y})
            print('step {0}, training accuracy {1}'.format(i, train_accuracy))
However, it does not work. I think the definitions for the layers are correct, but the problem is in the cross_entropy. If I use the first one, the commented-out line, the model converges quickly; but if I use the second one, which I think/hope is the translation of the course's equation, the model won't converge.
If you want to take a look at the cost equation, you can find it here.
Update
I have implemented this same MLP model using numpy and scipy, and it works.
In the tensorflow code, I added a print line in the training loop, and I found out that all the elements in y_ are nan... I think it is caused by arithmetic overflow or something similar.

It is likely a 0*log(0) issue.
Replacing
cross_entropy = tf.reduce_sum(- y * tf.log(y_) - (1 - y) * tf.log(1 - y_), 1)
with
cross_entropy = tf.reduce_sum(- y * tf.log(tf.clip_by_value(y_, 1e-10, 1.0)) - (1 - y) * tf.log(tf.clip_by_value(1 - y_, 1e-10, 1.0)), 1)
should avoid the NaNs. Please see Tensorflow NaN bug? for more details.

The problem, I think, is that nn.sigmoid_cross_entropy_with_logits expects unnormalized logits, whereas the function you replaced it with,
cross_entropy = tf.reduce_sum(- y * tf.log(y_) - (1 - y) * tf.log(1 - y_), 1)
expects y_ to be normalized (by the sigmoid) to between 0 and 1.
Try replacing
y_ = tf.matmul(h1, W_out)
with
y_ = tf.nn.sigmoid(tf.matmul(h1, W_out))
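Putting the two answers together, here is a minimal sketch of the fixed output/loss part of the question's script (everything else stays as in the question): either keep the raw logits and use the built-in numerically stable loss, or squash the output with a sigmoid and clip before taking the log.
# Sketch: two ways to fix the loss (assumes the question's x, y, h1, W_out).
logits = tf.matmul(h1, W_out)      # unnormalized scores
y_ = tf.nn.sigmoid(logits)         # normalized to (0, 1), used for accuracy

# Option 1: built-in, numerically stable loss that works directly on the logits
cross_entropy = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits), 1)

# Option 2: the hand-written loss, clipped so log() never sees 0
# cross_entropy = tf.reduce_sum(
#     - y * tf.log(tf.clip_by_value(y_, 1e-10, 1.0))
#     - (1 - y) * tf.log(tf.clip_by_value(1 - y_, 1e-10, 1.0)), 1)

loss = tf.reduce_mean(cross_entropy)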

Related

The neural network after several training epochs has too large sigmoid values and does not learn

I'm implementing a fully connected neural network for MNIST (not convolutional!) and I'm having a problem. After several forward and backward passes, the exponents become abnormally large and Python can no longer compute them. It seems to me that I implemented backward_pass incorrectly. Could you help me with this? Here are the network settings:
w_1 = np.random.uniform(-0.5, 0.5, (128, 784))
b_1 = np.random.uniform(-0.5, 0.5, (128, 1))
w_2 = np.random.uniform(-0.5, 0.5, (10, 128))
b_2 = np.random.uniform(-0.5, 0.5, (10, 1))
X_train shape: (784, 31500)
y_train shape: (31500,)
X_test shape: (784, 10500)
y_test shape: (10500,)
def sigmoid(x, alpha):
    return 1 / (1 + np.exp(-alpha * x))

def dx_sigmoid(x, alpha):
    exp_neg_x = np.exp(-alpha * x)
    return alpha * exp_neg_x / ((1 + exp_neg_x)**2)

def ReLU(x):
    return np.maximum(0, x)

def dx_ReLU(x):
    return np.where(x > 0, 1, 0)

def one_hot(y):
    one_hot_y = np.zeros((y.size, y.max() + 1))
    one_hot_y[np.arange(y.size), y] = 1
    one_hot_y = one_hot_y.T
    return one_hot_y

def forward_pass(X, w_1, b_1, w_2, b_2):
    layer_1 = np.dot(w_1, X) + b_1
    layer_1_act = ReLU(layer_1)
    layer_2 = np.dot(w_2, layer_1_act) + b_2
    layer_2_act = sigmoid(layer_2, 0.01)
    return layer_1, layer_1_act, layer_2, layer_2_act

def backward_pass(layer_1, layer_1_act, layer_2, layer_2_act, X, y, w_2):
    one_hot_y = one_hot(y)
    n_samples = one_hot_y.shape[1]
    d_loss_by_layer_2_act = (2 / n_samples) * np.sum(one_hot_y - layer_2_act, axis=1).reshape(-1, 1)
    d_layer_2_act_by_layer_2 = dx_sigmoid(layer_2, 0.01)
    d_loss_by_layer_2 = d_loss_by_layer_2_act * d_layer_2_act_by_layer_2
    d_layer_2_by_w_2 = layer_1_act.T
    d_loss_by_w_2 = np.dot(d_loss_by_layer_2, d_layer_2_by_w_2)
    d_loss_by_b_2 = np.sum(d_loss_by_layer_2, axis=1).reshape(-1, 1)
    d_layer_2_by_layer_1_act = w_2.T
    d_loss_by_layer_1_act = np.dot(d_layer_2_by_layer_1_act, d_loss_by_layer_2)
    d_layer_1_act_by_layer_1 = dx_ReLU(layer_1)
    d_loss_by_layer_1 = d_loss_by_layer_1_act * d_layer_1_act_by_layer_1
    d_layer_1_by_w_1 = X.T
    d_loss_by_w_1 = np.dot(d_loss_by_layer_1, d_layer_1_by_w_1)
    d_loss_by_b_1 = np.sum(d_loss_by_layer_1, axis=1).reshape(-1, 1)
    return d_loss_by_w_1, d_loss_by_b_1, d_loss_by_w_2, d_loss_by_b_2
for epoch in range(epochs):
    layer_1, layer_1_act, layer_2, layer_2_act = forward_pass(X_train, w_1, b_1, w_2, b_2)
    d_loss_by_w_1, d_loss_by_b_1, d_loss_by_w_2, d_loss_by_b_2 = backward_pass(layer_1, layer_1_act,
                                                                               layer_2, layer_2_act,
                                                                               X_train, y_train,
                                                                               w_2)
    w_1 -= learning_rate * d_loss_by_w_1
    b_1 -= learning_rate * d_loss_by_b_1
    w_2 -= learning_rate * d_loss_by_w_2
    b_2 -= learning_rate * d_loss_by_b_2

    _, _, _, predictions = forward_pass(X_train, w_1, b_1, w_2, b_2)
    predictions = predictions.argmax(axis=0)
    accuracy = accuracy_score(predictions, y_train)
    print(f"epoch: {epoch} / accuracy: {accuracy}")
My loss is MSE: (1 / n_samples) * np.sum((one_hot_y - layer_2_act)**2, axis=0)
These are my calculations (attached as images in the original post).
I tried decreasing the learning rate, adding the alpha coefficient to the exponent (e^(-alpha * x) in the sigmoid), and dividing my entire sample by 255, and still the program cannot learn because the numbers get too large.
To start, the uniform initialization you are using has a relatively big std; for a linear layer it should be about 1/sqrt(fan_in), which for the first layer will be:
1 / np.sqrt(128)
0.08838834764831843
which means:
w_1 = np.random.uniform(-0.08, 0.08, (128, 784))
...
Also, I did not check your forward and backward pass; assuming they are correct and you still see very large values in your activations, you could also normalize them (e.g. with an implementation of batch norm or layer norm) to force them to be centred around zero with unit std.
P.S.: I also noticed you are doing multi-class classification, so MSE is not a good choice; use softmax (or log-softmax, which is easier to implement) with a cross-entropy loss instead (see the sketch below). The loss not moving fast enough could also be linked to a poorly chosen learning rate. And are your inputs normalized?
You could plot the distributions of the layer activations and see if they look reasonable.
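For the softmax + cross-entropy suggestion, here is a minimal sketch (not the OP's code) that keeps the question's convention of columns being samples; the helper names softmax and cross_entropy_loss are my own:
import numpy as np

def softmax(z):
    # subtract the per-column max for numerical stability before exponentiating
    z = z - z.max(axis=0, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=0, keepdims=True)

def cross_entropy_loss(probs, one_hot_y):
    # mean negative log-likelihood over the batch (columns are samples)
    n_samples = one_hot_y.shape[1]
    return -np.sum(one_hot_y * np.log(probs + 1e-12)) / n_samples

# Convenient property: with softmax + cross-entropy, the gradient w.r.t. the
# pre-softmax values layer_2 is simply (probs - one_hot_y) / n_samples, which
# would replace the MSE/sigmoid-derivative product in backward_pass.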

loss.backward() no grad in pytorch NN

The code gives an error in loss.backward()
Error is:
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
for epoch in range(N_EPOCHS):
    model.train()
    for i, (im1, im2, labels) in enumerate(train_dl):
        i1 = torch.flatten(im1, 1)
        i2 = torch.flatten(im2, 1)
        inp = torch.cat([i1, i2], 1)
        b_x = Variable(inp)    # batch x
        b_y = Variable(labels) # batch y
        y_ = model(b_x).squeeze()
        y_ = (y_ > 0.5).float()
        print(y_)
        print(l)
        loss = criterion(y_, b_y)
        print(loss.item())
        loss.backward()
        optimizer.step()
y_ = (y_ > 0.5).float()
has a zero gradient: intuitively, tiny changes in the argument lead to absolutely no change in the value (if y_ changes by a tiny epsilon, the thresholded result does not change), so nothing useful can flow back through it.
With the additional info given by the OP in the comments, the correct approach here is just removing the line
y_ = (y_ > 0.5).float()
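A sketch of the corrected loop, assuming criterion is a loss that accepts the model's raw output (e.g. nn.BCEWithLogitsLoss, or nn.BCELoss if the model ends in a sigmoid); the thresholding is kept only for metrics, and the usual optimizer.zero_grad() call, missing from the question's snippet, is added:
for epoch in range(N_EPOCHS):
    model.train()
    for i, (im1, im2, labels) in enumerate(train_dl):
        inp = torch.cat([torch.flatten(im1, 1), torch.flatten(im2, 1)], 1)

        optimizer.zero_grad()
        y_ = model(inp).squeeze()         # raw, differentiable output goes into the loss
        loss = criterion(y_, labels.float())
        loss.backward()
        optimizer.step()

        preds = (y_ > 0.5).float()        # threshold only for metrics, not for the loss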

How to implement custom logloss with identical behavior to binary objective in LightGBM?

I am trying to implement my own loss function for binary classification. To get started, I want to reproduce the exact behavior of the binary objective. In particular, I want that:
The loss of both functions has the same scale
The training and validation slope is similar
predict_proba(X) returns probabilities
None of this is the case for the code below:
import sklearn.datasets
import lightgbm as lgb
import numpy as np

X, y = sklearn.datasets.load_iris(return_X_y=True)
X, y = X[y <= 1], y[y <= 1]

def loglikelihood(labels, preds):
    preds = 1. / (1. + np.exp(-preds))
    grad = preds - labels
    hess = preds * (1. - preds)
    return grad, hess

model = lgb.LGBMClassifier(objective=loglikelihood)  # or "binary"
model.fit(X, y, eval_set=[(X, y)], eval_metric="binary_logloss")
lgb.plot_metric(model.evals_result_)
With objective="binary": (plot omitted)
With objective=loglikelihood the slope is not even smooth: (plot omitted)
Moreover, sigmoid has to be applied to model.predict_proba(X) to get probabilities for loglikelihood (as I have figured out from https://github.com/Microsoft/LightGBM/issues/2136).
Is it possible to get the same behavior with a custom loss function? Does anybody understand where all these differences come from?
Looking at the output of model.predict_proba(X) in each case, we can see that the built-in binary_logloss model returns probabilities, while the custom model returns logits.
The built-in evaluation function takes probabilities as input. To fit the custom objective, we need a custom evaluation function which will take logits as input.
Here is how you could write this. I've changed the sigmoid calculation so that it doesn't overflow if logit is a large negative number.
def loglikelihood(labels, logits):
    # numerically stable sigmoid:
    preds = np.where(logits >= 0,
                     1. / (1. + np.exp(-logits)),
                     np.exp(logits) / (1. + np.exp(logits)))
    grad = preds - labels
    hess = preds * (1. - preds)
    return grad, hess

def my_eval(labels, logits):
    # numerically stable logsigmoid:
    logsigmoid = np.where(logits >= 0,
                          -np.log(1 + np.exp(-logits)),
                          logits - np.log(1 + np.exp(logits)))
    loss = (-logsigmoid + logits * (1 - labels)).mean()
    return "error", loss, False

model1 = lgb.LGBMClassifier(objective='binary')
model1.fit(X, y, eval_set=[(X, y)], eval_metric="binary_logloss")

model2 = lgb.LGBMClassifier(objective=loglikelihood)
model2.fit(X, y, eval_set=[(X, y)], eval_metric=my_eval)
Now the results are the same.
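One remaining difference, as noted in the question, is that the custom-objective model still returns raw scores rather than probabilities, so a sigmoid has to be applied before comparing predictions; a sketch, assuming the column layout of predict_proba matches the built-in binary case:
# model1 (objective='binary') already returns probabilities
p1 = model1.predict_proba(X)[:, 1]

# model2 (custom objective) returns raw scores, so squash them manually
raw = model2.predict_proba(X)[:, 1]
p2 = 1.0 / (1.0 + np.exp(-raw))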

How to include batch size in pytorch basic example?

I am new to pytorch. The following is the basic example of using the nn module to train a simple one-layer model with some random data (from here)
import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    print(t, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
As far as I understand, the batch size is equal to 1 in the example; in other words, a single point (out of 64) is used to calculate gradients and update parameters. My question is: how do I modify this example to train the model with a batch size greater than one?
In fact, N is the batch size, so you just need to modify N; currently it is set to 64. That means every training batch contains 64 vectors of size/dim D_in.
I checked the link you posted; you can also take a look at the comments there, which have some explanation too :)
# -*- coding: utf-8 -*-
import numpy as np

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)

# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    # Compute and print loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
To include batch size in PyTorch basic examples, the easiest and cleanest way is to use PyTorch torch.utils.data.DataLoader and torch.utils.data.TensorDataset.
Dataset stores the samples and their corresponding labels, and DataLoader wraps an iterable around the Dataset to enable easy access to the samples.
DataLoader will take care of creating batches for you.
Building on your question, here is a complete code snippet where we iterate over a dataset of 10000 examples for 2 epochs with a batch size of 64:
import torch
from torch.utils.data import DataLoader, TensorDataset

# Create the dataset with N_SAMPLES samples
N_SAMPLES, D_in, H, D_out = 10000, 1000, 100, 10
x = torch.randn(N_SAMPLES, D_in)
y = torch.randn(N_SAMPLES, D_out)

# Define the batch size and the number of epochs
BATCH_SIZE = 64
N_EPOCHS = 2

# Use torch.utils.data to create a DataLoader
# that will take care of creating batches
dataset = TensorDataset(x, y)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

# Define model, loss and optimizer
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Get the dataset size for printing (it is equal to N_SAMPLES)
dataset_size = len(dataloader.dataset)

# Loop over epochs
for epoch in range(N_EPOCHS):
    print(f"Epoch {epoch + 1}\n-------------------------------")
    # Loop over batches in an epoch using DataLoader
    for id_batch, (x_batch, y_batch) in enumerate(dataloader):
        y_batch_pred = model(x_batch)
        loss = loss_fn(y_batch_pred, y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Every 100 batches, print the loss for this batch
        # as well as the number of examples processed so far
        if id_batch % 100 == 0:
            loss, current = loss.item(), (id_batch + 1) * len(x_batch)
            print(f"loss: {loss:>7f} [{current:>5d}/{dataset_size:>5d}]")
The output should be something like:
Epoch 1
-------------------------------
loss: 643.433716 [ 64/10000]
loss: 648.195435 [ 6464/10000]
Epoch 2
-------------------------------
loss: 613.619873 [ 64/10000]
loss: 625.018555 [ 6464/10000]

Dynamically changing weights in TensorFlow

In TensorFlow, I'm trying to change weights during training, but I get no change in the results. I've tried to disrupt the weights (set them to zero), but it seems to do nothing (other than taking longer to complete). What am I missing? Is there a way to manipulate W like a regular matrix/tensor during a session?
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf

sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784, 10]), trainable=True)
W2 = tf.Variable(tf.zeros([784, 10]), trainable=False)
b = tf.Variable(tf.zeros([10]))
sess.run(tf.initialize_all_variables())
y = tf.nn.softmax(tf.matmul(x, W) + b)
loss = tf.reduce_mean(tf.square(y_ - y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

for i in range(1000):
    # try to change W during training
    W = W2
    W = tf.Variable(tf.zeros([784, 10]))
    W.assign(tf.Variable(tf.zeros([784, 10])))
    batch = mnist.train.next_batch(1)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Accuracy remains the same (0.82).
I am not sure it's a good idea, but if you want the W.assign(...) to actually take effect, you need to run that operation in the session:
sess.run(W.assign(tf.zeros([784, 10])))
In addition, since TensorFlow and most neural nets use forward/backpropagation to compute the values/gradients that update the weights, initializing the weights with 0 kills all forward values and thus the gradients. It's not a good idea.
You can try to initialize them with small random numbers:
tf.Variable(tf.random_normal([784, 10], stddev=0.01))
Or use the Xavier initializer:
W = tf.get_variable("W", shape=[784, 10],
                    initializer=tf.contrib.layers.xavier_initializer())
When you use tf.assign(), you need to keep a reference to that operation:
W = W.assign(tf.Variable(tf.zeros([784, 10])))
Then when you use W again, the assign operation will be executed.
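As a concrete sketch of how this plays out in the question's training loop (names as in the question): build the assign op once and explicitly run it each iteration, instead of rebinding the Python name W to a brand-new tf.Variable.
# Build the assign op once, outside the loop
zero_W = W.assign(tf.zeros([784, 10]))

for i in range(1000):
    batch = mnist.train.next_batch(1)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    # Actually execute the assignment; merely creating the op changes nothing
    sess.run(zero_W)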
