Tensorflow BiRNN dynamic dimension size for input data - machine-learning

I have an issue with constructing input data for a BiRNN network.
I'm creating a License Plate detection system like the one described in: https://arxiv.org/pdf/1601.05610v1.pdf
I have got to the "4.2.3 Sequence Labelling" part, where I need to train a BiRNN with a dataset of shape (total_count_of_images, None, 256); None because it's the length of the image, which is different for every picture in the data set.
Let's say I have 3000 images. Then the shape would look like:
train.shape : (3000,) but really it is (3000, None, 256) !?
So I took the example code from
https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb
I'm struggling even with starting to train my RNN. I don't understand how I need to construct the input data/model, input placeholders, variables, etc. to achieve any training process.
As far as I know, everything should work. My code:
reset_graph()
'''
Dataset : (10000, 784)
Labels : (10000, 10)
To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''
# Parameters
learning_rate = 0.001
training_iters = 100 # 100000
display_step = 10
batch_size = 40
# Network Parameters
n_input = 256 # data input size (256-D)
n_steps = 256 # timesteps
n_hidden = 200 # hidden layer num of features
n_classes = 36 # total classes (0-9 digits and a-z letters)
# tf Graph input
x = tf.placeholder("float", [batch_size, None , n_input], name='input_placeholder')
y = tf.placeholder("float", [batch_size, None, n_classes], name='labels_placeholder')
# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    print('Input x', x.get_shape().as_list())
    print('weights[\'out\']', weights['out'].get_shape().as_list())
    print('biases[\'out\']', biases['out'].get_shape().as_list())

    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    #x = tf.transpose(x, [1, 0, 2])
    #print('Transposed x', x.get_shape().as_list())
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    print('Reshaped x', x.get_shape().as_list())
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    print(len(x), 'of [ ', x[0], ' ] kinds')

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    # Backward direction cell
    lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)

    # Get lstm cell output
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
    print(len(outputs), 'of [ ', outputs[0], ' ] kinds')

    # Linear activation, using rnn inner loop last output
    ret = tf.matmul(outputs[-1], weights['out']) + biases['out']
    print('ret', ret.get_shape().as_list())
    return ret
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
OUTPUT :
Input x [40, None, 256]
weights['out'] [400, 36]
biases['out'] [36]
Reshaped x [None, 256]
256 of [ Tensor("split:0", shape=(?, 256), dtype=float32) ] kinds
256 of [ Tensor("concat:0", shape=(?, 400), dtype=float32) ] kinds
ret [None, 36]
Everything looks just right up to there.
Problems start at the session part:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    batch_data = batch_gen(batch_size)
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = next(batch_data)
        print(batch_x.shape)
        print(batch_y.shape)
        #m[:,0, None, None].shape
        # Run optimization op (backprop)
        print('Optimizer')
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            print('Display')
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
There I got the following error:
(40,)
(40,)
Optimizer
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-96-a53814db8181> in <module>()
14 #Run optimization op (backprop)
15 print('Optimizer')
---> 16 sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
17
18 if step % display_step == 0:
/home/nauris/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
715 try:
716 result = self._run(None, fetches, feed_dict, options_ptr,
--> 717 run_metadata_ptr)
718 if run_metadata:
719 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/nauris/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
886 ' to a larger type (e.g. int64).')
887
--> 888 np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
889
890 if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
/home/nauris/anaconda3/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
480
481 """
--> 482 return array(a, dtype, copy=False, order=order)
483
484 def asanyarray(a, dtype=None, order=None):
ValueError: setting an array element with a sequence.
Any help would be highly appreciated. Thanks everyone in advance.
Realized that the error occurs because you cannot feed a numpy ndarray with inconsistent dimensions, such as (3000, None, 256) in my case. Haven't found any solution yet.
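One standard workaround (not specific to the paper) is to pad every sequence in a batch up to the longest one and pass the true lengths to the RNN so the padded steps are ignored; newer TensorFlow versions expose this via the sequence_length argument of tf.nn.bidirectional_dynamic_rnn. A minimal sketch, assuming each image is a (seq_len_i, 256) float array:
import numpy as np
import tensorflow as tf

def pad_batch(batch):
    # batch: list of arrays, each of shape (seq_len_i, 256)
    lengths = np.array([seq.shape[0] for seq in batch], dtype=np.int32)
    padded = np.zeros((len(batch), lengths.max(), 256), dtype=np.float32)
    for i, seq in enumerate(batch):
        padded[i, :seq.shape[0], :] = seq
    return padded, lengths

n_hidden = 200
x = tf.placeholder(tf.float32, [None, None, 256], name='input_placeholder')
seq_len = tf.placeholder(tf.int32, [None], name='length_placeholder')
lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, state_is_tuple=True)
lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, state_is_tuple=True)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                             sequence_length=seq_len,
                                             dtype=tf.float32)
The padded feed then has a consistent dense shape of (batch_size, max_len, 256), which numpy can represent.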

Related

t() expects a tensor with <= 2 dimensions, but self is 3D

I'm new to PyTorch and wrote a simple piece of code, as follows, to classify some inputs. The model input is 8*2 with a batch size of 2, and the input layer in the model has 2 nodes. I don't know what is wrong!
X1 = np.array([[2,1],[3,2],[-4,-1],[-1,-3],[2,-1],[3,-3],[-2,1],[-4,-2]])
Y1 = np.array([0,0,0,0,1,1,1,1])
X = torch.tensor(X1)
Y = torch.tensor(Y1)
BATCH_SIZE = 2
trainset = torch.utils.data.TensorDataset(X, Y)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                          shuffle=True, num_workers=1)
from torch.nn.modules import flatten
learning_rate = 0.01
num_epochs = 20
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = MyModel()
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
## compute accuracy
def get_accuracy(logit, target, batch_size):
    ''' Obtain accuracy for training round '''
    corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
    accuracy = 100.0 * corrects / batch_size
    return accuracy.item()
model = MyModel()
# Commented out IPython magic to ensure Python compatibility.
for epoch in range(num_epochs):
    train_running_loss = 0.0
    train_acc = 0.0

    ## training step
    for inputs, labels in trainloader:
        #inputs = torch.flatten(inputs)
        inputs, labels = inputs.to(device), labels.to(device)
        #inputs = inputs.to(device)
        #labels = labels.to(device)
        optimizer.zero_grad()

        ## forward + backprop + loss
        print(inputs)
        outputs = model.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()

        ## update model params
        optimizer.step()

        train_running_loss += loss.detach().item()
        train_acc += get_accuracy(outputs, labels, BATCH_SIZE)

    #model.train()
    model.eval()
    print('Epoch: %d | Loss: %.4f | Train Accuracy: %.2f' % (epoch, train_running_loss / i, train_acc / i))
And my model is as below:
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d1 = nn.Linear(2, 3)
        self.d2 = nn.Linear(3, 1)
        self.init_weights()

    def init_weights(self):
        k1 = torch.tensor([0.1, -0.72, 0.94, -0.29, 0.12, 0.44])
        k1 = torch.unsqueeze(torch.unsqueeze(k1, 0), 0)
        self.d1.weight.data = k1
        k2 = torch.tensor([1, -1.16, -0.26])
        k2 = torch.unsqueeze(torch.unsqueeze(k2, 0), 0)
        self.d2.weight.data = k2

    def forward(self, x):
        x = self.d1(x)
        x = F.tanh(x)
        x = self.d2(x)
        out = F.sigmoid(x)
        return out
Then I got an error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-27-196d819d3ccd> in <module>
101 print(inputs)
102
--> 103 outputs = model.forward(inputs)
104 loss = criterion(outputs, labels)
105
2 frames
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py in forward(self, input)
112
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
115
116 def extra_repr(self) -> str:
RuntimeError: t() expects a tensor with <= 2 dimensions, but self is 3D
I flattened the input but nothing changed. What should I do to fix it?
First of all, you don't need to invoke your model's forward pass by model.forward(x); calling model(x) is the standard way.
Second of all, what exactly are you trying to achieve via the init_weights method? You're unsqueezing k1 and k2 twice, giving them a shape of (1, 1, x), which is 3D; that is exactly what the error is telling you. torch.nn.Linear performs a matrix multiplication with a 2D weight matrix, so you can't use a 3D one. torch.nn.Linear already initializes its weights via Kaiming initialization [1], so I'm not sure what you're trying to achieve here.
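For reference, nn.Linear stores its weight as a 2D (out_features, in_features) matrix, which is easy to check directly:
import torch.nn as nn

print(nn.Linear(2, 3).weight.shape)  # torch.Size([3, 2])
print(nn.Linear(3, 1).weight.shape)  # torch.Size([1, 3])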
Changing the init_weights method to:
def init_weights(self):
    k1 = torch.tensor([0.1, -0.72, 0.94, -0.29, 0.12, 0.44])
    k1 = k1.reshape(self.d1.weight.shape)
    self.d1.weight.data = k1
    k2 = torch.tensor([1, -1.16, -0.26])
    k2 = k2.reshape(self.d2.weight.shape)
    self.d2.weight.data = k2
and changing the type of inputs from Long to Float (i.e., model(inputs.float())) should solve your problem.
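As a quick sanity check of the fix (forward pass only, reusing X1 from the question):
model = MyModel()
x = torch.tensor(X1).float()  # cast Long -> Float before the Linear layers
out = model(x)
print(out.shape)  # torch.Size([8, 1])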
References
[1] https://github.com/pytorch/pytorch/blob/0dceaf07cd1236859953b6f85a61dc4411d10f87/torch/nn/modules/linear.py#L103

Feeding Multiple Inputs to LSTM for Time-Series Forecasting using PyTorch

I'm currently working on building an LSTM network to forecast time-series data using PyTorch. Following Roman's blog post, I implemented a simple LSTM for univariate time-series data; please see the class definitions below. However, I ground to a halt a few days ago on adding more features to the input data, say the hour of the day, day of the week, week of the year, and the like.
class Model(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.lstm = nn.LSTMCell(self.input_size, self.hidden_size)
        self.linear = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, future=0, y=None):
        outputs = []

        # reset the state of LSTM
        # the state is kept till the end of the sequence
        h_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32)
        c_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm(input_t, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]

        for i in range(future):
            if y is not None and random.random() > 0.5:
                output = y[:, [i]]  # teacher forcing
            h_t, c_t = self.lstm(output, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]

        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
class Optimization:
    "A helper class to train, test and diagnose the LSTM"

    def __init__(self, model, loss_fn, optimizer, scheduler):
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_losses = []
        self.val_losses = []
        self.futures = []

    @staticmethod
    def generate_batch_data(x, y, batch_size):
        for batch, i in enumerate(range(0, len(x) - batch_size, batch_size)):
            x_batch = x[i : i + batch_size]
            y_batch = y[i : i + batch_size]
            yield x_batch, y_batch, batch

    def train(
        self,
        x_train,
        y_train,
        x_val=None,
        y_val=None,
        batch_size=100,
        n_epochs=20,
        dropout=0.2,
        do_teacher_forcing=None,
    ):
        seq_len = x_train.shape[1]
        for epoch in range(n_epochs):
            start_time = time.time()
            self.futures = []

            train_loss = 0
            for x_batch, y_batch, batch in self.generate_batch_data(x_train, y_train, batch_size):
                y_pred = self._predict(x_batch, y_batch, seq_len, do_teacher_forcing)
                self.optimizer.zero_grad()
                loss = self.loss_fn(y_pred, y_batch)
                loss.backward()
                self.optimizer.step()
                train_loss += loss.item()
            self.scheduler.step()
            train_loss /= batch
            self.train_losses.append(train_loss)

            self._validation(x_val, y_val, batch_size)

            elapsed = time.time() - start_time
            print(
                "Epoch %d Train loss: %.2f. Validation loss: %.2f. Avg future: %.2f. Elapsed time: %.2fs."
                % (epoch + 1, train_loss, self.val_losses[-1], np.average(self.futures), elapsed)
            )

    def _predict(self, x_batch, y_batch, seq_len, do_teacher_forcing):
        if do_teacher_forcing:
            future = random.randint(1, int(seq_len) / 2)
            limit = x_batch.size(1) - future
            y_pred = self.model(x_batch[:, :limit], future=future, y=y_batch[:, limit:])
        else:
            future = 0
            y_pred = self.model(x_batch)
        self.futures.append(future)
        return y_pred

    def _validation(self, x_val, y_val, batch_size):
        if x_val is None or y_val is None:
            return
        with torch.no_grad():
            val_loss = 0
            batch = 1
            for x_batch, y_batch, batch in self.generate_batch_data(x_val, y_val, batch_size):
                y_pred = self.model(x_batch)
                loss = self.loss_fn(y_pred, y_batch)
                val_loss += loss.item()
            val_loss /= batch
            self.val_losses.append(val_loss)

    def evaluate(self, x_test, y_test, batch_size, future=1):
        with torch.no_grad():
            test_loss = 0
            actual, predicted = [], []
            for x_batch, y_batch, batch in self.generate_batch_data(x_test, y_test, batch_size):
                y_pred = self.model(x_batch, future=future)
                y_pred = (
                    y_pred[:, -len(y_batch):] if y_pred.shape[1] > y_batch.shape[1] else y_pred
                )
                loss = self.loss_fn(y_pred, y_batch)
                test_loss += loss.item()
                actual += torch.squeeze(y_batch[:, -1]).data.cpu().numpy().tolist()
                predicted += torch.squeeze(y_pred[:, -1]).data.cpu().numpy().tolist()
            test_loss /= batch
            return actual, predicted, test_loss

    def plot_losses(self):
        plt.plot(self.train_losses, label="Training loss")
        plt.plot(self.val_losses, label="Validation loss")
        plt.legend()
        plt.title("Losses")
Below you can find some of the helper functions that help me split and format the data before feeding it to my LSTM network.
def to_dataframe(actual, predicted):
    return pd.DataFrame({"value": actual, "prediction": predicted})

def inverse_transform(scaler, df, columns):
    for col in columns:
        df[col] = scaler.inverse_transform(df[col])
    return df

def split_sequences(sequences, n_steps):
    X, y = list(), list()
    for i in range(len(sequences)):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the dataset
        if end_ix > len(sequences):
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)

def train_val_test_split_new(df, test_ratio=0.2, seq_len=100):
    y = df['value']
    X = df.drop(columns=['value'])
    train_ratio = 1 - test_ratio
    val_ratio = 1 - ((train_ratio - test_ratio) / train_ratio)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_ratio)
    return X_train, y_train, X_val, y_val, X_test, y_test
I use the following data frames to train my model.
# df_train
value weekday monthday hour
timestamp
2014-07-01 00:00:00 10844 1 1 0
2014-07-01 00:30:00 8127 1 1 0
2014-07-01 01:00:00 6210 1 1 1
2014-07-01 01:30:00 4656 1 1 1
2014-07-01 02:00:00 3820 1 1 2
... ... ... ... ...
2015-01-31 21:30:00 24670 5 31 21
2015-01-31 22:00:00 25721 5 31 22
2015-01-31 22:30:00 27309 5 31 22
2015-01-31 23:00:00 26591 5 31 23
2015-01-31 23:30:00 26288 5 31 23
10320 rows × 4 columns
# x_train
weekday monthday hour
timestamp
2014-08-26 16:30:00 1 26 16
2014-08-18 16:30:00 0 18 16
2014-10-22 20:00:00 2 22 20
2014-12-10 08:00:00 2 10 8
2014-07-27 22:00:00 6 27 22
... ... ... ...
2014-08-24 05:30:00 6 24 5
2014-11-24 12:00:00 0 24 12
2014-12-18 06:00:00 3 18 6
2014-07-27 17:00:00 6 27 17
2014-12-05 21:00:00 4 5 21
6192 rows × 3 columns
# y_train
timestamp
2014-08-26 16:30:00 14083
2014-08-18 16:30:00 14465
2014-10-22 20:00:00 25195
2014-12-10 08:00:00 21348
2014-07-27 22:00:00 16356
...
2014-08-24 05:30:00 2948
2014-11-24 12:00:00 16292
2014-12-18 06:00:00 7029
2014-07-27 17:00:00 18883
2014-12-05 21:00:00 26284
Name: value, Length: 6192, dtype: int64
After transforming and splitting the time-series data into smaller batches, the training data sets for X and y become as follows:
X_data shape is (6093, 100, 3)
y_data shape is (6093,)
tensor([[[-1.0097, 1.1510, 0.6508],
[-1.5126, 0.2492, 0.6508],
[-0.5069, 0.7001, 1.2238],
...,
[ 1.5044, -1.4417, -1.6413],
[ 1.0016, -0.0890, 0.7941],
[ 1.5044, -0.9908, -0.2087]],
[[-1.5126, 0.2492, 0.6508],
[-0.5069, 0.7001, 1.2238],
[-0.5069, -0.6526, -0.4952],
...,
[ 1.0016, -0.0890, 0.7941],
[ 1.5044, -0.9908, -0.2087],
[ 0.4988, 0.5874, 0.5076]],
[[-0.5069, 0.7001, 1.2238],
[-0.5069, -0.6526, -0.4952],
[ 1.5044, 1.2637, 1.5104],
...,
[ 1.5044, -0.9908, -0.2087],
[ 0.4988, 0.5874, 0.5076],
[ 0.4988, 0.5874, -0.6385]],
...,
[[ 1.0016, 0.9255, -1.2115],
[-1.0097, -0.9908, 1.0806],
[-0.0041, 0.8128, 0.3643],
...,
[ 1.5044, 0.9255, -0.9250],
[-1.5126, 0.9255, 0.0778],
[-0.0041, 0.2492, -0.7818]],
[[-1.0097, -0.9908, 1.0806],
[-0.0041, 0.8128, 0.3643],
[-0.5069, 1.3765, -0.0655],
...,
[-1.5126, 0.9255, 0.0778],
[-0.0041, 0.2492, -0.7818],
[ 1.5044, 1.2637, 0.7941]],
[[-0.0041, 0.8128, 0.3643],
[-0.5069, 1.3765, -0.0655],
[-0.0041, -1.6672, -0.4952],
...,
[-0.0041, 0.2492, -0.7818],
[ 1.5044, 1.2637, 0.7941],
[ 0.4988, -1.2163, 1.3671]]])
tensor([ 0.4424, 0.1169, 0.0148, ..., -1.1653, 0.5394, 1.6037])
Finally, just to check if the dimensions of all these training, validation, and test datasets are correct, I print out their shapes.
train shape is: torch.Size([6093, 100, 3])
train label shape is: torch.Size([6093])
val shape is: torch.Size([1965, 100, 3])
val label shape is: torch.Size([1965])
test shape is: torch.Size([1965, 100, 3])
test label shape is: torch.Size([1965])
When I try to build the model as follows, I end up getting a RuntimeError pointing at inconsistent input sizes.
model_params = {'train_ratio': 0.8,
                'validation_ratio': 0.2,
                'sequence_length': 100,
                'teacher_forcing': False,
                'dropout_rate': 0.2,
                'batch_size': 100,
                'num_of_epochs': 5,
                'hidden_size': 24,
                'n_features': 3,
                'learning_rate': 1e-3
                }

train_ratio = model_params['train_ratio']
val_ratio = model_params['validation_ratio']
seq_len = model_params['sequence_length']
teacher_forcing = model_params['teacher_forcing']
dropout_rate = model_params['dropout_rate']
batch_size = model_params['batch_size']
n_epochs = model_params['num_of_epochs']
hidden_size = model_params['hidden_size']
n_features = model_params['n_features']
lr = model_params['learning_rate']

model = Model(input_size=n_features, hidden_size=hidden_size, output_size=1)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
optimization = Optimization(model, loss_fn, optimizer, scheduler)

start_time = datetime.now()
optimization.train(x_train, y_train, x_val, y_val,
                   batch_size=batch_size,
                   n_epochs=n_epochs,
                   dropout=dropout_rate,
                   do_teacher_forcing=teacher_forcing)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-192-6fc406c0113d> in <module>
6
7 start_time = datetime.now()
----> 8 optimization.train(x_train, y_train, x_val, y_val,
9 batch_size=batch_size,
10 n_epochs=n_epochs,
<ipython-input-189-c18d20430910> in train(self, x_train, y_train, x_val, y_val, batch_size, n_epochs, dropout, do_teacher_forcing)
68 train_loss = 0
69 for x_batch, y_batch, batch in self.generate_batch_data(x_train, y_train, batch_size):
---> 70 y_pred = self._predict(x_batch, y_batch, seq_len, do_teacher_forcing)
71 self.optimizer.zero_grad()
72 loss = self.loss_fn(y_pred, y_batch)
<ipython-input-189-c18d20430910> in _predict(self, x_batch, y_batch, seq_len, do_teacher_forcing)
93 else:
94 future = 0
---> 95 y_pred = self.model(x_batch)
96 self.futures.append(future)
97 return y_pred
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-189-c18d20430910> in forward(self, input, future, y)
17
18 for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
---> 19 h_t, c_t = self.lstm(input_t, (h_t, c_t))
20 output = self.linear(h_t)
21 outputs += [output]
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx)
963
964 def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
--> 965 self.check_forward_input(input)
966 if hx is None:
967 zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
~\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py in check_forward_input(self, input)
789 def check_forward_input(self, input: Tensor) -> None:
790 if input.size(1) != self.input_size:
--> 791 raise RuntimeError(
792 "input has inconsistent input_size: got {}, expected {}".format(
793 input.size(1), self.input_size))
RuntimeError: input has inconsistent input_size: got 1, expected 3
I suspect my current LSTM model class does not support data with multiple features, and I've been trying out different approaches lately, with no luck so far. Feel free to share your thoughts or point me in a direction that could help me solve this problem.
As suggested by @stackoverflowuser2010, I printed out the shapes of the tensors input_t, h_t, and c_t that are fed into the forward step before the error is thrown:
input_t
torch.Size([100, 1, 3])
h_t
torch.Size([100, 24])
c_t
torch.Size([100, 24])
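Those shapes point at the immediate cause: nn.LSTMCell expects input of shape (batch, input_size), but input.chunk(input.size(1), dim=1) yields slices of shape (batch, 1, input_size), so input_t.size(1) is 1 rather than 3, exactly as the error message says. A minimal sketch of one way to make the shapes line up inside forward (squeezing out the singleton time dimension before the cell):
for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
    input_t = input_t.squeeze(1)  # (batch, 1, n_features) -> (batch, n_features)
    h_t, c_t = self.lstm(input_t, (h_t, c_t))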
After muddling through for a couple of weeks, I solved the issue. This has been a fruitful journey for me, so I'd like to share what I have discovered. If you'd like to have a look at the complete walk-through with code, please check out my Medium post on the matter.
Just as in Pandas, I found that things tend to work faster and smoother when I stick to the PyTorch way. Both libraries rely on NumPy, and I'm sure one can do pretty much all the table and matrix operations explicitly with NumPy arrays and functions. However, doing so eliminates all the nice abstractions and performance improvements these libraries provide and turns each step into a CS exercise. It's fun until it isn't.
Rather than shaping all the training and validation sets manually to pass them to the model, PyTorch's TensorDataset and DataLoader classes have helped me immensely. After scaling the feature and target sets for training and validation, we have NumPy arrays. We can transform these arrays into Tensors and use them to create a TensorDataset, or a custom Dataset, depending on your requirements. Finally, DataLoaders let us iterate over such datasets with much less hassle than otherwise, since they already provide built-in options for batching, shuffling, and dropping the last batch.
train_features = torch.Tensor(X_train_arr)
train_targets = torch.Tensor(y_train_arr)
val_features = torch.Tensor(X_val_arr)
val_targets = torch.Tensor(y_val_arr)
train = TensorDataset(train_features, train_targets)
train_loader = DataLoader(train, batch_size=64, shuffle=False, drop_last=True)
val = TensorDataset(val_features, val_targets)
val_loader = DataLoader(val, batch_size=64, shuffle=False, drop_last=True)
After transforming our data into iterable datasets, we can use them for mini-batch training. Instead of explicitly defining batches or wrestling with matrix operations, we can easily iterate over them via DataLoaders, as follows.
model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.Adam(model.parameters(), lr=1e-2)
train_losses = []
val_losses = []
train_step = make_train_step(model, criterion, optimizer)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
for epoch in range(n_epochs):
    batch_losses = []
    for x_batch, y_batch in train_loader:
        x_batch = x_batch.view([batch_size, -1, n_features]).to(device)
        y_batch = y_batch.to(device)
        loss = train_step(x_batch, y_batch)
        batch_losses.append(loss)
    training_loss = np.mean(batch_losses)
    train_losses.append(training_loss)

    with torch.no_grad():
        batch_val_losses = []
        for x_val, y_val in val_loader:
            x_val = x_val.view([batch_size, -1, n_features]).to(device)
            y_val = y_val.to(device)
            model.eval()
            yhat = model(x_val)
            val_loss = criterion(y_val, yhat).item()
            batch_val_losses.append(val_loss)
        validation_loss = np.mean(batch_val_losses)
        val_losses.append(validation_loss)

    print(f"[{epoch+1}] Training loss: {training_loss:.4f}\t Validation loss: {validation_loss:.4f}")
Another cool feature that PyTorch provides is the view() function, which allows fast, memory-efficient reshaping of tensors. Since I earlier defined my LSTM model with batch_first = True, the batch tensor for the feature set must have the shape (batch size, time steps, number of features). The line x_batch = x_batch.view([batch_size, -1, n_features]).to(device) in the code above does just that.
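LSTMModel and make_train_step come from my code and are not shown above; purely as an illustration, a minimal sketch of what they might look like (a batch_first nn.LSTM plus a linear head, and a closure that runs one optimization step), rather than my exact implementation:
import torch
import torch.nn as nn

class LSTMModel(nn.Module):
    # Illustrative sketch: stacked batch_first LSTM followed by a linear head.
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        out, _ = self.lstm(x)          # out: (batch, seq_len, hidden_dim)
        return self.fc(out[:, -1, :])  # predict from the last time step

def make_train_step(model, criterion, optimizer):
    # Returns a closure that performs one optimization step and reports the loss.
    def train_step(x, y):
        model.train()
        optimizer.zero_grad()
        yhat = model(x)
        loss = criterion(y.view_as(yhat), yhat)  # align target shape with prediction
        loss.backward()
        optimizer.step()
        return loss.item()
    return train_step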
I hope this answer helps those dealing with similar problems, or at least gives an idea of which direction to take. I changed a lot in the code shared in the original post, but I won't put it all here for the sake of simplicity. Feel free to check out the rest of it in my other SO post here.

Facing this error while classifying images (10 classes) in PyTorch, using ResNet50. My code is:

This is the code I am implementing: I am using a subset of the Caltech256 dataset to classify images of 10 different kinds of animals. We will go over the dataset preparation, data augmentation, and then the steps to build the classifier.
def train_and_validate(model, loss_criterion, optimizer, epochs=25):
    '''
    Function to train and validate
    Parameters
        :param model: Model to train and validate
        :param loss_criterion: Loss Criterion to minimize
        :param optimizer: Optimizer for computing gradients
        :param epochs: Number of epochs (default=25)
    Returns
        model: Trained Model with best validation accuracy
        history: (dict object): Having training loss, accuracy and validation loss, accuracy
    '''
    start = time.time()
    history = []
    best_acc = 0.0

    for epoch in range(epochs):
        epoch_start = time.time()
        print("Epoch: {}/{}".format(epoch+1, epochs))

        # Set to training mode
        model.train()

        # Loss and Accuracy within the epoch
        train_loss = 0.0
        train_acc = 0.0
        valid_loss = 0.0
        valid_acc = 0.0

        for i, (inputs, labels) in enumerate(train_data_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Clean existing gradients
            optimizer.zero_grad()

            # Forward pass - compute outputs on input data using the model
            outputs = model(inputs)

            # Compute loss
            loss = loss_criterion(outputs, labels)

            # Backpropagate the gradients
            loss.backward()

            # Update the parameters
            optimizer.step()

            # Compute the total loss for the batch and add it to train_loss
            train_loss += loss.item() * inputs.size(0)

            # Compute the accuracy
            ret, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))

            # Convert correct_counts to float and then compute the mean
            acc = torch.mean(correct_counts.type(torch.FloatTensor))

            # Compute total accuracy in the whole batch and add to train_acc
            train_acc += acc.item() * inputs.size(0)

            #print("Batch number: {:03d}, Training: Loss: {:.4f}, Accuracy: {:.4f}".format(i, loss.item(), acc.item()))

        # Validation - No gradient tracking needed
        with torch.no_grad():
            # Set to evaluation mode
            model.eval()

            # Validation loop
            for j, (inputs, labels) in enumerate(valid_data_loader):
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Forward pass - compute outputs on input data using the model
                outputs = model(inputs)

                # Compute loss
                loss = loss_criterion(outputs, labels)

                # Compute the total loss for the batch and add it to valid_loss
                valid_loss += loss.item() * inputs.size(0)

                # Calculate validation accuracy
                ret, predictions = torch.max(outputs.data, 1)
                correct_counts = predictions.eq(labels.data.view_as(predictions))

                # Convert correct_counts to float and then compute the mean
                acc = torch.mean(correct_counts.type(torch.FloatTensor))

                # Compute total accuracy in the whole batch and add to valid_acc
                valid_acc += acc.item() * inputs.size(0)

                #print("Validation Batch number: {:03d}, Validation: Loss: {:.4f}, Accuracy: {:.4f}".format(j, loss.item(), acc.item()))

        # Find average training loss and training accuracy
        avg_train_loss = train_loss/train_data_size
        avg_train_acc = train_acc/train_data_size

        # Find average validation loss and validation accuracy
        avg_valid_loss = valid_loss/valid_data_size
        avg_valid_acc = valid_acc/valid_data_size

        history.append([avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc])

        epoch_end = time.time()
        print("Epoch : {:03d}, Training: Loss: {:.4f}, Accuracy: {:.4f}%, \n\t\tValidation : Loss : {:.4f}, Accuracy: {:.4f}%, Time: {:.4f}s".format(epoch, avg_train_loss, avg_train_acc*100, avg_valid_loss, avg_valid_acc*100, epoch_end-epoch_start))

        # Save if the model has best accuracy till now
        torch.save(model, dataset+'_model_'+str(epoch)+'.pt')

    return model, history
# Load pretrained ResNet50 Model
resnet50 = models.resnet50(pretrained=True)
#resnet50 = resnet50.to('cuda:0')

# Freeze model parameters
for param in resnet50.parameters():
    param.requires_grad = False

# Change the final layer of ResNet50 Model for Transfer Learning
fc_inputs = resnet50.fc.in_features
resnet50.fc = nn.Sequential(
    nn.Linear(fc_inputs, 256),
    nn.ReLU(),
    nn.Dropout(0.4),
    nn.Linear(256, num_classes),  # Since 10 possible outputs
    nn.LogSoftmax(dim=1)  # For using NLLLoss()
)

# Convert model to be used on GPU
# resnet50 = resnet50.to('cuda:0')
The error is this:
RuntimeError                              Traceback (most recent call last)
in <module>()
      6 # Train the model for 25 epochs
      7 num_epochs = 30
----> 8 trained_model, history = train_and_validate(resnet50, loss_func, optimizer, num_epochs)
      9
     10 torch.save(history, dataset+'_history.pt')

in train_and_validate(model, loss_criterion, optimizer, epochs)
     43
     44     # Compute loss
---> 45     loss = loss_criterion(outputs, labels)
     46
     47     # Backpropagate the gradients

~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

~\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
    202
    203     def forward(self, input, target):
--> 204         return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
    205
    206

~\Anaconda3\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
   1836                          .format(input.size(0), target.size(0)))
   1837     if dim == 2:
-> 1838         ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
   1839     elif dim == 4:
   1840         ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)

RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at C:\Users\builder\AppData\Local\Temp\pip-req-build-0i480kur\aten\src\THNN/generic/ClassNLLCriterion.c:97
This happens when there are either incorrect labels in your dataset, or the labels are 1-indexed (instead of 0-indexed). As the error message says, cur_target must be smaller than the total number of classes (10). To verify the issue, check the maximum and minimum label in your dataset. If the data is indeed 1-indexed, just subtract one from all annotations and you should be fine.
Note that another possible reason is that there are some -1 labels in the data. Some (especially older) datasets use -1 as an indication of a wrong/dubious label. If you find such labels, just discard them.
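A quick way to check this, assuming the train_data_loader from the question (the commented line is only needed if the labels do turn out to be 1-indexed):
import torch

# Gather all labels and inspect their range; valid targets lie in 0 .. num_classes-1
all_labels = torch.cat([labels for _, labels in train_data_loader])
print(all_labels.min().item(), all_labels.max().item())

# If the labels run from 1 to num_classes, shift them down where the dataset is built:
# labels = labels - 1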

How to include batch size in pytorch basic example?

I am new to PyTorch. The following is a basic example of using the nn module to train a simple one-layer model with some random data (from here):
import torch
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    print(t, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
As far as I understand, the batch size is equal to 1 in this example; in other words, a single point (out of 64) is used to calculate gradients and update parameters. My question is: how do I modify this example to train the model with a batch size greater than one?
In fact, N is the batch size, so you just need to modify N; currently it's set to 64. So in every training batch you have 64 vectors of size/dim D_in.
I checked the link you posted; you can also take a look at the comments, there is some explanation there too :)
# -*- coding: utf-8 -*-
import numpy as np

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)

# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    # Compute and print loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
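If you would rather not pull in DataLoader, a minimal sketch of the same idea is to slice the data into mini-batches yourself; BATCH_SIZE and the inner loop below are illustrative additions, while the model setup is the one from the question:
import torch

N, D_in, H, D_out = 64, 1000, 100, 10
BATCH_SIZE = 16

x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

for t in range(500):
    # Walk over the data in chunks of BATCH_SIZE and update after each chunk
    for start in range(0, N, BATCH_SIZE):
        x_batch = x[start:start + BATCH_SIZE]
        y_batch = y[start:start + BATCH_SIZE]
        y_pred = model(x_batch)
        loss = loss_fn(y_pred, y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()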
To include a batch size in basic PyTorch examples, the easiest and cleanest way is to use PyTorch's torch.utils.data.DataLoader and torch.utils.data.TensorDataset.
A Dataset stores the samples and their corresponding labels, and a DataLoader wraps an iterable around the Dataset to enable easy access to the samples.
DataLoader will take care of creating batches for you.
Building on your question, here is a complete code snippet where we iterate over a dataset of 10000 examples for 2 epochs with a batch size of 64:
import torch
from torch.utils.data import DataLoader, TensorDataset

# Create the dataset with N_SAMPLES samples
N_SAMPLES, D_in, H, D_out = 10000, 1000, 100, 10
x = torch.randn(N_SAMPLES, D_in)
y = torch.randn(N_SAMPLES, D_out)

# Define the batch size and the number of epochs
BATCH_SIZE = 64
N_EPOCHS = 2

# Use torch.utils.data to create a DataLoader
# that will take care of creating batches
dataset = TensorDataset(x, y)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

# Define model, loss and optimizer
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Get the dataset size for printing (it is equal to N_SAMPLES)
dataset_size = len(dataloader.dataset)

# Loop over epochs
for epoch in range(N_EPOCHS):
    print(f"Epoch {epoch + 1}\n-------------------------------")
    # Loop over batches in an epoch using DataLoader
    for id_batch, (x_batch, y_batch) in enumerate(dataloader):
        y_batch_pred = model(x_batch)
        loss = loss_fn(y_batch_pred, y_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Every 100 batches, print the loss for this batch
        # as well as the number of examples processed so far
        if id_batch % 100 == 0:
            loss, current = loss.item(), (id_batch + 1) * len(x_batch)
            print(f"loss: {loss:>7f}  [{current:>5d}/{dataset_size:>5d}]")
The output should be something like:
Epoch 1
-------------------------------
loss: 643.433716 [ 64/10000]
loss: 648.195435 [ 6464/10000]
Epoch 2
-------------------------------
loss: 613.619873 [ 64/10000]
loss: 625.018555 [ 6464/10000]

How does one do Inference with Batch Normalization with TensorFlow?

I was reading the original paper on BN and the Stack Overflow question on How could I use Batch Normalization in TensorFlow?, which provides a very useful piece of code to insert a batch normalization block into a neural network, but does not provide enough guidance on how to actually use it during training, inference, and when evaluating models.
For example, I would like to track the training error during training and the test error to make sure I don't overfit. It's clear that the batch normalization block should be off during test, but when evaluating the error on the training set, should the batch normalization block be turned off too? My main questions are:
During inference and error evaluation, should the batch normalization block be turned off regardless of the data set?
Does that mean that the batch normalization block should only be on during the training step then?
To make it very clear, I will provide an extract of (simplified) code I have been using to run batch normalization with TensorFlow, according to my understanding of what is the right thing to do:
## TRAIN
if phase_train is not None:
    #DO BN
    feed_dict_train = {x: X_train, y_: Y_train, phase_train: False}
    feed_dict_cv = {x: X_cv, y_: Y_cv, phase_train: False}
    feed_dict_test = {x: X_test, y_: Y_test, phase_train: False}
else:
    #Don't do BN
    feed_dict_train = {x: X_train, y_: Y_train}
    feed_dict_cv = {x: X_cv, y_: Y_cv}
    feed_dict_test = {x: X_test, y_: Y_test}

def get_batch_feed(X, Y, M, phase_train):
    mini_batch_indices = np.random.randint(M, size=M)
    Xminibatch = X[mini_batch_indices,:]  # ( M x D^(0) )
    Yminibatch = Y[mini_batch_indices,:]  # ( M x D^(L) )
    if phase_train is not None:
        #DO BN
        feed_dict = {x: Xminibatch, y_: Yminibatch, phase_train: True}
    else:
        #Don't do BN
        feed_dict = {x: Xminibatch, y_: Yminibatch}
    return feed_dict
with tf.Session() as sess:
    sess.run( tf.initialize_all_variables() )
    for iter_step in xrange(steps):
        feed_dict_batch = get_batch_feed(X_train, Y_train, M, phase_train)
        # Collect model statistics
        if iter_step % report_error_freq == 0:
            train_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_train)
            cv_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_cv)
            test_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_test)
            do_stuff_with_errors(train_error, cv_error, test_error)
        # Run Train Step
        sess.run(fetches=train_step, feed_dict=feed_dict_batch)
and the code I am using to produce batch normalization blocks is:
def standard_batch_norm(l, x, n_out, phase_train, scope='BN'):
    """
    Batch normalization on feedforward maps.
    Args:
        x: Vector
        n_out: integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
        scope: string, variable scope
    Return:
        normed: batch-normalized maps
    """
    with tf.variable_scope(scope+l):
        #beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=tf.float64 ), name='beta', trainable=True, dtype=tf.float64 )
        #gamma = tf.Variable(tf.constant(1.0, shape=[n_out],dtype=tf.float64 ), name='gamma', trainable=True, dtype=tf.float64 )
        init_beta = tf.constant(0.0, shape=[n_out], dtype=tf.float64)
        init_gamma = tf.constant(1.0, shape=[n_out], dtype=tf.float64)
        beta = tf.get_variable(name='beta'+l, dtype=tf.float64, initializer=init_beta, regularizer=None, trainable=True)
        gamma = tf.get_variable(name='gamma'+l, dtype=tf.float64, initializer=init_gamma, regularizer=None, trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
I found that there is an 'official' batch_norm layer in TensorFlow. Try it out:
https://github.com/tensorflow/tensorflow/blob/b826b79718e3e93148c3545e7aa3f90891744cc0/tensorflow/contrib/layers/python/layers/layers.py#L100
Most likely it is not mentioned in the docs since it is included in some RC or 'beta' versions only.
I haven't dug deep into this matter yet, but as far as I can see from the documentation, you just use the binary parameter is_training in this batch_norm layer, and set it to True only for the training phase. Try it out.
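For illustration, a minimal sketch of how that layer might be wired up with an is_training placeholder; the placeholder and layer sizes here are my own assumptions, not something prescribed by the docs:
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
is_training = tf.placeholder(tf.bool, name='is_training')

h = tf.contrib.layers.fully_connected(x, 100)
# is_training=True uses the batch statistics; False uses the learned moving averages.
# updates_collections=None forces the moving-average updates to run in place.
h_bn = tf.contrib.layers.batch_norm(h, center=True, scale=True,
                                    is_training=is_training,
                                    updates_collections=None)

# Feed is_training=True for training steps and False when measuring
# train/validation/test error:
# sess.run(train_step, feed_dict={..., is_training: True})
# sess.run(l2_loss, feed_dict={..., is_training: False})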
UPDATE: Below is the code to load data, build a network with one hidden ReLU layer and L2 regularization, and introduce batch normalization for both the hidden and output layers. This runs fine and trains fine.
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
pickle_file = '/home/maxkhk/Documents/Udacity/DeepLearningCourse/SourceCode/tensorflow/examples/udacity/notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Validation set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

image_size = 28
num_labels = 10

def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    return dataset, labels

train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)

def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
#for NeuralNetwork model code is below
#We will use SGD for training to save our time. Code is from Assignment 2
#beta is the new parameter - controls level of regularization.
#Feel free to play with it - the best one I found is 0.001
#notice, we introduce L2 for both biases and weights of all layers
batch_size = 128
beta = 0.001
#building tensorflow graph
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    #introduce batchnorm
    tf_train_dataset_bn = tf.contrib.layers.batch_norm(tf_train_dataset)

    #now let's build our new hidden layer
    #that's how many hidden neurons we want
    num_hidden_neurons = 1024

    #its weights
    hidden_weights = tf.Variable(
        tf.truncated_normal([image_size * image_size, num_hidden_neurons]))
    hidden_biases = tf.Variable(tf.zeros([num_hidden_neurons]))

    #now the layer itself. It multiplies data by weights, adds biases
    #and takes ReLU over result
    hidden_layer = tf.nn.relu(tf.matmul(tf_train_dataset_bn, hidden_weights) + hidden_biases)

    #adding the batch normalization layer
    hidden_layer_bn = tf.contrib.layers.batch_norm(hidden_layer)

    #time to go for output linear layer
    #out weights connect hidden neurons to output labels
    #biases are added to output labels
    out_weights = tf.Variable(
        tf.truncated_normal([num_hidden_neurons, num_labels]))
    out_biases = tf.Variable(tf.zeros([num_labels]))

    #compute output
    out_layer = tf.matmul(hidden_layer_bn, out_weights) + out_biases

    #our real output is a softmax of prior result
    #and we also compute its cross-entropy to get our loss
    #Notice - we introduce our L2 here
    loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        out_layer, tf_train_labels) +
        beta*tf.nn.l2_loss(hidden_weights) +
        beta*tf.nn.l2_loss(hidden_biases) +
        beta*tf.nn.l2_loss(out_weights) +
        beta*tf.nn.l2_loss(out_biases)))

    #now we just minimize this loss to actually train the network
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    #nice, now let's calculate the predictions on each dataset for evaluating the
    #performance so far
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(out_layer)
    valid_relu = tf.nn.relu(tf.matmul(tf_valid_dataset, hidden_weights) + hidden_biases)
    valid_prediction = tf.nn.softmax(tf.matmul(valid_relu, out_weights) + out_biases)
    test_relu = tf.nn.relu(tf.matmul(tf_test_dataset, hidden_weights) + hidden_biases)
    test_prediction = tf.nn.softmax(tf.matmul(test_relu, out_weights) + out_biases)
#now is the actual training on the ANN we built
#we will run it for some number of steps and evaluate the progress after
#every 500 steps
#number of steps we will train our ANN
num_steps = 3001
#actual training
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
