How do I do cross-validation with tflearn? - machine-learning

I want to do k-fold cross-validation with tflearn, so I need to reset the network k times. I think I have to reset the graph (e.g. with tf.reset_default_graph()), but I'm not sure, and I don't know how to do this with tflearn.
My code
For the following, you need hasy_tools.py
#!/usr/bin/env python

"""
Trains a simple convnet on the HASY dataset.
Gets to 76.78% test accuracy after 1 epoch.
573 seconds per epoch on a GeForce 940MX GPU.
# WARNING: THIS IS NOT WORKING RIGHT NOW
"""

import os

import hasy_tools as ht
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, fully_connected, dropout
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression

batch_size = 128
nb_epoch = 1

# input image dimensions
img_rows, img_cols = 32, 32

accuracies = []

for fold in range(1, 4):
    tf.reset_default_graph()

    # Load data
    dataset_path = os.path.join(os.path.expanduser("~"), 'hasy')
    hasy_data = ht.load_data(fold=fold,
                             normalize=True,
                             one_hot=True,
                             dataset_path=dataset_path,
                             flatten=False)
    train_x = hasy_data['train']['X'][:1000]
    train_y = hasy_data['train']['y'][:1000]
    test_x = hasy_data['test']['X']
    test_y = hasy_data['test']['y']

    # Define model
    network = input_data(shape=[None, img_rows, img_cols, 1], name='input')
    network = conv_2d(network, 32, 3, activation='prelu')
    network = conv_2d(network, 64, 3, activation='prelu')
    network = max_pool_2d(network, 2)
    network = dropout(network, keep_prob=0.25)
    network = fully_connected(network, 1024, activation='tanh')
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 369, activation='softmax')

    # Train model
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': train_x}, {'target': train_y}, n_epoch=nb_epoch,
              validation_set=({'input': test_x}, {'target': test_y}),
              snapshot_step=100, show_metric=True, run_id='convnet_mnist',
              batch_size=batch_size)

    # Serialize model
    model.save('cv-model-fold-%i.tflearn' % fold)

    # Evaluate model
    score = model.evaluate(test_x, test_y)
    print('Test accuracy: %0.4f%%' % (score[0] * 100))
    accuracies.append(score[0])

accuracies = np.array(accuracies)
print(("CV Accuracy: mean={mean:0.2f}%\t ({min:0.2f}% - {max:0.2f}%)"
       ).format(mean=accuracies.mean() * 100,
                min=accuracies.min() * 100,
                max=accuracies.max() * 100))
The error
Running the code with only one fold works fine, but with multiple folds I get:
I tensorflow/core/common_runtime/bfc_allocator.cc:678] Chunk at 0x5556c9100 of size 33554432
I tensorflow/core/common_runtime/bfc_allocator.cc:678] Chunk at 0x5576c9100 of size 66481920
I tensorflow/core/common_runtime/bfc_allocator.cc:687] Free at 0x506d7cc00 of size 256
I tensorflow/core/common_runtime/bfc_allocator.cc:687] Free at 0x506d7d100 of size 62720
I tensorflow/core/common_runtime/bfc_allocator.cc:687] Free at 0x507573700 of size 798208
I tensorflow/core/common_runtime/bfc_allocator.cc:693] Summary of in-use Chunks by size:
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 345 Chunks of size 256 totalling 86.2KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 1024 totalling 1.0KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 23 Chunks of size 1280 totalling 28.8KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 21 Chunks of size 1536 totalling 31.5KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 20 Chunks of size 4096 totalling 80.0KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 16 Chunks of size 73728 totalling 1.12MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 20 Chunks of size 131072 totalling 2.50MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 188928 totalling 184.5KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 17 Chunks of size 262144 totalling 4.25MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 2 Chunks of size 446464 totalling 872.0KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 515584 totalling 503.5KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 524288 totalling 512.0KiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 16 Chunks of size 1511424 totalling 23.06MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 6 Chunks of size 16777216 totalling 96.00MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 23026176 totalling 21.96MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 6 Chunks of size 33554432 totalling 192.00MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 66481920 totalling 63.40MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 15 Chunks of size 67108864 totalling 960.00MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:696] 1 Chunks of size 67183872 totalling 64.07MiB
I tensorflow/core/common_runtime/bfc_allocator.cc:700] Sum Total of in-use chunks: 1.40GiB
I tensorflow/core/common_runtime/bfc_allocator.cc:702] Stats:
Limit: 1500971008
InUse: 1500109824
MaxInUse: 1500109824
NumAllocs: 43767
MaxAllocSize: 844062464
W tensorflow/core/common_runtime/bfc_allocator.cc:274] **************************************************************************************************xx
W tensorflow/core/common_runtime/bfc_allocator.cc:275] Ran out of memory trying to allocate 8.00MiB. See logs for memory state.
W tensorflow/core/framework/op_kernel.cc:975] Resource exhausted: OOM when allocating tensor with shape[128,16,16,64]
--
Traceback (most recent call last):
File "tflearn_hasy_cv.py", line 60, in <module>
batch_size=batch_size)
File "/home/moose/GitHub/tflearn/tflearn/models/dnn.py", line 188, in fit
run_id=run_id)
File "/home/moose/GitHub/tflearn/tflearn/helpers/trainer.py", line 277, in fit
show_metric)
File "/home/moose/GitHub/tflearn/tflearn/helpers/trainer.py", line 684, in _train
feed_batch)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 766, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 964, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1014, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1034, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[128,16,16,64]
[[Node: MaxPool2D/MaxPool = MaxPool[T=DT_FLOAT, data_format="NHWC", ksize=[1, 2, 2, 1], padding="SAME", strides=[1, 2, 2, 1], _device="/job:localhost/replica:0/task:0/gpu:0"](Conv2D_1/PReLU/add)]]
[[Node: Crossentropy/Mean/_19 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_920_Crossentropy/Mean", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op u'MaxPool2D/MaxPool', defined at:
File "tflearn_hasy_cv.py", line 47, in <module>
network = max_pool_2d(network, 2)
File "/home/moose/GitHub/tflearn/tflearn/layers/conv.py", line 363, in max_pool_2d
inference = tf.nn.max_pool(incoming, kernel, strides, padding)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/nn_ops.py", line 1617, in max_pool
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 1598, in _max_pool
data_format=data_format, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2240, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1128, in __init__
self._traceback = _extract_stack()
ResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[128,16,16,64]
[[Node: MaxPool2D/MaxPool = MaxPool[T=DT_FLOAT, data_format="NHWC", ksize=[1, 2, 2, 1], padding="SAME", strides=[1, 2, 2, 1], _device="/job:localhost/replica:0/task:0/gpu:0"](Conv2D_1/PReLU/add)]]
[[Node: Crossentropy/Mean/_19 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_920_Crossentropy/Mean", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Edit: I get the same problem with Keras.

I put tf.reset_default_graph() in my code right before building the neural network, i.e. at the start of each iteration of the for loop.
You should also define your model outside the for loop, right before it. Only the training should happen inside the k-folds loop.
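A minimal sketch of the reset-per-fold pattern, assuming the TF 1.x API; build_network and get_fold_data are hypothetical stand-ins for the model definition and the hasy_tools loading shown above:

import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

def build_network():
    # stand-in for the full convnet above
    net = input_data(shape=[None, 32, 32, 1])
    net = fully_connected(net, 369, activation='softmax')
    return regression(net, optimizer='adam', loss='categorical_crossentropy')

accuracies = []
for fold in range(1, 4):
    tf.reset_default_graph()              # drop every op left over from the previous fold
    model = tflearn.DNN(build_network())  # fresh graph, fresh random weights
    train_x, train_y, test_x, test_y = get_fold_data(fold)  # hypothetical fold loader
    model.fit(train_x, train_y, n_epoch=1)
    accuracies.append(model.evaluate(test_x, test_y)[0])

Resetting at the top of the loop means every fold trains a freshly initialized network, which is what k-fold cross-validation requires.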

Related

Where is this dimension error getting generated in my ADAM algorithm?

I am working on main.py in this BRATS Unet
https://github.com/pykao/Modified-3D-UNet-Pytorch/blob/master/main.py
# create your optimizer
print("Creating Optimizer")
##optimizer = optim.adam(net.parameteres(), lr=)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
print("Created! \n")

trainloader = torch.utils.data.DataLoader(train_idx, batch_size=2, shuffle=True)
testloader = torch.utils.data.DataLoader(test_idx, batch_size=2, shuffle=False)

for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        print("inside for")
        # get the inputs THIS ERRORS OUT
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
I get this output:
Creating Optimizer
Created!
inside for
Traceback (most recent call last):
File "main.py", line 109, in <module>
outputs = model(inputs)
File "/home/MAHEUNIX/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/mnt/c/Users/MAHE/Modified Unet3D Master -TestRun/model.py", line 99, in forward
out = self.conv3d_c1_1(x)
File "/home/MAHEUNIX/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/home/MAHEUNIX/anaconda3/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 448, in forward
self.padding, self.dilation, self.groups)
RuntimeError: Expected 5-dimensional input for 5-dimensional weight [16, 4, 3, 3, 3], but got 0-dimensional input of size [] instead
I am unfamiliar with PyTorch, so trainloader and testloader are probably used incorrectly. Please assume I don't know much while you help me. Thanks.
New error:
Traceback (most recent call last):
File "/mnt/c/Users/MAHE/Modified Unet3D Master -TestRun/main.py", line 91, in <module>
for id, info in enumerate(trainloader,0):
File "/home/MAHEUNIX/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 615, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/MAHEUNIX/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 615, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
KeyError: 0
You should pass the dataset itself to the data loader API: DataLoader expects a Dataset, not a list of indices. So pass train_data and test_data instead of train_idx and test_idx to torch.utils.data.DataLoader.
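A minimal sketch of that fix, assuming full_dataset is a hypothetical name for the underlying Dataset object that train_idx and test_idx index into; torch.utils.data.Subset turns the index lists into Dataset views:

from torch.utils.data import DataLoader, Subset

# Build Dataset views from the index lists (full_dataset is the Dataset
# the indices refer to).
train_data = Subset(full_dataset, train_idx)
test_data = Subset(full_dataset, test_idx)

# The loaders now yield (inputs, labels) batches instead of raw indices,
# so `inputs, labels = data` in the training loop works as intended.
trainloader = DataLoader(train_data, batch_size=2, shuffle=True)
testloader = DataLoader(test_data, batch_size=2, shuffle=False)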

Incompatible shapes using an autoencoder

I'm trying to use an autoencoder on time series. When I pad the data everything works, but when I use variable data lengths I get small shape mismatches: Incompatible shapes: [1,125,4] vs. [1,126,4]
input_series = Input(shape=(None, 4))
x = Conv1D(4, 2, activation='relu', padding='same')(input_series)
x = MaxPooling1D(1, padding='same')(x)
x = Conv1D(4, 3, activation='relu', padding='same')(x)
x = MaxPooling1D(1, padding='same')(x)
x = Conv1D(4, 3, activation='relu', padding='same')(x)
encoder = MaxPooling1D(1, padding='same', name='encoder')(x)
x = Conv1D(4, 3, activation='relu', padding='same')(encoder)
x = UpSampling1D(1)(x)
x = Conv1D(4, 3, activation='relu', padding='same')(x)
x = UpSampling1D(1)(x)
x = Conv1D(16, 2, activation='relu')(x)
x = UpSampling1D(1)(x)
decoder = Conv1D(4, 2, activation='sigmoid', padding='same')(x)
autoencoder = Model(input_series, decoder)
autoencoder.compile(loss='mse', optimizer='adam')
autoencoder.summary()
Summary:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_25 (InputLayer) (None, None, 4) 0
_________________________________________________________________
conv1d_169 (Conv1D) (None, None, 4) 36
_________________________________________________________________
max_pooling1d_49 (MaxPooling (None, None, 4) 0
_________________________________________________________________
conv1d_170 (Conv1D) (None, None, 4) 52
_________________________________________________________________
max_pooling1d_50 (MaxPooling (None, None, 4) 0
_________________________________________________________________
conv1d_171 (Conv1D) (None, None, 4) 52
_________________________________________________________________
encoder (MaxPooling1D) (None, None, 4) 0
_________________________________________________________________
conv1d_172 (Conv1D) (None, None, 4) 52
_________________________________________________________________
up_sampling1d_73 (UpSampling (None, None, 4) 0
_________________________________________________________________
conv1d_173 (Conv1D) (None, None, 4) 52
_________________________________________________________________
up_sampling1d_74 (UpSampling (None, None, 4) 0
_________________________________________________________________
conv1d_174 (Conv1D) (None, None, 16) 144
_________________________________________________________________
up_sampling1d_75 (UpSampling (None, None, 16) 0
_________________________________________________________________
conv1d_175 (Conv1D) (None, None, 4) 132
=================================================================
Total params: 520
Trainable params: 520
Non-trainable params: 0
_________________________________________________________________
Error:
Epoch 1/50
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1321 try:
-> 1322 return fn(*args)
1323 except errors.OpError as e:
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1306 return self._call_tf_sessionrun(
-> 1307 options, feed_dict, fetch_list, target_list, run_metadata)
1308
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1408 self._session, options, feed_dict, fetch_list, target_list,
-> 1409 run_metadata)
1410 else:
InvalidArgumentError: Incompatible shapes: [1,125,4] vs. [1,126,4]
[[Node: loss_22/conv1d_175_loss/sub = Sub[T=DT_FLOAT, _class=["loc:#training_18/Adam/gradients/loss_22/conv1d_175_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](conv1d_175/Sigmoid, _arg_conv1d_175_target_0_1/_4489)]]
[[Node: loss_22/mul/_4613 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_1245_loss_22/mul", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-101-a6e405699326> in <module>()
6 train_generator(X_train),
7 epochs=50,
----> 8 steps_per_epoch=len(X_train))
9
10
C:\ProgramData\Anaconda3\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
2228 outs = self.train_on_batch(x, y,
2229 sample_weight=sample_weight,
-> 2230 class_weight=class_weight)
2231
2232 if not isinstance(outs, list):
C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
1881 ins = x + y + sample_weights
1882 self._make_train_function()
-> 1883 outputs = self.train_function(ins)
1884 if len(outputs) == 1:
1885 return outputs[0]
C:\ProgramData\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
2480 session = get_session()
2481 updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2482 **self.session_kwargs)
2483 return updated[:len(self.outputs)]
2484
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
898 try:
899 result = self._run(None, fetches, feed_dict, options_ptr,
--> 900 run_metadata_ptr)
901 if run_metadata:
902 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1133 if final_fetches or final_targets or (handle and feed_dict_tensor):
1134 results = self._do_run(handle, final_targets, final_fetches,
-> 1135 feed_dict_tensor, options, run_metadata)
1136 else:
1137 results = []
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1314 if handle is None:
1315 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1316 run_metadata)
1317 else:
1318 return self._do_call(_prun_fn, handle, feeds, fetches)
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1333 except KeyError:
1334 pass
-> 1335 raise type(e)(node_def, op, message)
1336
1337 def _extend_graph(self):
InvalidArgumentError: Incompatible shapes: [1,125,4] vs. [1,126,4]
[[Node: loss_22/conv1d_175_loss/sub = Sub[T=DT_FLOAT, _class=["loc:#training_18/Adam/gradients/loss_22/conv1d_175_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](conv1d_175/Sigmoid, _arg_conv1d_175_target_0_1/_4489)]]
[[Node: loss_22/mul/_4613 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_1245_loss_22/mul", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Caused by op 'loss_22/conv1d_175_loss/sub', defined at:
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\ProgramData\Anaconda3\lib\site-packages\traitlets\config\application.py", line 658, in launch_instance
app.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 478, in start
self.io_loop.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2728, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2850, in run_ast_nodes
if self.run_code(code, result):
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2910, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-100-ddd3b57d5f0b>", line 22, in <module>
autoencoder.compile(loss='mse', optimizer='adam')
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 830, in compile
sample_weight, mask)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 429, in weighted
score_array = fn(y_true, y_pred)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 14, in mean_squared_error
return K.mean(K.square(y_pred - y_true), axis=-1)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py", line 979, in binary_op_wrapper
return func(x, y, name=name)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_math_ops.py", line 8582, in sub
"Sub", x=x, y=y, name=name)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3392, in create_op
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1718, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Incompatible shapes: [1,125,4] vs. [1,126,4]
[[Node: loss_22/conv1d_175_loss/sub = Sub[T=DT_FLOAT, _class=["loc:#training_18/Adam/gradients/loss_22/conv1d_175_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](conv1d_175/Sigmoid, _arg_conv1d_175_target_0_1/_4489)]]
[[Node: loss_22/mul/_4613 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_1245_loss_22/mul", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
One of your Conv1D layers is not using padding='same'.
But there is something very weird there: why would you use MaxPooling with pool_size=1? It does nothing.
Now suppose you use pool_size=2: then you'd need to pad the inputs anyway, because you'd need inputs whose length is a multiple of 8 (2³) to end up with the same shape after the upsamplings.
For a variable length autoencoder, there is an example here: Variable length output in keras
For all practical purposes, LSTM layers treat shapes exactly the same way Conv1D layers do.
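A minimal sketch of both fixes, assuming the series are padded to a length that is a multiple of 8 so three rounds of pool_size=2 and UpSampling1D(2) restore the original length (filter counts are kept from the question and are illustrative):

from keras.layers import Input, Conv1D, MaxPooling1D, UpSampling1D
from keras.models import Model

input_series = Input(shape=(None, 4))  # lengths padded to a multiple of 8
x = Conv1D(4, 3, activation='relu', padding='same')(input_series)
x = MaxPooling1D(2, padding='same')(x)                 # real downsampling now
x = Conv1D(4, 3, activation='relu', padding='same')(x)
x = MaxPooling1D(2, padding='same')(x)
x = Conv1D(4, 3, activation='relu', padding='same')(x)
encoder = MaxPooling1D(2, padding='same', name='encoder')(x)
x = Conv1D(4, 3, activation='relu', padding='same')(encoder)
x = UpSampling1D(2)(x)
x = Conv1D(4, 3, activation='relu', padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(16, 2, activation='relu', padding='same')(x)  # padding='same' added here
x = UpSampling1D(2)(x)
decoder = Conv1D(4, 2, activation='sigmoid', padding='same')(x)

autoencoder = Model(input_series, decoder)
autoencoder.compile(loss='mse', optimizer='adam')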

How to compute the classification report for sentiment analysis with scikit-learn

How can I get the classification report measures (precision, recall, accuracy, and support) for 3-class classification, where the classes are "positive", "negative" and "neutral"? Below is the code:
vec_clf = Pipeline([('vectorizer', vec), ('pac', svm_clf)])
print vec_clf.fit(X_train.values.astype('U'),y_train.values.astype('U'))
y_pred = vec_clf.predict(X_test.values.astype('U'))
print "SVM Accuracy-",metrics.accuracy_score(y_test, y_pred)
print "confuson metrics :\n", metrics.confusion_matrix(y_test, y_pred, labels=["positive","negative","neutral"])
print(metrics.classification_report(y_test, y_pred))
and it gives this error:
SVM Accuracy- 0.850318471338
confusion metrics :
[[206 9 67]
[ 4 373 122]
[ 9 21 756]]
Traceback (most recent call last):
File "<ipython-input-62-e6ab3066790e>", line 1, in <module>
runfile('C:/Users/HP/abc16.py', wdir='C:/Users/HP')
File "C:\ProgramData\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 880, in runfile
execfile(filename, namespace)
File "C:\ProgramData\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Users/HP/abc16.py", line 133, in <module>
print(metrics.classification_report(y_test, y_pred))
File "C:\ProgramData\Anaconda2\lib\site-packages\sklearn\metrics\classification.py", line 1391, in classification_report
labels = unique_labels(y_true, y_pred)
File "C:\ProgramData\Anaconda2\lib\site-packages\sklearn\utils\multiclass.py", line 104, in unique_labels
raise ValueError("Mix of label input types (string and number)")
ValueError: Mix of label input types (string and number)
Please guide me as to where I am going wrong.
EDIT 1: this is how y_true and y_pred look:
print "y_true :" ,y_test
print "y_pred :",y_pred
y_true : 5985 neutral
899 positive
2403 neutral
3963 neutral
3457 neutral
5345 neutral
3779 neutral
299 neutral
5712 neutral
5511 neutral
234 neutral
1684 negative
3701 negative
2886 neutral
.
.
.
2623 positive
3549 neutral
4574 neutral
4972 positive
Name: sentiment, Length: 1570, dtype: object
y_pred : [u'neutral' u'positive' u'neutral' ..., u'neutral' u'neutral' u'negative']
EDIT 2: output for type(y_true) and type(y_pred)
type(y_true): <class 'pandas.core.series.Series'>
type(y_pred): <type 'numpy.ndarray'>
Cannot reproduce your error:
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# toy data, similar to yours:
data = {'id':[5985,899,2403, 1684], 'sentiment':['neutral', 'positive', 'neutral', 'negative']}
y_true = pd.Series(data['sentiment'], index=data['id'], name='sentiment')
y_true
# 5985 neutral
# 899 positive
# 2403 neutral
# 1684 negative
# Name: sentiment, dtype: object
type(y_true)
# pandas.core.series.Series
y_pred = np.array(['neutral', 'positive', 'negative', 'neutral'])
# all metrics working fine:
accuracy_score(y_true, y_pred)
# 0.5
confusion_matrix(y_true, y_pred)
# array([[0, 1, 0],
# [1, 1, 0],
# [0, 0, 1]], dtype=int64)
classification_report(y_true, y_pred)
# result:
             precision    recall  f1-score   support

    negative       0.00      0.00      0.00         1
     neutral       0.50      0.50      0.50         2
    positive       1.00      1.00      1.00         1

       total       0.50      0.50      0.50         4
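Since the toy data works, the ValueError most likely means the real y_test mixes string and numeric labels (e.g. a stray number among the sentiment strings). A hedged debugging sketch, reusing y_test, y_pred, and the metrics import from the question:

# Look for a mix of str and int/float: that mix is exactly what triggers
# "Mix of label input types (string and number)".
print set(type(v) for v in y_test)
print set(type(v) for v in y_pred)

# If a mix shows up, casting both sides to strings makes the labels comparable:
print metrics.classification_report(y_test.astype(str), y_pred.astype(str))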

Input dimension mismatch in Keras

Hi, can anyone help me out with this error? I have searched through the documentation, but to no avail.
The aim is to predict a time series. I have used dummy data of shape (N, timesteps, features). I wish to predict x_2 from x_1, x_3 from x_2, and so on up to x_11 from x_10, using an LSTM. (Any suggestion to do it better is welcome.) The output (below) shows the expected output shapes, which seem correct. However, the error mentions an input dimension mismatch. As per the documentation, I can't seem to find the problem.
import numpy as np
N = 13*12;
T = 10;
F = 3;
X = np.random.rand(N, T, F);
Y = np.random.rand(N, 1, F);
Y = np.concatenate((X[:,1:T,:], Y), axis=1);
import keras
from keras.models import Model
from keras.layers import Dense, Input, LSTM, Lambda, concatenate, Dropout
from keras.optimizers import Adam, SGD
from keras import regularizers
from keras.metrics import categorical_accuracy
from keras.models import load_model
input_ = Input(shape = (T, F), name ='input');
x = Dense(15, activation='sigmoid', name='fc1')(input_);
x = LSTM(25, return_sequences=True, activation='tanh', name='lstm')(x);
x = Dense(F, activation='sigmoid', name='fc2')(x);
model = Model(input_, x, name='dummy');
model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy']);
print(model.input_shape); print(X.shape);
print(model.output_shape); print(Y.shape);
print(model.summary());
model.fit(X, Y, batch_size = 13, epochs=30, validation_split=0.20, shuffle=False);
The error comes as
Using Theano backend.
(None, 10, 3)
(156, 10, 3)
(None, 10, 3)
(156, 10, 3)
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) (None, 10, 3) 0
_________________________________________________________________
fc1 (Dense) (None, 10, 15) 60
_________________________________________________________________
lstm (LSTM) (None, 10, 25) 4100
_________________________________________________________________
fc2 (Dense) (None, 10, 3) 78
=================================================================
Total params: 4,238
Trainable params: 4,238
Non-trainable params: 0
_________________________________________________________________
None
Train on 124 samples, validate on 32 samples
Epoch 1/30
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 903, in __call__
self.fn() if output_subset is None else\
ValueError: Input dimension mis-match. (input[0].shape[1] = 10, input[1].shape[1] = 15)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "b.py", line 34, in <module>
model.fit(X, Y, batch_size = 13, epochs=30, validation_split=0.20, shuffle=False);
File "C:\Anaconda3\lib\site-packages\keras\engine\training.py", line 1498, in fit
initial_epoch=initial_epoch)
File "C:\Anaconda3\lib\site-packages\keras\engine\training.py", line 1152, in _fit_loop
outs = f(ins_batch)
File "C:\Anaconda3\lib\site-packages\keras\backend\theano_backend.py", line 1158, in __call__
return self.function(*inputs)
File "C:\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 917, in __call__
storage_map=getattr(self.fn, 'storage_map', None))
File "C:\Anaconda3\lib\site-packages\theano\gof\link.py", line 325, in raise_with_op
reraise(exc_type, exc_value, exc_trace)
File "C:\Anaconda3\lib\site-packages\six.py", line 692, in reraise
raise value.with_traceback(tb)
File "C:\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 903, in __call__
self.fn() if output_subset is None else\
ValueError: Input dimension mis-match. (input[0].shape[1] = 10, input[1].shape[1] = 15)
Apply node that caused the error: Elemwise{Add}[(0, 0)](Reshape{3}.0, InplaceDimShuffle{x,0,x}.0)
Toposort index: 98
Inputs types: [TensorType(float32, 3D), TensorType(float32, (True, False, True))]
Inputs shapes: [(13, 10, 15), (1, 15, 1)]
Inputs strides: [(600, 60, 4), (60, 4, 4)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Reshape{2}(Elemwise{Add}[(0, 0)].0, TensorConstant{[-1 15]}), Elemwise{Composite{((i0 + i1 + i2 + i3) * scalar_sigmoid(i4) * (i5 - scalar_sigmoid(i4)))}}[(0, 0)](Reshape{3}.0, Reshape{3}.0, Reshape{3}.0, Reshape{3}.0, Elemwise{Add}[(0, 0)].0, TensorConstant{(1, 1, 1) of 1.0})]]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
I am unable to understand why the input shape would be (1,15,1) in the error, and what the two inputs that Theano mentions are.
The Theano version I use is 0.9.0 and the Keras version is 2.0.4. If I use no features (F) at all, the code runs smoothly.
Edit 1: The batch size is 13 just for clarity in the error log. Removing it gives the exact same error.

Viewing layer activations with Keras

In NVIDIA's paper "End to End Learning for Self-Driving Cars" there's an illustration showing the activation of first-layer feature maps.
I'm trying to recreate this with the Comma AI model, but the only visualisation tools I've found are Keras' instructions for gradient ascent and descent, rather than simply viewing activations. What should I be looking for?
EDIT IN RESPONSE TO COMMENT
I tried implementing the code in this answer using the below code:
from keras import backend as K
import numpy as np
import json
from keras.models import model_from_json

with open('outputs/steering_model/steering_angle.json', 'r') as jfile:
    z = json.load(jfile)
    model = model_from_json(z)

print("Loaded model")
model.load_weights('outputs/steering_model/steering_angle.keras')
print("Loaded weights")

img_width = 320
img_height = 160

inp = model.input                                   # input placeholder
outputs = [layer.output for layer in model.layers]  # all layer outputs
functors = [K.function([inp] + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions

# Testing
test = np.random.random((1, 3, img_width, img_height))
layer_outs = [func([test, 1.]) for func in functors]
print layer_outs
This gives the following error:
Using Theano backend.
Loaded model
Loaded weights
Traceback (most recent call last):
File "vis-layers.py", line 22, in <module>
layer_outs = [func([test, 1.]) for func in functors]
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.py", line 959, in __call__
return self.function(*inputs)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.py", line 871, in __call__
storage_map=getattr(self.fn, 'storage_map', None))
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/gof/link.py", line 314, in raise_with_op
reraise(exc_type, exc_value, exc_trace)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.py", line 859, in __call__
outputs = self.fn()
ValueError: Shape mismatch: x has 49152 cols (and 1 rows) but y has 12800 rows (and 512 cols)
Apply node that caused the error: Dot22(Elemwise{Composite{Switch(GT(i0, i1), i0, expm1(i0))}}[(0, 0)].0, dense_1_W)
Toposort index: 50
Inputs types: [TensorType(float32, matrix), TensorType(float32, matrix)]
Inputs shapes: [(1, 49152), (12800, 512)]
Inputs strides: [(196608, 4), (2048, 4)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Elemwise{Add}[(0, 0)](Dot22.0, InplaceDimShuffle{x,0}.0)]]
I thought this might be a problem with Theano vs TensorFlow dimension ordering, so I tried changing the test input to:
test = np.random.random((1, img_height, img_width, 3))
which gave the following error:
Using Theano backend.
Loaded model
Loaded weights
Traceback (most recent call last):
File "vis-layers.py", line 22, in <module>
layer_outs = [func([test, 1.]) for func in functors]
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.py", line 959, in __call__
return self.function(*inputs)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.py", line 871, in __call__
storage_map=getattr(self.fn, 'storage_map', None))
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/gof/link.py", line 314, in raise_with_op
reraise(exc_type, exc_value, exc_trace)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.py", line 859, in __call__
outputs = self.fn()
ValueError: CorrMM images and kernel must have the same stack size
Apply node that caused the error: CorrMM{half, (4, 4)}(Elemwise{Composite{(i0 + (i1 * i2))}}.0, Subtensor{::, ::, ::int64, ::int64}.0)
Toposort index: 9
Inputs types: [TensorType(float32, 4D), TensorType(float32, 4D)]
Inputs shapes: [(1, 320, 160, 3), (16, 3, 8, 8)]
Inputs strides: [(2250000, 6000, 12, 4), (768, 256, -32, -4)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Subtensor{int64:int64:int8, int64:int64:int8, int64:int64:int8, int64:int64:int8}(CorrMM{half, (4, 4)}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1}, Constant{0}, Constant{16}, Constant{1}, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1}, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1})]]
Backtrace when the node is created(use Theano flag traceback.limit=N to make it longer):
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/utils/layer_utils.py", line 43, in layer_from_config
return layer_class.from_config(config['config'])
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/models.py", line 1091, in from_config
model.add(layer)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/models.py", line 332, in add
output_tensor = layer(self.outputs[0])
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/engine/topology.py", line 572, in __call__
self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/engine/topology.py", line 635, in add_inbound_node
Node.create_node(self, inbound_layers, node_indices, tensor_indices)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/engine/topology.py", line 166, in create_node
output_tensors = to_list(outbound_layer.call(input_tensors[0], mask=input_masks[0]))
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/layers/convolutional.py", line 475, in call
filter_shape=self.W_shape)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/keras/backend/theano_backend.py", line 1508, in conv2d
filter_shape=filter_shape)
EDIT: Output of model.summary()
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
lambda_1 (Lambda) (None, 3, 160, 320) 0 lambda_input_1[0][0]
____________________________________________________________________________________________________
convolution2d_1 (Convolution2D) (None, 16, 40, 80) 3088 lambda_1[0][0]
____________________________________________________________________________________________________
elu_1 (ELU) (None, 16, 40, 80) 0 convolution2d_1[0][0]
____________________________________________________________________________________________________
convolution2d_2 (Convolution2D) (None, 32, 20, 40) 12832 elu_1[0][0]
____________________________________________________________________________________________________
elu_2 (ELU) (None, 32, 20, 40) 0 convolution2d_2[0][0]
____________________________________________________________________________________________________
convolution2d_3 (Convolution2D) (None, 64, 10, 20) 51264 elu_2[0][0]
____________________________________________________________________________________________________
flatten_1 (Flatten) (None, 12800) 0 convolution2d_3[0][0]
____________________________________________________________________________________________________
dropout_1 (Dropout) (None, 12800) 0 flatten_1[0][0]
____________________________________________________________________________________________________
elu_3 (ELU) (None, 12800) 0 dropout_1[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 512) 6554112 elu_3[0][0]
____________________________________________________________________________________________________
dropout_2 (Dropout) (None, 512) 0 dense_1[0][0]
____________________________________________________________________________________________________
elu_4 (ELU) (None, 512) 0 dropout_2[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 513 elu_4[0][0]
====================================================================================================
Total params: 6,621,809
Trainable params: 6,621,809
Non-trainable params: 0
____________________________________________________________________________________________________
EDIT: DEBUGGING WITH SINGLE LAYER
In order to debug the issue with input shapes, I rewrote the script for a single layer:
from keras import backend as K
import numpy as np
import json
from keras.models import model_from_json

with open('outputs/steering_model/steering_angle.json', 'r') as jfile:
    z = json.load(jfile)
    model = model_from_json(z)

print("Loaded model")
model.load_weights('outputs/steering_model/steering_angle.keras')
print("Loaded weights")

layer_name = 'lambda_1'
img_width = 160
img_height = 320

inp = model.input
layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]
output = model.layers[layer_idx].output
functor = K.function([inp] + [K.learning_phase()], output)  # evaluation function

# Testing
test = np.random.random((1, img_height, img_width, 3))
layer_out = functor([test, 1.])
print layer_out
The output from this is as follows:
Using Theano backend.
Loaded model
Loaded weights
[[[[-0.99223709 -0.99468529 -0.99318016]
[-0.99725926 -0.9924705 -0.9994905 ]
[-0.99380279 -0.99291307 -0.99927235]
...,
[-0.99361622 -0.99258155 -0.99954134]
[-0.99748689 -0.99217939 -0.99918425]
[-0.99475586 -0.99366009 -0.992827 ]]
[[-0.99330682 -0.99756712 -0.99795902]
[-0.99421203 -0.99240923 -0.99438184]
[-0.99222761 -0.99425066 -0.99886942]
...,
[-0.99329156 -0.99460274 -0.99994165]
[-0.99763876 -0.99870259 -0.9998613 ]
[-0.99962425 -0.99702215 -0.9943046 ]]
[[-0.99947125 -0.99577188 -0.99294066]
[-0.99582225 -0.99568367 -0.99345332]
[-0.99823713 -0.99376178 -0.99432898]
...,
[-0.99828976 -0.99264622 -0.99669623]
[-0.99485278 -0.99353015 -0.99411404]
[-0.99832171 -0.99390954 -0.99620205]]
...,
[[-0.9980613 -0.99474132 -0.99680966]
[-0.99378282 -0.99288809 -0.99248403]
[-0.99375945 -0.9966079 -0.99440354]
...,
[-0.99634677 -0.99931824 -0.99611002]
[-0.99781156 -0.99990571 -0.99249381]
[-0.9996115 -0.99991143 -0.99486816]]
[[-0.99839222 -0.99690026 -0.99410695]
[-0.99551272 -0.99262673 -0.99934679]
[-0.99432331 -0.99822938 -0.99294668]
...,
[-0.99515969 -0.99867356 -0.9926796 ]
[-0.99478716 -0.99883151 -0.99760127]
[-0.9982425 -0.99547088 -0.99658638]]
[[-0.99240851 -0.99792403 -0.99360847]
[-0.99226022 -0.99546915 -0.99411654]
[-0.99558711 -0.9960795 -0.9993062 ]
...,
[-0.99745959 -0.99276334 -0.99800634]
[-0.99249429 -0.99748743 -0.99576926]
[-0.99531293 -0.99618822 -0.99997312]]]]
However, attempting the same on the first convolutional layer (convolution2d_1) with an 80x40 image returns the same error:
ValueError: CorrMM images and kernel must have the same stack size
Apply node that caused the error: CorrMM{half, (4, 4)}(Elemwise{Composite{(i0 + (i1 * i2))}}.0, Subtensor{::, ::, ::int64, ::int64}.0)
Toposort index: 9
Inputs types: [TensorType(float32, 4D), TensorType(float32, 4D)]
Inputs shapes: [(1, 40, 80, 3), (16, 3, 8, 8)]
Inputs strides: [(38400, 960, 12, 4), (768, 256, -32, -4)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Subtensor{int64:int64:int8, int64:int64:int8, int64:int64:int8, int64:int64:int8}(CorrMM{half, (4, 4)}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1}, Constant{0}, Constant{16}, Constant{1}, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1}, ScalarFromTensor.0, ScalarFromTensor.0, Constant{1})]]
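The stack-size error says the test array is channels-last, shape (1, 40, 80, 3), while the Theano-ordered kernels expect channels-first, shape (1, 3, 40, 80). A minimal sketch of the conversion, the same idea as the rollaxis call in the final script below:

import numpy as np

test = np.random.random((1, 40, 80, 3))  # channels-last, as fed above
test = np.rollaxis(test, 3, 1)           # move channels to axis 1 -> (1, 3, 40, 80)
print test.shape                         # (1, 3, 40, 80)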
EDIT: OUTPUT LAYER DATA AS IMAGE
The following code replaces the random image with a loaded one, and takes the layer output and saves it as an image:
input_img_data = imread(impath+'.png').astype(np.float32)
# change image to 4d theano array
test = np.expand_dims(input_img_data,axis=0)
print test.shape
layer_out = functor([test, 1.])
img = Image.fromarray(layer_out[0,:,:,:], 'RGB')
img.save('activ_%s_%s.png' % (layer_name,impath))
print("Created Image")
Here is the final code that does what I want it to do, still rough and in need of tidying up:
from keras import backend as K
from PIL import Image
from scipy.misc import imread
from scipy.misc import imsave
import numpy as np
import json
from keras.models import model_from_json

with open('outputs/steering_model/steering_angle.json', 'r') as jfile:
    z = json.load(jfile)
    model = model_from_json(z)

print("Loaded model")
model.load_weights('outputs/steering_model/steering_angle.keras')
print("Loaded weights")

layer_name = 'lambda_1'
#layer_name = 'convolution2d_1'
#layer_name = 'elu_1'
#layer_name = 'convolution2d_2'

impaths = ['track', 'road', 'mway']
img_width = 500
img_height = 375

inp = model.input
layer_idx = [idx for idx, layer in enumerate(model.layers) if layer.name == layer_name][0]
output = model.layers[layer_idx].output
functor = K.function([inp] + [K.learning_phase()], output)  # evaluation function

for impath in impaths:
    input_img_data = imread('testimages/' + impath + '.png').astype(np.float32)
    input_img_data = np.rollaxis(input_img_data, 2, 0)  # change to (channels, h, w)
    test = np.expand_dims(input_img_data, axis=0)       # change to (dims, channels, h, w)
    print("Test Shape: %s" % (test.shape,))             # check shape
    layer_out = functor([test, 1.])
    print("Output Shape: %s" % (layer_out.shape,))      # check output shape

    # save multiple greyscale images
    layer_out = np.rollaxis(layer_out, 0, 4)
    print("Output Image Shape: %s" % (layer_out.shape,))  # check output shape
    count = 1
    for x in layer_out:
        x = np.rollaxis(x, 2, 0)
        print("Final Image Shape: %s" % (x.shape,))     # check output shape
        imsave('activationimages/activ_%s_%s_%d.png' % (layer_name, impath, count), x[0, :, :])
        count = count + 1
The main issue was wrangling the shapes of the various input and output layers - hence all the print commands in the above code, for debugging.
A second confusion was that I was interpreting an array shape of (3,w,h) as a single RGB (3-channel) image, rather than one greyscale image.
The version above tests an array of images at a time (hardcoded image path). The lambda_1 layer outputs a single RGB image per test image; convolution2d_1 and elu_1 output sixteen smaller (25% size) greyscale images, one for each filter. And, I hope, so on.
I will add a Github link to a tidied gist with image stitching when I've done this. I've learned a lot.
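In the meantime, a hedged sketch of that stitching step: tile the sixteen (h, w) filter maps a conv layer produces into a 4x4 grid and save them as one image (stitch_filters is a hypothetical helper name):

import numpy as np
from scipy.misc import imsave

def stitch_filters(maps, rows=4, cols=4):
    # maps: array of shape (n_filters, h, w)
    n, h, w = maps.shape
    grid = np.zeros((rows * h, cols * w), dtype=maps.dtype)
    for i in range(min(n, rows * cols)):
        r, c = divmod(i, cols)
        grid[r * h:(r + 1) * h, c * w:(c + 1) * w] = maps[i]
    return grid

# e.g., before the rollaxis calls, layer_out for convolution2d_1 has shape
# (1, 16, h, w), so: imsave('activ_grid.png', stitch_filters(layer_out[0]))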
