Deconvolution (conv2d_transpose) in TensorFlow - machine-learning

I have a trained network (AlexNet) for image classification; currently, I am trying to visualize the feature activations of different convolutional layers using the deconvolution technique proposed by Zeiler. The relevant shapes are the following:
Input shape: (1,227,227,3)
Filter shape: (11,11,3,96)
stride = (4,4)
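(With VALID padding and stride 4, the convolution output spatial size is (227 - 11)/4 + 1 = 55, so the activation tensor from this layer should have shape (1, 55, 55, 96).)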
To get the convolution output, I used the following code:
wts = model.layers[1].get_weights()
inp = tf.constant(x_test)
ftr = tf.constant(wts[0])
b = tf.constant(wts[1])
c = tf.nn.conv2d(inp, ftr, strides = [1,4,4,1], padding='VALID')
c = c + b
sess = tf.Session()
res = sess.run(c)
print(res.shape)
sess.close()
fig = plt.figure(figsize=(12, 12))
for i in range(96):
    plt.subplot(10, 10, i+1)
    img = res[0, :, :, i]
    plt.imshow(img, cmap='gray')
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.tight_layout()
plt.show()
The following is the result, which appears to be correct.
But when I try the inverse operation, I expect to get the original image back; instead, I get something else. Please help me understand why I don't get anything close to the original image.
inp = tf.constant(res)
ftr = tf.constant(wts[0])
b = tf.constant(wts[1])
c = inp - b
c = tf.nn.conv2d_transpose(c, ftr, output_shape = [1,227,227,3], strides = [1,4,4,1], padding='VALID')
sess = tf.Session()
res = sess.run(c)
print(res.shape)
sess.close()
plt.imshow(res[0,:,:,:])
plt.show()

Not sure if you fixed the issue. Try using:
c = tf.layers.conv2d_transpose(c, filters = 3, kernel_size = (11,11))
and see where that gets you instead of the initial tf.nn.conv2d_transpose.
I would love your feedback even if you already solved the issue.
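For what it's worth, here is a minimal sketch of that suggestion wired into the code above; the strides and padding are assumptions chosen to mirror the forward convolution, and note that tf.layers.conv2d_transpose creates its own randomly initialized kernel rather than reusing wts[0], so it is not an exact inverse of the trained filter:
# Sketch only: reuses the conv activations from the first block above
# (called `res` there, shape (1, 55, 55, 96)) and the weights `wts`.
inp = tf.constant(res)
b = tf.constant(wts[1])
c = inp - b
c = tf.layers.conv2d_transpose(c, filters=3, kernel_size=(11, 11),
                               strides=(4, 4), padding='valid')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # the transpose layer adds new variables
    out = sess.run(c)
print(out.shape)  # expected: (1, 227, 227, 3)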

Related

Classification metrics can't handle a mix of multilabel-indicator and multiclass targets

I am trying to calculate the precision and recall for two images, with one image being the output of the model (shape [3, 768, 1024]) and the other the ground truth (shape [1, 768, 1024]). But I keep getting an error. I am new to Python and I am not able to understand what I am doing wrong. I have tried some things, like getting the input and output to the same shape, but I am still getting this error:
raise ValueError("Classification metrics can't handle a mix of {0} "
ValueError: Classification metrics can't handle a mix of multilabel-indicator and multiclass targets
If anybody can help me with this issue, I would be grateful.
My code is here:
def precision(outputs, labels):
    op = outputs  # .cpu()
    la = labels  # .cpu()
    _, preds = torch.max(op, dim=1)
    return torch.tensor(precision_score(la, preds, average='micro'))

def recall(outputs, labels):
    op = outputs  # .cpu()
    la = labels  # .cpu()
    _, preds = torch.max(op, dim=1)
    return torch.tensor(recall_score(la, preds, average='weighted', zero_division=1))

def f1(outputs, labels):
    op = outputs  # .cpu()
    la = labels  # .cpu()
    _, preds = torch.max(op, dim=1)
    return torch.tensor(f1_score(la, preds, average='weighted'))
img50 = Image.open('0050_999.png')
numpydata_in = np.array(img50).astype('uint8')
#print("test shape",numpydata_in.shape)
numpydata_in= torch.from_numpy(numpydata_in)
print("final input shape", numpydata_in.shape)
imgT50 = Image.open('0050.jpg')
numpydata_out = np.array(imgT50).astype('uint8')
numpydata_out = transforms.ToTensor()(numpydata_out)
#print(numpydata_out.shape)
numpydata_out = torch.argmax(numpydata_out, dim=0)
#print(numpydata_out.shape)
numpydata_out = torch.squeeze(numpydata_out)
print("final shape target",numpydata_out.shape)
prec = precision(numpydata_in, numpydata_out)
print("precision is",prec)
rec = recall(numpydata_in, numpydata_out)
print("recall is",rec)
f1 = f1(numpydata_in, numpydata_out)
print("f1 score is",f1)
final input shape torch.Size([768, 1024])
final shape target torch.Size([768, 1024])
raise ValueError("Classification metrics can't handle a mix of {0} "
ValueError: Classification metrics can't handle a mix of multilabel-indicator and multiclass targets

For deep learning, with activation relu the output becomes NaN during training while it is normal with tanh

The neural network I trained is the critic network for deep reinforcement learning. The problem is that when one of the layers' activations is set to relu or elu, the output becomes NaN after some training steps, while the output is normal if the activation is tanh. The code is as follows (based on TensorFlow):
with tf.variable_scope('critic'):
    self.batch_size = tf.shape(self.tfs)[0]
    l_out_x = denseWN(x=self.tfs, name='l3', num_units=self.cell_size, nonlinearity=tf.nn.tanh, trainable=True, shape=[det*step*2, self.cell_size])
    l_out_x1 = denseWN(x=l_out_x, name='l3_1', num_units=32, trainable=True, nonlinearity=tf.nn.tanh, shape=[self.cell_size, 32])
    l_out_x2 = denseWN(x=l_out_x1, name='l3_2', num_units=32, trainable=True, nonlinearity=tf.nn.tanh, shape=[32, 32])
    l_out_x3 = denseWN(x=l_out_x2, name='l3_3', num_units=32, trainable=True, shape=[32, 32])
    self.v = denseWN(x=l_out_x3, name='l4', num_units=1, trainable=True, shape=[32, 1])
Here is the code for basic layer construction:
def get_var_maybe_avg(var_name, ema, trainable, shape):
    if var_name == 'V':
        initializer = tf.contrib.layers.xavier_initializer()
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=shape)
    if var_name == 'g':
        initializer = tf.constant_initializer(1.0)
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=[shape[-1]])
    if var_name == 'b':
        initializer = tf.constant_initializer(0.1)
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=[shape[-1]])
    if ema is not None:
        v = ema.average(v)
    return v

def get_vars_maybe_avg(var_names, ema, trainable, shape):
    vars = []
    for vn in var_names:
        vars.append(get_var_maybe_avg(vn, ema, trainable=trainable, shape=shape))
    return vars

def denseWN(x, name, num_units, trainable, shape, nonlinearity=None, ema=None, **kwargs):
    with tf.variable_scope(name):
        V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema, trainable=trainable, shape=shape)
        x = tf.matmul(x, V)
        scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
        x = tf.reshape(scaler, [1, num_units])*x + tf.reshape(b, [1, num_units])
        if nonlinearity is not None:
            x = nonlinearity(x)
    return x
Here is the code to train the network:
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=0.005, scope=None)
self.weights = tf.trainable_variables()
regularization_penalty_critic = tf.contrib.layers.apply_regularization(l1_regularizer, self.weights)
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.optimizer = tf.train.RMSPropOptimizer(0.0001, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.closs)
self.grads_and_vars = [[tf.clip_by_norm(grad,5), var] for grad, var in self.grads_and_vars if grad is not None]
self.ctrain_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
Looks like you're facing the problem of exploding gradients with the ReLU activation function (that's what the NaN means: very large activations). There are several techniques to deal with this issue, e.g. batch normalization (which changes the network architecture) or careful variable initialization (that's what I'd try first).
You are using Xavier initialization for the V variables in the different layers, which indeed works fine for the logistic sigmoid activation (see the paper by Xavier Glorot and Yoshua Bengio), and likewise for tanh.
The preferred initialization strategy for the ReLU activation function (and its variants, including ELU) is He initialization. In TensorFlow it's implemented via tf.variance_scaling_initializer:
initializer = tf.variance_scaling_initializer()
v = tf.get_variable(name=var_name, initializer=initializer, ...)
You might also want to try smaller values for the b and g variables, but it's hard to say the exact values just by looking at your model. If nothing helps, consider adding batch-norm layers to your model to control the activation distribution.
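For illustration, here is a sketch of what that could look like inside denseWN, assuming the TF 1.x tf.layers.batch_normalization API and an is_training flag that is not part of the original code:
# Hypothetical variant of denseWN with a batch-norm layer inserted between the
# weight-normalized linear transform and the nonlinearity.
def denseWN_bn(x, name, num_units, trainable, shape, nonlinearity=None,
               ema=None, is_training=True, **kwargs):
    with tf.variable_scope(name):
        V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema, trainable=trainable, shape=shape)
        x = tf.matmul(x, V)
        scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
        x = tf.reshape(scaler, [1, num_units])*x + tf.reshape(b, [1, num_units])
        x = tf.layers.batch_normalization(x, training=is_training)
        if nonlinearity is not None:
            x = nonlinearity(x)
    return x
Remember to run the ops collected in tf.GraphKeys.UPDATE_OPS alongside the train op so that the batch-norm moving statistics are updated.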

Neural Network does not perform well on the CIFAR-10 dataset

I have been trying to implement a CNN on the CIFAR-10 dataset for a few days, and my test set accuracy does not seem to go beyond 10% while the error just hangs around 69.07733. I have been tweaking the model for a few days, but in vain. I haven't been able to spot where I am going wrong. Please help me find the fault in the model. Here is the code for it:
import os
import sys
import pickle
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
data_root = './cifar-10-batches-py'
train_data = np.ndarray(shape=(50000,3072), dtype=np.float32)
train_labels = np.ndarray(shape=(50000), dtype=np.float32)
num_images = 0
test_data = np.ndarray(shape=(10000,3072),dtype = np.float32)
test_labels = np.ndarray(shape=(10000),dtype=np.float32)
meta_data = {}
for file in os.listdir(data_root):
    file_path = os.path.join(data_root, file)
    with open(file_path, 'rb') as f:
        temp = pickle.load(f, encoding='bytes')
        if file == 'batches.meta':
            for i, j in enumerate(temp[b'label_names']):
                meta_data[i] = j
        if 'data_batch_' in file:
            for i in range(10000):
                train_data[num_images, :] = temp[b'data'][i]
                train_labels[num_images] = temp[b'labels'][i]
                num_images += 1
        if 'test_batch' in file:
            for i in range(10000):
                test_data[i, :] = temp[b'data'][i]
                test_labels[i] = temp[b'labels'][i]
'''
print('meta: \n',meta_data)
train_data = train_data.reshape(50000,3,32,32).transpose(0,2,3,1)
print('\ntrain data: \n',train_data.shape,'\nLabels: \n',train_labels[0])
print('\ntest data: \n',test_data[0].shape,'\nLabels: \n',train_labels[0])'''
#accuracy function acc = (no. of correct prediction/total attempts) * 100
def accuracy(predictions, labels):
    return (100 * (np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]))
#reformat the data
def reformat(data, labels):
    data = data.reshape(data.shape[0], 3, 32, 32).transpose(0, 2, 3, 1).astype(np.float32)
    labels = (np.arange(10) == labels[:, None]).astype(np.float32)
    return data, labels
train_data, train_labels = reformat(train_data,train_labels)
test_data, test_labels = reformat(test_data, test_labels)
print ('Train ',train_data[0][1])
plt.axis("off")
plt.imshow(train_data[1], interpolation = 'nearest')
plt.savefig("1.png")
plt.show()
'''
print("Train: \n",train_data.shape,test_data[0],"\nLabels: \n",train_labels.shape,train_labels[:11])
print("Test: \n",test_data.shape,test_data[0],"\nLabels: \n",test_labels.shape,test_labels[:11])'''
image_size = 32
num_channels = 3
batch_size = 30
patch_size = 5
depth = 64
num_hidden = 256
num_labels = 10
graph = tf.Graph()
with graph.as_default():
    # input data and labels
    train_input = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    train_output = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    test_input = tf.constant(test_data)
    # layer weights and biases
    layer_1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth]))
    layer_1_biases = tf.Variable(tf.zeros([depth]))
    layer_2_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth, depth]))
    layer_2_biases = tf.Variable(tf.constant(0.1, shape=[depth]))
    layer_3_weights = tf.Variable(tf.truncated_normal([64*64, num_hidden]))
    layer_3_biases = tf.Variable(tf.constant(0.1, shape=[num_hidden]))
    layer_4_weights = tf.Variable(tf.truncated_normal([num_hidden, num_labels]))
    layer_4_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]))

    def convnet(data):
        conv_1 = tf.nn.conv2d(data, layer_1_weights, [1, 1, 1, 1], padding='SAME')
        hidden_1 = tf.nn.relu(conv_1 + layer_1_biases)
        norm_1 = tf.nn.lrn(hidden_1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        pool_1 = tf.nn.max_pool(norm_1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        conv_2 = tf.nn.conv2d(pool_1, layer_2_weights, [1, 1, 1, 1], padding='SAME')
        hidden_2 = tf.nn.relu(conv_2 + layer_2_biases)
        norm_2 = tf.nn.lrn(hidden_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        pool_2 = tf.nn.max_pool(norm_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        shape = pool_2.get_shape().as_list()
        hidd2_trans = tf.reshape(pool_2, [shape[0], shape[1]*shape[2]*shape[3]])
        hidden_3 = tf.nn.relu(tf.matmul(hidd2_trans, layer_3_weights) + layer_3_biases)
        return tf.nn.relu(tf.matmul(hidden_3, layer_4_weights) + layer_4_biases)

    logits = convnet(train_input)
    loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=train_output, logits=logits))
    optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)
    train_prediction = tf.nn.softmax(logits)
    test_prediction = tf.nn.softmax(convnet(test_input))
num_steps = 100000
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print('Initialized \n')
    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch = train_data[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {train_input: batch, train_output: batch_labels}
        _, l, prediction = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Loss at step %d: %f" % (step, l))
            print("Accuracy: %f" % (accuracy(prediction, batch_labels)))
    print("Test accuracy: %f" % (accuracy(session.run(test_prediction), test_labels)))
At first glance I would say the initialization of the CNN is the culprit. Training a convnet is an optimization problem over a highly non-convex loss surface, and therefore it depends a lot on careful initialization to avoid getting stuck in local minima or saddle points. Look at Xavier initialization for an example of how to fix that.
Example Code:
W = tf.get_variable("W", shape=[784, 256],
                    initializer=tf.contrib.layers.xavier_initializer())
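Applied to the weights in the question, that might look like the following sketch (the names mirror the question's variables; these lines would replace the corresponding tf.Variable(tf.truncated_normal(...)) definitions):
layer_1_weights = tf.get_variable(
    "layer_1_weights",
    shape=[patch_size, patch_size, num_channels, depth],
    initializer=tf.contrib.layers.xavier_initializer())
layer_2_weights = tf.get_variable(
    "layer_2_weights",
    shape=[patch_size, patch_size, depth, depth],
    initializer=tf.contrib.layers.xavier_initializer())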
The problem is that your network has a very high depth (number of filters = 64 for both layers). Also, you are training the network from scratch, and your CIFAR-10 dataset (50,000 images) is quite small. Moreover, each CIFAR-10 image is only 32x32x3 in size.
One alternative I can suggest is to fine-tune a pre-trained model, i.e. do transfer learning.
Another, better alternative is to reduce the number of filters in each layer. This way, you will be able to train the model from scratch, and it will also be faster (assuming you don't have a GPU).
Next, you are making use of local response normalization. I would suggest removing this layer and doing mean normalization in the pre-processing step.
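A minimal sketch of that pre-processing step, assuming the reformatted train_data/test_data arrays from the question and using statistics computed on the training set only:
# Hypothetical mean normalization (per channel); optionally divide by the std as well.
mean = train_data.mean(axis=(0, 1, 2), keepdims=True)
std = train_data.std(axis=(0, 1, 2), keepdims=True) + 1e-8
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std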
Next, if you feel the learning is not picking up at all, try increasing the learning rate a little and see.
Lastly, just to reduce some operations in your code: you are reshaping your tensor and then doing a transpose in several places, like this:
data.reshape(data.shape[0],3,32,32).transpose(0,2,3,1)
Why not directly reshape it to something like this?
data.reshape(data.shape[0], 32, 32, 3)
Hope the answer helps you.

GAN Converges in Just a Few Epochs

I implemented a generative adversarial network in Keras. My training data size is about 16,000, where each image is of 32*32 size. All of my training images are resized versions of images from the ImageNet object-detection dataset. I fed the image matrix directly into the network without doing the center crop. I used the AdamOptimizer with the learning rate being 1e-4 and beta1 being 0.5, and I also set the dropout rate to 0.1. I first trained the discriminator on 3000 real images and 3000 fake images, and it achieved a 93% accuracy. Then, I trained for 500 epochs with the batch size being 32. However, my model seemed to converge in only a few epochs (<10), and the images it generated were ugly.
Plot of the Loss Function
Random Samples Generated by the Generator
I was wondering whether my training dataset is too small (compared to the ones in the DCGAN paper, which are more than 300,000) or my model configuration is not correct. What's more, should I train with SGD on D for k iterations (where k is small, perhaps 1) and then train with SGD on G for one iteration, as suggested by Ian Goodfellow in the original paper? (So far I have just trained them one at a time.)
Below is the configuration of the generator.
g_input = Input(shape=[100])
H = Dense(1024*4*4, init='glorot_normal')(g_input)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = Reshape( [4, 4,1024] )(H)
H = UpSampling2D(size=( 2, 2))(H)
H = Convolution2D(512, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = UpSampling2D(size=( 2, 2))(H)
H = Convolution2D(256, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = UpSampling2D(size=( 2, 2))(H)
H = Convolution2D(3, 3, 3, border_mode='same', init='glorot_uniform')(H)
g_V = Activation('tanh')(H)
generator = Model(g_input,g_V)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()
Below is the configuration of the discriminator:
d_input = Input(shape=shp)
H = Convolution2D(64, 5, 5, subsample=(2, 2), border_mode = 'same', init='glorot_normal')(d_input)
H = LeakyReLU(0.2)(H)
#H = Dropout(dropout_rate)(H)
H = Convolution2D(128, 5, 5, subsample=(2, 2), border_mode = 'same', init='glorot_normal')(H)
H = BatchNormalization(mode=2)(H)
H = LeakyReLU(0.2)(H)
#H = Dropout(dropout_rate)(H)
H = Flatten()(H)
H = Dense(256, init='glorot_normal')(H)
H = LeakyReLU(0.2)(H)
d_V = Dense(2,activation='softmax')(H)
discriminator = Model(d_input,d_V)
discriminator.compile(loss='categorical_crossentropy', optimizer=dopt)
discriminator.summary()
Below is the configuration of GAN as a whole:
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
GAN.compile(loss='categorical_crossentropy', optimizer=opt)
GAN.summary()
I think the problem is with the loss function. Try:
loss='categorical_crossentropy'
I suspect that your discriminator is still trainable while you are training the GAN. You can verify this by using discriminator.layers[-1].get_weights() to see whether the parameters change during the training process of the GAN. You should freeze the discriminator before you assemble it into the GAN:
discriminator.trainable = False
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
GAN.compile(loss='categorical_crossentropy', optimizer=opt)
GAN.summary()
see this discussion:
https://github.com/fchollet/keras/issues/4674
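For context, here is a minimal sketch of the usual alternating update loop with this setup; batch_size, n_epochs and a real-image array X_train are assumed to exist, and the labels follow the two-class softmax used in the question:
import numpy as np

for epoch in range(n_epochs):
    # 1) update the discriminator on a mix of real and generated images
    noise = np.random.uniform(-1, 1, size=(batch_size, 100))
    fake_images = generator.predict(noise)
    real_images = X_train[np.random.randint(0, X_train.shape[0], batch_size)]
    X = np.concatenate([real_images, fake_images])
    y = np.zeros((2 * batch_size, 2))
    y[:batch_size, 1] = 1           # real -> class 1
    y[batch_size:, 0] = 1           # fake -> class 0
    d_loss = discriminator.train_on_batch(X, y)

    # 2) update the generator through the combined model,
    #    in which the discriminator's weights are frozen
    noise = np.random.uniform(-1, 1, size=(batch_size, 100))
    y_gen = np.zeros((batch_size, 2))
    y_gen[:, 1] = 1                 # push the generator to make fakes look real
    g_loss = GAN.train_on_batch(noise, y_gen)
Because the discriminator was compiled before being frozen, discriminator.train_on_batch still updates its weights; only the copy used inside GAN is frozen.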

What are the problems that cause neural networks to stagnate in learning?

I was trying to see how accurately a neural network can approximate simple functions, like a scalar-valued polynomial in several variables. So I had these ideas:
Fix a polynomial of several variables, say, f(x_1,..,x_n).
Generate 50000 vectors of length n using numpy.random which will serve as training data.
Evaluate the f(x) at these points, the value will be used as label.
Make test data and labels in the same way.
Write a neural network and see how accurately it can approximate f(x) on the test set.
Here is my sample neural network implemented in TensorFlow:
import tensorflow as tf
import numpy as np
input_vector_length = int(10)
output_vector_length = int(1)
train_data_size = int(50000)
test_data_size = int(10000)
train_input_domain = [-10, 10] #Each component in an input vector is between -10 and 10
test_input_domain = [-10, 10]
iterations = 20000
batch_size = 200
regularizer = 0.01
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, input_vector_length], name="x")
y = tf.placeholder(tf.float32, shape =[None, output_vector_length], name="y")
function = tf.reduce_sum(x, 1) + 0.25*tf.pow(tf.reduce_sum(x,1), 2) + 0.025*tf.pow(tf.reduce_sum(x,1), 3)
#make train data input
train_input = (train_input_domain[1]-train_input_domain[0])*np.random.rand(train_data_size, input_vector_length) + train_input_domain[0]
#make train data label
train_label = sess.run(function, feed_dict = {x : train_input})
train_label = train_label.reshape(train_data_size, output_vector_length)
#make test data input
test_input = (test_input_domain[1]-test_input_domain[0])*np.random.rand(test_data_size, input_vector_length) + test_input_domain[0]
#make test data label
test_label = sess.run(function, feed_dict = {x : test_input})
test_label = test_label.reshape(test_data_size, output_vector_length)
def weight_variables(shape, name):
    initial = 10*tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variables(shape, name):
    initial = 10*tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def take_this_batch(data, batch_index=[]):
    A = []
    for i in range(len(batch_index)):
        A.append(data[i])
    return A
W_0 = weight_variables(shape=[input_vector_length, 10], name="W_0")
B_0 = bias_variables(shape=[10], name="W_0")
y_1 = tf.sigmoid(tf.matmul(x, W_0) + B_0)
W_1 = weight_variables(shape=[10, 20], name="W_1")
B_1 = bias_variables(shape=[20], name="B_1")
y_2 = tf.sigmoid(tf.matmul(y_1, W_1) + B_1)
W_2 = weight_variables(shape=[20,40], name="W_2")
B_2 = bias_variables(shape=[40], name="B_2")
y_3 = tf.sigmoid(tf.matmul(y_2, W_2) + B_2)
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
y_drop = tf.nn.dropout(y_3, keep_prob)
W_output = weight_variables(shape=[40, output_vector_length], name="W_output")
B_output = bias_variables(shape=[output_vector_length], name="B_output")
y_output = tf.matmul(y_drop, W_output) + B_output
weight_sum = tf.reduce_sum(tf.square(W_0)) + tf.reduce_sum(tf.square(W_1)) + tf.reduce_sum(tf.square(W_2)) + tf.reduce_sum(tf.square(W_output))
cost = tf.reduce_mean(tf.square(y - y_output)) + regularizer*(weight_sum)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
error = cost
sess.run(tf.initialize_all_variables())
with sess.as_default():
    for step in range(iterations):
        batch_index = np.random.randint(low=0, high=train_data_size, size=batch_size)
        batch_input = take_this_batch(train_input, batch_index)
        batch_label = take_this_batch(train_label, batch_index)
        train_step.run(feed_dict={x: batch_input, y: batch_label, keep_prob: 0.5})
        if step % 1000 == 0:
            current_error = error.eval(feed_dict={x: batch_input, y: batch_label, keep_prob: 1.0})
            print("step %d, Current error is %f" % (step, current_error))
    print(error.eval(feed_dict={x: test_input, y: test_label, keep_prob: 1.0}))
Simply speaking, the performance of this neural network is horrifying! My neural network has three hidden layers, of size 10, 20 and 40. The input layer has size 10, and the output layer has size 1. I used a simple L^2 cost function and regularized it with the squared weights and a regularization coefficient of 0.01.
During the training stage, I noticed that the error seems to get stuck and refuses to go down. I am wondering what could be going wrong. Thanks a lot for reading this long question. Any suggestion is appreciated.
Since you are using sigmoid as the activation function in the hidden layers, the values at these neurons are squashed into the range (0, 1). Hence, it is a good idea to normalize the input data for this network.
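A minimal sketch of that normalization, assuming the train_input/test_input arrays from the question and using the training-set statistics for both:
# Hypothetical input normalization before feeding the network.
mean = train_input.mean(axis=0)
std = train_input.std(axis=0) + 1e-8
train_input = (train_input - mean) / std
test_input = (test_input - mean) / std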
