My task is to assign each image a score from 0.0 to 1.0. For this I use models pretrained for ImageNet classification, such as VGG and SqueezeNet. On top of the output of the convolutional layers of these models, I add my own 2 or 3 dense (fully connected) layers, where the first layers have some 'x' hidden units and the last layer has only one unit. The value coming out of this last single-unit layer is what I use as the score.
I retrain all the dense layers, but after training I get a nearly constant score of around 0.75 for whatever input I send. I have a reasonably large training set of 50,000 images.
Can somebody explain where I am going wrong with this approach? Some direction on how to proceed with this type of problem would also be very helpful.
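To make the setup concrete, here is a minimal sketch of the kind of scoring head I mean (written with Keras purely for illustration; the backbone choice, layer sizes, and the sigmoid output are placeholders rather than my actual code, which follows below):
import tensorflow as tf

# Illustrative sketch only: frozen pretrained backbone + small trainable scoring head.
base = tf.keras.applications.VGG16(weights='imagenet', include_top=False,
                                   input_shape=(224, 224, 3))
base.trainable = False                                              # freeze the convolutional layers

features = tf.keras.layers.Flatten()(base.output)                   # 7*7*512 = 25088 bottleneck features
hidden = tf.keras.layers.Dense(256, activation='relu')(features)    # 'x' hidden units
score = tf.keras.layers.Dense(1, activation='sigmoid')(hidden)      # single unit, score in [0, 1]

scorer = tf.keras.Model(base.input, score)
scorer.compile(optimizer='adam', loss='mse')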
Important parts of the code:
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

def fcLayer(images, weight, bias, should_activate=True):
    fc = tf.matmul(images, weight)
    bias_add = tf.nn.bias_add(fc, bias)
    if not should_activate:
        return bias_add
    out = tf.nn.relu(bias_add)
    return out

weights = np.load('../Data/vgg16_weights.npz')

def fc_VGG(pool5_flat):  # Feed directly the bottleneck features.
    # fc6
    with tf.variable_scope('fc6'):
        fc6W = tf.get_variable('fc6_W', dtype=tf.float32, trainable=True,
                               initializer=weights['fc6_W'])
        fc6b = tf.get_variable('fc6_b', dtype=tf.float32, trainable=True,
                               initializer=weights['fc6_b'])
        fc6 = fcLayer(pool5_flat, fc6W, fc6b)

    # fc7
    with tf.variable_scope('fc7'):
        fc7W = tf.get_variable('fc7_W', dtype=tf.float32, trainable=True,
                               initializer=weights['fc7_W'])
        fc7b = tf.get_variable('fc7_b', dtype=tf.float32, trainable=True,
                               initializer=weights['fc7_b'])
        fc7 = fcLayer(fc6, fc7W, fc7b)
        fc7 = tf.cond(is_train, lambda: tf.nn.dropout(fc7, keep_prob=0.35), lambda: fc7)

    # fc8: new single-unit output layer whose value is used as the score
    with tf.variable_scope('fc8'):
        fc7_shape = int(np.prod(fc7.get_shape()[1:]))
        fc8W = tf.get_variable('fc8_W', dtype=tf.float32, trainable=True,
                               initializer=tf.random_normal((fc7_shape, new_output_units), stddev=1e-1))
        fc8b = tf.get_variable('fc8_b', dtype=tf.float32, trainable=True,
                               initializer=tf.ones((1)))
        fc8 = fcLayer(fc7, fc8W, fc8b, should_activate=False)
    return fc8

learning_rate = 0.0001

tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=(None, 25088))
y = tf.placeholder(tf.float32, shape=(None))
alpha = tf.constant(learning_rate, tf.float32)
is_train = tf.placeholder(tf.bool)

logits = fc_VGG(X)
loss = tf.reduce_mean(tf.abs(tf.subtract(logits, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=alpha).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(EPOCHS):
        current_learning_rate = learning_rate * (1 - WEIGHT_DECAY)
        num_examples = len(y_train)
        X_train_files, y_train = shuffle(X_train_files, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x_files, batch_y = X_train_files[offset: end], y_train[offset: end]
            batch_x = load_batchX()
            _, loss_val = sess.run([optimizer, loss],
                                   feed_dict={X: batch_x, y: batch_y,
                                              alpha: current_learning_rate, is_train: True})
            loss_history.append(loss_val)
Related question:
I am currently trying to train a CNN in PyTorch to predict a subject's age. The ages range from 0 to 116. I previously used the same model for gender classification with two options: male or female.
When I ported the same code to age classification, I got errors. The error was due to the last fully connected layer not returning a large enough output (in terms of matrix size, it initially returned a 50 x 2 matrix for the gender classifier, so I switched it to 50 x 117 for age classification, based on the total number of age options).
My issue now is that the training loop prints epochs with a huge loss (~3.5, whereas for gender classification it was close to zero).
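(As a rough sanity check, assuming nn.CrossEntropyLoss: a classifier that guesses uniformly at random over 117 classes sits at a loss of about ln(117) ≈ 4.76, while the 2-class gender case sits at about ln(2) ≈ 0.69, so the two loss values are not directly comparable.)
# Sanity-check baselines (assumes CrossEntropyLoss and uniform random predictions).
import math
print(math.log(117))   # ~4.76 -- random-guess loss for 117 classes
print(math.log(2))     # ~0.69 -- random-guess loss for 2 classes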
Below is my code:
DataLoader class:
import os

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self, root_directory, csv_file, image_path, transform=None):
        annotated_path = os.path.relpath(csv_file)  # Path to UTKFace Dataset and Annotations
        self.read_in_csv = pd.read_csv(annotated_path, index_col=False)
        self.image_path = os.path.join(root_directory, image_path)
        self.transform = transform
        self.labels = np.asarray(self.read_in_csv.loc[:, 'age'])

    def __getitem__(self, index):
        attr = self.labels[index]
        image_name = str(self.read_in_csv.loc[index, 'file'])
        image = Image.open(image_name)
        if self.transform:
            image = self.transform(image)
        dict = {'image': image, 'label': attr}
        return dict

    def __len__(self):
        return len(self.read_in_csv.index)
CNN Architecture:
class ConvolutionalNN(nn.Module):
    def __init__(self):
        super(ConvolutionalNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=7, stride=4),
            nn.BatchNorm2d(96),  # Number of Features
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(96, 256, kernel_size=5, padding=2),
            nn.BatchNorm2d(256),
            nn.ReLU(),  # Default = False
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.layer3 = nn.Sequential(
            nn.Conv2d(256, 384, kernel_size=3, padding=1),
            nn.BatchNorm2d(384),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.fc1 = nn.Linear(384*6*6, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 117)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)
        # print out.size()
        out = F.dropout(F.relu(self.fc1(out)))
        out = F.dropout(F.relu(self.fc2(out)))
        out = self.fc3(out)
        return out
Training Loop:
def training_loop(checkpoint=None, best=False):
    current_epoch = 1
    num_epochs = 50
    train_acc_history = []
    val_acc_history = []
    epoch_history = []
    learning_rate = 0.001
    best_val_acc = 0.0
    is_best = False

    criterion = nn.CrossEntropyLoss()
    ## Predict the Age and Gender of the Human in the Image
    optimizer = torch.optim.SGD(cnn.parameters(), lr=0.001, momentum=0.9)

    if checkpoint is not None:
        is_best = best
        current_epoch = checkpoint['epoch']
        train_acc_history = checkpoint['train_acc_history']
        val_acc_history = checkpoint['val_acc_history']
        best_val_acc = checkpoint['best_val_acc']
        optimizer.load_state_dict(checkpoint['optimizer'])
        epoch_history = checkpoint['epoch_history']

    print('Uploading our images now...')
    for epoch in range(current_epoch, num_epochs + current_epoch):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs + current_epoch))
        print('Learning Rate for this epoch: {}'.format(learning_rate))
        for i, batch in enumerate(train_loader):
            images, labels = batch['image'], batch['label']
            images = images.clone().detach()
            labels = labels.clone().detach()
            if use_gpu:
                images = images.cuda()
                labels = labels.cuda()
            optimizer.zero_grad()
            pred_labels = cnn(images)
            loss = criterion(pred_labels, labels)
            loss.backward()
            optimizer.step()
That is my code; it does not seem to be training well.
Please let me know what could be done to fix this.
When I use a linear (i.e. no) activation in the last discriminator layer together with weight clipping, the discriminator accuracy goes to 1 and the generator accuracy goes to 0. When I remove weight clipping, the generator accuracy goes to 1 and the discriminator accuracy goes to 0 after around 300 iterations. But when I use a sigmoid activation in the last discriminator layer with clipping, the generator accuracy goes to 1; without clipping, the generator loss gets stuck while the accuracies stay around 0.5, as they should.
NOTE: in all cases results are produced, and every run shows WARNING:tensorflow:Discrepancy between trainable weights and collected trainable weights, did you set model.trainable without calling model.compile after ?
The code is given below:
class WGAN():
    def __init__(self,
                 input_dim,
                 disc_filter,
                 disc_kernel,
                 disc_strides,
                 disc_dropout,
                 disc_lr,
                 gen_filter,
                 gen_kernel,
                 gen_strides,
                 gen_upsample,
                 gen_lr,
                 z_dim,
                 batch_size):
        self.input_dim = input_dim
        self.disc_filter = disc_filter
        self.disc_kernel = disc_kernel
        self.disc_strides = disc_strides
        self.disc_dropout = disc_dropout
        self.disc_lr = disc_lr
        self.gen_filter = gen_filter
        self.gen_kernel = gen_kernel
        self.gen_strides = gen_strides
        self.gen_upsample = gen_upsample
        self.gen_lr = gen_lr
        self.z_dim = z_dim
        self.batch_size = batch_size
        self.weight_init = RandomNormal(mean=0., stddev=0.02)
        self.d_losses = []
        self.g_losses = []
        self.epoch = 0

        self.Discriminator()
        self.Generator()
        self.full_model()

    def wasserstein(self, y_true, y_pred):
        return -K.mean(y_true * y_pred)

    def Discriminator(self):
        disc_input = Input(shape=self.input_dim, name='discriminator_input')
        x = disc_input
        for i in range(len(self.disc_filter)):
            x = Conv2D(filters=self.disc_filter[i], kernel_size=self.disc_kernel[i], strides=self.disc_strides[i], padding='same', name='disc_'+str(i))(x)
            x = LeakyReLU()(x)
            x = Dropout(self.disc_dropout)(x)
            x = BatchNormalization()(x)
        x = Flatten()(x)
        disc_output = Dense(1, activation='sigmoid', kernel_initializer=self.weight_init)(x)
        self.discriminator = Model(disc_input, disc_output)

    def Generator(self):
        gen_input = Input(shape=(self.z_dim,), name='generator_input')
        x = gen_input
        x = Dense(7*7*self.batch_size, kernel_initializer=self.weight_init)(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)
        x = Reshape(target_shape=(7, 7, self.batch_size))(x)

        for i in range(len(self.gen_filter)):
            if self.gen_upsample[i] == 2:
                x = UpSampling2D(size=self.gen_upsample[i], name='upsample_'+str(i/2))(x)
                x = Conv2D(filters=self.gen_filter[i], kernel_size=self.gen_kernel[i], strides=self.gen_strides[i], padding='same', name='gen_'+str(i))(x)
            else:
                x = Conv2DTranspose(filters=self.gen_filter[i], kernel_size=self.gen_kernel[i], strides=self.gen_strides[i], padding='same', name='gen_'+str(i))(x)

            if i < len(self.gen_filter) - 1:
                x = BatchNormalization()(x)
                x = LeakyReLU()(x)
            else:
                x = Activation("tanh")(x)

        gen_output = x
        self.generator = Model(gen_input, gen_output)

    def set_trainable(self, model, val):
        model.trainable = val
        for l in model.layers:
            l.trainable = val

    def full_model(self):
        ### COMPILE DISCRIMINATOR
        self.discriminator.compile(optimizer=Adam(self.disc_lr), loss=self.wasserstein, metrics=['accuracy'])

        ### COMPILE THE FULL GAN
        self.set_trainable(self.discriminator, False)
        self.discriminator.compile(optimizer=Adam(self.disc_lr), loss=self.wasserstein, metrics=['accuracy'])

        model_input = Input(shape=(self.z_dim,), name='model_input')
        model_output = self.discriminator(self.generator(model_input))
        self.model = Model(model_input, model_output)

        self.model.compile(optimizer=Adam(self.disc_lr), loss=self.wasserstein, metrics=['accuracy'])
        self.set_trainable(self.discriminator, True)

    def train_generator(self, batch_size):
        valid = np.ones((batch_size, 1))
        noise = np.random.normal(0, 1, (batch_size, self.z_dim))
        return self.model.train_on_batch(noise, valid)

    def train_discriminator(self, x_train, batch_size, using_generator):
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        if using_generator:
            true_imgs = next(x_train)[0]
            if true_imgs.shape[0] != batch_size:
                true_imgs = next(x_train)[0]
        else:
            idx = np.random.randint(0, x_train.shape[0], batch_size)
            true_imgs = x_train[idx]

        noise = np.random.normal(0, 1, (batch_size, self.z_dim))
        gen_imgs = self.generator.predict(noise)

        d_loss_real, d_acc_real = self.discriminator.train_on_batch(true_imgs, valid)
        d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake)
        d_loss = 0.5 * (d_loss_real + d_loss_fake)
        d_acc = 0.5 * (d_acc_real + d_acc_fake)

        for l in self.discriminator.layers:
            weights = l.get_weights()
            weights = [np.clip(w, -0.01, 0.01) for w in weights]
            l.set_weights(weights)

        return [d_loss, d_loss_real, d_loss_fake, d_acc, d_acc_real, d_acc_fake]

    def train(self, x_train, batch_size, epochs, print_every_n_batches=50, using_generator=False):
        for epoch in range(self.epoch, self.epoch + epochs):
            d = self.train_discriminator(x_train, batch_size, using_generator)
            g = self.train_generator(batch_size)

            if self.epoch % print_every_n_batches == 0:
                print("%d [D loss: (%.3f)(R %.3f, F %.3f)] [D acc: (%.3f)(%.3f, %.3f)] [G loss: %.3f] [G acc: %.3f]" % (epoch, d[0], d[1], d[2], d[3], d[4], d[5], g[0], g[1]))

            self.d_losses.append(d)
            self.g_losses.append(g)
            self.epoch += 1
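For reference, weight clipping can also be expressed as a Keras weight constraint attached to each critic layer instead of looping over the layers after every batch. A minimal sketch (the tf.keras import paths and the clip value of 0.01 are assumptions, not the code above):
# Sketch only: WGAN-style weight clipping as a Keras constraint.
from tensorflow.keras.constraints import Constraint
import tensorflow.keras.backend as K

class ClipConstraint(Constraint):
    """Clip every weight of the layer to [-clip_value, clip_value] after each update."""
    def __init__(self, clip_value=0.01):
        self.clip_value = clip_value

    def __call__(self, weights):
        return K.clip(weights, -self.clip_value, self.clip_value)

    def get_config(self):
        return {'clip_value': self.clip_value}

# Hypothetical usage inside the discriminator:
# x = Conv2D(64, kernel_size=3, padding='same',
#            kernel_constraint=ClipConstraint(0.01))(x)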
Can you help me add a new BiLSTM layer on top of the BERT output?
output_layer = model.get_pooled_output()

# Here, we make alterations to add the extra features
output_layer_extra_features = tf.concat([output_layer, tf.convert_to_tensor(extra_features, dtype=tf.float32)],
                                        axis=1)

hidden_size = output_layer_extra_features.shape[-1].value

output_weights = tf.get_variable(
    "output_weights", [num_labels, hidden_size],
    initializer=tf.truncated_normal_initializer(stddev=0.02))

output_bias = tf.get_variable(
    "output_bias", [num_labels], initializer=tf.zeros_initializer())

with tf.variable_scope("loss"):
    if is_training:
        # I.e., 0.1 dropout
        output_layer_extra_features = tf.nn.dropout(output_layer_extra_features, keep_prob=0.9)

    logits = tf.matmul(output_layer_extra_features, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
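One way I imagine doing this is to run the BiLSTM over BERT's token-level output rather than the pooled vector. A rough sketch (assuming the standard BERT modeling code, where model.get_sequence_output() returns a [batch, seq_len, hidden] tensor; the cell size and the mean pooling are arbitrary choices of mine):
# Rough, untested sketch -- layer sizes and pooling are assumptions.
sequence_output = model.get_sequence_output()        # [batch_size, seq_len, hidden_size]

lstm_units = 128                                      # hypothetical size
fw_cell = tf.nn.rnn_cell.LSTMCell(lstm_units)
bw_cell = tf.nn.rnn_cell.LSTMCell(lstm_units)
(out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
    fw_cell, bw_cell, sequence_output, dtype=tf.float32)

bilstm_output = tf.concat([out_fw, out_bw], axis=-1)  # [batch_size, seq_len, 2*lstm_units]
bilstm_pooled = tf.reduce_mean(bilstm_output, axis=1) # [batch_size, 2*lstm_units]

# bilstm_pooled could then replace output_layer before concatenating the extra features.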
The following RNN model decreases the loss for the first one or two epochs and then fluctuates around a cost of 6. This looks as if the model is essentially random and not learning at all. I varied the learning rate from 0.1 to 0.0001 and it didn't help. The data is fed through an input pipeline, which worked fine with other models, so the functions that extract the labels and images are not shown here. I have looked at this many times but still couldn't find what's wrong with it. Here's the code:
n_steps = 224
n_inputs = 224
learning_rate = 0.00015
batch_size = 256  # n_neurons
epochs = 100
num_batch = int(len(trainnames)/batch_size)
keep_prob = tf.placeholder(tf.float32)

# TRAIN QUEUE
train_queue = tf.RandomShuffleQueue(len(trainnames)*1.5, 0, [tf.string, tf.float32], shapes=[[], [num_labels,]])
enqueue_train = train_queue.enqueue_many([trainnames, train_label])
train_image, train_image_label = train_queue.dequeue()
train_image = read_image_file(train_image)

train_batch, train_label_batch = tf.train.batch(
    [train_image, train_image_label],
    batch_size=batch_size,
    num_threads=1,
    capacity=10*batch_size,
    enqueue_many=False,
    shapes=[[224, 224], [num_labels,]],
    allow_smaller_final_batch=True
)
train_close = train_queue.close()

def RNN(inputs, reuse):
    with tf.variable_scope('cells', reuse=reuse):
        basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=batch_size, reuse=reuse)
    with tf.variable_scope('rnn'):
        outputs, states = tf.nn.dynamic_rnn(basic_cell, inputs, dtype=tf.float32)
    fc_drop = tf.nn.dropout(states, keep_prob)
    logits = tf.contrib.layers.fully_connected(fc_drop, num_labels, activation_fn=None)
    return logits

# Training
with tf.name_scope("cost_function") as scope:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=train_label_batch, logits=RNN(train_batch, reuse=None)))
    train_step = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(cost)

cost_summary = tf.summary.scalar("cost_function", cost)
file_writer = tf.summary.FileWriter(logdir)

# Session
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord, start=True)

    step = 0
    for epoch in range(epochs):
        sess.run(enqueue_train)
        for batch in range(num_batch):
            if step % 100 == 0:
                summary_str = cost_summary.eval(feed_dict={keep_prob: 1.0})
                file_writer.add_summary(summary_str, step)
            else:
                sess.run(train_step, feed_dict={keep_prob: 0.5})
            step += 1

    sess.run(train_close)
    coord.request_stop()
    coord.join(threads)
    file_writer.close()
I am using AlexNet to classify my images into 2 groups. I feed the images to the model in batches of 60, and the loss I get after every batch is 6 to 7 digits large (e.g. 1428529.0). I am confused about why my loss is such a large value, because on the MNIST dataset the loss I got was very small in comparison. Can anyone explain why I am getting such a large loss value?
Thanks in advance ;-)
Here is the code:
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os

img_size = 227
num_channels = 1
img_flat_size = img_size * img_size
num_classes = 2
drop = 0.5

x = tf.placeholder(tf.float32, [None, img_flat_size])
y = tf.placeholder(tf.float32, [None, num_classes])
drop_p = tf.placeholder(tf.float32)

def new_weight(shape):
    return tf.Variable(tf.random_normal(shape))

def new_bias(size):
    return tf.Variable(tf.random_normal(size))

def new_conv(x, num_input_channels, filter_size, num_filters, stride, padd="SAME"):
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weight = new_weight(shape)
    bias = new_bias([num_filters])
    conv = tf.nn.conv2d(x, weight, strides=[1, stride, stride, 1], padding=padd)
    conv = tf.nn.bias_add(conv, bias)
    return tf.nn.relu(conv)

def new_max_pool(x, k, stride):
    max_pool = tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding="VALID")
    return max_pool

def flatten_layer(layer):
    layer_shape = layer.get_shape()
    num_features = layer_shape[1:4].num_elements()
    flat_layer = tf.reshape(layer, [-1, num_features])
    return flat_layer, num_features

def new_fc_layer(x, num_input, num_output):
    weight = new_weight([num_input, num_output])
    bias = new_bias([num_output])
    fc_layer = tf.matmul(x, weight) + bias
    return fc_layer

def lrn(x, radius, alpha, beta, bias=1.0):
    """Create a local response normalization layer."""
    return tf.nn.local_response_normalization(x, depth_radius=radius,
                                              alpha=alpha, beta=beta,
                                              bias=bias)

def AlexNet(x, drop, img_size):
    x = tf.reshape(x, shape=[-1, img_size, img_size, 1])

    conv1 = new_conv(x, num_channels, 11, 96, 4, "VALID")
    max_pool1 = new_max_pool(conv1, 3, 2)
    norm1 = lrn(max_pool1, 2, 2e-05, 0.75)

    conv2 = new_conv(norm1, 96, 5, 256, 1)
    max_pool2 = new_max_pool(conv2, 3, 2)
    norm2 = lrn(max_pool2, 2, 2e-05, 0.75)

    conv3 = new_conv(norm2, 256, 3, 384, 1)
    conv4 = new_conv(conv3, 384, 3, 384, 1)
    conv5 = new_conv(conv4, 384, 3, 256, 1)
    max_pool3 = new_max_pool(conv5, 3, 2)

    layer, num_features = flatten_layer(max_pool3)

    fc1 = new_fc_layer(layer, num_features, 4096)
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, drop)

    fc2 = new_fc_layer(fc1, 4096, 4096)
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, drop)

    out = new_fc_layer(fc2, 4096, 2)
    return out  # , tf.nn.softmax(out)

def read_and_decode(tfrecords_file, batch_size):
    '''read and decode tfrecord file, generate (image, label) batches
    Args:
        tfrecords_file: the directory of tfrecord file
        batch_size: number of images in each batch
    Returns:
        image: 4D tensor - [batch_size, width, height, channel]
        label: 1D tensor - [batch_size]
    '''
    # make an input queue from the tfrecord file
    filename_queue = tf.train.string_input_producer([tfrecords_file])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    img_features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(img_features['image_raw'], tf.uint8)

    ##########################################################
    # you can put data augmentation here, I didn't use it
    ##########################################################

    # all the images of notMNIST are 28*28, you need to change the image size if you use other dataset.
    image = tf.reshape(image, [227, 227])
    label = tf.cast(img_features['label'], tf.int32)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=1,
                                              capacity=6000)
    return tf.reshape(image_batch, [batch_size, 227*227*1]), tf.reshape(label_batch, [batch_size])

pred = AlexNet(x, drop_p, img_size)  # pred

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimiser = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

cost = tf.summary.scalar('loss', loss)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    merge_summary = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('./AlexNet', graph=tf.get_default_graph())

    tf_record_file = 'train.tfrecords'
    x_val, y_val = read_and_decode(tf_record_file, 20)
    y_val = tf.one_hot(y_val, depth=2, on_value=1, off_value=0)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    x_val = x_val.eval()
    y_val = y_val.eval()

    epoch = 2
    for i in range(epoch):
        _, summary = sess.run([optimiser, merge_summary], feed_dict={x: x_val, y: y_val, drop_p: drop})
        summary_writer.add_summary(summary, i)
        loss_a, accu = sess.run([loss, accuracy], feed_dict={x: x_val, y: y_val, drop_p: 1.0})
        print "Epoch " + str(i+1) + ', Minibatch Loss = ' + \
              "{:.6f}".format(loss_a) + ', Training Accuracy = ' + \
              '{:.5f}'.format(accu)

    print "Optimization Finished!"

    tf_record_file1 = 'test.tfrecords'
    x_v, y_v = read_and_decode(tf_record_file1, 10)
    y_v = tf.one_hot(y_v, depth=2, on_value=1, off_value=0)

    coord1 = tf.train.Coordinator()
    threads1 = tf.train.start_queue_runners(coord=coord1)

    x_v = sess.run(x_v)
    y_v = sess.run(y_v)

    print "Testing Accuracy : "
    print sess.run(accuracy, feed_dict={x: x_v, y: y_v, drop_p: 1.0})

    coord.request_stop()
    coord.join(threads)
    coord1.request_stop()
    coord1.join(threads1)
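One detail worth noting when reading the code above: new_weight uses tf.random_normal with its default standard deviation of 1.0, so the 4096-wide fully connected layers start with very large activations, and softmax cross-entropy on such large logits can easily reach values of this magnitude. Purely as an illustration (the 0.01 here is an arbitrary choice, not part of the original code), a scaled-down initializer would look like:
# Illustrative sketch only: same helper as new_weight above, but with a small explicit stddev.
import tensorflow as tf

def new_weight_scaled(shape, stddev=0.01):
    return tf.Variable(tf.random_normal(shape, stddev=stddev))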
Take a look at what a confusion matrix is; it is a performance evaluator. In addition, you should compare your precision against your recall. Precision is the accuracy of your positive predictions, and recall is the ratio of positive instances that are correctly detected by the classifier. By combining precision and recall you get the F1 score, which is key to evaluating the problems with your model.
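For example, all of these quantities can be computed directly with scikit-learn (toy labels, purely for illustration):
# Illustrative only: computing the metrics mentioned above with scikit-learn.
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

y_true = [0, 1, 1, 0, 1, 0, 1, 1]   # ground-truth labels (toy data)
y_pred = [0, 1, 0, 0, 1, 1, 1, 1]   # model predictions (toy data)

print(confusion_matrix(y_true, y_pred))   # rows: true class, columns: predicted class
print(precision_score(y_true, y_pred))    # accuracy of the positive predictions
print(recall_score(y_true, y_pred))       # fraction of actual positives that were found
print(f1_score(y_true, y_pred))           # harmonic mean of precision and recall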
I would suggest you pick up the text Hands-On Machine Learning with Scikit-Learn and TensorFlow. It is a truly comprehensive book and covers what I describe above in more detail.