I'm having the following problem: I have four embedding matrices and want to get the gradients of my loss function with respect to those matrices.
When I run the session to return the values for the gradients, two of the returned objects are of type tensorflow.python.framework.ops.IndexedSlicesValue, and the other two are numpy arrays. For the numpy arrays, their shape corresponds to the shape of their corresponding embedding matrix, but I'm having problems with the IndexedSlicesValue objects.
If I call .values on one of those objects, I get an array whose shape does not match that of the gradient: the shape of the embedding matrix is [22, 30], but calling .values on the IndexedSlicesValue object gives me an array of shape [4200, 30]. (My input tensor has dimensions [30, 20, 7], and the product of those dimensions equals 4200; I'm not sure if that is relevant.)
The IndexedSlicesValue object has an attribute called dense_shape, which is an array holding the dimensions the gradient should have, i.e. array([22, 30]) is the value returned by .dense_shape.
I don't really understand the docs here: https://www.tensorflow.org/versions/r0.7/api_docs/python/state_ops.html#IndexedSlices
It says:
An IndexedSlices is typically used to represent a subset of a
larger tensor dense of shape [LARGE0, D1, .. , DN] where LARGE0 >> D0.
The values in indices are the indices in the first dimension of the
slices that have been extracted from the larger tensor.
So this array of shape (4200,30) is extracted from an array corresponding to an even larger, dense tensor?
What exactly is the gradient in this IndexedSlicesValue object and why does tensorflow automatically use this type for some gradients returned by tf.gradients()?
Here is my code:
import tensorflow as tf

input_tensor = tf.placeholder(tf.int32, shape = [None, memory_size, max_sent_length], name = 'Input')
q_tensor = tf.placeholder(tf.int32, shape = [None,max_sent_length], name = 'Question')
a_tensor = tf.placeholder(tf.float32, shape = [None,V+1], name = 'Answer')
# Embedding matrices
A_prior = tf.get_variable(name = 'A', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
A = tf.concat(0,[tf.zeros(shape = tf.pack([1,tf.shape(A_prior)[1]])),tf.slice(A_prior,[1,0],[-1,-1])])
B = tf.get_variable(name = 'B', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
C = tf.get_variable(name = 'C', shape = [V+1,d], initializer = tf.random_normal_initializer(stddev = 0.1))
W = tf.get_variable(name = 'W', shape = [V+1,d], initializer= tf.random_normal_initializer(stddev = 0.1))
embeddings = tf.reduce_sum(tf.nn.embedding_lookup(A,input_tensor),2)
u = tf.reshape(tf.reduce_sum(tf.nn.embedding_lookup(B,q_tensor),1),[-1,1,d])
test = tf.transpose(embeddings, perm = [0,2,1])
test_batch_mul = tf.squeeze(tf.batch_matmul(u,test))
cond = tf.not_equal(test_batch_mul,0.0)
tt = tf.fill(tf.shape(test_batch_mul),-1000.0)
softmax_in = tf.select(cond, test_batch_mul, tt)
p_values = tf.nn.softmax(softmax_in)
c_values = tf.reduce_sum(tf.nn.embedding_lookup(C,input_tensor),2)
o = tf.squeeze(tf.batch_matmul(tf.expand_dims(p_values,1),c_values))
a_pred = tf.nn.softmax(tf.matmul(tf.squeeze(u)+o,tf.transpose(W)))
loss = tf.nn.softmax_cross_entropy_with_logits(a_pred, a_tensor, name = 'loss')
cost = tf.reduce_mean(loss)
global_step = tf.Variable(0,name = 'global_step', trainable= False)
#optimizer = tf.train.MomentumOptimizer(0.01,0.9)
vars_list = tf.trainable_variables()
grads = tf.gradients(cost, vars_list)
#train_op = optimizer.minimize( cost, global_step, vars_list, name = 'train_op')
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
input_feed = {input_tensor : phrases, q_tensor : questions, a_tensor : answers}
grad_results = sess.run(grads, feed_dict = input_feed)
I had the same issue; apparently, IndexedSlices objects are automatically created for the gradients of some embedding matrices.
If you want to access the gradients of the embedding's trainable variables, you need to convert the IndexedSlices to a tensor, simply by using:
tf.convert_to_tensor(gradients_of_the_embedding_layer)
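If you just want to inspect the gradient values on the Python side, you can also densify the fetched IndexedSlicesValue yourself. Here is a minimal numpy sketch (assuming grad_results[i] is one of the IndexedSlicesValue objects returned by sess.run above):
import numpy as np

def indexed_slices_to_dense(slices_value):
    # Scatter-add the slice rows into a zero array of the full dense shape;
    # rows that appear several times in .indices are summed, matching how the
    # sparse gradient is accumulated when it is densified.
    dense = np.zeros(slices_value.dense_shape, dtype=slices_value.values.dtype)
    np.add.at(dense, slices_value.indices, slices_value.values)
    return dense

# e.g. dense_grad = indexed_slices_to_dense(grad_results[0])
The result then has the same shape as the corresponding embedding matrix, e.g. (22, 30) in your case.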
Related
import numpy as np
import cv2
import torch
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet50, ResNet50_Weights
from torchvision.models.feature_extraction import create_feature_extractor
# Data processing
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )])
image_path = './data/test_images/anemone.jpg'
image = Image.open(image_path).convert('RGB')
img_processed = preprocess(image)
batch_img_cat_tensor = torch.unsqueeze(img_processed, 0)
# Model initialization
resnet50_model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
# Eval model for predictions
resnet50_model.eval()
# Creating feature extractor (Detailed example here: https://pytorch.org/blog/FX-feature-extraction-torchvision/)
feature_extractor = create_feature_extractor(resnet50_model,
return_nodes=['layer4.2.conv3', 'fc'])
# Forward pass
out = feature_extractor(batch_img_cat_tensor)
pred = torch.argmax(out['fc'])
# Transforming last conv output to numpy and reshaping it so that the channels would be last
last_conv_output = torch.squeeze(out['layer4.2.conv3'])
last_conv_output = torch.reshape(last_conv_output, (7, 7, -1))
last_conv_output = last_conv_output.detach().numpy()
last_conv_output = last_conv_output.astype(np.uint8)
# Calculating the upscale factors for last conv output
width_factor = int(image.size[0] / last_conv_output.shape[0])
height_factor = int(image.size[1] / last_conv_output.shape[1])
# Getting the shapes of the last conv output
last_conv_w, last_conv_h, n_channels = last_conv_output.shape
# Calculate the upscaled output dimensions
upscaled_h = last_conv_h * height_factor
upscaled_w = last_conv_w * width_factor
# Upscaling the last_conv_output so that it could be "masked" with original image
upsampled_last_conv_output = []
for x in range(0, n_channels, 512):
    # cv2.resize handles at most 512 channels at once, so resize in chunks
    upsampled_last_conv_output.append(cv2.resize(last_conv_output[:, :, x:x+512], (upscaled_w, upscaled_h), cv2.INTER_CUBIC))
upsampled_last_conv_output = np.concatenate(upsampled_last_conv_output, axis=2)
# Getting the weights of the predicted class
last_layer_weights = resnet50_model.fc.weight.T
last_layer_weights_for_pred = last_layer_weights[:, pred]
# Dot multiplying the upsampled_last_conv_output with last_layer_weights_for_pred
upsampled_last_conv_output = upsampled_last_conv_output.reshape((-1, 2048))
heat_map = np.dot(upsampled_last_conv_output,
last_layer_weights_for_pred.detach().numpy()).reshape(upscaled_h, upscaled_w)
# Plotting the results
fig, ax = plt.subplots()
ax.imshow(image)
ax.imshow(heat_map, cmap='jet', alpha=0.5)
ax.set_title(str(pred.item()))  # 'prediction' was undefined; use the argmax index computed above
I have followed the tutorial from here: https://www.youtube.com/watch?v=GiyldmoYe_M&t=665s&ab_channel=DigitalSreeni
The main problem is the feature map I get (the image is not included here): the model seems to react to multiple areas of the image, and no matter what image I use, the biggest reaction is always in the middle.
PS: If you think this question should be posted on the AI Stack Exchange, please let me know.
I have found the error I made. After computing
heat_map = np.dot(upsampled_last_conv_output, last_layer_weights_for_pred.detach().numpy()).reshape(upscaled_h, upscaled_w)
I also had to apply the following:
heat_map = heat_map - np.min(heat_map)
heat_map = heat_map / np.max(heat_map)
Since I normalized the image, the generated heatmap was also normalized, so I needed to "denormalize" it back to its original values.
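For completeness, this is roughly how I wrap that fix now; a small sketch reusing the heat_map variable from the code above (the eps is just a guard I added against an all-constant map):
def normalize_heat_map(heat_map, eps=1e-8):
    # min-max normalize to [0, 1] so the 'jet' colormap spans the full range
    heat_map = heat_map - np.min(heat_map)
    return heat_map / (np.max(heat_map) + eps)

heat_map = normalize_heat_map(heat_map)  # then overlay it with imshow as before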
I need to implement a multi-label image classification model in PyTorch. However, my data is not balanced, so I used the WeightedRandomSampler in PyTorch to create a custom dataloader. But when I iterate through the custom dataloader, I get the error: IndexError: list index out of range.
I implemented the following code using this link: https://discuss.pytorch.org/t/balanced-sampling-between-classes-with-torchvision-dataloader/2703/3?u=surajsubramanian
import numpy as np
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

def make_weights_for_balanced_classes(images, nclasses):
    count = [0] * nclasses
    for item in images:
        count[item[1]] += 1
    weight_per_class = [0.] * nclasses
    N = float(sum(count))
    for i in range(nclasses):
        weight_per_class[i] = N / float(count[i])
    weight = [0] * len(images)
    for idx, val in enumerate(images):
        weight[idx] = weight_per_class[val[1]]
    return weight
weights = make_weights_for_balanced_classes(train_dataset.imgs, len(full_dataset.classes))
weights = torch.DoubleTensor(weights)
sampler = WeightedRandomSampler(weights, len(weights))
train_loader = DataLoader(train_dataset, batch_size=4,sampler = sampler, pin_memory=True)
Based on the answer in https://stackoverflow.com/a/60813495/10077354, the following is my updated code. But even then, when I create a dataloader with loader = DataLoader(full_dataset, batch_size=4, sampler=sampler), len(loader) returns 1.
class_counts = [1691, 743, 2278, 1271]
num_samples = np.sum(class_counts)
labels = [tag for _,tag in full_dataset.imgs]
class_weights = [num_samples/class_counts[i] for i in range(len(class_counts))]
weights = [class_weights[labels[i]] for i in range(num_samples)]
sampler = WeightedRandomSampler(torch.DoubleTensor(weights), num_samples)
Thanks a lot in advance!
I included a utility function based on the accepted answer below:
def sampler_(dataset):
    dataset_counts = imageCount(dataset)
    num_samples = sum(dataset_counts)
    labels = [tag for _, tag in dataset]
    class_weights = [num_samples / dataset_counts[i] for i in range(n_classes)]
    weights = [class_weights[labels[i]] for i in range(num_samples)]
    sampler = WeightedRandomSampler(torch.DoubleTensor(weights), int(num_samples))
    return sampler
The imageCount function finds the number of images of each class in the dataset. Each row in the dataset contains the image and the class, so we take the second element of the tuple into consideration.
def imageCount(dataset):
    image_count = [0] * n_classes
    for img in dataset:
        image_count[img[1]] += 1
    return image_count
That code looks a bit complex... You can try the following:
#Let there be 9 samples and 1 sample in class 0 and 1 respectively
class_counts = [9.0, 1.0]
num_samples = sum(class_counts)
labels = [0, 0,..., 0, 1] #corresponding labels of samples
class_weights = [num_samples/class_counts[i] for i in range(len(class_counts))]
weights = [class_weights[labels[i]] for i in range(int(num_samples))]
sampler = WeightedRandomSampler(torch.DoubleTensor(weights), int(num_samples))
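Once you have the sampler, you pass it to the DataLoader instead of shuffle=True (the two options are mutually exclusive). A minimal usage sketch, assuming the train_dataset from the question:
from torch.utils.data import DataLoader

loader = DataLoader(train_dataset, batch_size=4, sampler=sampler, pin_memory=True)
for images, targets in loader:
    pass  # training step; batches are now drawn according to the balanced weights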
Here is an alternative solution:
import numpy as np
from torch.utils.data.sampler import WeightedRandomSampler
counts = np.bincount(y)
labels_weights = 1. / counts
weights = labels_weights[y]
WeightedRandomSampler(weights, len(weights))
where y is a list of labels corresponding to each sample; it has shape (n_samples,) and the labels are encoded as integers in [0, ..., n_classes - 1].
The weights won't add up to 1, which is OK according to the official docs.
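If you want to check that the sampler really balances the classes, you can count the labels it draws. A quick sanity-check sketch, assuming y is the label list from above:
from collections import Counter
from torch.utils.data.sampler import WeightedRandomSampler

sampler = WeightedRandomSampler(weights, len(weights))
sampled_labels = [y[i] for i in sampler]  # iterating the sampler yields dataset indices
print(Counter(sampled_labels))            # the per-class counts should now be roughly equal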
The neural network I trained is the critic network for deep reinforcement learning. The problem is that when one of the layers' activations is set to relu or elu, the output becomes NaN after some training steps, while the output is normal if the activation is tanh. The code is as follows (based on tensorflow):
with tf.variable_scope('critic'):
    self.batch_size = tf.shape(self.tfs)[0]
    l_out_x = denseWN(x=self.tfs, name='l3', num_units=self.cell_size, nonlinearity=tf.nn.tanh, trainable=True, shape=[det*step*2, self.cell_size])
    l_out_x1 = denseWN(x=l_out_x, name='l3_1', num_units=32, trainable=True, nonlinearity=tf.nn.tanh, shape=[self.cell_size, 32])
    l_out_x2 = denseWN(x=l_out_x1, name='l3_2', num_units=32, trainable=True, nonlinearity=tf.nn.tanh, shape=[32, 32])
    l_out_x3 = denseWN(x=l_out_x2, name='l3_3', num_units=32, trainable=True, shape=[32, 32])
    self.v = denseWN(x=l_out_x3, name='l4', num_units=1, trainable=True, shape=[32, 1])
Here is the code for basic layer construction:
def get_var_maybe_avg(var_name, ema, trainable, shape):
    if var_name == 'V':
        initializer = tf.contrib.layers.xavier_initializer()
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=shape)
    if var_name == 'g':
        initializer = tf.constant_initializer(1.0)
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=[shape[-1]])
    if var_name == 'b':
        initializer = tf.constant_initializer(0.1)
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=[shape[-1]])
    if ema is not None:
        v = ema.average(v)
    return v

def get_vars_maybe_avg(var_names, ema, trainable, shape):
    vars = []
    for vn in var_names:
        vars.append(get_var_maybe_avg(vn, ema, trainable=trainable, shape=shape))
    return vars

def denseWN(x, name, num_units, trainable, shape, nonlinearity=None, ema=None, **kwargs):
    with tf.variable_scope(name):
        V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema, trainable=trainable, shape=shape)
        x = tf.matmul(x, V)
        scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
        x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])
        if nonlinearity is not None:
            x = nonlinearity(x)
        return x
Here is the code to train the network:
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=0.005, scope=None)
self.weights = tf.trainable_variables()
regularization_penalty_critic = tf.contrib.layers.apply_regularization(l1_regularizer, self.weights)
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.optimizer = tf.train.RMSPropOptimizer(0.0001, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.closs)
self.grads_and_vars = [[tf.clip_by_norm(grad,5), var] for grad, var in self.grads_and_vars if grad is not None]
self.ctrain_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
It looks like you're facing the problem of exploding gradients with the ReLU activation function (that's what the NaN means: very large activations). There are several techniques to deal with this issue, e.g. batch normalization (which changes the network architecture) or careful variable initialization (that's what I'd try first).
You are using Xavier initialization for the V variables in the different layers, which indeed works fine for the logistic sigmoid activation (see the paper by Xavier Glorot and Yoshua Bengio) or, in other words, tanh.
The preferred initialization strategy for the ReLU activation function (and its variants, including ELU) is He initialization. In tensorflow it's implemented via tf.variance_scaling_initializer:
initializer = tf.variance_scaling_initializer()
v = tf.get_variable(name=var_name, initializer=initializer, ...)
You might also want to try smaller values for the b and g variables, but it's hard to say the exact values just by looking at your model. If nothing helps, consider adding batch-norm layers to your model to control the activation distributions.
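For reference, here is a sketch of how that change could slot into your get_var_maybe_avg helper; I pass scale=2.0 explicitly so the variance matches He initialization for ReLU/ELU (the 'g' and 'b' branches stay as in your code):
def get_var_maybe_avg(var_name, ema, trainable, shape):
    if var_name == 'V':
        # He (MSRA) initialization: variance scaled by 2 / fan_in, which keeps
        # activation variance roughly constant through ReLU/ELU layers
        initializer = tf.variance_scaling_initializer(scale=2.0, mode='fan_in')
        v = tf.get_variable(name=var_name, initializer=initializer, trainable=trainable, shape=shape)
    elif var_name == 'g':
        v = tf.get_variable(name=var_name, initializer=tf.constant_initializer(1.0), trainable=trainable, shape=[shape[-1]])
    else:  # 'b'
        v = tf.get_variable(name=var_name, initializer=tf.constant_initializer(0.1), trainable=trainable, shape=[shape[-1]])
    if ema is not None:
        v = ema.average(v)
    return v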
I trained a model with the purpose of generating sentences as follows:
As a training example I feed two sequences: x, which is a sequence of characters, and y, which is the same sequence shifted by one. The model is based on an LSTM and is created with tensorflow.
My question is: since the model takes input sequences of a certain size (50 in my case), how can I make predictions giving it only a single character as a seed? I've seen in some examples that after training they generate sentences by simply feeding a single character.
Here is my code:
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [batch_size, truncated_backprop], name='x')
    y = tf.placeholder(tf.int32, [batch_size, truncated_backprop], name='y')

with tf.name_scope('weights'):
    W = tf.Variable(np.random.rand(n_hidden, num_classes), dtype=tf.float32)
    b = tf.Variable(np.random.rand(1, num_classes), dtype=tf.float32)

inputs_series = tf.split(x, truncated_backprop, 1)
labels_series = tf.unstack(y, axis=1)

with tf.name_scope('LSTM'):
    cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, state_is_tuple=True)
    cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)
    cell = tf.contrib.rnn.MultiRNNCell([cell] * n_layers)

states_series, current_state = tf.contrib.rnn.static_rnn(cell, inputs_series, dtype=tf.float32)

logits_series = [tf.matmul(state, W) + b for state in states_series]
prediction_series = [tf.nn.softmax(logits) for logits in logits_series]

losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
          for logits, labels in zip(logits_series, labels_series)]
total_loss = tf.reduce_mean(losses)

train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
I suggest you use dynamic_rnn instead of static_rnn, which builds the unrolled computation at execution time and allows you to feed inputs of any length. Your input placeholder would be
x = tf.placeholder(tf.float32, [batch_size, None, features], name='x')
Next, you'll need a way to input your own initial state into the network. You can do that by passing the initial_state parameter to dynamic_rnn, like:
initialstate = cell.zero_state(batch_size, tf.float32)
outputs, current_state = tf.nn.dynamic_rnn(cell,
                                           inputs,
                                           initial_state=initialstate)
With that, in order to generate text from a single character you can feed the graph 1 character at a time, passing in the previous character and state each time, like:
prompt = 's'  # beginning character, whatever
inp = one_hot(prompt)  # preprocessing, as you probably want to feed one-hot vectors
state = None
while True:
    if state is None:
        feed = {x: [[inp]]}
    else:
        feed = {x: [[inp]], initialstate: state}
    out, state = sess.run([outputs, current_state], feed_dict=feed)
    inp = process(out)  # extract the predicted character from out and one-hot it
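The one_hot and process calls above are placeholders. A rough sketch of what they might look like for a character vocabulary (char_to_ix / ix_to_char are assumed to come from your preprocessing, and I assume out holds the softmax probabilities for the last time step rather than the raw RNN states):
import numpy as np

def one_hot(ch):
    # character -> one-hot vector over the vocabulary
    vec = np.zeros(num_classes, dtype=np.float32)
    vec[char_to_ix[ch]] = 1.0
    return vec

def process(out):
    # out: probabilities over the vocabulary for the last time step;
    # sampling gives more varied text than a plain argmax
    probs = np.asarray(out).reshape(-1)
    next_ix = np.random.choice(len(probs), p=probs / probs.sum())
    return one_hot(ix_to_char[next_ix])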
I have the task of assigning a score from 0.0 to 1.0 to images. For this I have made use of pre-trained models from the ImageNet classification competition, such as VGG and SqueezeNet. On top of the output of the convolutional layers of these models, I have added my own 2 or 3 dense (fully connected) layers, with the first few layers having a certain number x of hidden units and the last layer having only one unit. The value coming from this last layer (with one unit) is what I use as the score.
I am retraining all of the dense layers, but after training I get a constant score of around 0.75 for whatever input I send. I have a good training set of 50,000 images.
Can somebody please explain where I am going wrong in this approach? Also, some direction on how to proceed with this type of problem would be very helpful.
Important parts of the code:
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def fcLayer(images, weight, bias, should_activate = True):
    fc = tf.matmul(images, weight)
    bias_add = tf.nn.bias_add(fc, bias)
    if not should_activate:
        return bias_add
    out = tf.nn.relu(bias_add)
    return out
weights = np.load('../Data/vgg16_weights.npz')
def fc_VGG(pool5_flat): # Feed directly the bottleneck features.
    # fc6
    with tf.variable_scope('fc6'):
        fc6W = tf.get_variable('fc6_W', dtype = tf.float32, trainable = True,
                               initializer = weights['fc6_W'])
        fc6b = tf.get_variable('fc6_b', dtype = tf.float32, trainable = True,
                               initializer = weights['fc6_b'])
        fc6 = fcLayer(pool5_flat, fc6W, fc6b)

    # fc7
    with tf.variable_scope('fc7'):
        fc7W = tf.get_variable('fc7_W', dtype = tf.float32, trainable = True,
                               initializer = weights['fc7_W'])
        fc7b = tf.get_variable('fc7_b', dtype = tf.float32, trainable = True,
                               initializer = weights['fc7_b'])
        fc7 = fcLayer(fc6, fc7W, fc7b)
    fc7 = tf.cond(is_train, lambda: tf.nn.dropout(fc7, keep_prob = 0.35), lambda: fc7)

    with tf.variable_scope('fc8'):
        fc7_shape = int(np.prod(fc7.get_shape()[1:]))
        fc8W = tf.get_variable('fc8_W', dtype = tf.float32, trainable = True,
                               initializer = tf.random_normal((fc7_shape, new_output_units), stddev = 1e-1))
        fc8b = tf.get_variable('fc8_b', dtype = tf.float32, trainable = True,
                               initializer = tf.ones((1)))
        fc8 = fcLayer(fc7, fc8W, fc8b, should_activate = False)
    return fc8
learning_rate = 0.0001
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape = (None, 25088))
y = tf.placeholder(tf.float32, shape = (None))
alpha = tf.constant(learning_rate, tf.float32)
is_train = tf.placeholder(tf.bool)
logits = fc_VGG(X)
loss = tf.reduce_mean(tf.abs(tf.subtract(logits, y)))
optimizer = tf.train.AdamOptimizer(learning_rate = alpha).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(EPOCHS):
        current_learning_rate = learning_rate * (1 - WEIGHT_DECAY)
        num_examples = len(y_train)
        X_train_files, y_train = shuffle(X_train_files, y_train)

        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x_files, batch_y = X_train_files[offset: end], y_train[offset: end]
            batch_x = load_batchX()
            _, loss_val = sess.run([optimizer, loss], feed_dict = {X: batch_x, y: batch_y,
                                                                   alpha: current_learning_rate, is_train: True})
            loss_history.append(loss_val)