I built a custom NN model like so:
class MyNNet(torch.nn.Module):
    def __init__(self, inp_dim, n_classes):
        super(MyNNet, self).__init__()
        self.flat = torch.nn.Flatten()
        self.l1 = torch.nn.Linear(inp_dim * inp_dim, 32)
        self.l2 = torch.nn.Linear(32, 16)
        self.l3 = torch.nn.Linear(16, n_classes)

    def forward(self, X):
        out = self.flat(X)
        out = F.relu(self.l1(out))
        out = F.relu(self.l2(out))
        return self.l3(out)
And a simple training script that updates the model parameters:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = MyNNet(28, 10)
model.to(device)

optimizer = torch.optim.Adam(model.parameters())
loss = torch.nn.CrossEntropyLoss()

epochs = 20
for e in range(epochs):
    train_l = 0.
    for i, (s, c) in enumerate(train_loader):
        s.to(device)
        c.to(device)
        y_hat = model(s)
        l = loss(y_hat, c)
        train_l += l
        l.backward()
        optimizer.step()
        optimizer.zero_grad()
    print(f'Epoch: {e}, AvgLoss: {train_l / len(train_loader)}')
As shown in the script, I move the model to cuda, and I do the same with each batch of the dataset (MNIST). However, the following error appears: Expected all tensors to be on the same device, but found at least two devices
But when I comment out model.to(device), the script works. Does this mean PyTorch moves custom models to cuda automatically?
Thanks.
Unlike Modules (where .to(...) works in-place), Tensor.to(...) returns a new tensor, so when moving tensors to a device you need to reassign them:
s = s.to(device)
c = c.to(device)
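For reference, here is a minimal sketch of the corrected loop, assuming the same train_loader, model, loss and optimizer as above (accumulating train_l with .item() is an optional extra change, so the running loss is a plain float rather than a tensor that keeps the graph alive):

for e in range(epochs):
    train_l = 0.
    for s, c in train_loader:
        s = s.to(device)  # Tensor.to() is not in-place; reassign the result
        c = c.to(device)
        y_hat = model(s)
        l = loss(y_hat, c)
        train_l += l.item()
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print(f'Epoch: {e}, AvgLoss: {train_l / len(train_loader)}')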
I have a classification problem with, for example, 8 classes to predict, and I am using EfficientNetB3 in PyTorch from here. However, I am confused about whether my custom class is written correctly. I think I want to strip the last layer of the pre-trained model to suit the 8 outputs, right? Did I do it correctly? When I print y_preds = model(images) from my DataLoader, it seems to give me 1536 predictions per image. Is this expected behavior?
!pip install geffnet
import geffnet

class EfficientNet(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.model = geffnet.create_model(config.effnet, pretrained=True)
        n_features = self.model.classifier.in_features
        # does the name fc matter?
        self.fc = nn.Linear(n_features, config.num_classes)
        self.model.classifier = nn.Identity()

    def extract(self, x):
        x = self.model(x)
        return x

    def forward(self, x):
        x = self.extract(x).squeeze(-1).squeeze(-1)
        return x

model = EfficientNet(config=config)
if torch.cuda.is_available():
    model.cuda()
Sample code for printing y_pred:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for step, (images, labels) in enumerate(sample_loader):
    images = images.to(device)
    labels = labels.to(device)
    batch_size = images.shape[0]
    y_preds = model(images)
    print('The predictions of the 4 images is as follows\n', y_preds)
    break
You're not using self.fc in the forward pass at all: since self.model.classifier was replaced with nn.Identity(), extract(x) returns the backbone's raw 1536-dimensional feature vector per image, which is exactly the 1536 "predictions" you are seeing instead of 8 class scores.
Either actually use it in forward:

def forward(self, x):
    x = self.extract(x).squeeze(-1).squeeze(-1)
    x = self.fc(x)
    return x
Or you can simply replace the layer named classifier (this way you don't need the Identity layer):
self.model.classifier = nn.Linear(n_features, config.num_classes)
Also, here config.num_classes should be 8.
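Putting the second option together, a minimal sketch of the simplified class (assuming, as in your code, that the geffnet model exposes its head as .classifier and that config.num_classes is 8):

import torch.nn as nn
import geffnet

class EfficientNet(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.model = geffnet.create_model(config.effnet, pretrained=True)
        n_features = self.model.classifier.in_features
        # Replace the head directly: no Identity layer and no separate self.fc needed.
        self.model.classifier = nn.Linear(n_features, config.num_classes)

    def forward(self, x):
        # Now returns (batch_size, config.num_classes) class scores, i.e. 8 per image.
        return self.model(x)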
Image Transformation and Batch
transform = transforms.Compose([
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

data_set = datasets.ImageFolder(root="/content/drive/My Drive/models/pokemon/dataset", transform=transform)
train_loader = DataLoader(data_set, batch_size=10, shuffle=True, num_workers=6)
Below is my Model
class pokimonClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 3, 1)
        self.conv2 = nn.Conv2d(6, 18, 3, 1)
        self.fc1 = nn.Linear(23*23*18, 520)
        self.fc2 = nn.Linear(520, 400)
        self.fc3 = nn.Linear(400, 320)
        self.fc4 = nn.Linear(320, 149)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 23*23*18)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.log_softmax(self.fc4(x), dim=1)
        return x
Creating an instance of the model, using the GPU, setting the criterion and optimizer.
Here lr was first set to 0.001 and later changed to 0.0001.
model = pokimonClassifier()
model.to('cuda')
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr = 0.0001)
Training loop
for e in range(epochs):
    train_crt = 0
    for b, (train_x, train_y) in enumerate(train_loader):
        b += 1
        train_x, train_y = train_x.to('cuda'), train_y.to('cuda')

        # train model
        y_preds = model(train_x)
        loss = criterion(y_preds, train_y)

        # analyse model
        predicted = torch.max(y_preds, 1)[1]
        correct = (predicted == train_y).sum()
        train_crt += correct

        # print loss and accuracy
        if b % 50 == 0:
            print(f'Epoch {e} batch {b} loss: {loss.item()}')

        # update weights and biases
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_loss.append(loss)
    train_correct.append(train_crt)
My loss value stays between 3 and 4 and is not converging to 0.
I am super new to deep learning and I don't know much about it.
The dataset I am using is here: https://www.kaggle.com/thedagger/pokemon-generation-one
Any help would be much appreciated.
Thank You
The problem with your network is that you are applying a softmax twice: once with F.log_softmax() after fc4 and once more inside nn.CrossEntropyLoss().
According to the official documentation, nn.CrossEntropyLoss() combines LogSoftmax and NLLLoss, so it expects raw (unnormalized) logits as input.
So in your code, please change this line
x = F.log_softmax(self.fc4(x), dim=1)
to
x = self.fc4(x)
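Alternatively, if you prefer to keep log_softmax in the model, switch the criterion to nn.NLLLoss(), which expects log-probabilities. A minimal, self-contained sketch of the equivalence (the tensors below are placeholders, not your actual data):

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 149)           # stand-in for fc4's output on a batch of 4
targets = torch.randint(0, 149, (4,))  # stand-in class labels

# Option A (the fix above): raw logits + CrossEntropyLoss
loss_a = nn.CrossEntropyLoss()(logits, targets)

# Option B: log_softmax inside the model + NLLLoss
loss_b = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)

assert torch.allclose(loss_a, loss_b)  # both formulations give the same loss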
Here I show a dummy example that represents my actual problem.
My neural network (NN) receives one input and gives the probabilities for two output nodes. The code for the NN is:
class Net(torch.nn.Module):
    def __init__(self, N, M):
        super(Net, self).__init__()
        self.fc1 = torch.nn.Linear(N, 4)
        self.fc2 = torch.nn.Linear(4, 4)
        self.fc3 = torch.nn.Linear(4, M)

    def forward(self, x):
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        x = torch.softmax(self.fc3(x), 0)
        return x
The ABM class is our model. It iteratively calls Net::forward and, based on the returned probabilities, draws an action; if the action is the first index, it increments agent_count. The inputs xx are stored in states, which will later be used for the backward pass.
class ABM:
    def __init__(self, _nn, _t_data):
        self.nn = nn
        self.iteration_n = _t_data.iteration_n
        self.target_value = _t_data.target_value

    def run(self):
        for jj in range(self.iteration_n):
            xx = self.generate_input()
            self.states.append(xx)  # store inputs
            ys = nn.forward(xx)
            action = self.draw(ys)
            if action == 0:
                self.agent_count += 1
        loss = self.calculateReward()
        return loss

    def generate_input(self):
        return torch.ones((1), requires_grad=True)

    --some other attributes--
When the run is over, the error is calculated as error = (target_value - agent_count) / target_value, which is a value between -1 and 1.
In order to train the model, the error is applied to the probability of the first output node of the NN. This is meant to correct the NN so that it predicts the right probability for the first output. The code is:
class ABM:
    def calculateReward(self):
        error = (self.target_value - self.agent_count) / self.target_value
        reward = torch.tensor((-error), requires_grad=True)
        # since all states are the same, we just choose the first one
        state = self.states[0]
        ys = nn.forward(state)
        actionProb = ys[0]
        action_reward = actionProb * reward
        return action_reward

    --some other members--
The two parameters iteration_n and target_value used in the ABM are defined in the training-data class as:
class Train:
    def __init__(self, tt, tv):
        self.iteration_n = tt
        self.target_value = tv

    target_value = 0
    iteration_n = 0
The different parts of the code are tied together as follows:
#### start optimization ####
nn = Net(1, 2)
optimizer = optim.Adam(nn.parameters(), lr=0.01)

# create training data values
training_items = []
training_items.append(Train(1000, 800))
training_items.append(Train(500, 200))

error_record = []

for ii in range(100):
    print("############ start iteration #%d ################" % ii)
    for t_item in training_items:
        model = ABM(nn, t_item)
        loss = model.run()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        error_record.append(loss.item())
Now let's present the problem.
If I only define one training item, Train(1000, 800) (iteration number 1000, target value 800), the NN is optimized as expected.
However, when I define two training items, the average error still declines to zero, but the error on each individual training item stays high.
Does anyone have an idea how to solve this issue?
I have omitted some parts of the code here to make it more readable. The full running code is available at minimal ABM.
I am currently learning deep learning, especially GANs.
I found a simple GAN example on the web site below:
https://medium.com/@devnag/generative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f
However, in the code, I don't understand why we always give the true label to the Generator, as below:
for g_index in range(g_steps):
    # 2. Train G on D's response (but DO NOT train D on these labels)
    G.zero_grad()

    gen_input = Variable(gi_sampler(minibatch_size, g_input_size))
    g_fake_data = G(gen_input)
    dg_fake_decision = D(preprocess(g_fake_data.t()))
    g_error = criterion(dg_fake_decision, Variable(torch.ones(1)))  # we want to fool, so pretend it's all genuine
    g_error.backward()
    g_optimizer.step()  # Only optimizes G's parameters
Specifically, this line:
g_error = criterion(dg_fake_decision, Variable(torch.ones(1)))  # we want to fool, so pretend it's all genuine
The input data for the Generator is fake data (it includes noise), so if we assign true labels to that input, I think the Generator ends up creating data similar to the fake data (which doesn't look genuine). Is my understanding wrong? Sorry for the silly question, but if you have knowledge, please help me out.
I'll put a whole code below.
#!/usr/bin/env python
# Generative Adversarial Networks (GAN) example in PyTorch.
# See related blog post at https://medium.com/@devnag/generative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f#.sch4xgsa9
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Data params
data_mean = 4
data_stddev = 1.25
# Model params
g_input_size = 1 # Random noise dimension coming into generator, per output vector
g_hidden_size = 50 # Generator complexity
g_output_size = 1 # size of generated output vector
d_input_size = 100 # Minibatch size - cardinality of distributions
d_hidden_size = 50 # Discriminator complexity
d_output_size = 1 # Single dimension for 'real' vs. 'fake'
minibatch_size = d_input_size
d_learning_rate = 2e-4 # 2e-4
g_learning_rate = 2e-4
optim_betas = (0.9, 0.999)
num_epochs = 30000
print_interval = 200
d_steps = 1 # 'k' steps in the original GAN paper. Can put the discriminator on higher training freq than generator
g_steps = 1
# ### Uncomment only one of these
#(name, preprocess, d_input_func) = ("Raw data", lambda data: data, lambda x: x)
(name, preprocess, d_input_func) = ("Data and variances", lambda data: decorate_with_diffs(data, 2.0), lambda x: x * 2)
print("Using data [%s]" % (name))
# ##### DATA: Target data and generator input data
def get_distribution_sampler(mu, sigma):
    return lambda n: torch.Tensor(np.random.normal(mu, sigma, (1, n)))  # Gaussian

def get_generator_input_sampler():
    return lambda m, n: torch.rand(m, n)  # Uniform-dist data into generator, _NOT_ Gaussian
# ##### MODELS: Generator model and discriminator model
class Generator(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Generator, self).__init__()
        self.map1 = nn.Linear(input_size, hidden_size)
        self.map2 = nn.Linear(hidden_size, hidden_size)
        self.map3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.elu(self.map1(x))
        x = F.sigmoid(self.map2(x))
        return self.map3(x)

class Discriminator(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Discriminator, self).__init__()
        self.map1 = nn.Linear(input_size, hidden_size)
        self.map2 = nn.Linear(hidden_size, hidden_size)
        self.map3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.elu(self.map1(x))
        x = F.elu(self.map2(x))
        return F.sigmoid(self.map3(x))
def extract(v):
    return v.data.storage().tolist()

def stats(d):
    return [np.mean(d), np.std(d)]

def decorate_with_diffs(data, exponent):
    mean = torch.mean(data.data, 1, keepdim=True)
    mean_broadcast = torch.mul(torch.ones(data.size()), mean.tolist()[0][0])
    diffs = torch.pow(data - Variable(mean_broadcast), exponent)
    return torch.cat([data, diffs], 1)
d_sampler = get_distribution_sampler(data_mean, data_stddev)
gi_sampler = get_generator_input_sampler()
G = Generator(input_size=g_input_size, hidden_size=g_hidden_size, output_size=g_output_size)
D = Discriminator(input_size=d_input_func(d_input_size), hidden_size=d_hidden_size, output_size=d_output_size)
criterion = nn.BCELoss() # Binary cross entropy: http://pytorch.org/docs/nn.html#bceloss
d_optimizer = optim.Adam(D.parameters(), lr=d_learning_rate, betas=optim_betas)
g_optimizer = optim.Adam(G.parameters(), lr=g_learning_rate, betas=optim_betas)
for epoch in range(num_epochs):
    for d_index in range(d_steps):
        # 1. Train D on real+fake
        D.zero_grad()

        # 1A: Train D on real
        d_real_data = Variable(d_sampler(d_input_size))
        d_real_decision = D(preprocess(d_real_data))
        d_real_error = criterion(d_real_decision, Variable(torch.ones(1)))  # ones = true
        d_real_error.backward()  # compute/store gradients, but don't change params

        # 1B: Train D on fake
        d_gen_input = Variable(gi_sampler(minibatch_size, g_input_size))
        d_fake_data = G(d_gen_input).detach()  # detach to avoid training G on these labels
        d_fake_decision = D(preprocess(d_fake_data.t()))
        d_fake_error = criterion(d_fake_decision, Variable(torch.zeros(1)))  # zeros = fake
        d_fake_error.backward()
        d_optimizer.step()  # Only optimizes D's parameters; changes based on stored gradients from backward()

    for g_index in range(g_steps):
        # 2. Train G on D's response (but DO NOT train D on these labels)
        G.zero_grad()

        gen_input = Variable(gi_sampler(minibatch_size, g_input_size))
        g_fake_data = G(gen_input)
        dg_fake_decision = D(preprocess(g_fake_data.t()))
        g_error = criterion(dg_fake_decision, Variable(torch.ones(1)))  # we want to fool, so pretend it's all genuine
        g_error.backward()
        g_optimizer.step()  # Only optimizes G's parameters

    if epoch % print_interval == 0:
        print("%s: D: %s/%s G: %s (Real: %s, Fake: %s) " % (epoch,
                                                            extract(d_real_error)[0],
                                                            extract(d_fake_error)[0],
                                                            extract(g_error)[0],
                                                            stats(extract(d_real_data)),
                                                            stats(extract(d_fake_data))))
In this part of the code you are training G to fool D: G generates fake data and D is asked whether it thinks that data is real (hence the true labels). D's gradients are then propagated all the way back to G (this is possible because D's input was G's output), so that G learns to fool D better in the next iteration.
The inputs of G are not trainable; G only tries to transform them into real data (data similar to what d_sampler generates).
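To make the difference between the discriminator step and the generator step concrete, here is a minimal, self-contained sketch (the tiny stand-in networks and shapes are placeholders, not the ones from the code above):

import torch
import torch.nn as nn

G = nn.Linear(1, 1)                               # stand-in generator
D = nn.Sequential(nn.Linear(1, 1), nn.Sigmoid())  # stand-in discriminator
criterion = nn.BCELoss()
noise = torch.rand(1, 1)

# Discriminator step: .detach() blocks gradients from reaching G, so only D learns
# to label G's output as fake (target 0).
fake = G(noise).detach()
d_fake_error = criterion(D(fake), torch.zeros(1, 1))
d_fake_error.backward()

# Generator step: no detach, and the target is "real" (1), so the gradient flows
# through D back into G, pushing G towards samples that D scores as real.
fake = G(noise)
g_error = criterion(D(fake), torch.ones(1, 1))
g_error.backward()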
I'm new to the PyTorch framework (coming from Theano and Tensorflow mainly):
I've followed the introduction tutorial and read the Classifying Names with a Character-Level RNN one.
I now try to adapt it to a char level LSTM model in order to gain some practical experience with the framework.
Basically, I feed the model sequences of char indices and give it as target the same sequence shifted by one step into the future.
However, I can't overfit a simple training example, and I don't see what I did wrong.
If someone can spot my mistake it would be very helpful.
Here is my code:
class LSTMTxtGen(nn.Module):
    def __init__(self, hidden_dim, n_layer, vocab_size):
        super(LSTMTxtGen, self).__init__()
        self.n_layer = n_layer
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.lstm = nn.LSTM(vocab_size, hidden_dim, n_layer, batch_first=True)
        # The linear layer that maps from hidden state space to tag space
        #self.hidden = self.init_hidden()

    def init_hidden(self, batch_size):
        # Before we've done anything, we don't have any hidden state.
        # Refer to the Pytorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        return (autograd.Variable(torch.zeros(self.n_layer, batch_size,
                                              self.hidden_dim)),
                autograd.Variable(torch.zeros(self.n_layer, batch_size,
                                              self.hidden_dim)))

    def forward(self, seqs):
        self.hidden = self.init_hidden(seqs.size()[0])
        lstm_out, self.hidden = self.lstm(seqs, self.hidden)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
        lstm_out = nn.Linear(lstm_out.size(1), self.vocab_size)(lstm_out)
        return lstm_out
model = LSTMTxtGen(
    hidden_dim = 50,
    n_layer = 3,
    vocab_size = 44,
)
print(model)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adamax(model.parameters())

G = Data.batch_generator(5, 100)
batch_per_epoch, to_idx, to_char = next(G)
X, Y = next(G)

for epoch in range(10):
    losses = []
    for batch_count in range(batch_per_epoch):
        model.zero_grad()

        #mode.hidden = model.init_hidden()
        #X, Y = next(G)
        X = autograd.Variable(torch.from_numpy(X))
        Y = autograd.Variable(torch.from_numpy(Y))

        preds = model(X)

        loss = criterion(preds.view(-1, model.vocab_size), Y.view(-1))
        loss.backward()
        optimizer.step()
        losses.append(loss)

        if (batch_count % 20 == 0):
            print('Loss: ', losses[-1])