InvalidArgumentError: Graph execution error when trying to train model

Here is the model I'm using:
X_test, X_valid, y_test, y_valid = train_test_split(testReview, testLabel, test_size=0.4, shuffle=True, random_state=42, stratify=testLabel)
print(len(X_test), len(X_valid))
print((X_valid.shape), X.shape)

def read_glove_vector(glove_vec):
    with open(glove_vec, 'r', encoding='UTF-8') as f:
        words = set()
        word_to_vec_map = {}
        for line in f:
            w_line = line.split()
            curr_word = w_line[0]
            word_to_vec_map[curr_word] = np.array(w_line[1:], dtype='float32')
    return word_to_vec_map

word_to_vec_map = read_glove_vector('/content/drive/MyDrive/Colab Notebooks/AmazonCustomerReview/glove.6B.100d.txt')

maxLen = 123
vocab_len = len(word_to_index)
embed_vector_len = word_to_vec_map['moon'].shape[0]

emb_matrix = np.zeros((vocab_len, embed_vector_len))
for word, index in word_to_index.items():
    embedding_vector = word_to_vec_map.get(word)
    if embedding_vector is not None:
        emb_matrix[index] = embedding_vector

print('Build model...')
model = Sequential()
model.add(Embedding(vocab_len, embed_vector_len, input_length=maxLen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Load GloVe embedding
model.layers[0].set_weights([emb_matrix])
model.layers[0].trainable = False

print(model.summary())

# Training and evaluation
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=2, batch_size=32,
          validation_data=(X_valid, y_valid))
print("Result: ", model.metrics_names, model.evaluate(X_test, y_test))
After this, the error I encounter is:
Epoch 1/2
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-62-4152e138ca8e> in <module>()
----> 1 model.fit(X, y, batch_size=32, epochs=2, verbose=2)
2 # model.fit(X, y, epochs=2, batch_size=32)
3 # # validation_data=(X_valid,y_valid))
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
The issue is, I ran the full code an hour ago and it completed perfectly. Then I just tried fine-tuning the model to see whether I could get a better result, but it unexpectedly got stuck on this error. If there were a mistake in the code, it wouldn't have run fully the first time, yet it only failed on the third run.
What is the solution to this?

Related

t() expects a tensor with <= 2 dimensions, but self is 3D

I'm new to PyTorch and wrote the simple code below to classify some inputs. The model input is 8×2 with a batch size of 2, and the input layer in the model has 2 nodes. I don't know what is wrong!
X1 = np.array([[2, 1], [3, 2], [-4, -1], [-1, -3], [2, -1], [3, -3], [-2, 1], [-4, -2]])
Y1 = np.array([0, 0, 0, 0, 1, 1, 1, 1])
X = torch.tensor(X1)
Y = torch.tensor(Y1)
BATCH_SIZE = 2
trainset = torch.utils.data.TensorDataset(X, Y)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                          shuffle=True, num_workers=1)

from torch.nn.modules import flatten

learning_rate = 0.01
num_epochs = 20

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = MyModel()
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

## compute accuracy
def get_accuracy(logit, target, batch_size):
    ''' Obtain accuracy for training round '''
    corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
    accuracy = 100.0 * corrects / batch_size
    return accuracy.item()

model = MyModel()

# Commented out IPython magic to ensure Python compatibility.
for epoch in range(num_epochs):
    train_running_loss = 0.0
    train_acc = 0.0

    ## training step
    for inputs, labels in trainloader:
        # inputs = torch.flatten(inputs)
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()

        ## forward + backprop + loss
        print(inputs)
        outputs = model.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()

        ## update model params
        optimizer.step()

        train_running_loss += loss.detach().item()
        train_acc += get_accuracy(outputs, labels, BATCH_SIZE)

    # model.train()
    model.eval()
    print('Epoch: %d | Loss: %.4f | Train Accuracy: %.2f' % (epoch, train_running_loss / i, train_acc / i))
And my model is as below:
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d1 = nn.Linear(2, 3)
        self.d2 = nn.Linear(3, 1)
        self.init_weights()

    def init_weights(self):
        k1 = torch.tensor([0.1, -0.72, 0.94, -0.29, 0.12, 0.44])
        k1 = torch.unsqueeze(torch.unsqueeze(k1, 0), 0)
        self.d1.weight.data = k1
        k2 = torch.tensor([1, -1.16, -0.26])
        k2 = torch.unsqueeze(torch.unsqueeze(k2, 0), 0)
        self.d2.weight.data = k2

    def forward(self, x):
        x = self.d1(x)
        x = F.tanh(x)
        x = self.d2(x)
        out = F.sigmoid(x)
        return out
Then I got an error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-27-196d819d3ccd> in <module>
101 print(inputs)
102
--> 103 outputs = model.forward(inputs)
104 loss = criterion(outputs, labels)
105
2 frames
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py in forward(self, input)
112
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
115
116 def extra_repr(self) -> str:
RuntimeError: t() expects a tensor with <= 2 dimensions, but self is 3D
I flattened the input, but nothing changed. What should I do to fix it?
First of all, you don't need to invoke your model's forward pass with model.forward(x); plain model(x) is fine.
Second of all, what exactly are you trying to achieve with the init_weights method? You're unsqueezing k1 and k2 twice, giving them a shape of (1, 1, x), which is 3D, which is exactly what the error is telling you. torch.nn.Linear performs a matrix multiplication with a 2D weight matrix, so you can't use a 3D one. torch.nn.Linear already initializes its weights via Kaiming initialization [1], so I'm not sure what you're trying to achieve here.
Changing the init_weights method to:
def init_weights(self):
    k1 = torch.tensor([0.1, -0.72, 0.94, -0.29, 0.12, 0.44])
    k1 = k1.reshape(self.d1.weight.shape)
    self.d1.weight.data = k1
    k2 = torch.tensor([1, -1.16, -0.26])
    k2 = k2.reshape(self.d2.weight.shape)
    self.d2.weight.data = k2
and changing the type of inputs from Long to Float (i.e., model(inputs.float())) should solve your problem.
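For reference, here is a minimal self-contained sketch combining both fixes (reshaped init weights plus the Float cast); the example inputs are made up, and torch.tanh/torch.sigmoid replace the deprecated F.tanh/F.sigmoid:
import numpy as np
import torch
import torch.nn as nn

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d1 = nn.Linear(2, 3)
        self.d2 = nn.Linear(3, 1)
        self.init_weights()

    def init_weights(self):
        # reshape the hand-picked weights to each layer's 2D weight shape
        k1 = torch.tensor([0.1, -0.72, 0.94, -0.29, 0.12, 0.44])
        self.d1.weight.data = k1.reshape(self.d1.weight.shape)
        k2 = torch.tensor([1.0, -1.16, -0.26])
        self.d2.weight.data = k2.reshape(self.d2.weight.shape)

    def forward(self, x):
        x = torch.tanh(self.d1(x))
        return torch.sigmoid(self.d2(x))

model = MyModel()
inputs = torch.tensor(np.array([[2, 1], [3, 2]]))  # a Long tensor, as in the question
out = model(inputs.float())                        # cast to Float before the forward pass
print(out.shape)  # torch.Size([2, 1])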
References
[1] https://github.com/pytorch/pytorch/blob/0dceaf07cd1236859953b6f85a61dc4411d10f87/torch/nn/modules/linear.py#L103

Feeding Multiple Inputs to LSTM for Time-Series Forecasting using PyTorch

I'm currently working on building an LSTM network to forecast time-series data using PyTorch. Following Roman's blog post, I implemented a simple LSTM for univariate time-series data; please see the class definitions below. However, it's been a few days since I ground to a halt on adding more features to the input data, such as the hour of the day, the day of the week, and the week of the year.
class Model(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.lstm = nn.LSTMCell(self.input_size, self.hidden_size)
        self.linear = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, future=0, y=None):
        outputs = []

        # reset the state of LSTM
        # the state is kept till the end of the sequence
        h_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32)
        c_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm(input_t, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]

        for i in range(future):
            if y is not None and random.random() > 0.5:
                output = y[:, [i]]  # teacher forcing
            h_t, c_t = self.lstm(output, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]

        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
class Optimization:
    "A helper class to train, test and diagnose the LSTM"

    def __init__(self, model, loss_fn, optimizer, scheduler):
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_losses = []
        self.val_losses = []
        self.futures = []

    @staticmethod
    def generate_batch_data(x, y, batch_size):
        for batch, i in enumerate(range(0, len(x) - batch_size, batch_size)):
            x_batch = x[i : i + batch_size]
            y_batch = y[i : i + batch_size]
            yield x_batch, y_batch, batch

    def train(
        self,
        x_train,
        y_train,
        x_val=None,
        y_val=None,
        batch_size=100,
        n_epochs=20,
        dropout=0.2,
        do_teacher_forcing=None,
    ):
        seq_len = x_train.shape[1]
        for epoch in range(n_epochs):
            start_time = time.time()
            self.futures = []

            train_loss = 0
            for x_batch, y_batch, batch in self.generate_batch_data(x_train, y_train, batch_size):
                y_pred = self._predict(x_batch, y_batch, seq_len, do_teacher_forcing)
                self.optimizer.zero_grad()
                loss = self.loss_fn(y_pred, y_batch)
                loss.backward()
                self.optimizer.step()
                train_loss += loss.item()
            self.scheduler.step()
            train_loss /= batch
            self.train_losses.append(train_loss)

            self._validation(x_val, y_val, batch_size)

            elapsed = time.time() - start_time
            print(
                "Epoch %d Train loss: %.2f. Validation loss: %.2f. Avg future: %.2f. Elapsed time: %.2fs."
                % (epoch + 1, train_loss, self.val_losses[-1], np.average(self.futures), elapsed)
            )

    def _predict(self, x_batch, y_batch, seq_len, do_teacher_forcing):
        if do_teacher_forcing:
            future = random.randint(1, seq_len // 2)
            limit = x_batch.size(1) - future
            y_pred = self.model(x_batch[:, :limit], future=future, y=y_batch[:, limit:])
        else:
            future = 0
            y_pred = self.model(x_batch)
        self.futures.append(future)
        return y_pred

    def _validation(self, x_val, y_val, batch_size):
        if x_val is None or y_val is None:
            return
        with torch.no_grad():
            val_loss = 0
            batch = 1
            for x_batch, y_batch, batch in self.generate_batch_data(x_val, y_val, batch_size):
                y_pred = self.model(x_batch)
                loss = self.loss_fn(y_pred, y_batch)
                val_loss += loss.item()
            val_loss /= batch
            self.val_losses.append(val_loss)

    def evaluate(self, x_test, y_test, batch_size, future=1):
        with torch.no_grad():
            test_loss = 0
            actual, predicted = [], []
            for x_batch, y_batch, batch in self.generate_batch_data(x_test, y_test, batch_size):
                y_pred = self.model(x_batch, future=future)
                y_pred = (
                    y_pred[:, -len(y_batch):] if y_pred.shape[1] > y_batch.shape[1] else y_pred
                )
                loss = self.loss_fn(y_pred, y_batch)
                test_loss += loss.item()
                actual += torch.squeeze(y_batch[:, -1]).data.cpu().numpy().tolist()
                predicted += torch.squeeze(y_pred[:, -1]).data.cpu().numpy().tolist()
            test_loss /= batch
            return actual, predicted, test_loss

    def plot_losses(self):
        plt.plot(self.train_losses, label="Training loss")
        plt.plot(self.val_losses, label="Validation loss")
        plt.legend()
        plt.title("Losses")
Below are some of the helper functions that help me split and format the data before feeding it to my LSTM network.
def to_dataframe(actual, predicted):
    return pd.DataFrame({"value": actual, "prediction": predicted})

def inverse_transform(scaler, df, columns):
    for col in columns:
        df[col] = scaler.inverse_transform(df[col])
    return df

def split_sequences(sequences, n_steps):
    X, y = list(), list()
    for i in range(len(sequences)):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the dataset
        if end_ix > len(sequences):
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix - 1, -1]
        X.append(seq_x)
        y.append(seq_y)
    return array(X), array(y)

def train_val_test_split_new(df, test_ratio=0.2, seq_len=100):
    y = df['value']
    X = df.drop(columns=['value'])
    train_ratio = 1 - test_ratio
    val_ratio = 1 - ((train_ratio - test_ratio) / train_ratio)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_ratio)
    return X_train, y_train, X_val, y_val, X_test, y_test
I use the following data frames to train my model.
# df_train
value weekday monthday hour
timestamp
2014-07-01 00:00:00 10844 1 1 0
2014-07-01 00:30:00 8127 1 1 0
2014-07-01 01:00:00 6210 1 1 1
2014-07-01 01:30:00 4656 1 1 1
2014-07-01 02:00:00 3820 1 1 2
... ... ... ... ...
2015-01-31 21:30:00 24670 5 31 21
2015-01-31 22:00:00 25721 5 31 22
2015-01-31 22:30:00 27309 5 31 22
2015-01-31 23:00:00 26591 5 31 23
2015-01-31 23:30:00 26288 5 31 23
10320 rows × 4 columns
# x_train
weekday monthday hour
timestamp
2014-08-26 16:30:00 1 26 16
2014-08-18 16:30:00 0 18 16
2014-10-22 20:00:00 2 22 20
2014-12-10 08:00:00 2 10 8
2014-07-27 22:00:00 6 27 22
... ... ... ...
2014-08-24 05:30:00 6 24 5
2014-11-24 12:00:00 0 24 12
2014-12-18 06:00:00 3 18 6
2014-07-27 17:00:00 6 27 17
2014-12-05 21:00:00 4 5 21
6192 rows × 3 columns
# y_train
timestamp
2014-08-26 16:30:00 14083
2014-08-18 16:30:00 14465
2014-10-22 20:00:00 25195
2014-12-10 08:00:00 21348
2014-07-27 22:00:00 16356
...
2014-08-24 05:30:00 2948
2014-11-24 12:00:00 16292
2014-12-18 06:00:00 7029
2014-07-27 17:00:00 18883
2014-12-05 21:00:00 26284
Name: value, Length: 6192, dtype: int64
After transforming and splitting the time-series data into smaller batches, the training datasets for X and y look as follows:
X_data shape is (6093, 100, 3)
y_data shape is (6093,)
tensor([[[-1.0097, 1.1510, 0.6508],
[-1.5126, 0.2492, 0.6508],
[-0.5069, 0.7001, 1.2238],
...,
[ 1.5044, -1.4417, -1.6413],
[ 1.0016, -0.0890, 0.7941],
[ 1.5044, -0.9908, -0.2087]],
[[-1.5126, 0.2492, 0.6508],
[-0.5069, 0.7001, 1.2238],
[-0.5069, -0.6526, -0.4952],
...,
[ 1.0016, -0.0890, 0.7941],
[ 1.5044, -0.9908, -0.2087],
[ 0.4988, 0.5874, 0.5076]],
[[-0.5069, 0.7001, 1.2238],
[-0.5069, -0.6526, -0.4952],
[ 1.5044, 1.2637, 1.5104],
...,
[ 1.5044, -0.9908, -0.2087],
[ 0.4988, 0.5874, 0.5076],
[ 0.4988, 0.5874, -0.6385]],
...,
[[ 1.0016, 0.9255, -1.2115],
[-1.0097, -0.9908, 1.0806],
[-0.0041, 0.8128, 0.3643],
...,
[ 1.5044, 0.9255, -0.9250],
[-1.5126, 0.9255, 0.0778],
[-0.0041, 0.2492, -0.7818]],
[[-1.0097, -0.9908, 1.0806],
[-0.0041, 0.8128, 0.3643],
[-0.5069, 1.3765, -0.0655],
...,
[-1.5126, 0.9255, 0.0778],
[-0.0041, 0.2492, -0.7818],
[ 1.5044, 1.2637, 0.7941]],
[[-0.0041, 0.8128, 0.3643],
[-0.5069, 1.3765, -0.0655],
[-0.0041, -1.6672, -0.4952],
...,
[-0.0041, 0.2492, -0.7818],
[ 1.5044, 1.2637, 0.7941],
[ 0.4988, -1.2163, 1.3671]]])
tensor([ 0.4424, 0.1169, 0.0148, ..., -1.1653, 0.5394, 1.6037])
Finally, just to check if the dimensions of all these training, validation, and test datasets are correct, I print out their shapes.
train shape is: torch.Size([6093, 100, 3])
train label shape is: torch.Size([6093])
val shape is: torch.Size([1965, 100, 3])
val label shape is: torch.Size([1965])
test shape is: torch.Size([1965, 100, 3])
test label shape is: torch.Size([1965])
When I try to build the model as follows, I end up getting a RuntimeError pointing at inconsistent input sizes.
model_params = {'train_ratio': 0.8,
                'validation_ratio': 0.2,
                'sequence_length': 100,
                'teacher_forcing': False,
                'dropout_rate': 0.2,
                'batch_size': 100,
                'num_of_epochs': 5,
                'hidden_size': 24,
                'n_features': 3,
                'learning_rate': 1e-3}

train_ratio = model_params['train_ratio']
val_ratio = model_params['validation_ratio']
seq_len = model_params['sequence_length']
teacher_forcing = model_params['teacher_forcing']
dropout_rate = model_params['dropout_rate']
batch_size = model_params['batch_size']
n_epochs = model_params['num_of_epochs']
hidden_size = model_params['hidden_size']
n_features = model_params['n_features']
lr = model_params['learning_rate']

model = Model(input_size=n_features, hidden_size=hidden_size, output_size=1)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
optimization = Optimization(model, loss_fn, optimizer, scheduler)

start_time = datetime.now()
optimization.train(x_train, y_train, x_val, y_val,
                   batch_size=batch_size,
                   n_epochs=n_epochs,
                   dropout=dropout_rate,
                   do_teacher_forcing=teacher_forcing)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-192-6fc406c0113d> in <module>
6
7 start_time = datetime.now()
----> 8 optimization.train(x_train, y_train, x_val, y_val,
9 batch_size=batch_size,
10 n_epochs=n_epochs,
<ipython-input-189-c18d20430910> in train(self, x_train, y_train, x_val, y_val, batch_size, n_epochs, dropout, do_teacher_forcing)
68 train_loss = 0
69 for x_batch, y_batch, batch in self.generate_batch_data(x_train, y_train, batch_size):
---> 70 y_pred = self._predict(x_batch, y_batch, seq_len, do_teacher_forcing)
71 self.optimizer.zero_grad()
72 loss = self.loss_fn(y_pred, y_batch)
<ipython-input-189-c18d20430910> in _predict(self, x_batch, y_batch, seq_len, do_teacher_forcing)
93 else:
94 future = 0
---> 95 y_pred = self.model(x_batch)
96 self.futures.append(future)
97 return y_pred
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
<ipython-input-189-c18d20430910> in forward(self, input, future, y)
17
18 for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
---> 19 h_t, c_t = self.lstm(input_t, (h_t, c_t))
20 output = self.linear(h_t)
21 outputs += [output]
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx)
963
964 def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
--> 965 self.check_forward_input(input)
966 if hx is None:
967 zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
~\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py in check_forward_input(self, input)
789 def check_forward_input(self, input: Tensor) -> None:
790 if input.size(1) != self.input_size:
--> 791 raise RuntimeError(
792 "input has inconsistent input_size: got {}, expected {}".format(
793 input.size(1), self.input_size))
RuntimeError: input has inconsistent input_size: got 1, expected 3
I suspect my current LSTM model class does not support data with multiple features, and I've been trying out different approaches lately with no luck so far. Feel free to share your thoughts or point me in the right direction to help me solve this problem.
As suggested by @stackoverflowuser2010, I printed out the shapes of the tensors input_t, h_t, and c_t that are fed into the forward step before the error is thrown.
input_t: torch.Size([100, 1, 3])
h_t:     torch.Size([100, 24])
c_t:     torch.Size([100, 24])
After muddling through for a couple of weeks, I solved the issue. This has been a fruitful journey for me, so I'd like to share what I discovered. If you'd like the complete walk-through with code, please check out my Medium post on the matter.
Just as with Pandas, I found that things tend to work faster and more smoothly when I stick to the PyTorch way. Both libraries rely on NumPy, and I'm sure one could do pretty much all the table and matrix operations explicitly with NumPy arrays and functions. However, doing so would eliminate all the nice abstractions and performance improvements these libraries provide and turn each step into a CS exercise. It's fun until it isn't.
Rather than shaping all the training and validation sets manually to pass them to the model, PyTorch's TensorDataset and DataLoader classes have helped me immensely. After scaling the feature and target sets for training and validation, we have NumPy arrays. We can convert these arrays into Tensors and use them to create a TensorDataset, or a custom Dataset depending on your requirements. Finally, DataLoaders let us iterate over such datasets with much less hassle than otherwise, since they already provide built-in batching, shuffling, and dropping of the last incomplete batch.
train_features = torch.Tensor(X_train_arr)
train_targets = torch.Tensor(y_train_arr)
val_features = torch.Tensor(X_val_arr)
val_targets = torch.Tensor(y_val_arr)
train = TensorDataset(train_features, train_targets)
train_loader = DataLoader(train, batch_size=64, shuffle=False, drop_last=True)
val = TensorDataset(val_features, val_targets)
val_loader = DataLoader(val, batch_size=64, shuffle=False, drop_last=True)
After transforming our data into iterable datasets, we can use them later for mini-batch training. Instead of explicitly defining batches or wrestling with matrix operations, we can simply iterate over them via the DataLoaders, as follows.
model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)
criterion = nn.MSELoss(reduction='mean')
optimizer = optim.Adam(model.parameters(), lr=1e-2)

train_losses = []
val_losses = []
train_step = make_train_step(model, criterion, optimizer)
device = 'cuda' if torch.cuda.is_available() else 'cpu'

for epoch in range(n_epochs):
    batch_losses = []
    for x_batch, y_batch in train_loader:
        x_batch = x_batch.view([batch_size, -1, n_features]).to(device)
        y_batch = y_batch.to(device)
        loss = train_step(x_batch, y_batch)
        batch_losses.append(loss)
    training_loss = np.mean(batch_losses)
    train_losses.append(training_loss)

    with torch.no_grad():
        batch_val_losses = []
        for x_val, y_val in val_loader:
            x_val = x_val.view([batch_size, -1, n_features]).to(device)
            y_val = y_val.to(device)
            model.eval()
            yhat = model(x_val)
            val_loss = criterion(y_val, yhat).item()
            batch_val_losses.append(val_loss)
        validation_loss = np.mean(batch_val_losses)
        val_losses.append(validation_loss)

    print(f"[{epoch+1}] Training loss: {training_loss:.4f}\t Validation loss: {validation_loss:.4f}")
Another cool feature PyTorch provides is the view() function, which allows fast and memory-efficient reshaping of tensors. Since I defined my LSTM model with batch_first=True, the batch tensor for the feature set must have the shape (batch size, time steps, number of features). The line x_batch = x_batch.view([batch_size, -1, n_features]).to(device) in the training loop above does just that.
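As a small standalone illustration of that reshape (the sizes here are made up):
import torch

batch_size, seq_len, n_features = 64, 100, 3
x_batch = torch.randn(batch_size, seq_len * n_features)  # a flat batch
x_batch = x_batch.view([batch_size, -1, n_features])     # -1 infers the time-step dimension
print(x_batch.shape)  # torch.Size([64, 100, 3])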
I hope this answer helps those dealing with similar problems or at least gives an idea of which direction to take. I changed a lot in the code shared in the original post, but I won't include all of it here for the sake of simplicity. Feel free to check out the rest of it in my other SO post here.

Unable to get output using CNN model

I am trying to use a CNN-LSTM model on this dataset, which I've stored in a dataframe named df. There are 11 columns in total in this dataset, but I am only mentioning 9 of them here. All columns have numerical values only.
Area book_hotel votes location hotel_type Total_Price Facilities Dine rate
6 0 0 1 163 400 22 7 4.4
19 1 2 7 122 220 28 11 4.6
X = df.drop(['rate'], axis=1)
Y = df['rate']
x_train, x_test, y_train, y_test = train_test_split(np.asarray(X), np.asarray(Y), test_size=0.33, shuffle=True)
x_train has shape (3350,10) and
x_test has shape (1650, 10)
# The known number of output classes.
num_classes = 10

# Input image dimensions
input_shape = (10,)

# Convert class vectors to binary class matrices. This uses 1-hot encoding.
y_train_binary = keras.utils.to_categorical(y_train, num_classes)
y_test_binary = keras.utils.to_categorical(y_test, num_classes)

x_train = x_train.reshape(3350, 10, 1)
x_test = x_test.reshape(1650, 10, 1)

input_layer = Input(shape=(10, 1))
conv1 = Conv1D(filters=32,
               kernel_size=8,
               strides=1,
               activation='relu',
               padding='same')(input_layer)
lstm1 = LSTM(32, return_sequences=True)(conv1)
output_layer = Dense(1, activation='sigmoid')(lstm1)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(loss='mse', optimizer='adam')
Finally, when I try to fit the model with the input:
model.fit(x_train, y_train)
ValueError Traceback (most recent call last)
<ipython-input-170-4719cf73997a> in <module>()
----> 1 model.fit(x_train,y_train)
2 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
133 ': expected ' + names[i] + ' to have ' +
134 str(len(shape)) + ' dimensions, but got array '
--> 135 'with shape ' + str(data_shape))
136 if not check_batch_axis:
137 data_shape = data_shape[1:]
ValueError: Error when checking target: expected dense_2 to have 3 dimensions, but got array with shape (3350, 1)
Can someone please help me resolve this error?
I see some problems in your code:
The last dimension of the output must equal the number of classes, and with multiclass tasks you need to apply a softmax activation: Dense(num_classes, activation='softmax').
You must set return_sequences=False in your last LSTM cell because you need a 2D output, not a 3D one.
You must use categorical_crossentropy as the loss function with one-hot encoded targets.
Here is a complete dummy example:
num_classes = 10
n_sample = 1000

X = np.random.uniform(0, 1, (n_sample, 10, 1))
y = tf.keras.utils.to_categorical(np.random.randint(0, num_classes, n_sample))

input_layer = Input(shape=(10, 1))
conv1 = Conv1D(filters=32,
               kernel_size=8,
               strides=1,
               activation='relu',
               padding='same')(input_layer)
lstm1 = LSTM(32, return_sequences=False)(conv1)
output_layer = Dense(num_classes, activation='softmax')(lstm1)
model = Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(X, y, epochs=5)

Size Mismatch using pytorch when trying to train data

I am really new to PyTorch and am just trying to use my own dataset to build a simple linear regression model. I am only using the numerical values as inputs, too.
I have imported the data from the CSV
dataset = pd.read_csv('mlb_games_overview.csv')
I have split the data into four parts: X_train, X_test, y_train, y_test
X = dataset.drop(['date', 'team', 'runs', 'win'], 1)
y = dataset['win']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=True)
I have converted the data to PyTorch tensors
X_train = torch.from_numpy(np.array(X_train))
X_test = torch.from_numpy(np.array(X_test))
y_train = torch.from_numpy(np.array(y_train))
y_test = torch.from_numpy(np.array(y_test))
I have created a LinearRegressionModel
class LinearRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LinearRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
I have initialized the optimizer and the loss function
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
Now, when I start training, I get a runtime size-mismatch error:
EPOCHS = 500
for epoch in range(EPOCHS):
    pred_y = model(X_train)  # RUNTIME ERROR HERE
    loss = criterion(pred_y, y_train)
    optimizer.zero_grad()  # zero out gradients to update parameters correctly
    loss.backward()  # backpropagation
    optimizer.step()  # update weights
    print('epoch {}, loss {}'.format(epoch, loss.data[0]))
Error Log:
RuntimeError Traceback (most recent call last)
<ipython-input-40-c0474231d515> in <module>
1 EPOCHS = 500
2 for epoch in range(EPOCHS):
----> 3 pred_y = model(X_train)
4 loss = criterion(pred_y, y_train)
5 optimizer.zero_grad() # zero out gradients to update parameters correctly
RuntimeError: size mismatch, m1: [3540 x 8], m2: [1 x 1] at
C:\w\1\s\windows\pytorch\aten\src\TH/generic/THTensorMath.cpp:752
In your Linear Regression model, you have:
self.linear = torch.nn.Linear(1, 1)
But your training data (X_train) shape is 3540 x 8 which means you have 8 features representing each input example. So, you should define the linear layer as follows.
self.linear = torch.nn.Linear(8, 1)
A linear layer in PyTorch has two parameters, W and b. If you set in_features to 8 and out_features to 1, then the shape of the W matrix will be 1 x 8 and the length of the b vector will be 1.
Since your training data has shape 3540 x 8, the layer can then perform the following operation:
linear_out = X_train @ W.T + b  # (3540 x 8) @ (8 x 1) -> (3540 x 1)
I hope it clarifies your confusion.
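To make the shape bookkeeping concrete, here is a quick standalone check, with random data standing in for the CSV:
import torch

linear = torch.nn.Linear(8, 1)  # W: 1 x 8, b: length 1
X_train = torch.randn(3540, 8)  # 3540 examples, 8 features each
pred_y = linear(X_train)        # computes X_train @ W.T + b
print(linear.weight.shape, linear.bias.shape, pred_y.shape)
# torch.Size([1, 8]) torch.Size([1]) torch.Size([3540, 1])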

Classifying sequences with different lengths with error batching

I'm using Keras with the TensorFlow backend. I've just figured out how to train and classify sequences of different lengths without masking, because I can't get masking to work. In the toy example I'm working with, I'm trying to train an LSTM to detect whether a sequence of arbitrary length starts with a 1 or not.
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
def gen_sig(num_samples, seq_len):
    one_indices = np.random.choice(a=num_samples, size=num_samples // 2, replace=False)

    x_val = np.zeros((num_samples, seq_len), dtype=np.bool)
    x_val[one_indices, 0] = 1

    y_val = np.zeros(num_samples, dtype=np.bool)
    y_val[one_indices] = 1

    return x_val, y_val

N_train = 100
N_test = 10
recall_len = 20

X_train, y_train = gen_sig(N_train, recall_len)
X_test, y_test = gen_sig(N_train, recall_len)

print('Build STATEFUL model...')
model = Sequential()
model.add(LSTM(10, batch_input_shape=(1, 1, 1), return_sequences=False, stateful=True))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Train...')
for epoch in range(15):
    mean_tr_acc = []
    mean_tr_loss = []
    for seq_idx in range(X_train.shape[0]):
        start_val = X_train[seq_idx, 0]
        assert y_train[seq_idx] == start_val
        assert tuple(np.nonzero(X_train[seq_idx, :]))[0].shape[0] == start_val

        y_in = np.array([y_train[seq_idx]], dtype=np.bool)

        for j in range(np.random.choice(a=np.arange(5, recall_len + 1))):
            x_in = np.array([[[X_train[seq_idx][j]]]])
            tr_loss, tr_acc = model.train_on_batch(x_in, y_in)
            mean_tr_acc.append(tr_acc)
            mean_tr_loss.append(tr_loss)
        model.reset_states()

    print('accuracy training = {}'.format(np.mean(mean_tr_acc)))
    print('loss training = {}'.format(np.mean(mean_tr_loss)))
    print('___________________________________')

    mean_te_acc = []
    mean_te_loss = []
    for seq_idx in range(X_test.shape[0]):
        start_val = X_test[seq_idx, 0]
        assert y_test[seq_idx] == start_val
        assert tuple(np.nonzero(X_test[seq_idx, :]))[0].shape[0] == start_val

        y_in = np.array([y_test[seq_idx]], dtype=np.bool)

        for j in range(np.random.choice(a=np.arange(5, recall_len + 1))):
            te_loss, te_acc = model.test_on_batch(np.array([[[X_test[seq_idx][j]]]], dtype=np.bool), y_in)
            mean_te_acc.append(te_acc)
            mean_te_loss.append(te_loss)
        model.reset_states()

    print('accuracy testing = {}'.format(np.mean(mean_te_acc)))
    print('loss testing = {}'.format(np.mean(mean_te_loss)))
    print('___________________________________')
As seen in the code, my error is being batched over each time-step. This is bad for multiple reasons. How do I train the network in two steps? For example:
Run a bunch of values through the network to accumulate the error
Adjust the weights of the network given this accumulated error
To do what is described in the original question, the easiest way is to train the original network with masking, but then test with a stateful network so that a sequence of any length can be classified:
import numpy as np
np.random.seed(1)
import tensorflow as tf
tf.set_random_seed(1)
from keras import models
from keras.layers import Dense, Masking, LSTM
import matplotlib.pyplot as plt

def stateful_model():
    hidden_units = 256

    model = models.Sequential()
    model.add(LSTM(hidden_units, batch_input_shape=(1, 1, 1), return_sequences=False, stateful=True))
    model.add(Dense(1, activation='relu', name='output'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    return model

def train_rnn(x_train, y_train, max_len, mask):
    epochs = 10
    batch_size = 200
    vec_dims = 1
    hidden_units = 256
    in_shape = (max_len, vec_dims)

    model = models.Sequential()
    model.add(Masking(mask, name="in_layer", input_shape=in_shape))
    model.add(LSTM(hidden_units, return_sequences=False))
    model.add(Dense(1, activation='relu', name='output'))
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_split=0.05)
    return model

def gen_train_sig_cls_pair(t_stops, num_examples, mask):
    x = []
    y = []
    max_t = int(np.max(t_stops))
    for t_stop in t_stops:
        one_indices = np.random.choice(a=num_examples, size=num_examples // 2, replace=False)

        sig = np.zeros((num_examples, max_t), dtype=np.int8)
        sig[one_indices, 0] = 1
        sig[:, t_stop:] = mask
        x.append(sig)

        cls = np.zeros(num_examples, dtype=np.bool)
        cls[one_indices] = 1
        y.append(cls)

    return np.concatenate(x, axis=0), np.concatenate(y, axis=0)

def gen_test_sig_cls_pair(t_stops, num_examples):
    x = []
    y = []
    for t_stop in t_stops:
        one_indices = np.random.choice(a=num_examples, size=num_examples // 2, replace=False)

        sig = np.zeros((num_examples, t_stop), dtype=np.bool)
        sig[one_indices, 0] = 1
        x.extend(list(sig))

        cls = np.zeros((num_examples, t_stop), dtype=np.bool)
        cls[one_indices] = 1
        y.extend(list(cls))

    return x, y

if __name__ == '__main__':
    noise_mag = 0.01
    mask_val = -10
    signal_lengths = (10, 15, 20)

    x_in, y_in = gen_train_sig_cls_pair(signal_lengths, 10, mask_val)

    mod = train_rnn(x_in[:, :, None], y_in, int(np.max(signal_lengths)), mask_val)

    testing_dat, expected = gen_test_sig_cls_pair(signal_lengths, 3)

    state_mod = stateful_model()
    state_mod.set_weights(mod.get_weights())

    res = []
    for s_i in range(len(testing_dat)):
        seq_in = list(testing_dat[s_i])
        seq_len = len(seq_in)

        for t_i in range(seq_len):
            res.extend(state_mod.predict(np.array([[[seq_in[t_i]]]])))

        state_mod.reset_states()

    fig, axes = plt.subplots(2)
    axes[0].plot(np.concatenate(testing_dat), label="input")

    axes[1].plot(res, "ro", label="result", alpha=0.2)
    axes[1].plot(np.concatenate(expected, axis=0), "bo", label="expected", alpha=0.2)

    axes[1].legend(bbox_to_anchor=(1.1, 1))
    plt.show()
