Memory Leak in PyTorch Autograd of WGAN-GP

I want to use WGAN-GP, and when I run the code, it gives me an error:
def calculate_gradient_penalty(real_images, fake_images):
    t = torch.rand(real_images.size(0), 1, 1, 1).to(real_images.device)
    t = t.expand(real_images.size())
    interpolates = t * real_images + (1 - t) * fake_images
    interpolates.requires_grad_(True)
    disc_interpolates = D(interpolates)
    grad = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True, retain_graph=True, allow_unused=True)[0]
    grad_norm = torch.norm(torch.flatten(grad, start_dim=1), dim=1)
    loss_gp = torch.mean((grad_norm - 1) ** 2) * lambda_term
    return loss_gp
RuntimeError                              Traceback (most recent call last)

/opt/conda/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
    243                 create_graph=create_graph,
    244                 inputs=inputs)
--> 245         torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
    246
    247     def register_hook(self, hook):

/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    143         retain_graph = create_graph
    144
--> 145     Variable._execution_engine.run_backward(
    146         tensors, grad_tensors, retain_graph, create_graph, inputs,
    147         allow_unreachable=True, accumulate_grad=True)  # allow_unreachable flag

RuntimeError: CUDA out of memory. Tried to allocate 64.00 MiB (GPU 2; 15.75 GiB total capacity; 13.76 GiB already allocated; 2.75 MiB free; 14.50 GiB reserved in total by PyTorch)
The training code:
%%time
d_progress = []
d_fake_progress = []
d_real_progress = []
penalty = []
g_progress = []

data = get_infinite_batches(benign_data_loader)
one = torch.FloatTensor([1]).to(device)
mone = (one * -1).to(device)

for g_iter in range(generator_iters):
    print('----------G Iter-{}----------'.format(g_iter+1))
    for p in D.parameters():
        p.requires_grad = True  # This is True by default
    d_loss_real = 0
    d_loss_fake = 0
    Wasserstein_D = 0

    for d_iter in range(critic_iter):
        D.zero_grad()
        images = data.__next__()
        if images.size()[0] != batch_size:
            continue

        # Train Discriminator
        # Real Images
        images = images.to(device)
        z = torch.randn(batch_size, 100, 1, 1).to(device)
        d_loss_real = D(images)
        d_loss_real = d_loss_real.mean(0).view(1)
        d_loss_real.backward(mone)

        # Fake Images
        fake_images = G(z)
        d_loss_fake = D(fake_images)
        d_loss_fake = d_loss_fake.mean(0).view(1)
        d_loss_fake.backward(one)

        # Calculate Penalty
        gradient_penalty = calculate_gradient_penalty(images.data, fake_images.data)
        gradient_penalty.backward()

        # Total Loss
        d_loss = d_loss_fake - d_loss_real + gradient_penalty
        Wasserstein_D = d_loss_real - d_loss_fake
        d_optimizer.step()
        print(f'D Iter:{d_iter+1}/{critic_iter} Loss:{d_loss.detach().cpu().numpy()}')
        time.sleep(0.1)

    d_progress.append(d_loss)  # Store Loss
    d_fake_progress.append(d_loss_fake)
    d_real_progress.append(d_loss_real)
    penalty.append(gradient_penalty)

    # Generator Update
    for p in D.parameters():
        p.requires_grad = False  # Avoid Computation

    # Train Generator
    # Compute with Fake
    G.zero_grad()
    z = torch.randn(batch_size, 100, 1, 1).to(device)
    fake_images = G(z)
    g_loss = D(fake_images)
    g_loss = g_loss.mean().mean(0).view(1)
    g_loss.backward(one)
    # g_cost = -g_loss
    g_optimizer.step()
    print(f'G Iter:{g_iter+1}/{generator_iters} Loss:{g_loss.detach().cpu().numpy()}')
    g_progress.append(g_loss)  # Store Loss
Does anyone know how to solve this problem?

All loss tensors that are stored beyond a single optimization step (i.e. appended to lists defined outside the for g_iter in range(generator_iters) loop) need to be detached from the graph. Otherwise, you keep every previous iteration's computation graph in memory.
As such, you should detach anything that gets appended to d_progress, d_fake_progress, d_real_progress, penalty, and g_progress.
You can do so by converting each tensor to a Python scalar with torch.Tensor.item(); the graph will then be freed on the following iteration. Change the following lines:
d_progress.append(d_loss) # Store Loss
d_fake_progress.append(d_loss_fake)
d_real_progress.append(d_loss_real)
penalty.append(gradient_penalty)
#######
g_progress.append(g_loss) # Store Loss
to:
d_progress.append(d_loss.item()) # Store Loss
d_fake_progress.append(d_loss_fake.item())
d_real_progress.append(d_loss_real.item())
penalty.append(gradient_penalty.item())
#######
g_progress.append(g_loss.item()) # Store Loss
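If you prefer to keep the stored values as tensors rather than Python floats (for example, to torch.stack them later), detaching works as well. A minimal sketch of that alternative, reusing the same list names:

# Alternative: keep tensors instead of Python floats, but cut them from the graph.
# .detach() returns a tensor with no autograd history, so the per-iteration
# computation graph can be freed just like with .item().
d_progress.append(d_loss.detach().cpu())           # Store Loss (no graph attached)
d_fake_progress.append(d_loss_fake.detach().cpu())
d_real_progress.append(d_loss_real.detach().cpu())
penalty.append(gradient_penalty.detach().cpu())
#######
g_progress.append(g_loss.detach().cpu())           # Store Loss (no graph attached)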

Related

How to calculate F1 Score for Multi-label Classification

I am trying to calculate the F1 score (and accuracy) for my multi-label classification problem. Could you please provide feedback on my method and whether I'm calculating it correctly? Note that I compute IoU (intersection over union) when the model predicts an object as 1, and mark it as a TP only if the IoU is greater than or equal to 0.5.
GT labels: 14 x 10 x 128
Output: 14 x 10 x 128
where 14 is the batch_size, 10 is the sequence_length, and 128 is the object vector (i.e., 1 if the object at an index belongs to the sequence and 0 otherwise).
def calculate_performance_metrics(total_padded_elements, gt_labels, predicted_labels):
    # check if TP pred objects overlap with TP gt objects
    TP_INDICES = (torch.logical_and(predicted_labels == 1, gt_labels == 1)).nonzero()  # we only want the batch and object indices, i.e. the 0 and 2 indices
    TP = calculate_tp_with_iou()  # details of this don't matter for now
    FP = torch.sum(torch.logical_and(predicted_labels, 1 - gt_labels)).item()
    TN = torch.sum(torch.logical_and(1 - predicted_labels, 1 - gt_labels)).item()
    FN = torch.sum(torch.logical_and(1 - predicted_labels, gt_labels)).item()
    return float(TP), float(FP), float(TN - total_padded_elements), float(FN)

for epoch in range(10):
    EPOCH_TP = EPOCH_FP = EPOCH_TN = EPOCH_FN = EPOCH_PRECISION = EPOCH_RECALL = EPOCH_F1 = 0.
    for inputs, gt_labels, masks in tr_dl:
        outputs = model(inputs)  # out shape: (14, 10, 128)
        # mask shape: (14, 10). So need to expand it to the shape of output
        masks = masks[:, :, None].expand_as(outputs)
        pred_labels = (torch.sigmoid(outputs) >= 0.5).float().type(torch.int64)  # consider all predictions above 0.5 as 1, rest 0
        pred_labels = pred_labels * masks
        gt_labels = (gt_labels * masks).type(torch.int64)
        total_padded_elements = masks.numel() - masks.sum()  # need this to get accurate true negatives
        batch_tp, batch_fp, batch_tn, batch_fn = calculate_performance_metrics(total_padded_elements, gt_labels, pred_labels)
        EPOCH_TP += batch_tp
        EPOCH_FP += batch_fp
        EPOCH_TN += batch_tn
        EPOCH_FN += batch_fn
    EPOCH_ACCURACY = (EPOCH_TP + EPOCH_TN) / (EPOCH_TP + EPOCH_TN + EPOCH_FP + EPOCH_FN)
    if EPOCH_TP + EPOCH_FP > 0:
        EPOCH_PRECISION = EPOCH_TP / (EPOCH_TP + EPOCH_FP)
    if EPOCH_TP + EPOCH_FN > 0:
        EPOCH_RECALL = EPOCH_TP / (EPOCH_TP + EPOCH_FN)
    EPOCH_F1 = (2 * EPOCH_PRECISION * EPOCH_RECALL) / (EPOCH_PRECISION + EPOCH_RECALL)
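As a quick sanity check of the precision/recall/F1 arithmetic itself (setting the IoU-based TP counting aside), here is a minimal sketch with made-up counts; the numbers are mine, not from the question:

# Toy check of micro-averaged metrics from raw counts (made-up values).
TP, FP, TN, FN = 30.0, 10.0, 950.0, 10.0

accuracy = (TP + TN) / (TP + TN + FP + FN)           # 0.98
precision = TP / (TP + FP) if TP + FP > 0 else 0.0   # 0.75
recall = TP / (TP + FN) if TP + FN > 0 else 0.0      # 0.75
f1 = (2 * precision * recall) / (precision + recall) if precision + recall > 0 else 0.0  # 0.75

print(accuracy, precision, recall, f1)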

Error: mismatch between m1 and m2; what is the error in my conv layer calculations?

I have the following architecture:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # first convolutional layer
        self.conv1 = torch.nn.Conv2d(1, 32, 5)   # (32,220,220) output tensor # (W-F)/S + 1 = (224-5)/1 + 1 = 220
        # first Max-pooling layer
        self.pool1 = torch.nn.MaxPool2d(2, 2)    # (32,110,110) output tensor
        # second convolutional layer
        self.conv2 = torch.nn.Conv2d(32, 64, 5)  # (64,106,106) output tensor # (W-F)/S + 1 = (110-5)/1 + 1 = 106
        # second Max-pooling layer
        self.pool2 = torch.nn.MaxPool2d(2, 2)    # (64,53,53) output tensor
        # Fully connected layers
        self.fc1 = torch.nn.Linear(64*53*53, 1000)
        self.fc2 = torch.nn.Linear(1000, 500)
        self.fc3 = torch.nn.Linear(500, 136)
        self.drop1 = nn.Dropout(p=0.4)
The images are of 224 x 224 dimensions. I'm getting the error:
RuntimeError: size mismatch, m1: [10 x 173056], m2: [179776 x 1000] at
/opt/conda/conda-bld/pytorch_1524584710464/work/aten/src/TH/generic/THTensorMath.c:2033
I cannot see where my error is, can someone help me please?
The architecture is fine; I was cropping the input image to 220 x 200 dimensions, which was obviously incorrect.
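For reference, a small sketch (my own check, not from the original post) of how the flattened feature size depends on the input resolution: fc1 expects 64 x 53 x 53 = 179776 inputs, which corresponds to a 224 x 224 image, while the 173056 in m1 equals 64 x 52 x 52, which is what a 220 x 220 input would produce:

def flat_features(side):
    # two blocks of: 5x5 conv (stride 1, no padding) followed by 2x2 max-pool
    side = (side - 5 + 1) // 2   # after conv1 + pool1
    side = (side - 5 + 1) // 2   # after conv2 + pool2
    return 64 * side * side      # 64 channels after conv2

print(flat_features(224))  # 179776 -> what fc1 expects (64*53*53)
print(flat_features(220))  # 173056 -> the m1 size in the error (64*52*52)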

How to compute the uncertainty of a Monte Carlo Dropout neural network with PyTorch?

I am trying to implement a Bayesian CNN using MC Dropout in PyTorch. The main idea is that, by applying dropout at test time and running many forward passes, you get predictions from a variety of different models. I need to obtain the uncertainty; does anyone have an idea of how I can do it, please?
This is how I defined my CNN:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.dropout = nn.Dropout(p=0.3)
        nn.init.xavier_uniform_(self.conv1.weight)
        nn.init.constant_(self.conv1.bias, 0.0)
        nn.init.xavier_uniform_(self.conv2.weight)
        nn.init.constant_(self.conv2.bias, 0.0)
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.constant_(self.fc1.bias, 0.0)
        nn.init.xavier_uniform_(self.fc2.weight)
        nn.init.constant_(self.fc2.bias, 0.0)
        nn.init.xavier_uniform_(self.fc3.weight)
        nn.init.constant_(self.fc3.bias, 0.0)

    def forward(self, x):
        x = self.pool(F.relu(self.dropout(self.conv1(x))))  # recommended to add the relu
        x = self.pool(F.relu(self.dropout(self.conv2(x))))  # recommended to add the relu
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(self.dropout(x)))
        x = self.fc3(self.dropout(x))  # no activation function needed for the last layer
        return x

model = Net().to(device)
train_accuracies = np.zeros(num_epochs)
test_accuracies = np.zeros(num_epochs)

dataiter = iter(trainloader)
images, labels = dataiter.next()

# initializing variables
loss_acc = []
class_acc_mcdo = []
start_train = True

# Defining the Loss Function and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

def train():
    loss_vals = []
    acc_vals = []
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        n_correct = 0  # initialize number of correct predictions
        acc = 0        # initialize accuracy of each epoch
        somme = 0      # initialize sum of losses of each epoch
        epoch_loss = []
        for i, (images, labels) in enumerate(trainloader):
            # origin shape: [4, 3, 32, 32] = 4, 3, 1024
            # input_layer: 3 input channels, 6 output channels, 5 kernel size
            images = images.to(device)
            labels = labels.to(device)
            # Forward pass
            outputs = model.train()(images)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()  # zero the parameter gradients
            loss.backward()
            epoch_loss.append(loss.item())  # add the loss to epoch_loss list
            optimizer.step()
            # max returns (value, index)
            _, predicted = torch.max(outputs, 1)
            n_correct += (predicted == labels).sum().item()
            # print statistics
            if (i + 1) % 2000 == 0:
                print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{n_total_steps}], Loss: {loss.item():.4f}')
        somme = (sum(epoch_loss)) / len(epoch_loss)
        loss_vals.append(somme)  # add the epoch's loss to loss_vals
        print("Loss = {}".format(somme))
        acc = 100 * n_correct / len(trainset)
        acc_vals.append(acc)  # add the epoch's Accuracy to acc_vals
        print("Accuracy = {}".format(acc))
    # SAVE
    PATH = './cnn.pth'
    torch.save(model.state_dict(), PATH)
    loss_acc.append(loss_vals)
    loss_acc.append(acc_vals)
    return loss_acc
And here is the code for the MC dropout test:
def enable_dropout(model):
    """ Function to enable the dropout layers during test-time """
    for m in model.modules():
        if m.__class__.__name__.startswith('Dropout'):
            m.train()

def test():
    # set non-dropout layers to eval mode
    model.eval()
    # set dropout layers to train mode
    enable_dropout(model)
    test_loss = 0
    correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(10)]
    n_class_samples = [0 for i in range(10)]
    T = 100
    for images, labels in testloader:
        images = images.to(device)
        labels = labels.to(device)
        with torch.no_grad():
            output_list = []
            # getting outputs for T forward passes
            for i in range(T):
                output_list.append(torch.unsqueeze(model(images), 0))
        # calculating mean
        output_mean = torch.cat(output_list, 0).mean(0)
        test_loss += F.nll_loss(F.log_softmax(output_mean, dim=1), labels,
                                reduction='sum').data  # sum up batch loss
        _, predicted = torch.max(output_mean, 1)  # get the index of the max log-probability
        correct += (predicted == labels).sum().item()  # sum up correct predictions
        n_samples += labels.size(0)
        for i in range(batch_size):
            label = labels[i]
            predi = predicted[i]
            if (label == predi):
                n_class_correct[label] += 1
            n_class_samples[label] += 1
    test_loss /= len(testloader.dataset)
    # PRINT TO HTML PAGE
    print('\n Average loss: {:.4f}, Accuracy: ({:.3f}%)\n'.format(
        test_loss,
        100. * correct / n_samples))
    # Accuracy for each class
    acc_classes = []
    for i in range(10):
        acc = 100.0 * n_class_correct[i] / n_class_samples[i]
        print(f'Accuracy of {classes[i]}: {acc} %')
        acc_classes.append(acc)
    class_acc_mcdo.extend(acc_classes)
    print('Finished Testing')
You can compute statistics, such as the sample mean or the sample variance, of the different stochastic forward passes at test time (i.e. on the test or validation data) while dropout is enabled. These statistics can be used to represent uncertainty. For example, you can compute the entropy, which is a measure of uncertainty, from the sample mean of the predicted probabilities.
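As a minimal sketch of that idea, reusing the names from the test() code above (model, images, T, enable_dropout); applying softmax to each pass before averaging is my choice here, not something the original post specifies:

# Sketch: per-sample uncertainty from T stochastic forward passes on one batch.
import torch
import torch.nn.functional as F

model.eval()
enable_dropout(model)  # keep only the dropout layers in train mode

with torch.no_grad():
    # shape: (T, batch_size, num_classes), class probabilities for each pass
    probs = torch.stack([F.softmax(model(images), dim=1) for _ in range(T)], dim=0)

mean_probs = probs.mean(dim=0)  # predictive mean, (batch_size, num_classes)
var_probs = probs.var(dim=0)    # per-class variance across the T passes
# predictive entropy of the mean distribution, one value per sample
entropy = -(mean_probs * torch.log(mean_probs + 1e-12)).sum(dim=1)

print(mean_probs.argmax(dim=1))  # predicted class per sample
print(entropy)                   # higher entropy = more uncertain prediction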

Out-of-core learning for a multi-label text classification problem

I am working on a multi-label text classification problem (90 target labels in total). The data distribution has a long tail and class imbalance. I am working with a sample of 100k records using the OvR (One-vs-Rest) strategy. Since the dataset is huge, I am trying out the partial_fit method. I came to know that there were some issues previously, and a similar question was asked back in 2017. I tried partial_fit and found that the same issue still exists, or maybe I am not doing it correctly.
Scikit-learn version : 0.22.2.post1
Code
def stream_documents(data=None):
    """Iterate over documents of the dataset.
    Documents are represented as dictionaries.
    """
    for index, row in data.iterrows():
        tmp_dict = dict()
        tmp_dict['text'] = row[TEXT_FEAT]
        tmp_dict['target'] = row[TARGET_LABEL]
        yield tmp_dict

def get_minibatch(doc_iter, size, mlb):
    """Extract a minibatch of examples, return a tuple X_text, y.
    Note: size is before excluding invalid docs with no topics assigned.
    """
    data = [(doc['text'], doc['target'])
            for doc in itertools.islice(doc_iter, size)]
    if not len(data):
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*data)
    y = pd.Series(data=y)
    y_encoded = mlb.transform(y.str.split(','))
    # print("Y SHAPE : ", np.asarray(y_encoded, dtype=int).shape)
    return X_text, np.asarray(y_encoded, dtype=int)

def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    X_text, y = get_minibatch(doc_iter, minibatch_size, mlb)
    while len(X_text):
        yield X_text, y
        X_text, y = get_minibatch(doc_iter, minibatch_size, mlb)

def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    s = "%20s classifier : \t" % cls_name
    s += "%(n_train)6d train docs " % stats
    s += "%(n_test)6d test docs " % test_stats
    s += "Acc: %(accuracy).3f " % stats
    s += "f1: %(f1).3f " % stats
    s += "P: %(p).3f " % stats
    s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
    return s

vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
data_stream = stream_documents(data=df_sample_xs)  # X, y

partial_fit_classifiers = {
    'SGD': OneVsRestClassifier(SGDClassifier(max_iter=1000, tol=1e-3)),
    'Logistic': OneVsRestClassifier(LogisticRegression(solver='lbfgs', max_iter=500))
}

# test data statistics
test_stats = {'n_test': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000, mlb)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
print("Test set is %d documents" % (len(y_test)))

cls_stats = {}
for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0,
             'accuracy_history': [(0, 0)],
             'f1': 0.0,
             'f1_history': [(0, 0)],
             'p': 0.0,
             'p_history': [(0, 0)],
             't0': time.time(),
             'runtime_history': [(0, 0)],
             'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents, mlb)
minibatch_size = 2000
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0

# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick
    # print(X_train.shape, y_train.shape)
    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        print(cls_name)
        # update estimator with examples in the current mini-batch
        # cls.partial_fit(X_train, y_train, classes=all_classes)
        cls.partial_fit(X_train, y_train, classes=mlb.transform(df_sample_xs[TARGET_LABEL].str.split(',')))
        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['f1'] = f1_score(y_test, cls.predict(X_test))
        cls_stats[cls_name]['p'] = precision_score(y_test, cls.predict(X_test))
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        acc_history = (cls_stats[cls_name]['accuracy'], cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        f1_history = (cls_stats[cls_name]['f1'], cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['f1_history'].append(f1_history)
        p_history = (cls_stats[cls_name]['p'], cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['p_history'].append(p_history)
        run_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['f1'],
                       cls_stats[cls_name]['p'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
Error
SGD
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-87-cf38c633c6aa> in <module>
     31             # update estimator with examples in the current mini-batch
     32             # cls.partial_fit(X_train, y_train, classes=all_classes)
---> 33             cls.partial_fit(X_train, y_train, classes=mlb.transform(df_sample_xs[TARGET_LABEL].str.split(',')))
     34             # accumulate test accuracy stats
     35             cls_stats[cls_name]['total_fit_time'] += time.time() - tick

/opt/virtual_env/py3/lib/python3.6/site-packages/sklearn/utils/metaestimators.py in <lambda>(*args, **kwargs)
    114
    115         # lambda, but not partial, allows help() to work with update_wrapper
--> 116         out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
    117         # update the docstring of the returned function
    118         update_wrapper(out, self.fn)

/opt/virtual_env/py3/lib/python3.6/site-packages/sklearn/multiclass.py in partial_fit(self, X, y, classes)
    287                                  self.classes_))
    288
--> 289         Y = self.label_binarizer_.transform(y)
    290         Y = Y.tocsc()
    291         columns = (col.toarray().ravel() for col in Y.T)

/opt/virtual_env/py3/lib/python3.6/site-packages/sklearn/preprocessing/_label.py in transform(self, y)
    478         y_is_multilabel = type_of_target(y).startswith('multilabel')
    479         if y_is_multilabel and not self.y_type_.startswith('multilabel'):
--> 480             raise ValueError("The object was not fitted with multilabel"
    481                              " input.")
    482

ValueError: The object was not fitted with multilabel input.
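For context, this is roughly what the multilabel indicator matrix produced by mlb.transform looks like, i.e. the y that partial_fit receives. This is a toy sketch of my own with made-up labels, not from the question (the real mlb is fit on all 90 labels):

# Toy illustration of the multilabel indicator format built from
# comma-separated label strings. Labels here are made up for illustration.
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
mlb.fit([['sports', 'politics', 'tech']])   # in the question: all 90 target labels

y_batch = ['sports,tech', 'politics']       # comma-separated label strings
y_encoded = mlb.transform([s.split(',') for s in y_batch])
print(mlb.classes_)   # ['politics' 'sports' 'tech']
print(y_encoded)      # [[0 1 1]
                      #  [1 0 0]]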

Batch Training Accuracy is always a multiple of 10%

So I am training a CNN and compute the training accuracy for each batch. Most of the time it gives 100% batch training accuracy, which I thought was okay because I'm testing my model against the data I trained it with. But at some iterations I get a 90% batch training accuracy. And worse, sometimes it drops to 0% very quickly and then bounces back to 100%. I used the algorithm in https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/04_Save_Restore.ipynb, and they also compute the batch training accuracy, but they don't get the same results I get. They started out at around 80% batch training accuracy and observed a gradual increase up to 98%. Why is this?
I suspected that my network is overfitting.
Here is my exact code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import pyfftw
from scipy import signal
import xlrd
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import time
from datetime import timedelta
import math
import os
from sklearn.metrics import confusion_matrix

##matplotlib inline
plt.style.use('ggplot')

## define functions
def read_data(file_path):
    ## column_names = ['user-id','activity','timestamp', 'x-axis', 'y-axis', 'z-axis']
    column_names = ['activity', 'timestamp', 'Ax', 'Ay', 'Az', 'Gx', 'Gy', 'Gz', 'Mx', 'My', 'Mz']  ## 3 sensors
    data = pd.read_csv(file_path, header=None, names=column_names)
    return data

def feature_normalize(dataset):
    mu = np.mean(dataset, axis=0)
    sigma = np.std(dataset, axis=0)
    return (dataset - mu) / sigma

def plot_axis(ax, x, y, title):
    ax.plot(x, y)
    ax.set_title(title)
    ax.xaxis.set_visible(False)
    ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])
    ax.set_xlim([min(x), max(x)])
    ax.grid(True)

def plot_activity(activity, data):
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, figsize=(15, 10), sharex=True)
    plot_axis(ax0, data['timestamp'], data['Ax'], 'x-axis')
    plot_axis(ax1, data['timestamp'], data['Ay'], 'y-axis')
    plot_axis(ax2, data['timestamp'], data['Az'], 'z-axis')
    plt.subplots_adjust(hspace=0.2)
    fig.suptitle(activity)
    plt.subplots_adjust(top=0.90)
    plt.show()

def windows(data, size):
    start = 0
    while start < data.count():
        yield start, start + size
        start += (size / 2)

def segment_signal(data, window_size=None, num_channels=None):  # edited
    segments = np.empty((0, window_size, num_channels))  # change from 3 to 9 channels for AGM fusion # use variable num_channels=9
    labels = np.empty((0))
    for (n_start, n_end) in windows(data['timestamp'], window_size):
        ## x = data["x-axis"][start:end]
        ## y = data["y-axis"][start:end]
        ## z = data["z-axis"][start:end]
        n_start = int(n_start)
        n_end = int(n_end)
        Ax = data["Ax"][n_start:n_end]
        Ay = data["Ay"][n_start:n_end]
        Az = data["Az"][n_start:n_end]
        Gx = data["Gx"][n_start:n_end]
        Gy = data["Gy"][n_start:n_end]
        Gz = data["Gz"][n_start:n_end]
        Mx = data["Mx"][n_start:n_end]
        My = data["My"][n_start:n_end]
        Mz = data["Mz"][n_start:n_end]
        if (len(dataset['timestamp'][n_start:n_end]) == window_size):  # include only windows with size of 90
            segments = np.vstack([segments, np.dstack([Ax, Ay, Az, Gx, Gy, Gz, Mx, My, Mz])])
            labels = np.append(labels, stats.mode(data["activity"][n_start:n_end])[0][0])
    return segments, labels

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial)

def depthwise_conv2d(x, W):
    return tf.nn.depthwise_conv2d(x, W, [1, 1, 1, 1], padding='VALID')

def apply_depthwise_conv(x, weights, biases):
    return tf.nn.relu(tf.add(depthwise_conv2d(x, weights), biases))

def apply_max_pool(x, kernel_size, stride_size):
    return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1],
                          strides=[1, 1, stride_size, 1], padding='VALID')
#------------------------get dataset----------------------#
## run shoaib_dataset.py to generate dataset_shoaib_total.txt
## get data from dataset_shoaib_total.txt
dataset = read_data('dataset_shoaib_total.txt')

#--------------------preprocessing------------------------#
dataset['Ax'] = feature_normalize(dataset['Ax'])
dataset['Ay'] = feature_normalize(dataset['Ay'])
dataset['Az'] = feature_normalize(dataset['Az'])
dataset['Gx'] = feature_normalize(dataset['Gx'])
dataset['Gy'] = feature_normalize(dataset['Gy'])
dataset['Gz'] = feature_normalize(dataset['Gz'])
dataset['Mx'] = feature_normalize(dataset['Mx'])
dataset['My'] = feature_normalize(dataset['My'])
dataset['Mz'] = feature_normalize(dataset['Mz'])

###--------------------plot activity data----------------#
##for activity in np.unique(dataset["activity"]):
##    subset = dataset[dataset["activity"] == activity][:180]
##    plot_activity(activity,subset)

#------------------fixed hyperparameters--------------------#
window_size = 200  # from 90 # FIXED at 4 seconds

#----------------input hyperparameters------------------#
input_height = 1
input_width = window_size
num_labels = 6
num_channels = 9  # from 3 channels # 9 channels for AGM

#-------------------sliding time window----------------#
segments, labels = segment_signal(dataset, window_size=window_size, num_channels=num_channels)
labels = np.asarray(pd.get_dummies(labels), dtype=np.int8)
reshaped_segments = segments.reshape(len(segments), (window_size * num_channels))  # use variable num_channels instead of constant 3 channels

#------------divide data into test and training set-----------#
train_test_split = np.random.rand(len(reshaped_segments)) < 0.80
train_x_init = reshaped_segments[train_test_split]
train_y_init = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]
train_validation_split = np.random.rand(len(train_x_init)) < 0.80
train_x = train_x_init[train_validation_split]
train_y = train_y_init[train_validation_split]
validation_x = train_x_init[~train_validation_split]
validation_y = train_y_init[~train_validation_split]

#---------------training hyperparameters----------------#
batch_size = 10
kernel_size = 60   # from 60 # optimal 2
depth = 15         # from 60 # optimal 15
num_hidden = 1000  # from 1000 # optimal 80
learning_rate = 0.0001
training_epochs = 8
total_batches = train_x.shape[0] ## // batch_size

#---------define placeholders for input----------#
X = tf.placeholder(tf.float32, shape=[None, input_width * num_channels], name="input")
X_reshaped = tf.reshape(X, [-1, input_height, input_width, num_channels])
Y = tf.placeholder(tf.float32, shape=[None, num_labels])

#---------------------perform convolution-----------------#
# first convolutional layer
c_weights = weight_variable([1, kernel_size, num_channels, depth])
c_biases = bias_variable([depth * num_channels])
c = apply_depthwise_conv(X_reshaped, c_weights, c_biases)
p = apply_max_pool(c, 20, 2)
# second convolutional layer
c2_weights = weight_variable([1, 6, depth * num_channels, depth // 10])
c2_biases = bias_variable([(depth * num_channels) * (depth // 10)])
c = apply_depthwise_conv(p, c2_weights, c2_biases)

#--------------flatten data for fully connected layers----------#
shape = c.get_shape().as_list()
c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])

#------------fully connected layers----------------#
f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels * (depth // 10), num_hidden])
f_biases_l1 = bias_variable([num_hidden])
f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1), f_biases_l1))

#----------------------dropout------------------#
keep_prob = tf.placeholder(tf.float32)
drop_layer = tf.nn.dropout(f, keep_prob)

#----------------------softmax layer----------------#
out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
y_ = tf.nn.softmax(tf.add(tf.matmul(drop_layer, out_weights), out_biases), name="y_")

#-----------------loss optimization-------------#
loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

#-----------------compute accuracy---------------#
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1], dtype=float)
saver = tf.train.Saver()
session = tf.Session()
session.run(tf.global_variables_initializer())

#-------------early stopping-----------------#
# Best validation accuracy seen so far.
best_validation_accuracy = 0.0
# Iteration-number for last improvement to validation accuracy.
last_improvement = 0
# Stop optimization if no improvement found in this many iterations.
require_improvement = 1000
# Counter for total number of iterations performed so far.
total_iterations = 0
def validation_accuracy():
    return session.run(accuracy, feed_dict={X: validation_x, Y: validation_y, keep_prob: 1.0})

def next_batch(b, batch_size, train_x, train_y):
    ##for b in range(total_batches):
    offset = (b * batch_size) % (train_y.shape[0] - batch_size)
    batch_x = train_x[offset:(offset + batch_size), :]
    batch_y = train_y[offset:(offset + batch_size), :]
    return batch_x, batch_y

def optimize(num_iterations):
    # Ensure we update the global variables rather than local copies.
    global total_iterations
    global best_validation_accuracy
    global last_improvement
    # Start-time used for printing time-usage below.
    start_time = time.time()
    for i in range(num_iterations):
        # Increase the total number of iterations performed.
        # It is easier to update it in each iteration because
        # we need this number several times in the following.
        total_iterations += 1
        # Get a batch of training examples.
        # x_batch now holds a batch of images and
        # y_true_batch are the true labels for those images.
        ##x_batch, y_true_batch = data.train.next_batch(train_batch_size)
        x_batch, y_true_batch = next_batch(i, batch_size, train_x, train_y)
        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        feed_dict_train = {X: x_batch,
                           Y: y_true_batch, keep_prob: 0.5}
        # Run the optimizer using this batch of training data.
        # TensorFlow assigns the variables in feed_dict_train
        # to the placeholder variables and then runs the optimizer.
        session.run(optimizer, feed_dict=feed_dict_train)
        # Print status every 100 iterations and after last iteration.
        if (total_iterations % 100 == 0) or (i == (num_iterations - 1)):
            # Calculate the accuracy on the training-batch.
            acc_train = session.run(accuracy, feed_dict={X: x_batch,
                                                         Y: y_true_batch, keep_prob: 1.0})
            # Calculate the accuracy on the validation-set.
            # The function returns 2 values but we only need the first.
            ##acc_validation, _ = validation_accuracy()
            acc_validation = validation_accuracy()
            # If validation accuracy is an improvement over best-known.
            if acc_validation > best_validation_accuracy:
                # Update the best-known validation accuracy.
                best_validation_accuracy = acc_validation
                # Set the iteration for the last improvement to current.
                last_improvement = total_iterations
                # Save all variables of the TensorFlow graph to file.
                saver.save(sess=session, save_path="../shoaib-har_agm_es.ckpt")
                # A string to be printed below, shows improvement found.
                improved_str = '*'
            else:
                # An empty string to be printed below.
                # Shows that no improvement was found.
                improved_str = ''
            # Status-message for printing.
            msg = "Iter: {0:>6}, Train-Batch Accuracy: {1:>6.1%}, Validation Acc: {2:>6.1%} {3}"
            # Print it.
            print(msg.format(i + 1, acc_train, acc_validation, improved_str))
        # If no improvement found in the required number of iterations.
        if total_iterations - last_improvement > require_improvement:
            print("No improvement found in a while, stopping optimization.")
            # Break out from the for-loop.
            break
    # Ending time.
    end_time = time.time()
    # Difference between start and end-times.
    time_dif = end_time - start_time
    # Print the time-usage.
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))

optimize(10000)
With the output:
What exactly is training accuracy? Is it even computed? Or do you compute the training accuracy on the entire training data and not just the batch you trained your network with?
Here I printed the results such that the batch training accuracy and the training accuracy on the entire training set are printed every 20 iterations.
The data is divided into 3 sets: train, validation and test.
Batch training accuracy is computed on the current training batch (by comparing the predictions with the labels).
Validation accuracy is the accuracy on the validation set.
The batch accuracy can be computed just after a forward pass through the network. The number of samples in one forward pass is the batch size. Working on batches is simply a way to train models faster (mini-batch gradient descent).
Overfitting is when the model works very well on known data (the training set) but performs poorly on new data.
As to the 10% multiples: your batch size is 10, so the accuracy of a single batch can only take the values k/10, i.e. multiples of 10%.
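If you also want the accuracy over the full training set (rather than over a 10-sample batch), a minimal sketch in the same TF1 style, reusing the session, accuracy, X, Y, keep_prob, train_x and train_y defined in the question's code, could look like this:

# Sketch: accuracy over the entire training set, evaluated in chunks so the
# feed dict does not blow up memory. All names come from the code above.
def full_train_accuracy(chunk=256):
    correct_total = 0.0
    for start in range(0, train_x.shape[0], chunk):
        end = start + chunk
        acc_chunk = session.run(accuracy, feed_dict={X: train_x[start:end],
                                                     Y: train_y[start:end],
                                                     keep_prob: 1.0})
        # weight each chunk's accuracy by its actual number of samples
        correct_total += acc_chunk * (min(end, train_x.shape[0]) - start)
    return correct_total / train_x.shape[0]

# print("Full training-set accuracy: {:.3f}".format(full_train_accuracy()))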
