I am using the CheXpert dataset (found here on Kaggle) to build a CNN model that predicts disease conditions (e.g. cardiomegaly, pleural effusion, atelectasis, etc.) from chest x-ray images. I am using PyTorch Lightning and my code is attached to this question. I have tried several architectures, but I cannot get the models to perform well.
I performed the overfit test (in which I try to overfit a model on one batch of data) and the models were able to overfit the single batch, showing that they are capable of fitting the data. However, regardless of the architecture I use, there is a large gap between the training loss (which gets as low as 0.2) and the validation loss (which only gets as low as 0.49). On sensitivity and precision (the metrics I am interested in), the models perform terribly during validation. When I train for more epochs, I also observe that the loss values start to increase.
I would appreciate any help or suggestions to solve this problem. Thank you.
This is my code:
import torch
import torch.nn as nn
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.metrics import roc_auc_score
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
import numpy as np
import time
import pandas as pd
import gc
import random
from chexpertloader import ChestXrayDataset
# from ipykernel import kernelapp as app
from torch.utils.tensorboard import SummaryWriter
import torchmetrics
from torchmetrics import AUROC
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import confusion_matrix
seed = 123
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
backbones = {
'efficientnetb0': models.efficientnet_b0(weights='IMAGENET1K_V1'),
'efficientnetb1': models.efficientnet_b1(weights='IMAGENET1K_V1'),
'efficientnetb2': models.efficientnet_b2(weights='IMAGENET1K_V1'),
'efficientnetb3': models.efficientnet_b3(weights='IMAGENET1K_V1'),
'efficientnetb4': models.efficientnet_b4(weights='IMAGENET1K_V1'),
'efficientnetb5': models.efficientnet_b5(weights='IMAGENET1K_V1'),
'efficientnetb6': models.efficientnet_b6(weights='IMAGENET1K_V1'),
'efficientnetb7': models.efficientnet_b7(weights='IMAGENET1K_V1'),
'densenet121': models.densenet121(weights='IMAGENET1K_V1'),
'densenet161': models.densenet161(weights='IMAGENET1K_V1'),
'densenet169': models.densenet169(weights='IMAGENET1K_V1'),
'densenet201': models.densenet201(weights='IMAGENET1K_V1'),
'resnet50': models.resnet50(weights='IMAGENET1K_V1'),
'efficientnetV2_m': models.efficientnet_v2_m(weights='IMAGENET1K_V1')
}
class LitEfficientNet(pl.LightningModule):
def __init__(self, arch, num_classes, lr):
super(LitEfficientNet, self).__init__()
self.arch = arch
self.lr = lr
self.sizes = {
'efficientnetb0': (256, 224), 'efficientnetb1': (256, 240), 'efficientnetb2': (288, 288), 'efficientnetb3': (320, 300),
'efficientnetb4': (384, 380), 'efficientnetb5': (489, 456), 'efficientnetb6': (561, 528), 'efficientnetb7': (633, 600),
'densenet121':(256,256), 'densenet161':(256,256), 'densenet169':(256,256), 'densenet201':(256,256),
'resnet50':(224,224), 'efficientnetV2_m':(384,384)
}
self.batch_sizes = {
'efficientnetb0': 64, 'efficientnetb1': 64, 'efficientnetb2': 64, 'efficientnetb3': 32,
'efficientnetb4': 20, 'efficientnetb5': 7, 'efficientnetb6': 5, 'efficientnetb7': 2,
'densenet121':64, 'densenet161':32, 'densenet169':32, 'densenet201':32, 'resnet50':32,
'efficientnetV2_m':16
}
self.model = backbones[arch]
if 'densenet' in arch:
self.model.classifier = nn.Sequential(
nn.Linear(self.model.classifier.in_features, 2048),
nn.ReLU(),
nn.Dropout(p=0.6),
nn.Linear(2048, 512),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Linear(512, num_classes),
)
elif 'resnet' in arch:
self.model.fc = nn.Linear(self.model.fc.in_features, num_classes)
elif 'efficientnet' in arch:
self.model.classifier = nn.Sequential(
nn.Dropout(p=self.model.classifier[0].p, inplace=True),
nn.Linear(self.model.classifier[1].in_features, num_classes),
)
def forward(self, x):
y_pred = self.model(x)
return y_pred
def training_step(self, batch, batch_idx):
images, labels = batch
# Forward pass
m = nn.Sigmoid()
outputs = self.model(images)
classes = {0:'Cardiomegaly', 1:'Edema', 2:'Atelectasis',
3:'Pleural Effusion', 4:'Lung Opacity'
}
Loss = nn.BCEWithLogitsLoss()
loss = Loss(outputs, labels)
self.log('train_loss', loss, sync_dist=True)
return loss
def train_dataloader(self):
train_csv = pd.read_csv('CheXpert-v1.0-small/train.csv')
train_csv.fillna(0, inplace=True)
train_dataset = ChestXrayDataset("CheXpert-v1.0-small/train", train_csv, self.sizes[self.arch], True)
# Data loader
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=self.batch_sizes[self.arch], num_workers=8, shuffle=False
)
return train_loader
def validation_step(self, batch, batch_idx):
images, labels = batch
images = images
m = nn.Sigmoid()
outputs = self.model(images)
classes = {0:'Cardiomegaly', 1:'Edema', 2:'Atelectasis',
3:'Pleural Effusion', 4:'Lung Opacity'
}
Loss = nn.BCEWithLogitsLoss()
loss = Loss(outputs, labels)
self.log('val_loss', loss, sync_dist=True)
tensorboard_logs = {'val_loss': loss}
return loss
def val_dataloader(self):
validation_csv = pd.read_csv('CheXpert-v1.0-small/valid.csv')
validation_csv.fillna(0, inplace=True)
validation_csv = validation_csv.sample(frac=1)
validation_dataset = ChestXrayDataset("CheXpert-v1.0-small/valid", validation_csv, self.sizes[self.arch], True)
# Data loader
validation_loader = torch.utils.data.DataLoader(
dataset=validation_dataset, batch_size=self.batch_sizes[self.arch], num_workers=8, shuffle=False
)
return validation_loader
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
if __name__ == '__main__':
archs = ['efficientnetV2_m']
learning_rates = [0.001]
num_classes = 5
for i in range(len(learning_rates)):
arch = archs[0]
learning_rate = learning_rates[i]
logger = TensorBoardLogger(f"tb_logs_binary/{arch}",name=f"{arch}_{learning_rate}_ppv_npv_sensitive")
model = LitEfficientNet(arch,num_classes, learning_rate)
trainer = Trainer(
log_every_n_steps=1411,
logger=logger,
accelerator='gpu',
devices=-1,
# devices=1,
# overfit_batches=10,
max_epochs=50,
val_check_interval=0.1,
deterministic=True,
fast_dev_run=False)
trainer.fit(model)
del model, trainer
gc.collect()
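For reference, the overfit test mentioned in the question can be reproduced with Lightning's built-in overfit_batches option (already present, commented out, in the Trainer above); a minimal sketch, assuming the same LitEfficientNet module:
# Sketch of the single-batch overfit test (not the full training run).
# overfit_batches=1 makes Lightning fit and validate on the same single batch,
# so the training loss should drop towards zero if the model can fit the data.
overfit_model = LitEfficientNet('efficientnetV2_m', num_classes=5, lr=0.001)
overfit_trainer = Trainer(
    accelerator='gpu',
    devices=1,
    max_epochs=100,
    overfit_batches=1,
    deterministic=True,
)
overfit_trainer.fit(overfit_model)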
Related
I created my own custom toy dataset of graphs in order to learn graph neural networks with PyTorch Geometric (PyG).
The data looks like the following:
Data(x=[20, 1], edge_index=[2, 20], y=[1])
I also created a dataloader as follows:
from torch_geometric.loader import DataLoader
train_dataloader = DataLoader(dataset[0:8000], batch_size=32, shuffle=True)
test_dataloader = DataLoader(dataset[8000:10000], batch_size=32, shuffle=True)
Therefore, a batch will look like:
DataBatch(x=[640, 1], edge_index=[2, 640], y=[32], batch=[640], ptr=[33])
My attempt to make a Graph-CNN:
import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class GCN(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = GCNConv(dataset[0].num_node_features, 16)
self.conv2 = GCNConv(16, 16)
self.out = nn.Linear(16, 1)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.conv1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
out = self.out(x)
return out
model = GCN()
When I do something like:
criterion = torch.nn.CrossEntropyLoss()
target = batch.y.to(torch.float32)
loss = criterion(out, target)
loss
I get the error:
ValueError: Expected input batch_size (640) to match target batch_size (32).
Full code is in my github repo here:
https://github.com/amine179/myGNN-learning/blob/main/My%20first%20GCNN.ipynb
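For context on the shapes above: batch=[640] maps each of the 640 nodes to one of the 32 graphs in the batch, so node embeddings can be pooled into one vector per graph before the final linear layer. A minimal sketch of that pattern (an assumption about the intended fix, not the poster's code):
import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_mean_pool

class GraphLevelGCN(torch.nn.Module):
    # Hypothetical variant that outputs one prediction per graph instead of per node.
    def __init__(self, num_node_features):
        super().__init__()
        self.conv1 = GCNConv(num_node_features, 16)
        self.conv2 = GCNConv(16, 16)
        self.out = nn.Linear(16, 1)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        x = F.relu(self.conv2(x, edge_index))
        x = global_mean_pool(x, batch)  # [640, 16] -> [32, 16], one row per graph
        return self.out(x)              # [32, 1], matching y of size 32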
I would appreciate some assistance plotting a confusion matrix from my model. Code displayed below:
import os
import glob
from sklearn.model_selection import train_test_split
import shutil
from tensorflow.keras import callbacks
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from my_utils import create_generators
from CNN_models import amazon_model
import tensorflow as tf
import matplotlib.pyplot as plt
if __name__ == "__main__":
path_to_train = "data\\train"
path_to_val = "data\\val"
path_to_test = "data\\test"
batch_size = 128
epochs = 5
lr = 0.0001
train_generator, val_generator, test_generator = create_generators(batch_size, path_to_train, path_to_val, path_to_test)
nbr_classes = train_generator.num_classes
TRAIN=True
TEST=False
if TRAIN:
path_to_save_model = './Models'
ckpt_saver = ModelCheckpoint(
path_to_save_model,
monitor="val_accuracy",
mode='max',
save_best_only=True,
save_freq='epoch',
verbose=1
)
early_stop = EarlyStopping(monitor="val_accuracy", patience=5)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
csv_logger = tf.keras.callbacks.CSVLogger('first_model_training.log', separator=",", append=False)
model = amazon_model(nbr_classes)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr, amsgrad=True)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
history = model.fit(train_generator,
epochs=epochs,
batch_size=batch_size,
validation_data=val_generator,
callbacks=[ckpt_saver, early_stop, tensorboard_callback, csv_logger]
)
acc = history.history['accuracy']
print(acc)
model.save("first_model.h5")
from matplotlib.pyplot import figure
figure(figsize=(8, 6))
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./plots/accuracy', dpi=200)
plt.show()
figure(figsize=(8, 6))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('./plots/loss', dpi=200)
plt.show()
if TEST:
model = tf.keras.models.load_model('./Models')
model.summary()
print("Evaluating validation set: ")
model.evaluate(val_generator)
print("Evaluating test set: ")
model.evaluate(test_generator)
Sorry, it may be a bit of a newbie question, but I would love to know what I need to add to the above code to make it plot a confusion matrix for my model after it runs.
I'm able to plot the graphs of both accuracy and loss for a few epochs, but I want to include a confusion matrix before running for more epochs. Here are the plots already obtained:
accuracy plot
loss plot
Try this
The basic concept is to get prediction results from your model using X_test and then to compare these predictions to the real y_test results.
# 'Fake' and 'Real' are your dependent features for your classification use case,
# where Fake == 0 and Real == 1. It is important that you use this form for the matrix.
('Fake', 'Real') == (0, 1)
# Data handling
import pandas as pd
# Exploratory Data Analysis & Visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# Model improvement and Evaluation
from sklearn import metrics
from sklearn.metrics import confusion_matrix
# Plotting confusion matrix
matrix = pd.DataFrame(metrics.confusion_matrix(y_test, y_prediction),
                      index=('Fake', 'Real'),
                      columns=('Fake', 'Real'))
print(matrix)
# Visualising confusion matrix
plt.figure(figsize = (16,14),facecolor='white')
heatmap = sns.heatmap(matrix, annot = True, annot_kws = {'size': 20}, fmt = 'd', cmap = 'YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation = 0, ha = 'right', fontsize = 18, weight='bold')
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation = 0, ha = 'right', fontsize = 18, weight='bold')
plt.title('Confusion Matrix\n', fontsize = 18, color = 'darkblue')
plt.ylabel('True label', fontsize = 14)
plt.xlabel('Predicted label', fontsize = 14)
plt.show()
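The snippet above assumes array-style X_test / y_test. In the question the data comes from Keras directory generators, so one way (an assumption, on the premise that the test generator was built with shuffle=False) to obtain y_test and y_prediction is:
import numpy as np

# Labels come from the generator itself; predictions from the trained model.
# The generator must not shuffle, or labels and predictions will be misaligned.
y_test = test_generator.classes
y_prob = model.predict(test_generator)
y_prediction = np.argmax(y_prob, axis=1)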
With very few epochs this model learns to classify between 1 and 0 extremely quickly, which leads me to suspect something is wrong.
The code below downloads the MNIST dataset and extracts only the images labelled 1 or 0. A random sample of size 200 is selected from this subset, and this random sample is the dataset the model is trained on. With just 2 epochs the model achieves 90%+ test set accuracy. Is this expected behaviour? I expected many more epochs would be required to train the model to this level of test set accuracy.
Model code :
%reset -f
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data_utils
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from matplotlib import pyplot
from pandas import DataFrame
import torchvision.datasets as dset
import os
import torch.nn.functional as F
import time
import random
import pickle
from sklearn.metrics import confusion_matrix
import pandas as pd
import sklearn
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
root = './data'
if not os.path.exists(root):
os.mkdir(root)
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)
batch_size = 64
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=batch_size,
shuffle=True)
class NeuralNet(nn.Module):
def __init__(self):
super(NeuralNet, self).__init__()
self.fc1 = nn.Linear(28*28, 500)
self.fc2 = nn.Linear(500, 256)
self.fc3 = nn.Linear(256, 2)
def forward(self, x):
x = x.view(-1, 28*28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
num_epochs = 2
random_sample_size = 200
values_0_or_1 = [t for t in train_set if (int(t[1]) == 0 or int(t[1]) == 1)]
values_0_or_1_testset = [t for t in test_set if (int(t[1]) == 0 or int(t[1]) == 1)]
print(len(values_0_or_1))
print(len(values_0_or_1_testset))
train_loader_subset = torch.utils.data.DataLoader(
dataset=values_0_or_1,
batch_size=batch_size,
shuffle=True)
test_loader_subset = torch.utils.data.DataLoader(
dataset=values_0_or_1_testset,
batch_size=batch_size,
shuffle=False)
train_loader = train_loader_subset
# Hyper-parameters
input_size = 100
hidden_size = 100
num_classes = 2
# learning_rate = 0.00001
learning_rate = .0001
# Device configuration
device = 'cpu'
print_progress_every_n_epochs = 1
model = NeuralNet().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
N = len(train_loader)
# Train the model
total_step = len(train_loader)
most_recent_prediction = []
test_actual_predicted_dict = {}
rm = random.sample(list(values_0_or_1), random_sample_size)
train_loader_subset = data_utils.DataLoader(rm, batch_size=4)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader_subset):
# Move tensors to the configured device
images = images.reshape(-1, 2).to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch) % print_progress_every_n_epochs == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
predicted_test = []
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
probs_l = []
predicted_values = []
actual_values = []
labels_l = []
with torch.no_grad():
for images, labels in test_loader_subset:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
predicted_test.append(predicted.cpu().numpy())
sm = torch.nn.Softmax()
probabilities = sm(outputs)
probs_l.append(probabilities)
labels_l.append(labels.cpu().numpy())
predicted_values.append(np.concatenate(predicted_test).ravel())
actual_values.append(np.concatenate(labels_l).ravel())
if (epoch) % 1 == 0:
print('test accuracy : ', 100 * len((np.where(np.array(predicted_values[0])==(np.array(actual_values[0])))[0])) / len(actual_values[0]))
Output of the model (12665 and 2115 are the training and test set sizes):
12665
2115
Epoch [1/2], Step [50/198], Loss: 0.1256
Epoch [2/2], Step [50/198], Loss: 0.0151
test accuracy : 99.76359338061465
/anaconda3/envs/pytorch/lib/python3.7/site-packages/ipykernel_launcher.py:143: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
Here's my 2 cents on your binary experiment.
It would seem you have severely reduced the complexity of your dataset, and with the high number of neurons in your intermediate layers, your model is expected to converge very quickly.
Note that the MNIST dataset has a single channel, and this makes the task very simple.
You may try playing with CIFAR-10 and see whether you still get high accuracy in just 2 epochs.
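A minimal sketch of that CIFAR-10 experiment, reusing the transform/loader pattern from the question (note the inputs are 3x32x32, so the first linear layer would need in_features=3*32*32 instead of 28*28):
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms

cifar_trans = transforms.Compose([transforms.ToTensor(),
                                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
cifar_train = dset.CIFAR10(root='./data', train=True, transform=cifar_trans, download=True)
cifar_test = dset.CIFAR10(root='./data', train=False, transform=cifar_trans, download=True)

# Keep only two classes (0 = airplane, 1 = automobile) to mirror the binary setup.
cifar_binary = [t for t in cifar_train if t[1] in (0, 1)]
cifar_loader = torch.utils.data.DataLoader(cifar_binary, batch_size=64, shuffle=True)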
That's not a particularly well-posed question, because what is expected is entirely subjective. That being said, I am not surprised because 0 and 1 are very different digits. For instance, 0 has background surrounded by foreground, whereas 1 does not - that's an almost infallible test to distinguish the two. As a sanity check, I would swap out 0 for 7, which is similar to 1. I would expect to see significantly lower success rate. That being said, that's a sanity check - even if it passes, there may still be bugs or errors in your method.
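A sketch of that sanity check, reusing the filtering pattern from the question; 7 is remapped to label 0 so the two-output head and CrossEntropyLoss are left unchanged (the remapping choice is an assumption):
# Train on 7 vs 1 instead of 0 vs 1, keeping labels in {0, 1}.
values_7_or_1 = [(img, 0 if int(lbl) == 7 else 1)
                 for img, lbl in train_set if int(lbl) in (1, 7)]
values_7_or_1_testset = [(img, 0 if int(lbl) == 7 else 1)
                         for img, lbl in test_set if int(lbl) in (1, 7)]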
Here is my version of an autoencoder written using PyTorch:
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import MultiLabelBinarizer, scale
from ast import literal_eval
import datetime
import seaborn as sns
sns.set_style("darkgrid")
import torch
%matplotlib inline
f = []
f.append(np.random.uniform(0,10,(1 , 10)).flatten())
f.append(np.random.uniform(10,20,(1 , 10)).flatten())
f.append(np.random.uniform(20,30,(1 , 10)).flatten())
x_data = torch.FloatTensor(np.array(f))
x_data
dimensions_input = 10
hidden_layer_nodes = 5
output_dimension = 10
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(dimensions_input,hidden_layer_nodes)
self.sigmoid = torch.nn.Sigmoid()
self.linear2 = torch.nn.Linear(hidden_layer_nodes,output_dimension)
def forward(self, x):
l_out1 = self.linear(x)
l_out2 = self.sigmoid(l_out1)
y_pred = self.linear2(l_out2)
return y_pred
model = Model()
criterion = torch.nn.MSELoss(size_average = False)
optim = torch.optim.SGD(model.parameters(), lr = 0.00001)
def train_model():
y_data = x_data.clone()
for i in range(150000):
y_pred = model(x_data)
loss = criterion(y_pred, y_data)
if i % 5000 == 0:
print(loss)
optim.zero_grad()
loss.backward()
optim.step()
Using x_data.clone() I train the network to learn a feature representation of the input data.
I'm attempting to generate hidden-layer representations that match up with the rows of the input data, so that each vector of x_data has a corresponding encoding. But the hidden layer is a vector of size 5. How can I change this network so that it produces a matrix representing a reduced-dimensionality version of the input data?
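One way to read out a reduced-dimensionality matrix with the model as written (a sketch, assuming the trained model and x_data above): run only the encoder half, which already gives one 5-dimensional code per row of x_data, i.e. a 3 x 5 matrix:
with torch.no_grad():
    codes = model.sigmoid(model.linear(x_data))  # shape (3, 5): one encoding per input row
print(codes.shape)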
I am using InceptionV3, fine-tuned on my own dataset, for a multi-class, multi-label classification problem. I made the most important changes, such as switching softmax to sigmoid, and I am using this loss function:
model.compile(loss='binary_crossentropy',optimizer=keras.optimizers.Adam(),metrics=['accuracy'])
but when I predict using the generated model I get small values like this: [ 2.74303748e-04 7.97736086e-03 2.44359515e-04 7.09630767e-05
5.43296163e-04 4.08404367e-03 3.28547925e-01 1.05091414e-04
1.80469989e-03 2.85170972e-03 1.44978316e-04 7.78235449e-03
1.72435939e-02 1.55413849e-02 3.82270187e-01 1.06311939e-03
2.70067930e-01 6.08937175e-04 7.47230020e-04 1.07850268e-04]
The source code is below (could the problem be my validation set?):
import keras
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
from keras import __version__
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
IM_WIDTH, IM_HEIGHT = 299, 299 #fixed size for InceptionV3
NB_EPOCHS = 3
BAT_SIZE = 32
FC_SIZE = 1024
NB_IV3_LAYERS_TO_FREEZE = 172
def get_nb_files(directory):
"""Get number of files by searching directory recursively"""
if not os.path.exists(directory):
return 0
cnt = 0
for r, dirs, files in os.walk(directory):
for dr in dirs:
cnt += len(glob.glob(os.path.join(r, dr + "/*")))
return cnt
def setup_to_transfer_learn(model, base_model):
"""Freeze all layers and compile the model"""
for layer in base_model.layers:
layer.trainable = False
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
#metrics=['accuracy'])
model.compile(loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(),metrics=['accuracy'])
def add_new_last_layer(base_model, nb_classes):
"""Add last layer to the convnet
Args:
base_model: keras model excluding top
nb_classes: # of classes
Returns:
new keras model with last layer
"""
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init
predictions = Dense(nb_classes, activation='sigmoid')(x)
model = Model(input=base_model.input, output=predictions)
return model
def setup_to_finetune(model):
"""Freeze the bottom NB_IV3_LAYERS and retrain the remaining top
layers.
note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the
inceptionv3 arch
Args:
model: keras model
"""
for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
layer.trainable = True
model.compile(loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(),metrics=['accuracy'])
def train(args):
"""Use transfer learning and fine-tuning to train a network on a new
dataset"""
nb_train_samples = get_nb_files(args.train_dir)
nb_classes = len(glob.glob(args.train_dir + "/*"))
nb_val_samples = get_nb_files(args.val_dir)
nb_epoch = int(args.nb_epoch)
batch_size = int(args.batch_size)
# data prep
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
args.train_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
)
validation_generator = test_datagen.flow_from_directory(
args.val_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
)
# setup model
base_model = InceptionV3(weights='imagenet', include_top=False)
#include_top=False excludes final FC layer
model = add_new_last_layer(base_model, nb_classes)
# transfer learning
setup_to_transfer_learn(model, base_model)
history_tl = model.fit_generator(
train_generator,
nb_epoch=nb_epoch,
samples_per_epoch=nb_train_samples,
validation_data=validation_generator,
nb_val_samples=nb_val_samples,
class_weight='auto')
# fine-tuning
setup_to_finetune(model)
history_ft = model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
nb_epoch=nb_epoch,
validation_data=validation_generator,
nb_val_samples=nb_val_samples,
class_weight='auto')
model.save(args.output_model_file)
if args.plot:
plot_training(history_ft)
def plot_training(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r.')
plt.plot(epochs, val_acc, 'r')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'r.')
plt.plot(epochs, val_loss, 'r-')
plt.title('Training and validation loss')
plt.show()
if __name__=="__main__":
a = argparse.ArgumentParser()
a.add_argument("--train_dir")
a.add_argument("--val_dir")
a.add_argument("--nb_epoch", default=NB_EPOCHS)
a.add_argument("--batch_size", default=BAT_SIZE)
a.add_argument("--output_model_file", default="inceptionv3-ft.model")
a.add_argument("--plot", action="store_true")
args = a.parse_args()
if args.train_dir is None or args.val_dir is None:
a.print_help()
sys.exit(1)
if (not os.path.exists(args.train_dir)) or (not
os.path.exists(args.val_dir)):
print("directories do not exist")
sys.exit(1)
train(args)
You have answered your own question; this is the reason:
I made the most important changes like softmax to sigmoid
The sigmoid function maps its input to the range [0, 1]. Hence the output values of the network are just the outputs of this function and are not class probabilities. The maximum value across the output nodes tells you which neuron has the largest output and hence which class is present.
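A tiny numeric illustration of the difference (independent of the model in the question): sigmoid squashes each logit on its own, while softmax normalises across them so the outputs form a distribution:
import numpy as np

logits = np.array([2.0, -1.0, 0.5])
sigmoid = 1 / (1 + np.exp(-logits))              # [0.88, 0.27, 0.62], does not sum to 1
softmax = np.exp(logits) / np.exp(logits).sum()  # [0.79, 0.04, 0.18], sums to 1
print(sigmoid, sigmoid.sum())
print(softmax, softmax.sum())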
When you say
but some values should be >0.5(the present classes ) and some others should be <0.5( the non present classes )
in the comments, you are referring to a probability greater than 0.5 for the present class. What you require for class probabilities is the softmax function.
Therefore replacing your last sigmoid layer with a softmax layer will get you what you think you should be observing.
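A sketch of the change this answer suggests, applied to the add_new_last_layer helper from the question (only appropriate if exactly one class is present per image; the helper name and FC_SIZE value mirror the question):
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D

FC_SIZE = 1024  # same head size as in the question

def add_new_last_layer_softmax(base_model, nb_classes):
    # Same head as the question's add_new_last_layer, with softmax instead of sigmoid.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(FC_SIZE, activation='relu')(x)
    predictions = Dense(nb_classes, activation='softmax')(x)
    return Model(input=base_model.input, output=predictions)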