My training dataset contains a large number of images. I converted them into NumPy arrays and stored them in a pickle file, which is now 1.19 GB; the test dataset is 181 MB. My neural network is also small, yet when I run my CNN it consumes as much as 15 GB of my 16 GB of RAM, and a single epoch takes 45 minutes. What could explain this, and how can I fix it?
Thanks in advance for answering...
def load_dataset(self, address):
    train_data = {}
    test_data = {}
    with open(address + "\dataset_facial_exp_gray_train.pickle", 'rb') as f:
        train_data = pickle.load(f)
    with open(address + "\dataset_facial_exp_gray_test.pickle", 'rb') as f:
        test_data = pickle.load(f)
    x_train = np.asarray(train_data["img_arrays"])
    y_train = to_categorical(np.asarray(train_data["lables"]))
    x_test = np.asarray(test_data["img_arrays"])
    y_test = to_categorical(np.asarray(test_data["lables"]))
    return x_train, y_train, x_test, y_test

def neural_network():
    x_train, y_train, x_test, y_test = load_dataset()
    x_train = np.reshape(x_train, (-1, 64, 64, 1)) / 255
    x_test = np.reshape(x_test, (-1, 64, 64, 1)) / 255

    model = Sequential()
    model.add(Conv2D(64, 4, (2, 2), activation='relu', input_shape=(64, 64, 1), kernel_regularizer=l2(0.0005)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.3))
    # model.add(Conv2D(32,4,(2,2),activation='relu'))#, kernel_regularizer=l2(0.0005)))
    # model.add(BatchNormalization())
    # model.add(MaxPooling2D((2,2)))
    # model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))  # ,kernel_regularizer=l2(0.01)))
    model.add(Dense(16, activation='relu'))  # ,kernel_regularizer=l2(0.001)))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(len(lables), activation='softmax'))

    def scheduler(epoch, learning_rate):
        if epoch % 15 == 0:
            return learning_rate * 0.1
        else:
            return learning_rate

    lr = tensorflow.keras.callbacks.LearningRateScheduler(scheduler)
    adam = Adam(learning_rate=1e-4)
    checkpoint = tensorflow.keras.callbacks.ModelCheckpoint("emotion_model/emotion_weights.h5", monitor="loss", save_best_only=True, mode='min')
    model.compile(loss=["categorical_crossentropy"], metrics=['accuracy'], optimizer=adam)

    if os.path.exists("emotion_model\emotion_weights.h5"):
        model.load_weights("emotion_model\emotion_weights.h5")

    model.fit(x_train, y_train, batch_size=10, epochs=50, validation_data=(x_test, y_test), shuffle=True, callbacks=[lr, checkpoint])

    json_model = model.to_json()
    with open("emotion_model/model.json", 'w') as f:
        f.write(json_model)

neural_network()
Suppose your data is stored as an array of batches. Train the network one batch at a time; after training on a batch, free it with del batch, then load and train on the next batch. Another way to reduce memory usage is pooling: add pooling layers after the convolution layers.
You can split the files into batches like this:
save_directory="/content/gdrive/My Drive/Image Dataset"
liste=[0,0,0,0,0,0,0,0,0,0]
liste[0]=os.listdir()[0:50]
liste[1]=os.listdir()[50:100]
liste[2]=os.listdir()[100:150]
liste[3]=os.listdir()[150:200]
liste[4]=os.listdir()[200:250]
liste[5]=os.listdir()[250:300]
liste[6]=os.listdir()[300:350]
liste[7]=os.listdir()[350:400]
liste[8]=os.listdir()[400:450]
liste[9]=os.listdir()[450:(len(os.listdir())-1)]
liste now holds the filenames for each batch. Then process the data one batch at a time:
for i in range(6, 10):
    out_array = []
    for tmp in liste[i]:
        print(tmp)
        im = cv2.resize(np.array(Image.open(os.path.join(save_directory, tmp))), (992, 744))
        out_array.append(im)
    out_array = np.array(out_array).astype(np.uint8)
    # ... process out_array here ...
    del out_array
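A complementary option (a minimal sketch, not part of the original answer) is to avoid holding a full float32 copy of the dataset in RAM at all: save the arrays once as a uint8 .npy file and let a tf.keras.utils.Sequence memory-map it, converting one mini-batch at a time. The file name, the 64x64x1 shape and the batch size below are assumptions.

import numpy as np
import tensorflow as tf

class DiskBackedSequence(tf.keras.utils.Sequence):
    def __init__(self, x_path, y, batch_size=32):
        # mmap_mode="r" keeps the array on disk; only the slices that are read get paged in
        self.x = np.load(x_path, mmap_mode="r")
        self.y = y
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        # cast and rescale per batch, so only one small float32 block exists at a time
        x_batch = self.x[sl].reshape(-1, 64, 64, 1).astype("float32") / 255.0
        return x_batch, self.y[sl]

# usage (assumed file name): save once with np.save("x_train_uint8.npy", x_train.astype(np.uint8)),
# then: model.fit(DiskBackedSequence("x_train_uint8.npy", y_train, batch_size=32), epochs=50)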
I've trained an LSTM model with 8 features and 1 output. I have one dataset and split it into two separate files: I train and predict with the first half of the set, and then attempt to predict the second half of the set using the model trained on the first half. My model predicts the train and test portions of the first half fairly well (RMSE of around 5-7), however when I attempt to predict the second half of the set I get very poor predictions (RMSE of around 50-60). How can I get my trained model to predict outside datasets well?
dataset at this link
file = r'/content/drive/MyDrive/only_force_pt1.csv'
df = pd.read_csv(file)
df.head()
X = df.iloc[:, 1:9]
y = df.iloc[:,9]
print(X.shape)
print(y.shape)
plt.figure(figsize = (20, 6), dpi = 100)
plt.plot(y)
WINDOW_LEN = 50
def window_size(size, inputdata, targetdata):
    X = []
    y = []
    i = 0
    while (i + size) <= len(inputdata) - 1:
        X.append(inputdata[i: i+size])
        y.append(targetdata[i+size])
        i += 1
    assert len(X) == len(y)
    return (X, y)
X_series, y_series = window_size(WINDOW_LEN, X, y)
print(len(X))
print(len(X_series))
print(len(y_series))
X_train, X_val, y_train, y_val = train_test_split(np.array(X_series),np.array(y_series),test_size=0.3, shuffle = True)
X_val, X_test,y_val, y_test = train_test_split(np.array(X_val),np.array(y_val),test_size=0.3, shuffle = False)
n_timesteps, n_features, n_outputs = X_train.shape[1], X_train.shape[2],1
[verbose, epochs, batch_size] = [1, 300, 32]
input_shape = (n_timesteps, n_features)
model = Sequential()
# LSTM
model.add(LSTM(64, input_shape=input_shape, return_sequences = False))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
#model.add(Dropout(0.2))
model.add(Dense(32, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
model.add(Dense(1, activation='relu'))
earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience = 30, verbose =1, mode = 'auto')
model.summary()
model.compile(loss = 'mse', optimizer = Adam(learning_rate = 0.001), metrics=[tf.keras.metrics.RootMeanSquaredError()])
history = model.fit(X_train, y_train, batch_size = batch_size, epochs = epochs, verbose = verbose, validation_data=(X_val,y_val), callbacks = [earlystopper])
Second dataset:
tests = r'/content/drive/MyDrive/only_force_pt2.csv'
df_testing = pd.read_csv(tests)
X_testing = df_testing.iloc[:4038,1:9]
torque = df_testing.iloc[:4038,9]
print(X_testing.shape)
print(torque.shape)
plt.figure(figsize = (20, 6), dpi = 100)
plt.plot(torque)
X_testing = X_testing.to_numpy()
X_testing_series, y_testing_series = window_size(WINDOW_LEN, X_testing, torque)
X_testing_series = np.array(X_testing_series)
y_testing_series = np.array(y_testing_series)
scores = model.evaluate(X_testing_series, y_testing_series, verbose =1)
X_prediction = model.predict(X_testing_series, batch_size = 32)
If your model works well on the training data but performs badly on the validation data, then it did not learn the "true" connection between input and output variables but simply memorized the output corresponding to each input. To tackle this you can do several things:
Typically you would use 80% of your data to train and 20% to test; this presents more data to the model, which should help it learn more of the true underlying function (see the sketch after this list).
If your model is too complex, some neurons will simply be used to memorize input-output pairs. Try reducing the complexity of your model (fewer layers and neurons) so that the remaining capacity has to learn instead of memorize.
Look into the training performance in more detail here.
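For the first point, a minimal sketch of an 80/20 split on the windowed arrays from the question (variable names taken from the question; purely illustrative):

from sklearn.model_selection import train_test_split
import numpy as np

X_arr = np.array(X_series)
y_arr = np.array(y_series)

# 80% for training, 20% held out; the held-out part is then split into validation and test
X_train, X_hold, y_train, y_hold = train_test_split(X_arr, y_arr, test_size=0.2, shuffle=True)
X_val, X_test, y_val, y_test = train_test_split(X_hold, y_hold, test_size=0.5, shuffle=False)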
I am trying to classify 2 classes of images. Though I am getting high train and validation accuracy (0.97) after 10 epochs, my test results are awful (precision 0.48) and the confusion matrix shows the network is predicting the images for the wrong class (attached results).
There are only 2 classes in the dataset, each class has 10,000 image examples (after augmentation). I am using the VGG16 network. The full dataset is split 20% to test set (this split was performed by taking random images from each class therefore it is shuffled). The remaining images are split to 80% train and 20% valid sets (as indicated in the ImageDataGenerator line of the code). So in the end there are:
12,904 Train images belonging to 2 classes
3,224 Valid images belonging to 2 classes
4,032 Test images belonging to 2 classes
This is my code:
def CNN(CNN='VGG16', choice='predict', prediction='./dataset/Test/image.jpg'):
    ''' Train images using one of several CNNs '''
    Train = './dataset/Train'
    Tests = './dataset/Test'
    shape = (224, 224)
    epochs = 10
    batches = 16
    classes = []
    for c in os.listdir(Train): classes.append(c)
    IDG = keras.preprocessing.image.ImageDataGenerator(validation_split=0.2)
    train = IDG.flow_from_directory(Train, target_size=shape, color_mode='rgb',
                                    classes=classes, batch_size=batches, shuffle=True, subset='training')
    valid = IDG.flow_from_directory(Train, target_size=shape, color_mode='rgb',
                                    classes=classes, batch_size=batches, shuffle=True, subset='validation')
    tests = IDG.flow_from_directory(Tests, target_size=shape, color_mode='rgb',
                                    classes=classes, batch_size=batches, shuffle=True)
    input_shape = train.image_shape
    if CNN == 'VGG16' or 'vgg16':
        model = VGG16(weights=None, input_shape=input_shape,
                      classes=len(classes))
    elif CNN == 'VGG19' or 'vgg19':
        model = VGG19(weights=None, input_shape=input_shape,
                      classes=len(classes))
    elif CNN == 'ResNet50' or 'resnet50':
        model = ResNet50(weights=None, input_shape=input_shape,
                         classes=len(classes))
    elif CNN == 'DenseNet201' or 'densenet201':
        model = DenseNet201(weights=None, input_shape=input_shape,
                            classes=len(classes))
    model.compile(optimizer=keras.optimizers.SGD(
                      lr=1e-3,
                      decay=1e-6,
                      momentum=0.9,
                      nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    Esteps = int(train.samples/train.next()[0].shape[0])
    Vsteps = int(valid.samples/valid.next()[0].shape[0])
    if choice == 'train':
        history = model.fit_generator(train,
                                      steps_per_epoch=Esteps,
                                      epochs=epochs,
                                      validation_data=valid,
                                      validation_steps=Vsteps,
                                      verbose=1)
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model Loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc='upper left')
        plt.show()
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('Model Accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc='upper left')
        plt.show()
        Y_pred = model.predict_generator(tests, verbose=1)
        y_pred = np.argmax(Y_pred, axis=1)
        matrix = confusion_matrix(tests.classes, y_pred)
        df_cm = pd.DataFrame(matrix, index=classes, columns=classes)
        plt.figure(figsize=(10,7))
        sn.heatmap(df_cm, annot=True)
        print(classification_report(tests.classes, y_pred, target_names=classes))
        model.save_weights('weights.h5')
    elif choice == 'predict':
        model.load_weights('./weights.h5')
        img = image.load_img(prediction, target_size=shape)
        im = image.img_to_array(img)
        im = np.expand_dims(im, axis=0)
        if CNN == 'VGG16' or 'vgg16':
            im = keras.applications.vgg16.preprocess_input(im)
            prediction = model.predict(im)
            print(prediction)
        elif CNN == 'VGG19' or 'vgg19':
            im = keras.applications.vgg19.preprocess_input(im)
            prediction = model.predict(im)
            print(prediction)
        elif CNN == 'ResNet50' or 'resnet50':
            im = keras.applications.resnet50.preprocess_input(im)
            prediction = model.predict(im)
            print(prediction)
            print(keras.applications.resnet50.decode_predictions(prediction))
        elif CNN == 'DenseNet201' or 'densenet201':
            im = keras.applications.densenet201.preprocess_input(im)
            prediction = model.predict(im)
            print(prediction)
            print(keras.applications.densenet201.decode_predictions(prediction))

CNN(CNN='VGG16', choice='train')
Results:
              precision    recall  f1-score   support

    Predator       0.49      0.49      0.49      2016
    Omnivore       0.49      0.49      0.49      2016

    accuracy                           0.49      4032
I suspect that the ImageDataGenerator() is not shuffling the images "before" the train/valid split. If this is the case, how can I force the ImageDataGenerator in Keras to shuffle the dataset before the split?
If shuffling is not the issue, how can I solve my problem? What am I doing wrong?
So your model is basically overfitting, which means it is "memorizing" your training set. I have a few suggestions:
Check that your 2 prediction classes are balanced in your training set, i.e. a 50-50 split of 0 and 1. For example, if 90% of your training data is labeled 0, your model will simply predict everything to be 0 and still be right 90% of the time on validation.
If your training data is already balanced, it means your model isn't generalizing. Perhaps try the pre-trained model instead of training every layer of VGG from scratch: load the pre-trained VGG weights without the top and train only the dense layers (a sketch follows below).
Use cross-validation. Reshuffle the data in each fold and see whether the results on the test set improve.
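A minimal sketch of that pre-trained-base idea (assuming 224x224 RGB inputs, 2 classes, and the train/valid generators from the question; written against tf.keras, so adapt the imports to your setup):

import tensorflow as tf
from tensorflow.keras import layers, models

base = tf.keras.applications.VGG16(weights='imagenet', include_top=False,
                                   input_shape=(224, 224, 3))
base.trainable = False  # freeze the convolutional base; only the new head is trained

model = models.Sequential([
    base,
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train, validation_data=valid, epochs=10)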
Somehow, the Keras image generator works well when combined with fit() or fit_generator(), but fails miserably when combined with predict_generator() or the predict() function.
Since I use the Plaid-ML Keras back-end on an AMD processor, I would rather loop through all test images one by one and get the prediction for each image in each iteration.
import os
from PIL import Image
import keras
import numpy

# code for creating and training the model is not included

print("Prediction result:")
dir = "/path/to/test/images"
files = os.listdir(dir)
correct = 0
total = 0

# dictionary to label all animal category classes
classes = {
    0: 'This is Cat',
    1: 'This is Dog',
}

for file_name in files:
    total += 1
    image = Image.open(dir + "/" + file_name).convert('RGB')
    image = image.resize((100, 100))
    image = numpy.expand_dims(image, axis=0)
    image = numpy.array(image)
    image = image / 255
    pred = model.predict_classes([image])[0]
    animals_category = classes[pred]
    if ("cat" in file_name) and ("cat" in animals_category.lower()):
        print(correct, ". ", file_name, animals_category)
        correct += 1
    elif ("dog" in file_name) and ("dog" in animals_category.lower()):
        print(correct, ". ", file_name, animals_category)
        correct += 1

print("accuracy: ", (correct/total))
I've made a custom CNN in PyTorch for classifying the 10 classes in the CIFAR-10 dataset. My classification accuracy on the test dataset is 45.739%. That is very low, and I first thought my model was not deep enough, but I implemented the same model in Keras and its classification accuracy comes out to 78.92% on the test dataset. There is no problem in Keras, so I think there is something I'm missing in my PyTorch program.
I have used the same model architecture, strides, padding, dropout rate, optimizer, loss function, learning rate, batch size, and number of epochs in both PyTorch and Keras, and despite that the difference in classification accuracy is still huge, so I'm not sure how to debug my PyTorch program further.
For now I suspect three things: in Keras I use the categorical cross-entropy loss function (one-hot vector labels), while in PyTorch I use the standard cross-entropy loss function (scalar index labels); can this be a problem? If not, then I suspect either my training loop or the code that calculates classification accuracy in PyTorch. I have attached both programs below and will be grateful for any suggestions.
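As a quick sanity check on the first suspicion (an illustrative snippet, not from the original post): PyTorch's nn.CrossEntropyLoss on class indices and Keras-style categorical cross-entropy on one-hot labels compute the same quantity, because CrossEntropyLoss applies log-softmax to the raw logits internally, so the loss choice by itself does not explain the gap.

import torch
import torch.nn as nn

logits = torch.tensor([[2.0, 0.5, -1.0],
                       [0.1, 1.5,  0.3]])
targets = torch.tensor([0, 1])            # scalar class indices (PyTorch style)

pytorch_loss = nn.CrossEntropyLoss()(logits, targets)

# the same quantity computed "Keras style": softmax probabilities + one-hot labels
probs = torch.softmax(logits, dim=1)
one_hot = torch.eye(3)[targets]
keras_style_loss = -(one_hot * torch.log(probs)).sum(dim=1).mean()

print(pytorch_loss.item(), keras_style_loss.item())  # the two values match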
My program in Keras:
#================Function that defines the CNN model===========
def CNN_model():
    model = Sequential()
    model.add(Conv2D(32,(3,3),activation='relu',padding='same', input_shape=(size,size,channels))) #SAME PADDING
    model.add(Conv2D(32,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2))) #VALID PADDING
    model.add(Dropout(0.25))
    model.add(Conv2D(64,(3,3),activation='relu', padding='same')) #SAME PADDING
    model.add(Conv2D(64,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2))) #VALID PADDING
    model.add(Dropout(0.25))
    model.add(Conv2D(128,(3,3),activation='relu', padding='same')) #SAME PADDING
    model.add(Conv2D(128,(3,3),activation='relu')) #VALID PADDING
    model.add(MaxPooling2D(pool_size=(2,2),name='feature_extractor_layer')) #VALID PADDING
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='second_last_layer'))
    model.add(Dropout(0.25))
    model.add(Dense(10, activation='softmax', name='softmax_layer')) #10 nodes in the softmax layer
    model.summary()
    return model

#=====Main program starts here========
#get_train_data() and get_test_data() are my own custom functions to get CIFAR-10 dataset
images_train, labels_train, class_train = get_train_data(0,10)
images_test, labels_test, class_test = get_test_data(0,10)

model = CNN_model()
model.compile(loss='categorical_crossentropy', #loss function of the CNN
              optimizer=Adam(lr=1.0e-4), #Optimizer
              metrics=['accuracy']) #'accuracy' metric is to be evaluated

#images_train and images_test contain images and
#class_train and class_test contain one-hot vector labels
model.fit(images_train, class_train,
          batch_size=128,
          epochs=50,
          validation_data=(images_test, class_test),
          verbose=1)

scores = model.evaluate(images_test, class_test, verbose=0)
print("Accuracy: "+str(scores[1]*100)+"% \n")
My program in PyTorch:
#========DEFINE THE CNN MODEL=====
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, 1, 1)    #SAME PADDING
        self.conv2 = nn.Conv2d(32, 32, 3, 1, 0)   #VALID PADDING
        self.pool1 = nn.MaxPool2d(2, 2)           #VALID PADDING
        self.drop1 = nn.Dropout2d(0.25)           #DROPOUT OF 0.25
        self.conv3 = nn.Conv2d(32, 64, 3, 1, 1)   #SAME PADDING
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 0)   #VALID PADDING
        self.pool2 = nn.MaxPool2d(2, 2)           #VALID PADDING
        self.drop2 = nn.Dropout2d(0.25)           #DROPOUT OF 0.25
        self.conv5 = nn.Conv2d(64, 128, 3, 1, 1)  #SAME PADDING
        self.conv6 = nn.Conv2d(128, 128, 3, 1, 0) #VALID PADDING
        self.pool3 = nn.MaxPool2d(2, 2)           #VALID PADDING
        self.drop3 = nn.Dropout2d(0.25)           #DROPOUT OF 0.25
        self.fc1 = nn.Linear(128*2*2, 512)        #128*2*2 IS OUTPUT DIMENSION AFTER THE PREVIOUS LAYER
        self.drop4 = nn.Dropout(0.25)             #DROPOUT OF 0.25
        self.fc2 = nn.Linear(512, 10)             #10 OUTPUT NODES

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = self.drop1(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.pool2(x)
        x = self.drop2(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = self.pool3(x)
        x = self.drop3(x)
        x = x.view(-1, 2*2*128) #FLATTENING OPERATION, 2*2*128 IS OUTPUT AFTER THE PREVIOUS LAYER
        x = F.relu(self.fc1(x))
        x = self.drop4(x)
        x = self.fc2(x) #LAST LAYER DOES NOT NEED SOFTMAX BECAUSE THE LOSS FUNCTION WILL TAKE CARE OF IT
        return x

#=======FUNCTION TO CONVERT INPUT AND TARGET TO TORCH TENSORS AND LOADING INTO GPU======
def PrepareInputDataAndTargetData(device, images, labels, batch_size):
    #GET MINI BATCH OF TRAINING IMAGES AND RESHAPE THE TORCH TENSOR FOR CNN PROCESSING
    mini_batch_images = torch.tensor(images)
    mini_batch_images = mini_batch_images.view(batch_size, 3, 32, 32)
    #GET MINI BATCH OF TRAINING LABELS, TARGET SHOULD BE IN LONG FORMAT SO CONVERT THAT TOO
    mini_batch_labels = torch.tensor(labels)
    mini_batch_labels = mini_batch_labels.long()
    #FEED THE INPUT DATA AND TARGET LABELS TO GPU
    mini_batch_images = mini_batch_images.to(device)
    mini_batch_labels = mini_batch_labels.to(device)
    return mini_batch_images, mini_batch_labels

#==========MAIN PROGRAM==========
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#get_train_data() and get_test_data() are my own custom functions to get CIFAR-10 dataset
Images_train, Labels_train, Class_train = get_train_data(0,10)
Images_test, Labels_test, Class_test = get_test_data(0,10)

net = Net()
net = net.double() #https://discuss.pytorch.org/t/runtimeerror-expected-object-of-scalar-type-double-but-got-scalar-type-float-for-argument-2-weight/38961
print(net)

#MAP THE MODEL ONTO THE GPU
net = net.to(device)

#CROSS ENTROPY LOSS FUNCTION AND ADAM OPTIMIZER
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)

#PREPARE THE DATALOADER
#Images_train contains images and Labels_train contains indices i.e. 0,1,...,9
dataset = TensorDataset(Tensor(Images_train), Tensor(Labels_train))
trainloader = DataLoader(dataset, batch_size=128, shuffle=True)

#START TRAINING THE CNN MODEL FOR 50 EPOCHS
for epoch in range(0, 50):
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs = torch.tensor(inputs).double()
        inputs = inputs.view(len(inputs), 3, 32, 32) #RESHAPE THE IMAGES
        labels = labels.long() #MUST CONVERT LABEL TO LONG FORMAT

        #MAP THE INPUT AND LABELS TO THE GPU
        inputs = inputs.to(device)
        labels = labels.to(device)

        #FORWARD PROP, BACKWARD PROP, PARAMETER UPDATE
        optimizer.zero_grad()
        outputs = net.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

    #CALCULATE CLASSIFICATION ACCURACY ON ALL 10 CLASSES
    with torch.no_grad():
        Images_class, Labels_class = PrepareInputDataAndTargetData(device, Images_test, Labels_test, len(Images_test))
        network_outputs = net.forward(Images_class)
        correct = (torch.argmax(network_outputs.data, 1) == Labels_class.data).float().sum()
        acc = float(100.0*(correct/len(Images_class)))
        print("Accuracy is: "+str(acc)+"\n")
        del Images_class
        del Labels_class
        del network_outputs
        del correct
        del acc
        torch.cuda.empty_cache()

print("Done\n")
I am not fully aware of how the actual core backend works in either library; however, I suppose the classification accuracy of the same model should be almost identical regardless of the library.
I have a CNN that saves the bottleneck features of the training and test data with the VGG16 architecture, then feeds those features into my custom fully connected layers to classify the images.
#create data augmentations for training set; helps reduce overfitting and find more features
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

#use ImageDataGenerator to load validation images; data augmentation not necessary for the validation process
val_datagen = ImageDataGenerator(rescale=1./255)

#load VGG16 model, pretrained on imagenet database
model = applications.VGG16(include_top=False, weights='imagenet')

#generator to load images into NN
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

#total number of images used for training data
num_train = len(train_generator.filenames)

#save features to numpy array file so features do not overload memory
bottleneck_features_train = model.predict_generator(train_generator, num_train // batch_size)

val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

num_val = len(val_generator.filenames)

bottleneck_features_validation = model.predict_generator(val_generator, num_val // batch_size)

#used to retrieve the labels of the images
label_datagen = ImageDataGenerator(rescale=1./255)

#generators can create class labels for each image in either directory
train_label_generator = label_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

#total number of images used for training data
num_train = len(train_label_generator.filenames)

#load features from VGG16 and pair each image with corresponding label (0 for normal, 1 for pneumonia)
#train_data = np.load('xray/bottleneck_features_train.npy')

#get the class labels generated by train_label_generator
train_labels = train_label_generator.classes

val_label_generator = label_datagen.flow_from_directory(
    val_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

num_val = len(val_label_generator.filenames)

#val_data = np.load('xray/bottleneck_features_validation.npy')
val_labels = val_label_generator.classes

#create fully connected layers, replacing the ones cut off from the VGG16 model
model = Sequential()

#converts model's expected input dimensions to same shape as bottleneck feature arrays
model.add(Flatten(input_shape=bottleneck_features_train.shape[1:]))

#ignores a fraction of input neurons so they do not become co-dependent on each other; helps prevent overfitting
model.add(Dropout(0.7))

#normal fully-connected layer with relu activation. Replaces all negative inputs with 0 and does not fire the neuron,
#creating a lighter network
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.7))

#output layer to classify 0 or 1
model.add(Dense(1, activation='sigmoid'))

#compile model and specify which optimizer and loss function to use
#optimizer used to update the weights to optimal values; adam optimizer maintains separate learning rates
#for each weight and updates accordingly
#cross-entropy function measures the ability of model to correctly classify 0 or 1
model.compile(optimizer=optimizers.Adam(lr=0.0007), loss='binary_crossentropy', metrics=['accuracy'])

#used to stop training if NN shows no improvement for 5 epochs
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=5, verbose=1)

#checks each epoch as it runs and saves the weight file from the model with the lowest validation loss
checkpointer = ModelCheckpoint(filepath=top_model_weights_dir, verbose=1, save_best_only=True)

#fit the model to the data
history = model.fit(bottleneck_features_train, train_labels,
                    epochs=epochs,
                    batch_size=batch_size,
                    callbacks=[early_stop, checkpointer],
                    verbose=2,
                    validation_data=(bottleneck_features_validation, val_labels))
After calling train_top_model(), the CNN gets an 86% accuracy after around 10 epochs.
However, when I try implementing this architecture by building the fully connected layers directly on top of the VGG16 layers, the network gets stuck at a val_acc of 0.5000 and basically does not train. Are there any issues with the code?
epochs = 10
batch_size = 20

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    shuffle=False)

num_train = len(train_generator.filenames)

val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    shuffle=False)

num_val = len(val_generator.filenames)

base_model = applications.VGG16(weights='imagenet', include_top=False,
                                input_shape=(img_width, img_height, 3))

x = base_model.output
x = Flatten()(x)
x = Dropout(0.7)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.7)(x)
predictions = Dense(1, activation='sigmoid')(x)

model = Model(inputs=base_model.input, outputs=predictions)

for layer in model.layers[:19]:
    layer.trainable = False

checkpointer = ModelCheckpoint(filepath=top_model_weights_dir, verbose=1, save_best_only=True)

model.compile(optimizer=optimizers.Adam(lr=0.0007), loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit_generator(train_generator,
                              steps_per_epoch=(num_train//batch_size),
                              validation_data=val_generator,
                              validation_steps=(num_val//batch_size),
                              callbacks=[checkpointer],
                              verbose=1,
                              epochs=epochs)
The reason is that in the second approach you have not frozen the VGG16 layers. In other words, you are training the whole network, whereas in the first approach you are only training the weights of your fully connected layers.
Use something like this:
for layer in base_model.layers[:end_layer]:
    layer.trainable = False
where end_layer is the index of the last layer you imported from VGG16.
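To confirm the freeze took effect (a small illustrative check, assuming a Keras Model named model built from the VGG16 base as in the question), list which layers are trainable and compare the parameter counts before fitting:

for layer in model.layers:
    print(layer.name, layer.trainable)

model.summary()  # the "Trainable params" count should drop sharply once the base is frozen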
I don't know how I could evaluate the training accuracy and test accuracy at every epoch in the following code. This CNN is used for MNIST classification and the code is copied from the TensorFlow tutorial https://www.tensorflow.org/tutorials/layers.
It seems that it only records the loss at every epoch, and I cannot find a way to add accuracy to the code.
How could I do that?
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn,
        steps=20000,
        hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)


main(1)
When training a neural network, it is common to train the model for many epochs. It may be preferable to print the accuracy only every n epochs, where you set n based on the total number of epochs you plan to run; a sketch of one way to do this with the Estimator API follows below. Personally, I prefer to log the data and view it in TensorBoard.
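A minimal sketch (assuming the cnn_model_fn, input functions, data arrays and mnist_classifier from the question, and roughly 550 steps per epoch for the 55,000-image MNIST training split at batch_size=100): alternate short training runs with evaluation so that training and test accuracy are reported every epoch.

steps_per_epoch = 550   # ~55,000 training images / batch_size 100 (an assumption)
n_epochs = 20

# an input_fn over the training data with num_epochs=1 and no shuffling, used only for evaluation
train_eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_data}, y=train_labels, num_epochs=1, shuffle=False)

for epoch in range(n_epochs):
    mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)

    train_metrics = mnist_classifier.evaluate(input_fn=train_eval_input_fn)
    test_metrics = mnist_classifier.evaluate(input_fn=eval_input_fn)

    print("epoch %d: train accuracy %.4f, test accuracy %.4f"
          % (epoch + 1, train_metrics["accuracy"], test_metrics["accuracy"]))

Because Estimator.evaluate also writes its metrics as summaries under model_dir, the same numbers show up in TensorBoard without any extra code.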