I am running a VAE in Keras. The model compiles and prints its summary; however, when I try to train the model I get the following error:
ValueError: Dimensions must be equal, but are 32 and 16 for '{{node vae_mlp/tf_op_layer_AddV2_14/AddV2_14}} = AddV2[T=DT_FLOAT, _cloned=true](vae_mlp/tf_op_layer_Mul_10/Mul_10, vae_mlp/tf_op_layer_Mul_11/Mul_11)' with input shapes: [16,32,32], [16].
The 16 is the batch size. I know because if I change it to any number greater than 1, I get the same error mentioning that batch size (and it works for a batch size of 1). I suspect the problem is that the images have 3 channels and that, for some reason, they are treated as if they were grayscale. But I am not sure.
I am attaching the full code as well:
"""### VAE Cifar 10"""
from keras import layers
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras.layers import Dropout
from keras import regularizers
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
input_shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3])
original_dim=x_train.shape[1]*x_train.shape[2]
latent_dim = 12
import keras
# encoder architecture
encoder_input = keras.Input(shape=input_shape)
cx = layers.Conv2D(filters=64, kernel_size=(3, 3),
                   activation='relu', padding='same')(encoder_input)
cx = layers.Conv2D(filters=64, kernel_size=(3, 3),
                   activation='relu', padding='same')(cx)
cx = layers.MaxPool2D(2, 2)(cx)
cx = layers.Dropout(0.2)(cx)
cx = layers.Conv2D(filters=64, kernel_size=(3, 3),
                   activation='relu', padding='same')(cx)
cx = layers.Conv2D(filters=64, kernel_size=(3, 3),
                   activation='relu', padding='same')(cx)
cx = layers.MaxPool2D(2, 2)(cx)
cx = layers.Dropout(0.2)(cx)
cx = layers.Conv2D(filters=128, kernel_size=(3, 3),
                   activation='relu', padding='same')(cx)
cx = layers.Conv2D(filters=128, kernel_size=(3, 3),
                   activation='relu', padding='same')(cx)
cx = layers.MaxPool2D(2, 2)(cx)
cx = layers.Dropout(0.2)(cx)
x = layers.Flatten()(cx)
z_mean = layers.Dense(latent_dim, activation='relu', name='z_mean')(x)  # I removed the softmax layer
z_log_sigma = layers.Dense(latent_dim, activation='relu', name='z_sd')(x)
from keras import backend as K

def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=0.1)
    return z_mean + K.exp(z_log_sigma) * epsilon

z = layers.Lambda(sampling)([z_mean, z_log_sigma])
# Create encoder
encoder = keras.Model(encoder_input, [z_mean, z_log_sigma, z], name='encoder')
encoder.summary()
# Get Conv2D shape for Conv2DTranspose operation in decoder
conv_shape = K.int_shape(cx)

# Create decoder
# see: https://www.machinecurve.com/index.php/2019/12/30/how-to-create-a-variational-autoencoder-with-keras/
from keras.layers import Conv2DTranspose, Reshape

latent_inputs = keras.Input(shape=(latent_dim,), name='z_sampling')
d0 = layers.Dense(conv_shape[1] * conv_shape[2] * conv_shape[3],
                  activation='relu')(latent_inputs)
d05 = Reshape((conv_shape[1], conv_shape[2], conv_shape[3]))(d0)
d1 = layers.Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2,
                            activation='relu', padding='same')(d05)
d2 = layers.Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2,
                            activation='relu', padding='same')(d1)
d3 = layers.Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2,
                            activation='relu', padding='same')(d2)
d4 = layers.Conv2DTranspose(filters=64, kernel_size=(3, 3),
                            activation='relu', padding='same')(d3)
d5 = layers.Conv2DTranspose(filters=64, kernel_size=(3, 3),
                            activation='relu', padding='same')(d4)
d6 = layers.Conv2DTranspose(filters=64, kernel_size=(3, 3),
                            activation='relu', padding='same')(d5)
outputs = layers.Conv2D(filters=3, kernel_size=3, activation='sigmoid',
                        padding='same', name='decoder_output')(d6)

from keras import Model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()

# instantiate VAE model
outputs = decoder(encoder(encoder_input)[2])
vae = keras.Model(encoder_input, outputs, name='vae_mlp')
vae.summary()
# loss
reconstruction_loss = keras.losses.binary_crossentropy(encoder_input, outputs)
reconstruction_loss *= original_dim
kl_loss = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')

# a batch size of 1 doesn't break, even after one epoch
vae.fit(x_train, x_train,
        epochs=20,
        batch_size=16,
        validation_data=(x_test, x_test))
Two things were required to solve the issue:
First, the loss function should be attached to the model like this:
vae.compile(optimizer='adam', loss=val_loss_func)
Second, before training one should run:
import tensorflow as tf
tf.config.run_functions_eagerly(True)
I am not sure what this does, though.
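For reference, here is a minimal sketch of how the two fixes could fit together. It assumes the encoder's z_mean and z_log_sigma tensors and original_dim from the question are still in scope, and the body of val_loss_func is my reconstruction from the add_loss code above, not necessarily the original function:
import tensorflow as tf
from keras import backend as K

tf.config.run_functions_eagerly(True)  # second fix: run eagerly instead of tracing a graph

def val_loss_func(y_true, y_pred):
    # reconstruction term, scaled up to whole-image level as in the question
    reconstruction_loss = keras.losses.binary_crossentropy(
        K.flatten(y_true), K.flatten(y_pred)) * original_dim
    # KL-divergence term built from the encoder's latent tensors
    kl_loss = -0.5 * K.sum(
        1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return reconstruction_loss + kl_loss

# first fix: attach the loss via compile instead of vae.add_loss(...)
vae.compile(optimizer='adam', loss=val_loss_func)
vae.fit(x_train, x_train, epochs=20, batch_size=16,
        validation_data=(x_test, x_test))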
This question is in line with the question posted here, but with a slight nuance concerning the CNN. Using the feature extraction definition:
max_pad_len = 174
n_mels = 128

def extract_features(file_name):
    try:
        audio, sample_rate = librosa.core.load(file_name, res_type='kaiser_fast')
        mely = librosa.feature.melspectrogram(y=audio, sr=sample_rate, n_mels=n_mels)
        #pad_width = max_pad_len - mely.shape[1]
        #mely = np.pad(mely, pad_width=((0, 0), (0, pad_width)), mode='constant')
    except Exception as e:
        print("Error encountered while parsing file: ", file_name)
        return None
    return mely
How do you go about getting the correct dimensions of num_rows, num_columns and num_channels for the train and test data? And when constructing the CNN model, how do you determine the correct input shape?
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
I don't know if it is exactly your problem, but I also had to feed a mel spectrogram to a CNN.
Short answer:
input_shape = (x_train.shape[1], x_train.shape[2], 1)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
or
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
input_shape = x_train.shape[1:]
Long answer
In my case I have a DataFrame with speaker_ids and mel spectrograms (previously computed with librosa).
The Keras CNN models expect images with width, height and colour channels (grayscale or RGB).
The mel spectrograms produced by librosa are image-like arrays with width and height, so you need to reshape them to add the channel dimension.
Define the input and expected output
# It looks clumsy, but this way I could convert the pandas.Series to a np.array
x = np.array(list(df.mel))
y = df.speaker_id
print('X shape:', x.shape)
X shape: (2204, 128, 24)
2204 Mels, 128x24
Split in train-test
x_train, x_test, y_train, y_test = train_test_split(x, y)
print(f'Train: {len(x_train)}', f'Test: {len(x_test)}')
Train: 1653 Test: 551
Reshape to add the extra dimension
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
print('Shapes:', x_train.shape, x_test.shape)
Shapes: (1653, 128, 24, 1) (551, 128, 24, 1)
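Equivalently, np.expand_dims would add the channel axis without spelling out every dimension (a small sketch of the same reshape step):
import numpy as np

# append a channel axis: (N, height, width) -> (N, height, width, 1)
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)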
Set input_shape
# The input shape is independent of the number of samples
input_shape = x_train.shape[1:]
print('Input shape:', input_shape)
Input shape: (128, 24, 1)
Put it into the model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D())
# More layers...
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
Run model
model.fit(x_train, y_train, epochs=20, validation_data=(x_test, y_test))
Hope this is helpful.
I am writing code to run an autoencoder on the CIFAR10 dataset and to view the reconstructed images. The requirement is to create:
Encoder, first layer
Input shape: (32, 32, 3)
Conv2D layer with 64 filters of (3, 3)
BatchNormalization layer
ReLU activation
2D MaxPooling layer with (2, 2) filter
Encoder, second layer
Conv2D layer with 16 filters of (3, 3)
BatchNormalization layer
ReLU activation
2D MaxPooling layer with (2, 2) filter
Final encoding: MaxPool with (2, 2) applied after all previous layers
Decoder, first layer
Input shape: encoder output
Conv2D layer with 16 filters of (3, 3)
BatchNormalization layer
ReLU activation
UpSampling2D with (2, 2) filter
Decoder, second layer
Conv2D layer with 32 filters of (3, 3)
BatchNormalization layer
ReLU activation
UpSampling2D with (2, 2) filter
Final decoding: sigmoid applied after all previous layers
I understand that when we create a Convolutional Autoencoder (or any AE), we need to pass the output of the previous layer to the next layer.
So, when I create the first Conv2D layer with ReLU and then perform BatchNormalization, I pass the Conv2D output to it, right?
But when I do MaxPooling2D, what should I pass: the BatchNormalization output or the Conv2D output?
Also, is there any order in which I should be performing these operations?
Conv2D --> BatchNormalization --> MaxPooling2D
OR
Conv2D --> MaxPooling2D --> BatchNormalization
I am attaching my code below. I have attempted it in two different ways and hence get different outputs (in terms of the model summary and also the model training graph).
Can someone please explain which is the correct method (Method 1 or Method 2)? Also, how do I tell which graph shows better model performance?
Method - 1
input_image = Input(shape=(32, 32, 3))
### Encoder
conv1_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_image)
bnorm1_1 = BatchNormalization()(conv1_1)
mpool1_1 = MaxPooling2D((2, 2), padding='same')(conv1_1)
conv1_2 = Conv2D(16, (3, 3), activation='relu', padding='same')(mpool1_1)
borm1_2 = BatchNormalization()(conv1_2)
encoder = MaxPooling2D((2, 2), padding='same')(conv1_2)
### Decoder
conv2_1 = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder)
bnorm2_1 = BatchNormalization()(conv2_1)
up1_1 = UpSampling2D((2, 2))(conv2_1)
conv2_2 = Conv2D(32, (3, 3), activation='relu', padding='same')(up1_1)
bnorm2_2 = BatchNormalization()(conv2_2)
up2_1 = UpSampling2D((2, 2))(conv2_2)
decoder = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(up2_1)
model = Model(input_image, decoder)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()
history = model.fit(trainX, trainX,
                    epochs=50,
                    batch_size=1000,
                    shuffle=True,
                    verbose=2,
                    validation_data=(testX, testX))
As output of the model summary, I get this:
Total params: 18,851
Trainable params: 18,851
Non-trainable params: 0
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
Method - 2
input_image = Input(shape=(32, 32, 3))
### Encoder
x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_image)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
encoder = MaxPooling2D((2, 2), padding='same')(x)
### Decoder
x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder)
x = BatchNormalization()(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = UpSampling2D((2, 2))(x)
decoder = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)
model = Model(input_image, decoder)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()
history = model.fit(trainX, trainX,
                    epochs=50,
                    batch_size=1000,
                    shuffle=True,
                    verbose=2,
                    validation_data=(testX, testX))
As output of the model summary, I get this:
Total params: 19,363
Trainable params: 19,107
Non-trainable params: 256
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
In Method 1, the BatchNormalization layers do not exist in the compiled model, because their outputs are not used anywhere: the pooling layers are applied to conv1_1 and conv1_2 directly. You can check this by running model.summary().
Method 2 is perfectly alright.
Order of the operations:
Conv2D --> BatchNormalization --> MaxPooling2D is the usual approach,
though either order can work, since BatchNorm is just mean and variance normalization. Putting BatchNorm after pooling also leaves fewer activations to normalize, so it is slightly cheaper.
Edit:
For Conv2D --> BatchNormalization --> MaxPooling2D :
conv1_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_image)
bnorm1_1 = BatchNormalization()(conv1_1)
mpool1_1 = MaxPooling2D((2, 2), padding='same')(bnorm1_1)
and then use mpool1_1 as input for next layer.
For Conv2D --> MaxPooling2D --> BatchNormalization:
conv1_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_image)
mpool1_1 = MaxPooling2D((2, 2), padding='same')(conv1_1)
bnorm1_1 = BatchNormalization()(mpool1_1)
and then use bnorm1_1 as input for next layer.
To use the BatchNormalization layer effectively, you should apply it before the activation.
Instead of:
conv1_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_image)
bnorm1_1 = BatchNormalization()(conv1_1)
mpool1_1 = MaxPooling2D((2, 2), padding='same')(bnorm1_1)
Use it like this:
conv1_1 = Conv2D(64, (3, 3), padding='same')(input_image)
bnorm1_1 = BatchNormalization()(conv1_1)
act_1 = Activation('relu')(bnorm1_1)
mpool1_1 = MaxPooling2D((2, 2), padding='same')(act_1)
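For completeness, here is a sketch of Method 2 rewritten with BatchNormalization before each activation, combining both suggestions above (same layer sizes as in the question):
from keras.layers import Input, Conv2D, BatchNormalization, Activation, MaxPooling2D, UpSampling2D
from keras.models import Model

input_image = Input(shape=(32, 32, 3))

### Encoder: Conv2D -> BatchNormalization -> Activation -> MaxPooling2D
x = Conv2D(64, (3, 3), padding='same')(input_image)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(16, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
encoder = MaxPooling2D((2, 2), padding='same')(x)

### Decoder: same pattern, with UpSampling2D instead of pooling
x = Conv2D(16, (3, 3), padding='same')(encoder)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((2, 2))(x)
decoder = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

model = Model(input_image, decoder)
model.compile(optimizer='adam', loss='binary_crossentropy')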
For more details, check here:
Where do I call the BatchNormalization function in Keras?
I have a simple sequential deep model, shown below, which performs binary classification. I pass the 3 color channels of my dataset images to the model for training. How can I add grayscale as a 4th channel to my model? What changes do I need to make?
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from PIL import Image
from random import shuffle, choice
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras import optimizers
IMAGE_SIZE = 300
epochs_num = 100
batch_size = 64
IMAGE_DIRECTORY = './data'
def label_img(name):
    if name == 'fire':
        return np.array([1, 0])
    elif name == 'none':
        return np.array([0, 1])

def load_data():
    print("Loading images...")
    train_data = []
    directories = next(os.walk(IMAGE_DIRECTORY))[1]
    for dirname in directories:
        print("Loading {0}".format(dirname))
        file_names = next(os.walk(os.path.join(IMAGE_DIRECTORY, dirname)))[2]
        for i in range(len(file_names)):
            image_name = choice(file_names)
            image_path = os.path.join(IMAGE_DIRECTORY, dirname, image_name)
            label = label_img(dirname)
            if "DS_Store" not in image_path:
                img = Image.open(image_path)
                img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.ANTIALIAS)
                train_data.append([np.array(img), label])
    shuffle(train_data)
    return train_data
def create_model():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(5, 5), activation='relu',
                     input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    return model
training_data = load_data()
training_images = np.array([i[0] for i in training_data])
training_labels = np.array([i[1] for i in training_data])
print(str(len(training_images)))
# Split the data
training_images, validation_images, training_labels, validation_labels = train_test_split(training_images, training_labels, test_size=0.2, shuffle= True)
print(str(len(training_images)))
print('creating model')
#========================
model = create_model()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
filepath="./checkpoints/model_{epoch:03d}_{accuracy:.4f}_{val_accuracy:.4f}_{val_loss:.7f}.h5"
checkpoint = ModelCheckpoint(filepath, monitor=["accuracy"], verbose=1, mode='max', save_weights_only=False)
callbacks_list = [checkpoint]
datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=True)
datagen.fit(training_images)
train_gen=datagen.flow(training_images, training_labels, batch_size=batch_size)
#validation
val_datagen = ImageDataGenerator(horizontal_flip=True)
val_datagen.fit(training_images)
val_gen=datagen.flow(validation_images, validation_labels, batch_size=batch_size)
model.fit(train_gen, validation_data=val_gen, epochs=epochs_num, verbose=1, callbacks=callbacks_list)
I don't think you need to add grayscale, and I'm not sure you can "add" it as an extra channel in a meaningful way: a grayscale image is simply one where the R, G and B values are equal for each pixel, so it carries no information the colour channels don't already have. You can train your classifier on grayscale images, on coloured ones, or on both (not recommended). If your raw data is colour images, keep them as they are and use data augmentation if you need more.
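That said, if you wanted to experiment with a fourth channel anyway, here is a minimal sketch of the idea (the luminance weights below are the standard ITU-R BT.601 coefficients; this is an illustration, not a recommendation):
import numpy as np

def add_gray_channel(images):
    # images: (N, H, W, 3) RGB array
    gray = np.dot(images[..., :3], [0.299, 0.587, 0.114])  # luminance
    gray = gray[..., np.newaxis]                           # (N, H, W, 1)
    return np.concatenate([images, gray], axis=-1)         # (N, H, W, 4)

training_images = add_gray_channel(training_images)
validation_images = add_gray_channel(validation_images)

# ...and the first layer would change accordingly:
# Conv2D(32, kernel_size=(5, 5), activation='relu',
#        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 4))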
I trained my CNN model on images using model.fit_generator and reached a good val_acc = 0.97.
Here is the output of the last epoch, showing the high validation accuracy:
199/200 [============================>.] - ETA: 1s - loss: 0.1563 - acc: 0.9563
200/200 [==============================] - 306s 2s/step - loss: 0.1556 - acc: 0.9565 - val_loss: 0.1402 - val_acc: 0.9691
Epoch 00005: val_acc improved from 0.96701 to 0.96907, saving model to /home/sergorl/cars/color_weights.hdf5
But when I take the same validation set that I used during training and test its images one at a time, I always get a wrong predicted label, and the predicted probabilities look like a uniform distribution.
I read these links:
Wrong prediction on images
Why is Keras training well but returning wrong predictions?
Keras Val_acc is good but prediction for same data is poor
But I didn't find a solution!
from keras.models import Sequential,Model,load_model
from keras.optimizers import SGD
from keras.layers import BatchNormalization, Lambda, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.merge import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import os
import cv2
import numpy as np
class CarColorNet:

    def __init__(self, numClasses=6, imageWidth=256, imageHeight=256):
        self.classes = {}
        self.numClasses = numClasses
        self.imageWidth = imageWidth
        self.imageHeight = imageHeight

        input_image = Input(shape=(self.imageWidth, self.imageHeight, 3))

        # ------------------------------ TOP BRANCH ------------------------------
        # first top convolution layer
        top_conv1 = Convolution2D(filters=48, kernel_size=(11, 11), strides=(4, 4),
                                  input_shape=(self.imageWidth, self.imageHeight, 3),
                                  activation='relu')(input_image)
        top_conv1 = BatchNormalization()(top_conv1)
        top_conv1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_conv1)

        # second top convolution layer: split feature map in half
        top_top_conv2 = Lambda(lambda x: x[:, :, :, :24])(top_conv1)
        top_bot_conv2 = Lambda(lambda x: x[:, :, :, 24:])(top_conv1)
        top_top_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_top_conv2)
        top_top_conv2 = BatchNormalization()(top_top_conv2)
        top_top_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_top_conv2)
        top_bot_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_bot_conv2)
        top_bot_conv2 = BatchNormalization()(top_bot_conv2)
        top_bot_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_bot_conv2)

        # third top convolution layer: concatenate the 2 feature maps
        top_conv3 = Concatenate()([top_top_conv2, top_bot_conv2])
        top_conv3 = Convolution2D(filters=192, kernel_size=(3, 3), strides=(1, 1),
                                  activation='relu', padding='same')(top_conv3)

        # fourth top convolution layer: split feature map in half
        top_top_conv4 = Lambda(lambda x: x[:, :, :, :96])(top_conv3)
        top_bot_conv4 = Lambda(lambda x: x[:, :, :, 96:])(top_conv3)
        top_top_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_top_conv4)
        top_bot_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_bot_conv4)

        # fifth top convolution layer
        top_top_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_top_conv4)
        top_top_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_top_conv5)
        top_bot_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                      activation='relu', padding='same')(top_bot_conv4)
        top_bot_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_bot_conv5)

        # ------------------------------ BOTTOM BRANCH ------------------------------
        # first bottom convolution layer
        bottom_conv1 = Convolution2D(filters=48, kernel_size=(11, 11), strides=(4, 4),
                                     input_shape=(224, 224, 3), activation='relu')(input_image)
        bottom_conv1 = BatchNormalization()(bottom_conv1)
        bottom_conv1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_conv1)

        # second bottom convolution layer: split feature map in half
        bottom_top_conv2 = Lambda(lambda x: x[:, :, :, :24])(bottom_conv1)
        bottom_bot_conv2 = Lambda(lambda x: x[:, :, :, 24:])(bottom_conv1)
        bottom_top_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_top_conv2)
        bottom_top_conv2 = BatchNormalization()(bottom_top_conv2)
        bottom_top_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_top_conv2)
        bottom_bot_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_bot_conv2)
        bottom_bot_conv2 = BatchNormalization()(bottom_bot_conv2)
        bottom_bot_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_bot_conv2)

        # third bottom convolution layer: concatenate the 2 feature maps
        bottom_conv3 = Concatenate()([bottom_top_conv2, bottom_bot_conv2])
        bottom_conv3 = Convolution2D(filters=192, kernel_size=(3, 3), strides=(1, 1),
                                     activation='relu', padding='same')(bottom_conv3)

        # fourth bottom convolution layer: split feature map in half
        bottom_top_conv4 = Lambda(lambda x: x[:, :, :, :96])(bottom_conv3)
        bottom_bot_conv4 = Lambda(lambda x: x[:, :, :, 96:])(bottom_conv3)
        bottom_top_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_top_conv4)
        bottom_bot_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_bot_conv4)

        # fifth bottom convolution layer
        bottom_top_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_top_conv4)
        bottom_top_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_top_conv5)
        bottom_bot_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                                         activation='relu', padding='same')(bottom_bot_conv4)
        bottom_bot_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_bot_conv5)

        # ------------------- CONCATENATE TOP AND BOTTOM BRANCHES -------------------
        conv_output = Concatenate()([top_top_conv5, top_bot_conv5,
                                     bottom_top_conv5, bottom_bot_conv5])

        # Flatten
        flatten = Flatten()(conv_output)

        # Fully-connected layers
        FC_1 = Dense(units=4096, activation='relu')(flatten)
        FC_1 = Dropout(0.6)(FC_1)
        FC_2 = Dense(units=4096, activation='relu')(FC_1)
        FC_2 = Dropout(0.6)(FC_2)
        output = Dense(units=self.numClasses, activation='softmax')(FC_2)

        self.model = Model(inputs=input_image, outputs=output)

        sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        # sgd = SGD(lr=0.01, momentum=0.9, decay=0.0005, nesterov=True)
        self.model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    def train(self,
              pathToTrainSet,
              pathToValidSet,
              pathToSaveModel,
              epochs=7,
              batchSize=32,
              stepsPerEpoch=200,
              validationSteps=1000):

        fileOfWeights = 'color_weights.hdf5'
        checkpoint = ModelCheckpoint(os.path.join(pathToSaveModel, fileOfWeights),
                                     monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='max')
        checkpoint2 = ModelCheckpoint(os.path.join(pathToSaveModel, fileOfWeights),
                                      monitor='val_loss', verbose=1,
                                      save_best_only=True, mode='max')

        trainDataGen = ImageDataGenerator(rescale=1.0/255, shear_range=0.2,
                                          zoom_range=0.3, horizontal_flip=True)
        validDataGen = ImageDataGenerator(rescale=1.0/255)

        trainSet = trainDataGen.flow_from_directory(
            pathToTrainSet,
            target_size=(self.imageWidth, self.imageHeight),
            batch_size=batchSize,
            class_mode='categorical'
        )

        self.classes = {v: k for k, v in trainSet.class_indices.items()}
        np.save(os.path.join(pathToSaveModel, 'class_index.npy'), self.classes)

        validSet = validDataGen.flow_from_directory(
            pathToValidSet,
            target_size=(self.imageWidth, self.imageHeight),
            batch_size=batchSize,
            class_mode='categorical'
        )

        self.model.fit_generator(
            trainSet,
            steps_per_epoch=stepsPerEpoch,
            epochs=epochs,
            validation_data=validSet,
            validation_steps=validationSteps//batchSize,
            callbacks=[checkpoint, checkpoint2])

        print('============================ Saving is here ============================')
        self.model.save(os.path.join(pathToSaveModel, 'car_color_net.h5'))
    @staticmethod
    def load(pathToModel, pathToClassIndexes):
        model = load_model(pathToModel)
        layers = model.layers
        inputShape, outputShape = layers[0].input_shape, layers[-1].output_shape
        imageWidth, imageHeight = inputShape[1], inputShape[2]
        numClasses = outputShape[1]
        net = CarColorNet(numClasses, imageWidth, imageHeight)
        net.classes = np.load(os.path.join(pathToClassIndexes, 'class_index.npy')).item()
        return net
    def predictOneImage(self, pathToImage):
        frame = cv2.imread(pathToImage)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (self.imageWidth, self.imageHeight))
        frame = np.expand_dims(frame, axis=0)
        # cv2.imshow("boxed", frame[0, :, :, :])
        # cv2.waitKey(0)
        frame = np.asarray(frame, dtype='float32')
        img = frame / 255
        probs = self.model.predict(img)
        ind = probs.argmax(axis=-1)[0]
        return self.classes[ind]
if __name__ == '__main__':
    pathToTrainSet = '/home/sergorl/cars/train'
    pathToValidSet = '/home/sergorl/cars/valid'
    pathToSaveModel = '/home/sergorl/cars'

    ## Train net
    # net = CarColorNet(numClasses=6)
    # net.train(pathToTrainSet, pathToValidSet, pathToSaveModel)

    # Test on all images from validSet
    net = CarColorNet.load(os.path.join(pathToSaveModel, 'car_color_net.h5'), pathToSaveModel)

    count, countTrueLabels = 0, 0
    for dirpath, _dirnames, filenames in os.walk(pathToValidSet):
        trueLabel = dirpath.split('/')[-1]
        for file in filenames:
            label = net.predictOneImage(os.path.join(dirpath, file))
            print(trueLabel, label)
            if label == trueLabel:
                countTrueLabels += 1
            count += 1
    print('rate is {0:.2f}'.format(float(countTrueLabels) / float(count) * 100))
With val_acc = 0.97, I expect the same (or nearly the same) result when testing every image in the validation set, but I always get wrong predictions!
I also ran the net immediately after training finished and saw that the learning was good:
if __name__ == '__main__':
    pathToTrainSet = '/home/sergorl/cars/train'
    pathToValidSet = '/home/sergorl/cars/valid'
    pathToSaveModel = '/home/sergorl/cars'

    # Train net
    net = CarColorNet(numClasses=6)
    net.train(pathToTrainSet, pathToValidSet, pathToSaveModel)

    # Test on all images from validSet
    count, countTrueLabels = 0, 0
    for dirpath, _dirnames, filenames in os.walk(pathToValidSet):
        trueLabel = dirpath.split('/')[-1]
        for file in filenames:
            label = net.predictOneImage(os.path.join(dirpath, file))
            print(trueLabel, label)
            if label == trueLabel:
                countTrueLabels += 1
            count += 1
    print('rate is {0:.2f}'.format(float(countTrueLabels) / float(count) * 100))
So it seems the problem is inside model.save, and it looks like saving doesn't work! I found many related issues on GitHub, for example:
https://github.com/keras-team/keras/issues/4875
https://github.com/keras-team/keras/issues/4904
But I don't know how to fix it with Python 3.7.3 and Keras 2.0.0.
Can you share more about the issue, such as the output you are getting? From the code I can see that you are training on 6 classes and using categorical cross-entropy, so ideally you should get an array of 6 values, each between 0 and 1, and the index of the highest value in that array should be the output.
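As a quick check, something like this (a sketch, reusing net, img and the preprocessing from predictOneImage in the question) would show both the raw probability vector and the decoded label:
probs = net.model.predict(img)  # shape (1, 6); each row sums to 1
print(probs)                    # inspect the raw distribution
ind = probs.argmax(axis=-1)[0]  # index of the most probable class
print(net.classes[ind])         # decoded class label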
I am trying to rewrite a Sequential model of the Network In Network CNN using the Functional API, on the CIFAR-10 dataset. The Sequential model trains without a problem, but the Functional API model gets stuck. I probably missed something when rewriting the model.
Here's a reproducible example:
Dependencies:
from keras.models import Model, Input, Sequential
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Activation
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.datasets import cifar10
Loading the dataset:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train / 255.
x_test = x_test / 255.
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
input_shape = x_train[0,:,:,:].shape
Here's the working Sequential model:
model = Sequential()
#mlpconv block1
model.add(Conv2D(32, (5, 5), activation='relu',padding='valid',input_shape=input_shape))
model.add(Conv2D(32, (1, 1), activation='relu'))
model.add(Conv2D(32, (1, 1), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Dropout(0.5))
#mlpconv block2
model.add(Conv2D(64, (3, 3), activation='relu',padding='valid'))
model.add(Conv2D(64, (1, 1), activation='relu'))
model.add(Conv2D(64, (1, 1), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Dropout(0.5))
#mlpconv block3
model.add(Conv2D(128, (3, 3), activation='relu',padding='valid'))
model.add(Conv2D(32, (1, 1), activation='relu'))
model.add(Conv2D(10, (1, 1), activation='relu'))
model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))
Compile and train:
model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['acc'])
_ = model.fit(x=x_train, y=y_train, batch_size=32,
              epochs=200, verbose=1, validation_split=0.2)
In three epochs the model gets close to 50% validation accuracy.
Here's the same model rewritten using Functional API:
model_input = Input(shape=input_shape)
#mlpconv block1
x = Conv2D(32, (5, 5), activation='relu',padding='valid')(model_input)
x = Conv2D(32, (1, 1), activation='relu')(x)
x = Conv2D(32, (1, 1), activation='relu')(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.5)(x)
#mlpconv block2
x = Conv2D(64, (3, 3), activation='relu',padding='valid')(x)
x = Conv2D(64, (1, 1), activation='relu')(x)
x = Conv2D(64, (1, 1), activation='relu')(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.5)(x)
#mlpconv block3
x = Conv2D(128, (3, 3), activation='relu',padding='valid')(x)
x = Conv2D(32, (1, 1), activation='relu')(x)
x = Conv2D(10, (1, 1), activation='relu')(x)
x = GlobalAveragePooling2D()(x)
x = Activation(activation='softmax')(x)
model = Model(model_input, x, name='nin_cnn')
This model is then compiled using the same parameters as the Sequential model. When trained, the training accuracy gets stuck at 0.10, meaning the model doesn't improve and is effectively picking one of the 10 classes at random.
What did I miss when rewriting the model? When calling model.summary(), the models look identical except for the explicit Input layer in the Functional API model.
Removing the activation in the final conv layer solves the problem:
x = Conv2D(10, (1, 1))(x)
I am still not sure why the Sequential model works fine with an activation in that layer.
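In other words, the final block becomes (only the last conv changes; the softmax is then the sole nonlinearity applied to the class scores):
#mlpconv block3
x = Conv2D(128, (3, 3), activation='relu', padding='valid')(x)
x = Conv2D(32, (1, 1), activation='relu')(x)
x = Conv2D(10, (1, 1))(x)        # no relu here: keep raw class scores
x = GlobalAveragePooling2D()(x)  # average each class map to a single score
x = Activation('softmax')(x)     # softmax over the 10 scores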