I am facing an error when plotting my confusion matrix. I pass the test labels and my predicted labels to the confusion matrix function, but it gives me a ValueError complaining about the number of samples.
The shapes of my data are below.
Training Data Shape (4162, 224, 224, 3)
Training Data Labels Shape (4162, 5)
Testing Data Shape (3921, 224, 224, 3)
Testing Data Labels Shape (3921, 5)
The predicted label is a bit ugly because I ran only 2 epochs; I just wanted to plot the confusion matrix first.
predictingimage = "D:/compCarsThesisData/data/image/78/3/2010/0ba8d018cdc994.jpg" #67/1698/2010/6805eb92ac6c70.jpg"
predictImageRead = mpg.imread(predictingimage)
resizingImage = cv2.resize(predictImageRead,(224,224))
reshapedFinalImage = np.expand_dims(resizingImage, axis=0)
npimage = np.asarray(reshapedFinalImage)
m = model.predict(npimage)
print(m)
[array([[0.02502811, 0.01959323, 0.6556284 , 0.26472655, 0.03502375]],
dtype=float32), array([[5.8234303e-04, 3.1917400e-04, 9.4957882e-01, 1.8873921e-02,
3.0645736e-02]], dtype=float32), array([[0.02581117, 0.04752538, 0.81816435, 0.04812173, 0.06037736]],
dtype=float32)]
cm = confusion_matrix(train_labels_Encode,m)
plt.imshow(cm)
plt.show()
ERROR
Traceback (most recent call last):
File "d:/ThesisWork/seriouswork/Inception_SVM_CompCarsGoogleNetArchitecture.py", line 299, in <module>
cm = confusion_matrix(train_labels_hotEncode,n)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\metrics\classification.py", line 253, in confusion_matrix
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\metrics\classification.py", line 71, in _check_targets
check_consistent_length(y_true, y_pred)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\utils\validation.py", line 235, in check_consistent_length
" samples: %r" % [int(l) for l in lengths])
ValueError: Found input variables with inconsistent numbers of samples: [4162, 3]
Classifier Code:
X_train = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TrainingData_Top5.npy')#('D:/ThesisWork/S_224_Training_data.npy')#training_images
X_test = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TrainingLabels_Top5.npy')#('D:/ThesisWork/S_224_Training_labels.npy')#training_labels
y_train = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TestingData_Top5.npy')#('D:/ThesisWork/S_224_Testing_data.npy')#testing_images
y_test = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TestingLabels_Top5.npy')#('D:/ThesisWork/S_224_Testing_labels.npy')#testing_labels
print(X_test)
le = preprocessing.LabelEncoder()
le.fit(X_test)
transform_trainLabels = le.transform(X_test)
print(transform_trainLabels)
print(le.inverse_transform(transform_trainLabels))
train_labels_hotEncode = np_utils.to_categorical(transform_trainLabels,len(set(transform_trainLabels)))
shuffle(X_train)
shuffle(train_labels_hotEncode)
le2 = preprocessing.LabelEncoder()
le2.fit(y_test)
transform_testLabels = le2.transform(y_test)
test_labels_hotEncode = np_utils.to_categorical(transform_testLabels,len(set(transform_testLabels)))
print(test_labels_hotEncode.shape)
shuffle(y_train)
shuffle(test_labels_hotEncode)
# print(train_labels_hotEncode[3000])
# exit()
# X_train = np.asarray(X_train / 255.0)
# y_train = np.asarray(y_train / 255.0)
# print("X_Training" ,X_train.shape, X_train)
# print("X_TEST", X_test.shape)
# print("Y_train", y_train.shape)
# print("y_test", y_test.shape)
# exit()
# plt.imshow(X_train[1])
# print(X_test)
# plt.imshow(y_train[1])
# print(y_test)
# plt.show()
print("Trainig Data Shape",X_train.shape)
print("Training Data Labels Shape",train_labels_hotEncode.shape)
print("Testing Data Shape", y_train.shape)
print("Testing Data Labels Shape", test_labels_hotEncode.shape)
# X_train = np.array(X_train).astype(np.float32)
# y_train = np.array(y_train).astype(np.float32)
def inception_module(image,
                     filters_1x1,
                     filters_3x3_reduce,
                     filter_3x3,
                     filters_5x5_reduce,
                     filters_5x5,
                     filters_pool_proj,
                     name=None):
    conv_1x1 = Conv2D(filters_1x1, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(image)
    conv_3x3 = Conv2D(filters_3x3_reduce, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(image)
    conv_3x3 = Conv2D(filter_3x3, (3,3), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_3x3)
    conv_5x5 = Conv2D(filters_5x5_reduce, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(image)
    conv_5x5 = Conv2D(filters_5x5, (3,3), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_5x5)
    pool_proj = MaxPool2D((3,3), strides=(1,1), padding='same')(image)
    pool_proj = Conv2D(filters_pool_proj, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(pool_proj)
    output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj], axis=3, name=name)
    return output
kernel_init = keras.initializers.glorot_uniform()
bias_init = keras.initializers.Constant(value=0.2)
# IMG_SIZE = 64
input_layer = Input(shape=(224,224,3))
image = Conv2D(64,(7,7),padding='same', strides=(2,2), activation='relu', name='conv_1_7x7/2', kernel_initializer=kernel_init, bias_initializer=bias_init)(input_layer)
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_1_3x3/2')(image)
image = Conv2D(64, (1,1), padding='same', strides=(1,1), activation='relu', name='conv_2a_3x3/1' )(image)
image = Conv2D(192, (3,3), padding='same', strides=(1,1), activation='relu', name='conv_2b_3x3/1')(image)
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_2_3x3/2')(image)
image = inception_module(image,
filters_1x1= 64,
filters_3x3_reduce= 96,
filter_3x3 = 128,
filters_5x5_reduce=16,
filters_5x5= 32,
filters_pool_proj=32,
name='inception_3a')
image = inception_module(image,
filters_1x1=128,
filters_3x3_reduce=128,
filter_3x3=192,
filters_5x5_reduce=32,
filters_5x5=96,
filters_pool_proj=64,
name='inception_3b')
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_3_3x3/2')(image)
image = inception_module(image,
filters_1x1=192,
filters_3x3_reduce=96,
filter_3x3=208,
filters_5x5_reduce=16,
filters_5x5=48,
filters_pool_proj=64,
name='inception_4a')
image1 = AveragePooling2D((5,5), strides=3)(image)
image1 = Conv2D(128, (1,1), padding='same', activation='relu')(image1)
image1 = Flatten()(image1)
image1 = Dense(1024, activation='relu')(image1)
image1 = Dropout(0.7)(image1)
image1 = Dense(5, activation='softmax', name='auxilliary_output_1')(image1)
image = inception_module(image,
filters_1x1 = 160,
filters_3x3_reduce= 112,
filter_3x3= 224,
filters_5x5_reduce= 24,
filters_5x5= 64,
filters_pool_proj=64,
name='inception_4b')
image = inception_module(image,
filters_1x1= 128,
filters_3x3_reduce = 128,
filter_3x3= 256,
filters_5x5_reduce= 24,
filters_5x5=64,
filters_pool_proj=64,
name='inception_4c')
image = inception_module(image,
filters_1x1=112,
filters_3x3_reduce=144,
filter_3x3= 288,
filters_5x5_reduce= 32,
filters_5x5=64,
filters_pool_proj=64,
name='inception_4d')
image2 = AveragePooling2D((5,5), strides=3)(image)
image2 = Conv2D(128, (1,1), padding='same', activation='relu')(image2)
image2 = Flatten()(image2)
image2 = Dense(1024, activation='relu')(image2)
image2 = Dropout(0.7)(image2) #Changed from 0.7
image2 = Dense(5, activation='softmax', name='auxilliary_output_2')(image2)
image = inception_module(image,
filters_1x1=256,
filters_3x3_reduce=160,
filter_3x3=320,
filters_5x5_reduce=32,
filters_5x5=128,
filters_pool_proj=128,
name= 'inception_4e')
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_4_3x3/2')(image)
image = inception_module(image,
filters_1x1=256,
filters_3x3_reduce=160,
filter_3x3= 320,
filters_5x5_reduce=32,
filters_5x5= 128,
filters_pool_proj=128,
name='inception_5a')
image = inception_module(image,
filters_1x1=384,
filters_3x3_reduce=192,
filter_3x3=384,
filters_5x5_reduce=48,
filters_5x5=128,
filters_pool_proj=128,
name='inception_5b')
image = GlobalAveragePooling2D(name='avg_pool_5_3x3/1')(image)
image = Dropout(0.7)(image)
image = Dense(5, activation='softmax', name='output')(image)
model = Model(input_layer, [image,image1,image2], name='inception_v1')
model.summary()
epochs = 2
initial_lrate = 0.001 # Changed From 0.01
def decay(epoch, steps=100):
    initial_lrate = 0.01
    drop = 0.96
    epochs_drop = 8
    lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
    return lrate
sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# nadam = keras.optimizers.Nadam(lr= 0.002, beta_1=0.9, beta_2=0.999, epsilon=None)
# keras
lr_sc = LearningRateScheduler(decay)
# rms = keras.optimizers.RMSprop(lr = initial_lrate, rho=0.9, epsilon=1e-08, decay=0.0)
# ad = keras.optimizers.adam(lr=initial_lrate)
model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy','categorical_crossentropy'],loss_weights=[1,0.3,0.3], optimizer=sgd, metrics=['accuracy'])
# loss = 'categorical_crossentropy', 'categorical_crossentropy','categorical_crossentropy'
history = model.fit(X_train, [train_labels_hotEncode,train_labels_hotEncode,train_labels_hotEncode], validation_split=0.3, shuffle=True, epochs=epochs, batch_size=32, callbacks=[lr_sc]) # batch size changed from 256 or 64
# validation_data=(y_train,[test_labels_hotEncode,test_labels_hotEncode,test_labels_hotEncode]), validation_data= (X_train, [train_labels_hotEncode,train_labels_hotEncode,train_labels_hotEncode]),
print(history.history.keys())
plt.plot(history.history['output_acc'])
plt.plot(history.history['val_output_acc'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'],loc = 'upper left')
plt.show()
# predictingimage = "D:/compCarsThesisData/data/image/78/3/2010/0ba8d018cdc994.jpg" #67/1698/2010/6805eb92ac6c70.jpg"
predictImageRead = X_train
# resizingImage = cv2.cv2.resize(predictImageRead,(224,224))
# reshapedFinalImage = np.expand_dims(predictImageRead, axis=0)
# print(reshapedFinalImage.shape)
# npimage = np.array(reshapedFinalImage)
m = model.predict(predictImageRead)
print(m)
print(predictImageRead.shape)
print(train_labels_hotEncode)
# print(m.shape)
plt.imshow(predictImageRead[1])
plt.show()
# n = np.argmax(m,axis=-1)
# n = np.array(m)
print(confusion_matrix(X_test,m[0]))
cm = confusion_matrix(X_test,m[0])
plt.imshow(cm)
plt.show()
Please guide me through this.
Thanks!
If you want to compute a confusion matrix on your training data, you have to make your model predict all your training examples, roughly like this:
m = model.predict(train_data) # train_data should have the shape (4162, 224, 224, 3)
m should then have a length of 4162, and you can plot the confusion matrix like this:
cm = confusion_matrix(train_labels_Encode, m)
plt.imshow(cm)
plt.show()
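A note on why this failed originally: this model has three outputs, so predict() returns a list of three arrays (hence sklearn's [4162, 3] complaint), and confusion_matrix expects class indices, not one-hot rows or probability vectors. A minimal sketch, assuming the variables from the question:

import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

preds = model.predict(X_train)                      # list of 3 arrays, each (4162, 5)
y_pred = np.argmax(preds[0], axis=1)                # main output -> class indices
y_true = np.argmax(train_labels_hotEncode, axis=1)  # one-hot rows -> class indices

cm = confusion_matrix(y_true, y_pred)               # both have 4162 samples now
plt.imshow(cm)
plt.show()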
Related
My data has the following shapes:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
(942, 32, 32, 1) (236, 32, 32, 1) (942, 3, 3) (236, 3, 3)
And whenever I try to run my CNN I get the following error:
from tensorflow.keras import layers
from tensorflow.keras import Model
img_input = layers.Input(shape=(32, 32, 1))
x = layers.Conv2D(16, (3,3), activation='relu', strides = 1, padding = 'same')(img_input)
x = layers.Conv2D(32, (3,3), activation='relu', strides = 2)(x)
x = layers.Conv2D(128, (3,3), activation='relu', strides = 2)(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(3, 3, activation='linear', strides = 2)(x)
output = layers.Flatten()(x)
model = Model(img_input, output)
model.summary()
model.compile(loss='mean_squared_error',optimizer= 'adam', metrics=['mse'])
history = model.fit(X_train,Y_train,validation_data=(X_test, Y_test), epochs = 100,verbose=1)
Error:
InvalidArgumentError: Incompatible shapes: [32,3] vs. [32,3,3]
[[node BroadcastGradientArgs_2 (defined at /usr/local/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1751) ]] [Op:__inference_distributed_function_7567]
Function call stack:
distributed_function
What am I missing here?
You don't handle the dimensionality inside your network properly. First, expand the dimensions of your y in order to get it into the format (n_sample, 3, 3, 1). Then adjust the network (I removed the flatten and max pooling and adjusted the last conv output):
import numpy as np
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.models import Model

# create dummy data
n_sample = 10
X = np.random.uniform(0,1, (n_sample, 32, 32, 1))
y = np.random.uniform(0,1, (n_sample, 3, 3))
# expand y dim
y = y[...,np.newaxis]
print(X.shape, y.shape)
img_input = Input(shape=(32, 32, 1))
x = Conv2D(16, (3,3), activation='relu', strides = 1, padding = 'same')(img_input)
x = Conv2D(32, (3,3), activation='relu', strides = 2)(x)
x = Conv2D(128, (3,3), activation='relu', strides = 2)(x)
x = Conv2D(1, (3,3), activation='linear', strides = 2)(x)
model = Model(img_input, x)
model.summary()
model.compile(loss='mean_squared_error',optimizer= 'adam', metrics=['mse'])
model.fit(X,y, epochs=3)
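As a quick sanity check (a sketch, assuming the model defined just above), the network now ends in a feature map whose shape matches the expanded targets:

# The stride-2 'valid' convolutions reduce 32 -> 15 -> 7 -> 3, so the model
# output is (None, 3, 3, 1), matching y of shape (n_sample, 3, 3, 1).
print(model.output_shape)  # (None, 3, 3, 1)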
I have a dataset where x_train shape is (34650, 10, 1), y_train shape is (34650,), x_test shape is (17067, 10, 1) and y_test shape is (17067,).
I am making a simple CNN model:
input_layer = Input(shape=(10, 1))
conv2 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(input_layer)
pool1 = MaxPooling1D(pool_size=1)(conv2)
drop1 = Dropout(0.5)(pool1)
pool2 = MaxPooling1D(pool_size=1)(drop1)
conv3 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool2)
drop2 = Dropout(0.5)(conv3)
conv4 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(drop2)
pool3 = MaxPooling1D(pool_size=1)(conv4)
conv5 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool3)
output_layer = Dense(1, activation='sigmoid')(conv5)
model_2 = Model(inputs=input_layer, outputs=output_layer)
But when I try to fit the model
model_2.compile(loss='mse',optimizer='adam')
model_2 = model_2.fit(x_train, y_train,
batch_size=128,
epochs=2,
verbose=1,
validation_data=(x_test, y_test))
I am getting this error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-177-aee9b3241a20> in <module>()
4 epochs=2,
5 verbose=1,
----> 6 validation_data=(x_test, y_test))
2 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
133 ': expected ' + names[i] + ' to have ' +
134 str(len(shape)) + ' dimensions, but got array '
--> 135 'with shape ' + str(data_shape))
136 if not check_batch_axis:
137 data_shape = data_shape[1:]
ValueError: Error when checking target: expected dense_14 to have 3 dimensions, but got array with shape (34650, 1)
The shape of x_train and x_test is already 3-dimensional, so why is it showing this error?
This is because your input is 3D but your target is 2D. Inside your network there isn't anything that takes you from 3D to 2D; to do this you can use global pooling or flatten. Below is an example:
import numpy as np
from keras.layers import Input, Conv1D, MaxPooling1D, Dropout, Dense
from keras.layers import GlobalMaxPooling1D as GlobalMaxPool1D
from keras.models import Model

n_sample = 100
X = np.random.uniform(0,1, (n_sample,10,1))
y = np.random.randint(0,2, n_sample)
input_layer = Input(shape=(10, 1))
conv2 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(input_layer)
pool1 = MaxPooling1D(pool_size=1)(conv2)
drop1 = Dropout(0.5)(pool1)
pool2 = MaxPooling1D(pool_size=1)(drop1)
conv3 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool2)
drop2 = Dropout(0.5)(conv3)
conv4 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(drop2)
pool3 = MaxPooling1D(pool_size=1)(conv4)
conv5 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool3)
x = GlobalMaxPool1D()(conv5) # =====> from 3d to 2d (GlobalAveragePooling1D or Flatten also work)
output_layer = Dense(1, activation='sigmoid')(x)
model_2 = Model(inputs=input_layer, outputs=output_layer)
model_2.compile('adam', 'binary_crossentropy')
model_2.fit(X,y, epochs=3)
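To see why the original model failed: a Dense layer applied to a 3D tensor acts only on the last axis, so without global pooling the output would stay 3D. A quick check (a sketch, assuming the model above):

# conv5 has shape (None, 2, 64); applying Dense(1) directly would give
# (None, 2, 1) -- still 3D. After GlobalMaxPool1D the model ends in 2D:
print(model_2.output_shape)  # (None, 1)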
I trained my CNN model on images using model.fit_generator and got a good val_acc=0.97.
Here is the output of the last epoch, showing the high validation accuracy:
199/200 [============================>.] - ETA: 1s - loss: 0.1563 - acc: 0.9563
200/200 [==============================] - 306s 2s/step - loss: 0.1556 - acc: 0.9565 - val_loss: 0.1402 - val_acc: 0.9691
Epoch 00005: val_acc improved from 0.96701 to 0.96907, saving model to /home/sergorl/cars/color_weights.hdf5
But when I use the same validation data set that I used during training and test only one image at a time, I always get a wrong predicted label for every image in the validation set, and the predicted probabilities look like a uniform distribution.
I read these links:
Wrong prediction on images
Why is Keras training well but returning wrong predictions?
Keras Val_acc is good but prediction for same data is poor
But I didn't find a solution!
from keras.models import Sequential,Model,load_model
from keras.optimizers import SGD
from keras.layers import BatchNormalization, Lambda, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.merge import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import os
import cv2
import numpy as np
class CarColorNet:

    def __init__(self, numClasses=6, imageWidth=256, imageHeight=256):
        self.classes = {}
        self.numClasses = numClasses
        self.imageWidth = imageWidth
        self.imageHeight = imageHeight

        input_image = Input(shape=(self.imageWidth, self.imageHeight, 3))

        # ------------------------------------ TOP BRANCH ------------------------------------
        # first top convolution layer
        top_conv1 = Convolution2D(filters=48, kernel_size=(11, 11), strides=(4, 4),
                                  input_shape=(self.imageWidth, self.imageHeight, 3), activation='relu')(input_image)
        top_conv1 = BatchNormalization()(top_conv1)
        top_conv1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_conv1)

        # second top convolution layer
        # split feature map by half
        top_top_conv2 = Lambda(lambda x: x[:, :, :, :24])(top_conv1)
        top_bot_conv2 = Lambda(lambda x: x[:, :, :, 24:])(top_conv1)

        top_top_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_top_conv2)
        top_top_conv2 = BatchNormalization()(top_top_conv2)
        top_top_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_top_conv2)

        top_bot_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_bot_conv2)
        top_bot_conv2 = BatchNormalization()(top_bot_conv2)
        top_bot_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_bot_conv2)

        # third top convolution layer
        # concat 2 feature maps
        top_conv3 = Concatenate()([top_top_conv2, top_bot_conv2])
        top_conv3 = Convolution2D(filters=192, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                  padding='same')(top_conv3)

        # fourth top convolution layer
        # split feature map by half
        top_top_conv4 = Lambda(lambda x: x[:, :, :, :96])(top_conv3)
        top_bot_conv4 = Lambda(lambda x: x[:, :, :, 96:])(top_conv3)

        top_top_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_top_conv4)
        top_bot_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_bot_conv4)

        # fifth top convolution layer
        top_top_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_top_conv4)
        top_top_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_top_conv5)

        top_bot_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                      padding='same')(top_bot_conv4)
        top_bot_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top_bot_conv5)

        # ------------------------------------ BOTTOM BRANCH ------------------------------------
        # first bottom convolution layer
        bottom_conv1 = Convolution2D(filters=48, kernel_size=(11, 11), strides=(4, 4),
                                     input_shape=(224, 224, 3), activation='relu')(input_image)
        bottom_conv1 = BatchNormalization()(bottom_conv1)
        bottom_conv1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_conv1)

        # second bottom convolution layer
        # split feature map by half
        bottom_top_conv2 = Lambda(lambda x: x[:, :, :, :24])(bottom_conv1)
        bottom_bot_conv2 = Lambda(lambda x: x[:, :, :, 24:])(bottom_conv1)

        bottom_top_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_top_conv2)
        bottom_top_conv2 = BatchNormalization()(bottom_top_conv2)
        bottom_top_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_top_conv2)

        bottom_bot_conv2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_bot_conv2)
        bottom_bot_conv2 = BatchNormalization()(bottom_bot_conv2)
        bottom_bot_conv2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_bot_conv2)

        # third bottom convolution layer
        # concat 2 feature maps
        bottom_conv3 = Concatenate()([bottom_top_conv2, bottom_bot_conv2])
        bottom_conv3 = Convolution2D(filters=192, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                     padding='same')(bottom_conv3)

        # fourth bottom convolution layer
        # split feature map by half
        bottom_top_conv4 = Lambda(lambda x: x[:, :, :, :96])(bottom_conv3)
        bottom_bot_conv4 = Lambda(lambda x: x[:, :, :, 96:])(bottom_conv3)

        bottom_top_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_top_conv4)
        bottom_bot_conv4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_bot_conv4)

        # fifth bottom convolution layer
        bottom_top_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_top_conv4)
        bottom_top_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_top_conv5)

        bottom_bot_conv5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation='relu',
                                         padding='same')(bottom_bot_conv4)
        bottom_bot_conv5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bottom_bot_conv5)

        # ---------------------------------- CONCATENATE TOP AND BOTTOM BRANCH ------------------------------------
        conv_output = Concatenate()([top_top_conv5, top_bot_conv5, bottom_top_conv5, bottom_bot_conv5])

        # Flatten
        flatten = Flatten()(conv_output)

        # Fully-connected layers
        FC_1 = Dense(units=4096, activation='relu')(flatten)
        FC_1 = Dropout(0.6)(FC_1)
        FC_2 = Dense(units=4096, activation='relu')(FC_1)
        FC_2 = Dropout(0.6)(FC_2)
        output = Dense(units=self.numClasses, activation='softmax')(FC_2)

        self.model = Model(inputs=input_image, outputs=output)

        sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        # sgd = SGD(lr=0.01, momentum=0.9, decay=0.0005, nesterov=True)
        self.model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    def train(self,
              pathToTrainSet,
              pathToValidSet,
              pathToSaveModel,
              epochs=7,
              batchSize=32,
              stepsPerEpoch=200,
              validationSteps=1000):

        fileOfWeights = 'color_weights.hdf5'
        checkpoint = ModelCheckpoint(os.path.join(pathToSaveModel, fileOfWeights),
                                     monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='max')
        checkpoint2 = ModelCheckpoint(os.path.join(pathToSaveModel, fileOfWeights),
                                      monitor='val_loss', verbose=1,
                                      save_best_only=True, mode='max')

        trainDataGen = ImageDataGenerator(rescale=1.0/255, shear_range=0.2,
                                          zoom_range=0.3, horizontal_flip=True)
        validDataGen = ImageDataGenerator(rescale=1.0/255)

        trainSet = trainDataGen.flow_from_directory(
            pathToTrainSet,
            target_size=(self.imageWidth, self.imageHeight),
            batch_size=batchSize,
            class_mode='categorical'
        )

        self.classes = {v: k for k, v in trainSet.class_indices.items()}
        np.save(os.path.join(pathToSaveModel, 'class_index.npy'), self.classes)

        validSet = validDataGen.flow_from_directory(
            pathToValidSet,
            target_size=(self.imageWidth, self.imageHeight),
            batch_size=batchSize,
            class_mode='categorical'
        )

        self.model.fit_generator(
            trainSet,
            steps_per_epoch=stepsPerEpoch,
            epochs=epochs,
            validation_data=validSet,
            validation_steps=validationSteps//batchSize,
            callbacks=[checkpoint, checkpoint2])

        print('============================ Saving is here ============================')
        self.model.save(os.path.join(pathToSaveModel, 'car_color_net.h5'))
    @staticmethod
    def load(pathToModel, pathToClassIndexes):
        model = load_model(pathToModel)
        layers = model.layers
        inputShape, outputShape = layers[0].input_shape, layers[-1].output_shape
        imageWidth, imageHeight = inputShape[1], inputShape[2]
        numClasses = outputShape[1]

        net = CarColorNet(numClasses, imageWidth, imageHeight)
        net.classes = np.load(os.path.join(pathToClassIndexes, 'class_index.npy')).item()

        return net
    def predictOneImage(self, pathToImage):
        frame = cv2.imread(pathToImage)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (self.imageWidth, self.imageHeight))
        frame = np.expand_dims(frame, axis=0)

        # cv2.imshow("boxed", frame[0, :, :, :])
        # cv2.waitKey(0)

        frame = np.asarray(frame, dtype='float32')
        img = frame/255

        probs = self.model.predict(img)
        ind = probs.argmax(axis=-1)[0]

        return self.classes[ind]
if __name__ == '__main__':
    pathToTrainSet = '/home/sergorl/cars/train'
    pathToValidSet = '/home/sergorl/cars/valid'
    pathToSaveModel = '/home/sergorl/cars'

    ## Train net
    # net = CarColorNet(numClasses=6)
    # net.train(pathToTrainSet, pathToValidSet, pathToSaveModel)

    # Test on all images from validSet
    net = CarColorNet.load(os.path.join(pathToSaveModel, 'car_color_net.h5'), pathToSaveModel)

    count, countTrueLabels = 0, 0

    for dirpath, _dirnames, filenames in os.walk(pathToValidSet):
        trueLabel = dirpath.split('/')[-1]
        for file in filenames:
            label = net.predictOneImage(os.path.join(dirpath, file))
            print(trueLabel, label)
            if label == trueLabel:
                countTrueLabels += 1
            count += 1

    print('rate is {0:.2f}'.format(float(countTrueLabels) / float(count) * 100))
Since I have a good val_acc=0.97, I expected the same (or nearly the same) result when testing every image in the validation set, but I always get wrong predictions!
I ran the net immediately after training finished and saw that learning had gone well:
if __name__ == '__main__':
    pathToTrainSet = '/home/sergorl/cars/train'
    pathToValidSet = '/home/sergorl/cars/valid'
    pathToSaveModel = '/home/sergorl/cars'

    # Train net
    net = CarColorNet(numClasses=6)
    net.train(pathToTrainSet, pathToValidSet, pathToSaveModel)

    # Test on all images from validSet
    count, countTrueLabels = 0, 0

    for dirpath, _dirnames, filenames in os.walk(pathToValidSet):
        trueLabel = dirpath.split('/')[-1]
        for file in filenames:
            label = net.predictOneImage(os.path.join(dirpath, file))
            print(trueLabel, label)
            if label == trueLabel:
                countTrueLabels += 1
            count += 1

    print('rate is {0:.2f}'.format(float(countTrueLabels) / float(count) * 100))
So it seems the problem is inside model.save, and it looks like saving doesn't work! I found many related issues on GitHub, for example:
https://github.com/keras-team/keras/issues/4875
https://github.com/keras-team/keras/issues/4904
But I don't know how to fix it with Python 3.7.3 and Keras 2.0.0.
Can you share more about the issue, like what output you are getting? From the code I can see that you are training for 6 classes and using categorical cross-entropy, so ideally you should get an array of 6 values, each between 0 and 1, with the index of the highest value in that array being the predicted class.
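A minimal sketch of that check (assuming `net` is the trained CarColorNet above and `img` is a preprocessed batch of one image, as in predictOneImage):

probs = net.model.predict(img)       # shape (1, 6): one probability per class
print(probs, probs.sum())            # softmax values in [0, 1], row sums to ~1
ind = int(probs.argmax(axis=-1)[0])  # index of the highest probability
print('predicted class:', net.classes[ind])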
I have a pandas dataframe containing filenames of positive and negative examples as below
img1 img2 y
001.jpg 002.jpg 1
003.jpg 004.jpg 0
003.jpg 002.jpg 1
I want to train my Siamese network using Keras ImageDataGenerator and flow_from_dataframe. How do I set up my training so that the code inputs 2 images with 1 label simultaneously?
Below is the code for my model
def siamese_model(input_shape):
    left = Input(input_shape)
    right = Input(input_shape)

    model = Sequential()
    model.add(Conv2D(32, (3,3), activation='relu', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3,3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3,3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3,3), activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3,3), activation='relu'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(512, activation='sigmoid'))

    left_encoded = model(left)
    right_encoded = model(right)

    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([left_encoded, right_encoded])

    prediction = Dense(1, activation='sigmoid')(L1_distance)
    siamese_net = Model(inputs=[left, right], outputs=prediction)
    return siamese_net
model = siamese_model((224,224,3))
model.compile(loss="binary_crossentropy",optimizer="adam", metrics=['accuracy'])
datagen_left = ImageDataGenerator(rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
vertical_flip = True)
datagen_right = ImageDataGenerator(rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
vertical_flip = True)
Join the generators in a custom generator. Make one of them output the desired labels and discard the label of the other:
from keras.utils import Sequence

class DoubleGenerator(Sequence):
    def __init__(self, gen1, gen2):
        self.gen1 = gen1
        self.gen2 = gen2

    def __len__(self):
        return len(self.gen1)

    def __getitem__(self, i):
        x1, y = self.gen1[i]
        x2, y2 = self.gen2[i]
        return (x1, x2), y
Use it:
double_gen = DoubleGenerator(datagen_left.flow_from_directory(...),
datagen_right.flow_from_directory(...))
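One caveat (an assumption not spelled out in the answer): the two inner generators must yield images in the same order, so create them with shuffle=False or an identical seed; otherwise the left/right pairs drift apart. Training is then a matter of passing the combined generator to fit_generator, roughly:

# Sketch: a Sequence can be passed straight to fit_generator in Keras 2.
model.fit_generator(double_gen,
                    steps_per_epoch=len(double_gen),
                    epochs=10)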
In my test, I do two-class segmentation in Keras to mask cloud in satellite images, with two samples for training, validation and testing, intentionally to overfit.
The predicted image is very strange, with stripes like this (right is the predicted label; left are the image and label):
My code is below. What's wrong with my code, or is this a problem with Keras?
#coding=utf-8
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization,Reshape,Permute,Activation
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import img_to_array
from keras.optimizers import SGD
from keras.optimizers import RMSprop,Adadelta,Adagrad,Adam
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import ModelCheckpoint,LearningRateScheduler
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
from libtiff import TIFF
from skimage import exposure
from keras import backend as k
from keras.callbacks import TensorBoard
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
set_session(tf.Session(config=config))
seed = 7
np.random.seed(seed)
#data_shape = 360*480
img_w = 256
img_h = 256
n_label = 2
classes = [0. , 1.]
labelencoder = LabelEncoder()
labelencoder.fit(classes)
def load_img(path, grayscale=False, target_size=None):
    img = Image.open(path)
    if grayscale:
        if img.mode != 'L':
            img = img.convert('L')
    if target_size:
        wh_tuple = (target_size[1], target_size[0])
        if img.size != wh_tuple:
            img = img.resize(wh_tuple)
    return img
train_url = open(r'/media/private/yanyuan/yan/image4band16bit/train104/test4.txt','r').readlines()
#trainval_url = open(r'/media/wmy/document/BigData/VOCdevkit/VOC2012/ImageSets/Segmentation/trainval.txt','r').readlines()
val_url = open(r'/media/private/yanyuan/yan/image4band16bit/train104/test4.txt','r').readlines()
train_numb = len(train_url)
valid_numb = len(val_url)
print "the number of train data is",train_numb
print "the number of val data is",valid_numb
directory = '/media/private/yanyuan/yan/image_block/image256/'
def generateData(batch_size):
    with open(r'/media/private/yanyuan/yan/image4band16bit/train104/test4.txt', 'r') as f:
        train_url = [line.strip('\n') for line in f.readlines()]
    while True:
        train_data = []
        train_label = []
        batch = 0
        for url in train_url:
            batch += 1
            # img = load_img(filepath + 'JPEGImages/' + url.strip('\n') + '.jpg', target_size=(img_w, img_h))
            # img = img_to_array(img)
            tif = TIFF.open(directory + 'images/' + url + '.tiff')
            img = tif.read_image()
            mean_vec = np.array([456,495,440,446], dtype=np.float32)
            mean_vec = mean_vec.reshape(1,1,4)
            TIFF.close(tif)
            img = np.array(img, dtype=np.float32)
            img = img - mean_vec
            img *= 1.525902189e-5
            # print(img.shape)
            train_data.append(img)
            # label = load_img(filepath + 'SegmentationClass/' + url.strip('\n') + '.png', target_size=(img_w, img_h))
            label = load_img(directory + 'labels/' + url + '.png', target_size=(img_w, img_h))
            label = img_to_array(label).reshape((img_w * img_h,))
            # print(label.shape)
            train_label.append(label)
            if batch % batch_size == 0:
                train_data = np.array(train_data)
                train_label = np.array(train_label).flatten()
                train_label = labelencoder.transform(train_label)
                train_label = to_categorical(train_label, num_classes=n_label)
                train_label = train_label.reshape((batch_size, img_w * img_h, n_label))
                yield (train_data, train_label)
                train_data = []
                train_label = []
                batch = 0
def generateValidData(batch_size):
    with open(r'/media/private/yanyuan/yan/image4band16bit/train104/test4.txt', 'r') as f:
        val_url = [line.strip('\n') for line in f.readlines()]
    while True:
        valid_data = []
        valid_label = []
        batch = 0
        for url in val_url:
            batch += 1
            # img = load_img(filepath + 'JPEGImages/' + url.strip('\n') + '.jpg', target_size=(img_w, img_h))
            # img = img_to_array(img)
            tif = TIFF.open(directory + 'images/' + url + '.tiff')
            img = tif.read_image()
            mean_vec = np.array([456,495,440,446], dtype=np.float32)
            mean_vec = mean_vec.reshape(1,1,4)
            TIFF.close(tif)
            img = np.array(img, dtype=np.float32)
            img = img - mean_vec
            img *= 1.525902189e-5
            # print(img.shape)
            valid_data.append(img)
            label = load_img(directory + 'labels/' + url + '.png', target_size=(img_w, img_h))
            label = img_to_array(label).reshape((img_w * img_h,))
            # print(label.shape)
            valid_label.append(label)
            if batch % batch_size == 0:
                valid_data = np.array(valid_data)
                valid_label = np.array(valid_label).flatten()
                valid_label = labelencoder.transform(valid_label)
                valid_label = to_categorical(valid_label, num_classes=n_label)
                valid_label = valid_label.reshape((batch_size, img_w * img_h, n_label))
                yield (valid_data, valid_label)
                valid_data = []
                valid_label = []
                batch = 0
def SegNet():
    model = Sequential()
    # encoder
    model.add(Conv2D(64, (7, 7), strides=(1, 1), input_shape=(img_w, img_h, 4), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (128,128)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (64,64)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (32,32)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # (16,16)
    # decoder
    model.add(UpSampling2D(size=(2, 2)))
    # (16,16)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    # (32,32)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    # (64,64)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    # (128,128)
    model.add(Conv2D(64, (7, 7), strides=(1, 1), padding='same'))
    model.add(BatchNormalization())
    # (256,256)
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Reshape((n_label, img_w*img_h)))
    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.1, momentum=0.95, decay=0.0005, nesterov=False)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.0005)
    # model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
    model.summary()
    return model
def train():
    k.set_learning_phase(1)
    model = SegNet()
    modelcheck = ModelCheckpoint('Segnet_params.h5', monitor='val_acc', save_best_only=True, mode='auto')
    callable = [modelcheck]
    history = model.fit_generator(verbose=2,
                                  generator=generateData(2),
                                  validation_data=generateValidData(2),
                                  steps_per_epoch=1,
                                  epochs=600,
                                  callbacks=callable,
                                  max_queue_size=1,
                                  class_weight=None,
                                  validation_steps=1)
    drawLoss(history)
def drawLoss(history):
    plt.figure()
    plt.plot(history.history['acc'], 'g')
    plt.plot(history.history['val_acc'], 'r')
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()

    # summarize history for loss
    plt.figure()
    plt.plot(history.history['loss'], 'g')
    plt.plot(history.history['val_loss'], 'r')
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
def predict():
    k.set_learning_phase(0)
    model = SegNet()
    model.load_weights('Segnet_params.h5')
    file = open(r'/media/private/yanyuan/yan/image4band16bit/train104/test4.txt', 'r')
    train_url = [line.strip('\n') for line in file.readlines()]
    for url in train_url:
        tif = TIFF.open(directory + 'images/' + url + '.tiff')
        img = tif.read_image()
        TIFF.close(tif)
        mean_vec = np.array([456,495,440,446], dtype=np.float32)
        mean_vec = mean_vec.reshape(1,1,4)
        img = np.array(img, dtype=np.float32)
        img = img - mean_vec
        img *= 1.525902189e-5
        im = np.empty_like(img)
        for j in range(4):
            l_val, r_val = np.percentile(img[:,:,j], (2,98), interpolation='linear')
            im[:,:,j] = exposure.rescale_intensity(img[:,:,j], in_range=(l_val, r_val), out_range='uint8')
        im = im[:,:,(2,1,0)]
        im = im.astype(np.uint8)
        img = img.reshape(1, img_h, img_w, -1)
        pred = model.predict_classes(img, verbose=2)
        pred = labelencoder.inverse_transform(pred[0])
        print(np.unique(pred))
        pred = pred.reshape((img_h, img_w)).astype(np.uint8)
        pred_img = Image.fromarray(pred)
        # pred_img.save('1.png', format='png')
        label = load_img(directory + 'labels/' + url + '.png', target_size=(img_w, img_h))
        # print(pred)
        plt.figure()
        plt.subplot(2,2,1)
        plt.imshow(im)
        plt.subplot(2,2,2)
        plt.imshow(pred)
        plt.subplot(2,2,3)
        plt.imshow(label)
        plt.show()
if __name__ == '__main__':
    train()
    predict()
Thanks in advance.