I came across some references and research papers and, taking an idea from one of them, decided to implement the same (see the image reference). Here we input a 2D image and the model outputs a 3D model of the same object.
The network code I have written is as follows:
image = Input(shape=(None, None, 3))

# Encoder
l1 = Conv2D(64, (3, 3), strides=2, padding='same', activation='leaky_relu')(image)
l2 = MaxPooling2D()(l1)
l3 = Conv2D(32, (5, 5), strides=2, padding='same', activation='leaky_relu')(l2)
l4 = MaxPooling2D(padding='same')(l3)
l5 = Conv2D(16, (7, 7), strides=2, padding='same', activation='leaky_relu')(l4)
l6 = MaxPooling2D(padding='same')(l5)
l7 = Conv2D(8, (5, 5), strides=2, padding='same', activation='leaky_relu')(l6)
l8 = MaxPooling2D(padding='same')(l7)
l9 = Conv2D(4, (3, 3), strides=2, padding='same', activation='leaky_relu')(l8)
l10 = MaxPooling2D(padding='same')(l9)
l11 = Conv2D(2, (4, 4), strides=2, padding='same', activation='leaky_relu')(l10)
l12 = MaxPooling2D(padding='same')(l11)
l13 = Conv2D(1, (2, 2), strides=2, padding='same', activation='leaky_relu')(l12)

# latent variable z
l14 = Reshape((60, 512))(l13)
print(l14.shape)  # --> (None, 60, 512)
l15 = Dense(512, activation='leaky_relu')(l14)
print(l15.shape)  # --> (None, 60, 512)
l16 = Dense(128, activation='leaky_relu')(l15)
print(l16.shape)  # --> (None, 60, 128)
l17 = Reshape((60, 128))(l16)
print(l17.shape)  # --> (None, 60, 128)

# Decoder
l18 = UpSampling3D(size=(3, 3, 3))(l17)  # --> throws IndexError: list index out of range
l19 = Conv3DTranspose(60, (8, 8, 8), strides=64, padding='same', activation='leaky_relu')(l17)
l20 = UpSampling3D((3, 3, 3))(l19)
l21 = Conv3DTranspose(60, (16, 16, 16), strides=32, padding='same', activation='leaky_relu')(l20)
l22 = UpSampling3D((3, 3, 3))(l21)
l23 = Conv3DTranspose(60, (32, 32, 32), strides=32, padding='same', activation='leaky_relu')(l22)
l24 = UpSampling3D((3, 3, 3))(l23)
l25 = Conv3DTranspose(60, (64, 64, 64), strides=24, padding='same', activation='leaky_relu')(l24)
l26 = UpSampling3D((3, 3, 3))(l25)
l27 = Conv3DTranspose(60, (64, 64, 64), strides=1, padding='same', activation='leaky_relu')(l26)

model3D = Model(image, l27)
This is giving me endless errors. I solved some of them initially, but I'm badly stuck on this one: the error occurs at the UpSampling3D call applied to l17, and says:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
/tmp/ipykernel_33/907378238.py in <module>
27
28 #Decoder
---> 29 l18 = UpSampling3D(size = (3,3,3))(l17) #-->throws error->IndexError: list index out of range
30 l19 = Conv3DTranspose(60, (8, 8, 8), strides = (64), padding='same', activation = 'leaky_relu') (l17)
31 l20 = UpSampling3D((3,3,3))(l19)
/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
975 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
976 return self._functional_construction_call(inputs, args, kwargs,
--> 977 input_list)
978
979 # Maintains info about the `Layer.call` stack.
/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1113 # Check input assumptions set after layer building, e.g. input shape.
1114 outputs = self._keras_tensor_symbolic_call(
-> 1115 inputs, input_masks, args, kwargs)
1116
1117 if outputs is None:
/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
846 return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
847 else:
--> 848 return self._infer_output_signature(inputs, args, kwargs, input_masks)
849
850 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
/opt/conda/lib/python3.7/site-packages/keras/engine/base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
886 self._maybe_build(inputs)
887 inputs = self._maybe_cast_inputs(inputs)
--> 888 outputs = call_fn(inputs, *args, **kwargs)
889
890 self._handle_activity_regularization(inputs, outputs)
/opt/conda/lib/python3.7/site-packages/keras/layers/convolutional.py in call(self, inputs)
2720 def call(self, inputs):
2721 return backend.resize_volumes(
-> 2722 inputs, self.size[0], self.size[1], self.size[2], self.data_format)
2723
2724 def get_config(self):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/opt/conda/lib/python3.7/site-packages/keras/backend.py in resize_volumes(x, depth_factor, height_factor, width_factor, data_format)
3215 output = repeat_elements(x, depth_factor, axis=1)
3216 output = repeat_elements(output, height_factor, axis=2)
-> 3217 output = repeat_elements(output, width_factor, axis=3)
3218 return output
3219 else:
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/opt/conda/lib/python3.7/site-packages/keras/backend.py in repeat_elements(x, rep, axis)
3248 x_shape = x.shape.as_list()
3249 # For static axis
-> 3250 if x_shape[axis] is not None:
3251 # slices along the repeat axis
3252 splits = tf.split(value=x,
IndexError: list index out of range
At this point I seem to be directionless; any help would be really appreciated. Thanks in advance.
The shape of l16 is:
l16.shape
TensorShape([None, 60, 8192])
and now you want to change the shape [60, 8192] into the shape [4, 4, 4, 128] with the call Reshape((4,4,4,128))(l16). But 60 * 8192 = 491520, while 4 * 4 * 4 * 128 = 8192, so those two shapes are incompatible (491520 != 8192). That's why the error message correctly states:
ValueError: total size of new array must be unchanged, input_shape = [60, 8192], output_shape = [4, 4, 4, 128]
The total number of cells must be the same before and after a reshape. E.g., you can change a (4,) tensor into a (2,2) tensor, but not into, say, a (3,2) tensor.
The origin lies with l14, which you give the shape [60, 512]:
l14.shape
TensorShape([None, 60, 512])
Now, when you apply a Dense layer to a 2-dimensional shape like this, it is applied to the last dimension only, i.e. the first dimension of the shape stays the same. That is why l15 still has the shape [60, 512]:
l15.shape
TensorShape([None, 60, 512])
Similarly, l16 will have the shape [60, 128 * 4 * 4 * 4] = [60, 8192]. This is then the input to the line for l17, where Reshape chokes as explained above.
Related
I am currently trying to fit my customised CNN model (AlexNet) with an input shape of (224, 224, 1), as my images are 224 x 224 and black and white.
This is where I load the data and get the dataset sizes, such as the number of samples and features, the height and width of the images, and finally the number of classes:
lfw_people = fetch_lfw_people(min_faces_per_person = 70, resize = 2.39)
n_samples, h, w = lfw_people.images.shape
X = lfw_people.data
n_features = X.shape[1]
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
After that, I split the data using train_test_split and reshaped it to the height and width of the images, then reduced the height to the same length as the width, which makes it 224 x 224 (a sketch of this step is shown after the counts below). The resulting counts of y_train and y_test are:
y_train Count: Counter({3: 384, 1: 176, 6: 108, 2: 94, 4: 84, 0: 64, 5: 56})
y_test Count: Counter({3: 146, 1: 60, 6: 36, 2: 27, 4: 25, 5: 15, 0: 13})
and then I convert both y_train and y_test to categorical with 7 classes:
y_train = to_categorical(
y_train,
num_classes = len(set(y)),
dtype = 'uint8'
)
y_test = to_categorical(
y_test,
num_classes = len(set(y)),
dtype = 'uint8'
)
Here is the code for my model, which has 8 layers in total:
model = Sequential()
# 1st Convolutional Layer
model.add(Conv2D(filters = 96, input_shape = (224, 224, 1),
kernel_size = (11, 11), strides = (4, 4),
padding = 'valid'))
model.add(Activation('relu'))
#Max-Pooling
model.add(MaxPooling2D(pool_size = (2, 2),
strides = (2, 2), padding = 'valid'))
# Batch Normalisation
model.add(BatchNormalization())
# 2nd Convolutional Layer
model.add(Conv2D(filters = 256, kernel_size = (11, 11),
strides = (1, 1), padding = 'valid'))
model.add(Activation('relu'))
# Max-Pooling
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Convolutional Layer
model.add(Conv2D(filters = 384, kernel_size = (3, 3),
strides = (1, 1), padding = 'valid'))
model.add(Activation('relu'))
# Batch Normalization
model.add(BatchNormalization())
# 4th Convolutional Layer
model.add(Conv2D(filters = 384, kernel_size = (3, 3),
strides = (1, 1), padding = 'valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 5th Convolutional Layer
model.add(Conv2D(filters = 256, kernel_size = (3, 3),
strides = (1, 1), padding = 'valid'))
model.add(Activation('relu'))
# Max-pooling
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2),
padding = 'valid'))
# Batch Normalisation
model.add(BatchNormalization())
# Flattening
model.add(Flatten())
# 1st Dense Layer
model.add(Dense(4096))  # input_shape on a non-first layer is ignored by Keras
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
#Add Dropout
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# output softmax layer
model.add(Dense(7))
model.add(Activation('softmax'))
This is my augmentation, where I generate additional images:
# Data Augmentation
datagen = ImageDataGenerator(
    featurewise_center = True,             # set input mean to 0 over the dataset
    samplewise_center = True,              # set each sample mean to 0
    featurewise_std_normalization = True,  # divide inputs by the std of the dataset
    samplewise_std_normalization = True,   # divide each input by its own std
    zca_whitening = False,                 # no ZCA whitening
    rotation_range = 20,                   # randomly rotate images by up to 20 degrees
    zoom_range = 0.1,                      # randomly zoom images by up to 10%
    width_shift_range = 0.2,               # randomly shift images horizontally by up to 20%
    height_shift_range = 0.2,              # randomly shift images vertically by up to 20%
    horizontal_flip = True,                # randomly flip images horizontally
    vertical_flip = 0.8                    # treated as a boolean flag, so this enables random vertical flips
)
history = model.fit(datagen.flow(X_train, y_train, batch_size = batch_size),
epochs = 100, validation_data = (X_test, y_test),
steps_per_epoch = X_train.shape[0] // batch_size,
verbose = 0)
After I ran the model, it gave me a validation accuracy of 0.45, and this is my confusion matrix, which tells me that it keeps predicting class 3:
  |   0   1   2   3   4   5   6
--+----------------------------
0 |   0   0   0  13   0   0   0
1 |   0   0   0  60   0   0   0
2 |   0   0   0  27   0   0   0
3 |   0   0   0 146   0   0   0
4 |   0   0   0  25   0   0   0
5 |   0   0   0  15   0   0   0
6 |   0   0   0  36   0   0   0
So how can I make it predict classes other than 3?
My data has the following shapes:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
(942, 32, 32, 1) (236, 32, 32, 1) (942, 3, 3) (236, 3, 3)
And whenever I try to run my CNN I get the following error:
from tensorflow.keras import layers
from tensorflow.keras import Model
img_input = layers.Input(shape=(32, 32, 1))
x = layers.Conv2D(16, (3,3), activation='relu', strides = 1, padding = 'same')(img_input)
x = layers.Conv2D(32, (3,3), activation='relu', strides = 2)(x)
x = layers.Conv2D(128, (3,3), activation='relu', strides = 2)(x)
x = layers.MaxPool2D(pool_size=2)(x)
x = layers.Conv2D(3, 3, activation='linear', strides = 2)(x)
output = layers.Flatten()(x)
model = Model(img_input, output)
model.summary()
model.compile(loss='mean_squared_error',optimizer= 'adam', metrics=['mse'])
history = model.fit(X_train,Y_train,validation_data=(X_test, Y_test), epochs = 100,verbose=1)
Error:
InvalidArgumentError: Incompatible shapes: [32,3] vs. [32,3,3]
[[node BroadcastGradientArgs_2 (defined at /usr/local/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py:1751) ]] [Op:__inference_distributed_function_7567]
Function call stack:
distributed_function
What am I missing here?
You don't handle the dimensionality inside your network properly. First, expand the dimensions of your y to get it into the format (n_sample, 3, 3, 1). Then adjust the network accordingly (I removed the flatten and max-pooling layers and adjusted the last conv output):
import numpy as np
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras.models import Model

# create dummy data
n_sample = 10
X = np.random.uniform(0,1, (n_sample, 32, 32, 1))
y = np.random.uniform(0,1, (n_sample, 3, 3))
# expand y dim
y = y[...,np.newaxis]
print(X.shape, y.shape)
img_input = Input(shape=(32, 32, 1))
x = Conv2D(16, (3,3), activation='relu', strides = 1, padding = 'same')(img_input)
x = Conv2D(32, (3,3), activation='relu', strides = 2)(x)
x = Conv2D(128, (3,3), activation='relu', strides = 2)(x)
x = Conv2D(1, (3,3), activation='linear', strides = 2)(x)
model = Model(img_input, x)
model.summary()
model.compile(loss='mean_squared_error',optimizer= 'adam', metrics=['mse'])
model.fit(X,y, epochs=3)
I have a dataset where the x_train shape is (34650, 10, 1), the y_train shape is (34650,), the x_test shape is (17067, 10, 1), and the y_test shape is (17067,).
I am making a simple CNN model:
input_layer = Input(shape=(10, 1))
conv2 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(input_layer)
pool1 = MaxPooling1D(pool_size=1)(conv2)
drop1 = Dropout(0.5)(pool1)
pool2 = MaxPooling1D(pool_size=1)(drop1)
conv3 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool2)
drop2 = Dropout(0.5)(conv3)
conv4 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(drop2)
pool3 = MaxPooling1D(pool_size=1)(conv4)
conv5 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool3)
output_layer = Dense(1, activation='sigmoid')(conv5)
model_2 = Model(inputs=input_layer, outputs=output_layer)
But when I try to fit the model
model_2.compile(loss='mse',optimizer='adam')
model_2 = model_2.fit(x_train, y_train,
batch_size=128,
epochs=2,
verbose=1,
validation_data=(x_test, y_test))
I get this error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-177-aee9b3241a20> in <module>()
4 epochs=2,
5 verbose=1,
----> 6 validation_data=(x_test, y_test))
2 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
133 ': expected ' + names[i] + ' to have ' +
134 str(len(shape)) + ' dimensions, but got array '
--> 135 'with shape ' + str(data_shape))
136 if not check_batch_axis:
137 data_shape = data_shape[1:]
ValueError: Error when checking target: expected dense_14 to have 3 dimensions, but got array with shape (34650, 1)
The shapes of x_train and x_test are already 3-dimensional, so why is it showing this error?
This is because your network's output is 3D while your target is 2D. Inside your network there isn't anything that takes you from 3D down to 2D; to do this you can use global pooling or flatten. Below is an example:
import numpy as np
from tensorflow.keras.layers import (Input, Conv1D, MaxPooling1D, Dropout,
                                     GlobalMaxPool1D, Dense)
from tensorflow.keras.models import Model

n_sample = 100
X = np.random.uniform(0,1, (n_sample,10,1))
y = np.random.randint(0,2, n_sample)
input_layer = Input(shape=(10, 1))
conv2 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(input_layer)
pool1 = MaxPooling1D(pool_size=1)(conv2)
drop1 = Dropout(0.5)(pool1)
pool2 = MaxPooling1D(pool_size=1)(drop1)
conv3 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool2)
drop2 = Dropout(0.5)(conv3)
conv4 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(drop2)
pool3 = MaxPooling1D(pool_size=1)(conv4)
conv5 = Conv1D(filters=64,
kernel_size=3,
strides=1,
activation='relu')(pool3)
x = GlobalMaxPool1D()(conv5) # =====> from 3D to 2D (GlobalAveragePooling1D or Flatten also work)
output_layer = Dense(1, activation='sigmoid')(x)
model_2 = Model(inputs=input_layer, outputs=output_layer)
model_2.compile('adam', 'binary_crossentropy')
model_2.fit(X,y, epochs=3)
I am facing an error when plotting my confusion matrix. I pass the true labels and my predicted labels to the confusion-matrix function, but it gives me a ValueError about an inconsistent number of samples.
The shapes of my data are below:
Training Data Shape (4162, 224, 224, 3)
Training Data Labels Shape (4162, 5)
Testing Data Shape (3921, 224, 224, 3)
Testing Data Labels Shape (3921, 5)
The predicted labels are a bit ugly because I only ran 2 epochs; I just wanted to plot the confusion matrix first, so that's why.
predictingimage = "D:/compCarsThesisData/data/image/78/3/2010/0ba8d018cdc994.jpg" #67/1698/2010/6805eb92ac6c70.jpg"
predictImageRead = mpg.imread(predictingimage)
resizingImage = cv2.cv2.resize(predictImageRead,(224,224))
reshapedFinalImage = np.expand_dims(resizingImage, axis=0)
npimage = np.asarray(reshapedFinalImage)
m = model.predict(npimage)
print(m)
[array([[0.02502811, 0.01959323, 0.6556284 , 0.26472655, 0.03502375]],
dtype=float32), array([[5.8234303e-04, 3.1917400e-04, 9.4957882e-01, 1.8873921e-02,
3.0645736e-02]], dtype=float32), array([[0.02581117, 0.04752538, 0.81816435, 0.04812173, 0.06037736]],
dtype=float32)]
cm = confusion_matrix(train_labels_Encode,m)
plt.imshow(cm)
plt.show()
ERROR
Traceback (most recent call last):
File "d:/ThesisWork/seriouswork/Inception_SVM_CompCarsGoogleNetArchitecture.py", line 299, in <module>
cm = confusion_matrix(train_labels_hotEncode,n)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\metrics\classification.py", line 253, in confusion_matrix
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\metrics\classification.py", line 71, in _check_targets
check_consistent_length(y_true, y_pred)
File "C:\Users\zeele\Miniconda3\lib\site-packages\sklearn\utils\validation.py", line 235, in check_consistent_length
" samples: %r" % [int(l) for l in lengths])
ValueError: Found input variables with inconsistent numbers of samples: [4162, 3]
Classifier Code:
X_train = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TrainingData_Top5.npy')#('D:/ThesisWork/S_224_Training_data.npy')#training_images
X_test = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TrainingLabels_Top5.npy')#('D:/ThesisWork/S_224_Training_labels.npy')#training_labels
y_train = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TestingData_Top5.npy')#('D:/ThesisWork/S_224_Testing_data.npy')#testing_images
y_test = np.load('D:/Inception_preprocessed_data_Labels_2004/Top5/TestingLabels_Top5.npy')#('D:/ThesisWork/S_224_Testing_labels.npy')#testing_labels
print(X_test)
le = preprocessing.LabelEncoder()
le.fit(X_test)
transform_trainLabels = le.transform(X_test)
print(transform_trainLabels)
print(le.inverse_transform(transform_trainLabels))
train_labels_hotEncode = np_utils.to_categorical(transform_trainLabels,len(set(transform_trainLabels)))
shuffle(X_train)
shuffle(train_labels_hotEncode)
le2 = preprocessing.LabelEncoder()
le2.fit(y_test)
transform_testLabels = le2.transform(y_test)
test_labels_hotEncode = np_utils.to_categorical(transform_testLabels,len(set(transform_testLabels)))
print(test_labels_hotEncode.shape)
shuffle(y_train)
shuffle(test_labels_hotEncode)
# print(train_labels_hotEncode[3000])
# exit()
# X_train = np.asarray(X_train / 255.0)
# y_train = np.asarray(y_train / 255.0)
# print("X_Training" ,X_train.shape, X_train)
# print("X_TEST", X_test.shape)
# print("Y_train", y_train.shape)
# print("y_test", y_test.shape)
# exit()
# plt.imshow(X_train[1])
# print(X_test)
# plt.imshow(y_train[1])
# print(y_test)
# plt.show()
print("Trainig Data Shape",X_train.shape)
print("Training Data Labels Shape",train_labels_hotEncode.shape)
print("Testing Data Shape", y_train.shape)
print("Testing Data Labels Shape", test_labels_hotEncode.shape)
# X_train = np.array(X_train).astype(np.float32)
# y_train = np.array(y_train).astype(np.float32)
def inception_module(image,
filters_1x1,
filters_3x3_reduce,
filter_3x3,
filters_5x5_reduce,
filters_5x5,
filters_pool_proj,
name=None):
conv_1x1 = Conv2D(filters_1x1, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer= bias_init)(image)
conv_3x3 = Conv2D(filters_3x3_reduce, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer= bias_init)(image)
conv_3x3 = Conv2D(filter_3x3,(3,3), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_3x3)
conv_5x5 = Conv2D(filters_5x5_reduce,(1,1), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer= bias_init)(image)
conv_5x5 = Conv2D(filters_5x5, (3,3), padding='same', activation='relu',kernel_initializer=kernel_init, bias_initializer=bias_init)(conv_5x5)
pool_proj = MaxPool2D((3,3), strides=(1,1), padding='same')(image)
pool_proj = Conv2D(filters_pool_proj, (1,1), padding='same', activation='relu', kernel_initializer=kernel_init, bias_initializer= bias_init)(pool_proj)
output = concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj], axis=3, name=name)
return output
kernel_init = keras.initializers.glorot_uniform()
bias_init = keras.initializers.Constant(value=0.2)
# IMG_SIZE = 64
input_layer = Input(shape=(224,224,3))
image = Conv2D(64,(7,7),padding='same', strides=(2,2), activation='relu', name='conv_1_7x7/2', kernel_initializer=kernel_init, bias_initializer=bias_init)(input_layer)
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_1_3x3/2')(image)
image = Conv2D(64, (1,1), padding='same', strides=(1,1), activation='relu', name='conv_2a_3x3/1' )(image)
image = Conv2D(192, (3,3), padding='same', strides=(1,1), activation='relu', name='conv_2b_3x3/1')(image)
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_2_3x3/2')(image)
image = inception_module(image,
filters_1x1= 64,
filters_3x3_reduce= 96,
filter_3x3 = 128,
filters_5x5_reduce=16,
filters_5x5= 32,
filters_pool_proj=32,
name='inception_3a')
image = inception_module(image,
filters_1x1=128,
filters_3x3_reduce=128,
filter_3x3=192,
filters_5x5_reduce=32,
filters_5x5=96,
filters_pool_proj=64,
name='inception_3b')
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_3_3x3/2')(image)
image = inception_module(image,
filters_1x1=192,
filters_3x3_reduce=96,
filter_3x3=208,
filters_5x5_reduce=16,
filters_5x5=48,
filters_pool_proj=64,
name='inception_4a')
image1 = AveragePooling2D((5,5), strides=3)(image)
image1 = Conv2D(128, (1,1), padding='same', activation='relu')(image1)
image1 = Flatten()(image1)
image1 = Dense(1024, activation='relu')(image1)
image1 = Dropout(0.7)(image1)
image1 = Dense(5, activation='softmax', name='auxilliary_output_1')(image1)
image = inception_module(image,
filters_1x1 = 160,
filters_3x3_reduce= 112,
filter_3x3= 224,
filters_5x5_reduce= 24,
filters_5x5= 64,
filters_pool_proj=64,
name='inception_4b')
image = inception_module(image,
filters_1x1= 128,
filters_3x3_reduce = 128,
filter_3x3= 256,
filters_5x5_reduce= 24,
filters_5x5=64,
filters_pool_proj=64,
name='inception_4c')
image = inception_module(image,
filters_1x1=112,
filters_3x3_reduce=144,
filter_3x3= 288,
filters_5x5_reduce= 32,
filters_5x5=64,
filters_pool_proj=64,
name='inception_4d')
image2 = AveragePooling2D((5,5), strides=3)(image)
image2 = Conv2D(128, (1,1), padding='same', activation='relu')(image2)
image2 = Flatten()(image2)
image2 = Dense(1024, activation='relu')(image2)
image2 = Dropout(0.7)(image2) #Changed from 0.7
image2 = Dense(5, activation='softmax', name='auxilliary_output_2')(image2)
image = inception_module(image,
filters_1x1=256,
filters_3x3_reduce=160,
filter_3x3=320,
filters_5x5_reduce=32,
filters_5x5=128,
filters_pool_proj=128,
name= 'inception_4e')
image = MaxPool2D((3,3), padding='same', strides=(2,2), name='max_pool_4_3x3/2')(image)
image = inception_module(image,
filters_1x1=256,
filters_3x3_reduce=160,
filter_3x3= 320,
filters_5x5_reduce=32,
filters_5x5= 128,
filters_pool_proj=128,
name='inception_5a')
image = inception_module(image,
filters_1x1=384,
filters_3x3_reduce=192,
filter_3x3=384,
filters_5x5_reduce=48,
filters_5x5=128,
filters_pool_proj=128,
name='inception_5b')
image = GlobalAveragePooling2D(name='avg_pool_5_3x3/1')(image)
image = Dropout(0.7)(image)
image = Dense(5, activation='softmax', name='output')(image)
model = Model(input_layer, [image,image1,image2], name='inception_v1')
model.summary()
epochs = 2
initial_lrate = 0.001 # Changed From 0.01
def decay(epoch, steps=100):
initial_lrate = 0.01
drop = 0.96
epochs_drop = 8
lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop))#
return lrate
sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# nadam = keras.optimizers.Nadam(lr= 0.002, beta_1=0.9, beta_2=0.999, epsilon=None)
# keras
lr_sc = LearningRateScheduler(decay)
# rms = keras.optimizers.RMSprop(lr = initial_lrate, rho=0.9, epsilon=1e-08, decay=0.0)
# ad = keras.optimizers.adam(lr=initial_lrate)
model.compile(loss=['categorical_crossentropy', 'categorical_crossentropy','categorical_crossentropy'],loss_weights=[1,0.3,0.3], optimizer='sgd', metrics=['accuracy'])
# loss = 'categorical_crossentropy', 'categorical_crossentropy','categorical_crossentropy'
history = model.fit(X_train, [train_labels_hotEncode,train_labels_hotEncode,train_labels_hotEncode], validation_split=0.3,shuffle=True,epochs=epochs, batch_size= 32, callbacks=[lr_sc]) # batch size changed from 256 or 64 to 16(y_train,[y_test,y_test,y_test])
# validation_data=(y_train,[test_labels_hotEncode,test_labels_hotEncode,test_labels_hotEncode]), validation_data= (X_train, [train_labels_hotEncode,train_labels_hotEncode,train_labels_hotEncode]),
print(history.history.keys())
plt.plot(history.history['output_acc'])
plt.plot(history.history['val_output_acc'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'],loc = 'upper left')
plt.show()
# predictingimage = "D:/compCarsThesisData/data/image/78/3/2010/0ba8d018cdc994.jpg" #67/1698/2010/6805eb92ac6c70.jpg"
predictImageRead = X_train
# resizingImage = cv2.cv2.resize(predictImageRead,(224,224))
# reshapedFinalImage = np.expand_dims(predictImageRead, axis=0)
# print(reshapedFinalImage.shape)
# npimage = np.array(reshapedFinalImage)
m = model.predict(predictImageRead)
print(m)
print(predictImageRead.shape)
print(train_labels_hotEncode)
# print(m.shape)
plt.imshow(predictImageRead[1])
plt.show()
# n = np.argmax(m,axis=-1)
# n = np.array(m)
print(confusion_matrix(X_test,m[0]))
cm = confusion_matrix(X_test,m[0])
plt.imshow(cm)
plt.show()
Please guide me through this.
Thanks!
If you want to compute a confusion matrix on your training data, you have to make your model predict all your training examples, roughly like this:
m = model.predict(train_data) # train_data should have the shape (4162, 224, 224, 3)
m should then have a length of 4162 and you can plot the confusion matrix like this:
cm = confusion_matrix(train_labels_Encode, m)
plt.imshow(cm)
plt.show()
I have a problem. I want to make a 3D convolutional U-net, and for this purpose I'm using Keras.
My data are MRI images from the Data Science Bowl 2017 competition. All MRIs were saved in numpy arrays (all pixels scaled from 0 to 1) with shape:
data_ch.shape
(94, 50, 50, 50, 1)
94 patients, 50 MRI slices of 50x50 images, 1 channel.
I want to make a 3D convolutional U-net, so the inputs and outputs of this net are the same 3D arrays.
The 3D U-net:
input_img= Input(shape=(data_ch.shape[1], data_ch.shape[2], data_ch.shape[3], data_ch.shape[4]))
x=Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', padding='same')(input_img)
x=MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)
x=Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', padding='same')(x)
x=MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)
x=UpSampling3D(size=(2, 2, 2))(x)
x=Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', padding='same')(x) # PADDING IS NOT THE SAME!!!!!
x=UpSampling3D(size=(2, 2, 2))(x)
x=Conv3D(filters=1, kernel_size=(3, 3, 3), activation='sigmoid')(x)
model=Model(input_img, x)
model.compile(optimizer='adadelta', loss='binary_crossentropy')
model.summary()
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) (None, 50, 50, 50, 1) 0
_________________________________________________________________
conv3d_27 (Conv3D) (None, 50, 50, 50, 8) 224
_________________________________________________________________
max_pooling3d_12 (MaxPooling (None, 25, 25, 25, 8) 0
_________________________________________________________________
conv3d_28 (Conv3D) (None, 25, 25, 25, 8) 1736
_________________________________________________________________
max_pooling3d_13 (MaxPooling (None, 13, 13, 13, 8) 0
_________________________________________________________________
up_sampling3d_12 (UpSampling (None, 26, 26, 26, 8) 0
_________________________________________________________________
conv3d_29 (Conv3D) (None, 26, 26, 26, 8) 1736
_________________________________________________________________
up_sampling3d_13 (UpSampling (None, 52, 52, 52, 8) 0
_________________________________________________________________
conv3d_30 (Conv3D) (None, 50, 50, 50, 1) 217
=================================================================
Total params: 3,913
Trainable params: 3,913
Non-trainable params: 0
But, when I attempted to fit data to this net:
model.fit(data_ch, data_ch, epochs=1, batch_size=10, shuffle=True, verbose=1)
the program displayed an error:
ValueError Traceback (most recent call last)
C:\Users\Taranov\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
883 outputs =\
--> 884 self.fn() if output_subset is None else\
885 self.fn(output_subset=output_subset)
ValueError: CudaNdarray_CopyFromCudaNdarray: need same dimensions for dim 1, destination=13, source=14
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-26-b334d38d9608> in <module>()
----> 1 model.fit(data_ch, data_ch, epochs=1, batch_size=10, shuffle=True, verbose=1)
C:\Users\Taranov\Anaconda3\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1496 val_f=val_f, val_ins=val_ins, shuffle=shuffle,
1497 callback_metrics=callback_metrics,
-> 1498 initial_epoch=initial_epoch)
1499
1500 def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
C:\Users\Taranov\Anaconda3\lib\site-packages\keras\engine\training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
1150 batch_logs['size'] = len(batch_ids)
1151 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1152 outs = f(ins_batch)
1153 if not isinstance(outs, list):
1154 outs = [outs]
C:\Users\Taranov\Anaconda3\lib\site-packages\keras\backend\theano_backend.py in __call__(self, inputs)
1156 def __call__(self, inputs):
1157 assert isinstance(inputs, (list, tuple))
-> 1158 return self.function(*inputs)
1159
1160
C:\Users\Taranov\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
896 node=self.fn.nodes[self.fn.position_of_error],
897 thunk=thunk,
--> 898 storage_map=getattr(self.fn, 'storage_map', None))
899 else:
900 # old-style linkers raise their own exceptions
C:\Users\Taranov\Anaconda3\lib\site-packages\theano\gof\link.py in raise_with_op(node, thunk, exc_info, storage_map)
323 # extra long error message in that case.
324 pass
--> 325 reraise(exc_type, exc_value, exc_trace)
326
327
C:\Users\Taranov\Anaconda3\lib\site-packages\six.py in reraise(tp, value, tb)
683 value = tp()
684 if value.__traceback__ is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
687
C:\Users\Taranov\Anaconda3\lib\site-packages\theano\compile\function_module.py in __call__(self, *args, **kwargs)
882 try:
883 outputs =\
--> 884 self.fn() if output_subset is None else\
885 self.fn(output_subset=output_subset)
886 except Exception:
ValueError: CudaNdarray_CopyFromCudaNdarray: need same dimensions for dim 1, destination=13, source=14
Apply node that caused the error: GpuAlloc(GpuDimShuffle{0,2,x,3,4,1}.0, Shape_i{0}.0, TensorConstant{13}, TensorConstant{2}, TensorConstant{13}, TensorConstant{13}, TensorConstant{8})
Toposort index: 163
Inputs types: [CudaNdarrayType(float32, (False, False, True, False, False, False)), TensorType(int64, scalar), TensorType(int64, scalar), TensorType(int8, scalar), TensorType(int64, scalar), TensorType(int64, scalar), TensorType(int64, scalar)]
Inputs shapes: [(10, 14, 1, 14, 14, 8), (), (), (), (), (), ()]
Inputs strides: [(21952, 196, 0, 14, 1, 2744), (), (), (), (), (), ()]
Inputs values: ['not shown', array(10, dtype=int64), array(13, dtype=int64), array(2, dtype=int8), array(13, dtype=int64), array(13, dtype=int64), array(8, dtype=int64)]
Outputs clients: [[GpuReshape{5}(GpuAlloc.0, MakeVector{dtype='int64'}.0)]]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
I tried to follow the recommendations and set the Theano flags:
import theano
import os
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=gpu,floatX=float32, optimizer='None',exception_verbosity=high"
But it still doesn't work.
Could you help me?
Many thanks!
OK... that sounds weird, but MaxPooling3D seems to have some kind of bug with padding='same'. So I wrote your code without it and added an initial padding just to make your dimensions compatible:
import keras.backend as K
from keras.layers import Input, Lambda, Conv3D, MaxPooling3D, UpSampling3D
from keras.models import Model
inputShape = (data_ch.shape[1], data_ch.shape[2], data_ch.shape[3], data_ch.shape[4])
paddedShape = (data_ch.shape[1]+2, data_ch.shape[2]+2, data_ch.shape[3]+2, data_ch.shape[4])
#initial padding
input_img= Input(shape=inputShape)
x = Lambda(lambda x: K.spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1))),
output_shape=paddedShape)(input_img) #Lambda layers require output_shape
#your original code without padding for MaxPooling layers (replace input_img with x)
x=Conv3D(filters=8, kernel_size=3, activation='relu', padding='same')(x)
x=MaxPooling3D(pool_size=2)(x)
x=Conv3D(filters=8, kernel_size=3, activation='relu', padding='same')(x)
x=MaxPooling3D(pool_size=2)(x)
x=UpSampling3D(size=2)(x)
x=Conv3D(filters=8, kernel_size=3, activation='relu', padding='same')(x) # PADDING IS NOT THE SAME!!!!!
x=UpSampling3D(size=2)(x)
x=Conv3D(filters=1, kernel_size=3, activation='sigmoid')(x)
model=Model(input_img, x)
model.compile(optimizer='adadelta', loss='binary_crossentropy')
model.summary()
print(model.predict(data_ch)[1])
model.fit(data_ch,data_ch,epochs=1,verbose=2,batch_size=10)
Try reducing the batch size to something like 2, and if you find that your network needs more GPU memory, try upgrading your GPU as well.