Anger_train = Anger_train.to_numpy().reshape(Anger_train.shape[0], 1, Anger_train.shape[1])
Anger_test = Anger_test.to_numpy().reshape(Anger_test.shape[0], 1, Anger_test.shape[1])
def getModel():
    model = Sequential()
    model.add(Conv1D(64, 1, activation='relu', input_shape=(941, 1, 43)))
    model.add(Conv1D(64, 1, activation='relu'))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(rate=0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='mean_squared_error', optimizer="sgd")
    return model
where the input is 941 instances with 43 scalar features each, and the output is a continuous variable between 0 and 1. The error so far is:
ValueError: Input 0 of layer maxpooling1D is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: (None, 1, 43, 256)
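One likely cause: input_shape includes the sample count (941), while Keras expects only the per-sample shape, with the batch axis added implicitly. A minimal sketch of a possible fix, assuming the 43 features are meant to serve as the Conv1D steps axis with a single channel:

from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout

# Treat each sample as 43 steps with 1 channel instead of (1, 43).
Anger_train = Anger_train.to_numpy().reshape(-1, 43, 1)
Anger_test = Anger_test.to_numpy().reshape(-1, 43, 1)

def getModel():
    model = Sequential()
    model.add(Conv1D(64, 1, activation='relu', input_shape=(43, 1)))  # per-sample shape only
    model.add(Conv1D(64, 1, activation='relu'))
    model.add(MaxPooling1D())  # now pools over the 43-step axis
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(rate=0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='mean_squared_error', optimizer='sgd')
    return model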
I am working with the KDDTrain+ dataset and trying to implement a CNN-LSTM model.
I converted the dataset to shape (125973, 121) and then to (125973, 11, 11, 1), which I named X_Train_new.
Here is the model I wrote for the CNN-LSTM:
model = Sequential()
model.add(TimeDistributed(Convolution2D(64, (3, 3), strides=(1, 1), padding="same", activation="relu"), input_shape=(10, 11, 11, 1)))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same")))
model.add(TimeDistributed(Convolution2D(64, kernel_size=(3, 3), strides=(1, 1), padding="same", activation="relu")))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same")))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(35, dropout=0.1, stateful=True, return_sequences=True))
model.add(Dropout(0.1))
model.add(LSTM(25, dropout=0.1))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(5, activation="softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_Train_new, Y_Train, epochs=10, batch_size=32)
But I keep getting this error:
ValueError: Input 0 of layer "sequential_24" is incompatible with the layer:
expected shape=(None, 10, 11, 11, 1), found shape=(None, 11, 11, 1)
How can I solve this error?
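One possible cause: the model declares ten time steps per sample (input_shape=(10, 11, 11, 1)), but X_Train_new carries no time axis at all. A minimal sketch of one fix, assuming consecutive records may be grouped into non-overlapping windows of 10 and that the last label of each window can stand for the whole window (both are assumptions):

import numpy as np

time_steps = 10
n = (X_Train_new.shape[0] // time_steps) * time_steps  # trim 125973 down to 125970
X_seq = X_Train_new[:n].reshape(-1, time_steps, 11, 11, 1)  # (12597, 10, 11, 11, 1)
Y_seq = Y_Train[:n].reshape(-1, time_steps)[:, -1]          # one label per window

model.fit(X_seq, Y_seq, epochs=10, batch_size=32)

Note that stateful=True on the first LSTM additionally requires a fixed batch size (via batch_input_shape); unless state really must carry over between batches, dropping stateful=True is the simpler route.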
I am trying to incorporate a CNN layer into the LSTM network as shown.
model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2]), activation='relu'))
model.add(Dropout(0.1))
model.add(LSTM(128, activation='relu'))
model.add(Conv1D(32, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
But it is giving the following error about the input shape. Please help me resolve the issue.
Try this:
model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2]), activation='relu'))
model.add(Dropout(0.1))
# return_sequences=True keeps the time axis, so the next Conv1D gets 3-D input
model.add(LSTM(128, activation='relu', return_sequences=True))
# input_shape on a non-first layer is ignored by Sequential, so it is omitted here
model.add(Conv1D(32, kernel_size=1, activation='relu'))
model.add(Flatten())
model.add(Dense(1))
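This works because Conv1D expects 3-D input of shape (batch, steps, channels): with return_sequences=True the second LSTM outputs (batch, steps, 128) instead of (batch, 128), giving the convolution a steps axis to slide over.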
I'm trying to train a GAN to generate samples of images and ground truths for a semantic segmentation task, but I'm getting an error about the shape of my input. From the error it seems the model expects my input arrays to have 4 dimensions, whereas I believe the 5-dimensional shape is what the problem at hand requires.
The shape of my tensors is (64, 2, 128, 128, 3):
64 for the images in the batch, 2 for either the image or the ground truth, 128 x 128 for the image dimensions, and 3 for the RGB channels.
My generator looks like this:
def build_generator(input_dim, output_size):
    """
    Build the generator.
    # Arguments
        input_dim : Integer, dimension of the input noise
        output_size : List, output image size
    # Returns
        model : Keras model, the generator
    """
    model = Sequential()
    model.add(Dense(256, input_dim=input_dim))
    unit_size = 128 * output_size[0] // 8 * output_size[1] // 8
    model.add(Dense(unit_size))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    shape = (output_size[0] // 8, output_size[1] // 8, 128)
    model.add(Reshape(shape))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(3, (5, 5), padding='same'))
    model.add(Activation('sigmoid'))
    return model
ValueError: Error when checking input: expected conv2d_9_input to have 4 dimensions, but got array with shape (64, 2, 128, 128, 3)
Is there anything I can change so that my generator will accept these 5-dimensional array inputs?
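Not really within this generator: it is built entirely from layers that consume and produce 4-D batches, so the 5-D array has to be reorganized before it reaches any model. A minimal sketch of two common workarounds (the random batch is just a placeholder for the real data):

import numpy as np

batch = np.random.rand(64, 2, 128, 128, 3).astype('float32')  # placeholder

# Option 1: split the pair axis into two ordinary 4-D batches and feed them
# to separate inputs (or separate models).
images = batch[:, 0]  # (64, 128, 128, 3)
masks = batch[:, 1]   # (64, 128, 128, 3)

# Option 2: stack image and ground truth along the channel axis so one 4-D
# tensor carries both; the generator's final Conv2D would then need 6 output
# channels instead of 3.
pairs = np.concatenate([batch[:, 0], batch[:, 1]], axis=-1)  # (64, 128, 128, 6)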
I am trying to use the U-Net network architecture for stereo vision.
I have datasets with 3 different image sizes (1240x368, 1224x368 and 1384x1104).
Here is My whole class:
import pickle
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, UpSampling2D, Conv2DTranspose
from keras.utils import np_utils
import sys, numpy as np
import keras
import cv2
pkl_file = open('data.p', 'rb')
dict = pickle.load(pkl_file)
X_data = dict['images']
Y_data = dict['disparity']
data_num = len(X_data)
train_num = int(data_num * 0.8)
X_train = X_data[:train_num]
X_test = X_data[train_num:]
Y_train = Y_data[:train_num]
Y_test = Y_data[train_num:]
def gen(X, Y):
    while True:
        for x, y in zip(X, Y):
            yield x, y
model = Sequential()
model.add(Convolution2D(6, (2, 2), input_shape=(None, None, 6), activation='relu', padding='same'))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(256, (3, 3), activation='relu'))
model.add(Conv2DTranspose(256, (3, 3), activation='relu'))
model.add(Conv2DTranspose(128, (3, 3), activation='relu'))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(128, (3, 3), activation='relu'))
model.add(Conv2DTranspose(128, (3, 3), activation='relu'))
model.add(Conv2DTranspose(64, (3, 3), activation='relu'))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(64, (3, 3), activation='relu'))
model.add(Conv2DTranspose(64, (3, 3), activation='relu'))
model.add(Conv2DTranspose(3, (3, 3), activation='relu'))
model.compile(loss=['mse'], optimizer='adam', metrics=['accuracy'])
model.fit_generator(gen(X_train, Y_train), steps_per_epoch=len(X_train), epochs=5)
scores = model.evaluate(X_test, Y_test, verbose=0)
When I try to run this code, I get an error in which it says:
Incompatible shapes: [1,370,1242,3] vs. [1,368,1240,3]
I resized the pictures to be divisible by 8 since I have 3 maxpool layers.
As input I pass two images (I am doing stereo vision), and as output I get a disparity map for the first image. I concatenate the two images along the channel dimension: np.concatenate((img1, img2), axis=-1).
Can somebody tell me what I am doing wrong?
Here is my trace:
Traceback (most recent call last):
File "C:\Users\Ivan\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1322, in _do_call
return fn(*args)
File "C:\Users\Ivan\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1307, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "C:\Users\Ivan\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1409, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [1,370,1242,3] vs. [1,368,1240,3]
[[Node: loss/conv2d_transpose_9_loss/sub = Sub[T=DT_FLOAT, _class=["loc:@training/Adam/gradients/loss/conv2d_transpose_9_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](conv2d_transpose_9/Relu-1-0-TransposeNCHWToNHWC-LayoutOptimizer, _arg_conv2d_transpose_9_target_0_2/_303)]]
[[Node: loss/mul/_521 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_2266_loss/mul", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
I tried resizing pictures and learning works, but since as a result I get disparity maps, resizing is not a good option. Does anybody have any advice?
If the picture is too big to fit into the Conv2DTranspose, you can use a Cropping2D layer to crop the picture to the desired size. This works if the input picture has an even number of pixels.
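For the shapes in the error above, a minimal sketch of that suggestion (assuming the decoder output is 370 x 1242 and the disparity targets are 368 x 1240):

from keras.layers import Cropping2D

# Remove 1 row from top and bottom, 1 column from left and right:
# (370, 1242, 3) -> (368, 1240, 3)
model.add(Cropping2D(cropping=((1, 1), (1, 1))))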
I'm trying to implement a convolutional neural network within Keras using a TF backend for image segmentation of 111 images of size 141 x 166. When I run the code below, I get the error message:
Error when checking target: expected dense_36 to have 2 dimensions, but got array with shape (88, 141, 166, 1)
My X_train and y_train variables both have shape (88, 141, 166, 1), and my X_test and y_test variables both have shape (23, 141, 166, 1), as split by sklearn's train_test_split function.
I'm not sure what the error message means by dense_36. I have tried using Flatten() before fitting the model, but it says I have ndim = 2 and cannot flatten.
# set input
batch_size = 111
num_epochs = 50
img_rows = 141
img_cols = 166
input_shape = (img_rows, img_cols, 1)
num_classes = img_rows*img_cols
# split training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 4)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# CNN itself
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# compile CNN
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
# fit CNN
model.fit(X_train, y_train, batch_size=batch_size, epochs=num_epochs,
verbose=1, validation_data=(X_test, y_test))
My model summary is:
Layer (type) Output Shape Param #
=================================================================
conv2d_35 (Conv2D) (None, 139, 164, 32) 320
_________________________________________________________________
conv2d_36 (Conv2D) (None, 137, 162, 64) 18496
_________________________________________________________________
max_pooling2d_18 (MaxPooling (None, 68, 81, 64) 0
_________________________________________________________________
dropout_35 (Dropout) (None, 68, 81, 64) 0
_________________________________________________________________
flatten_28 (Flatten) (None, 352512) 0
_________________________________________________________________
dense_33 (Dense) (None, 128) 45121664
_________________________________________________________________
dropout_36 (Dropout) (None, 128) 0
_________________________________________________________________
dense_34 (Dense) (None, 2) 258
_________________________________________________________________
Total params: 45,140,738
Trainable params: 45,140,738
Non-trainable params: 0
_________________________________________________________________
None
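The summary shows why the target check fails: the final Dense layer emits a 2-D tensor (batch, units), while y_train is 4-D with shape (88, 141, 166, 1). A minimal sketch of one possible fix, assuming one target value per pixel, is to flatten the targets to match the Dense output:

# Flatten per-pixel targets to (samples, 141*166) to match the Dense layer.
y_train_flat = y_train.reshape(y_train.shape[0], img_rows * img_cols)  # (88, 23406)
y_test_flat = y_test.reshape(y_test.shape[0], img_rows * img_cols)     # (23, 23406)

model.fit(X_train, y_train_flat, batch_size=batch_size, epochs=num_epochs,
          verbose=1, validation_data=(X_test, y_test_flat))

Note also that softmax over num_classes = 141*166 units normalizes across all pixels at once; for binary per-pixel masks, a sigmoid output with binary_crossentropy is usually the better fit.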