I am fairly new to both the mlxtend and Keras packages, so please bear with me. I have been trying to combine the predictions of several models, i.e., Random Forest, Logistic Regression, and a Neural Network, using StackingCVClassifier. I am trying to stack these classifiers so that they operate on different feature subsets. Please see the code below.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras import layers
from keras.constraints import maxnorm
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.pipeline import make_pipeline
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.neural_network import MLPClassifier
X, y = make_classification()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
# defining neural network model
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(units=1, activation='sigmoid'))
    # compile model
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
# using KerasClassifier on the neural network model
NN_clf = KerasClassifier(build_fn=create_model, epochs=5, batch_size=5)
NN_clf._estimator_type = "classifier"
# stacking of classifiers that operate on different feature subsets
pipeline1 = make_pipeline(ColumnSelector(cols=(np.arange(0, 5, 1))), LogisticRegression())
pipeline2 = make_pipeline(ColumnSelector(cols=(np.arange(5, 10, 1))), RandomForestClassifier())
pipeline3 = make_pipeline(ColumnSelector(cols=(np.arange(10, 20, 1))), NN_clf)
# final stacking
clf = StackingCVClassifier(classifiers=[pipeline1, pipeline2, pipeline3], meta_classifier=MLPClassifier())
clf.fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_val, y_val))
However, I am getting this error:
ValueError Traceback (most recent call last)
<ipython-input-11-ef342536824f> in <module>
42 # final stacking
43 clf = StackingCVClassifier(classifiers=[pipeline1, pipeline2, pipeline3], meta_classifier=MLPClassifier())
---> 44 clf.fit(X_train, y_train)
45
46 print("Stacking model score: %.3f" % clf.score(X_val, y_val))
~\anaconda3\lib\site-packages\mlxtend\classifier\stacking_cv_classification.py in fit(self, X, y, groups, sample_weight)
282 meta_features = prediction
283 else:
--> 284 meta_features = np.column_stack((meta_features, prediction))
285
286 if self.store_train_meta_features:
~\anaconda3\lib\site-packages\numpy\core\overrides.py in column_stack(*args, **kwargs)
~\anaconda3\lib\site-packages\numpy\lib\shape_base.py in column_stack(tup)
654 arr = array(arr, copy=False, subok=True, ndmin=2).T
655 arrays.append(arr)
--> 656 return _nx.concatenate(arrays, 1)
657
658
~\anaconda3\lib\site-packages\numpy\core\overrides.py in concatenate(*args, **kwargs)
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 1 has 3 dimension(s)
Please help me. Thanks!
The error happens because you are combining predictions from traditional ML models and a DL model.
The ML models give predictions shaped like (80, 1), whereas the DL model predicts in shape (80, 1, 1), so there is a mismatch when mlxtend tries to stack all the predictions together.
A common workaround is to strip the extra dimension from the DL model's predictions, turning (80, 1, 1) into (80, 1).
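For intuition, here is a minimal NumPy sketch of the mismatch and of the squeeze fix (the (80, 1) and (80, 1, 1) shapes are just illustrative):

import numpy as np

ml_pred = np.zeros((80, 1))     # shape of a scikit-learn prediction column
dl_pred = np.zeros((80, 1, 1))  # extra trailing axis from the Keras model

# np.column_stack((ml_pred, dl_pred))  # raises the same ValueError

# squeeze away the extra axis so both arrays are 2-D
dl_pred = dl_pred.squeeze(axis=1) if len(dl_pred.shape) > 2 else dl_pred
print(np.column_stack((ml_pred, dl_pred)).shape)  # (80, 2)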
To apply the fix, open the .py file located at:
anaconda3\lib\site-packages\mlxtend\classifier\stacking_cv_classification.py
At lines 280 and 356, outside the if/elif block, add this:
prediction = prediction.squeeze(axis=1) if len(prediction.shape) > 2 else prediction
So, it will look something like this:
...
...
...
if not self.use_probas:
    prediction = prediction[:, np.newaxis]
elif self.drop_proba_col == "last":
    prediction = prediction[:, :-1]
elif self.drop_proba_col == "first":
    prediction = prediction[:, 1:]
prediction = prediction.squeeze(axis=1) if len(prediction.shape) > 2 else prediction

if meta_features is None:
    meta_features = prediction
else:
    meta_features = np.column_stack((meta_features, prediction))
...
...
...
for model in self.clfs_:
    if not self.use_probas:
        prediction = model.predict(X)[:, np.newaxis]
    else:
        if self.drop_proba_col == "last":
            prediction = model.predict_proba(X)[:, :-1]
        elif self.drop_proba_col == "first":
            prediction = model.predict_proba(X)[:, 1:]
        else:
            prediction = model.predict_proba(X)
    prediction = prediction.squeeze(axis=1) if len(prediction.shape) > 2 else prediction
    per_model_preds.append(prediction)
...
...
...
Prakash's answer raises really good points.
If you want to get this running without too many changes, you can roll your own scikit-learn BaseEstimator/ClassifierMixin object, or wrap the model in the recommended KerasClassifier object.
That is, you can roll your own estimator like this:
class MyKerasModel(BaseEstimator, ClassifierMixin):
    def fit(self, X, y):
        model = keras.Sequential()
        model.add(layers.Input(shape=(X.shape[1],)))
        model.add(layers.Dense(10, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Flatten())
        model.add(layers.Dense(units=1, activation='sigmoid'))
        optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
        model.fit(X, y)
        self.model = model
        return self

    def predict(self, X):
        return (self.model.predict(X) > 0.5).flatten()
And putting all the pieces together allows you to stack the predictions:
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from mlxtend.classifier import StackingCVClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
X, y = make_classification()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
class MyKerasModel(BaseEstimator, ClassifierMixin):
    def fit(self, X, y):
        model = keras.Sequential()
        model.add(layers.Input(shape=(X.shape[1],)))
        model.add(layers.Dense(10, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Flatten())
        model.add(layers.Dense(units=1, activation='sigmoid'))
        optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
        model.fit(X, y)
        self.model = model
        return self

    def predict(self, X):
        return (self.model.predict(X) > 0.5).flatten()
clf = StackingCVClassifier(
classifiers=[RandomForestClassifier(), LogisticRegression(), MyKerasModel()],
meta_classifier=MLPClassifier(),
).fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_val, y_val))
Output:
2/2 [==============================] - 0s 11ms/step - loss: 0.8580 - auc: 0.5050 - accuracy: 0.5500
2/2 [==============================] - 0s 1ms/step
2/2 [==============================] - 0s 4ms/step - loss: 0.6955 - auc_1: 0.5777 - accuracy: 0.5750
2/2 [==============================] - 0s 1ms/step
3/3 [==============================] - 0s 3ms/step - loss: 0.7655 - auc_2: 0.6037 - accuracy: 0.6125
Stacking model score: 1.000
Related
I am trying to design an LSTM model for forecasting price movement.
I have issues with the results I obtain for my predictions. I did not normalize my target set y (neither train nor test), only X, because this is a classification task (-1, 0, 1), yet the predictions I obtain are floats.
Maybe I did not normalize the right sets. My code is below.
Many thanks for your help, and feel free to comment on my other lines of code too; I am a beginner.
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from datetime import datetime as dt
from pandas_datareader import data as pdr
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
startdate=dt(2018,3,31)
enddate=dt(2022,3,31)
tickers = ['ETH-USD']
Data=pdr.get_data_yahoo(tickers,start=startdate, end=enddate)['Adj Close']
df_change = Data.apply(lambda x: np.log(x) - np.log(x.shift(1)))
df_change.drop(index=df_change.index[0], axis=0, inplace=True)
df_change = df_change*100
pd.options.mode.chained_assignment = None #to not display the error of copy dataframe
df_y = df_change.copy()
df_y.columns = ['ETH-y']
def Target(df, column, df2, column2):
    for i in range(len(df)):
        if df[column].iloc[i] > 0:
            df2[column2][i] = 1   # value is up compared to the previous day
        elif -0.5 < df[column].iloc[i] < 0.5:
            df2[column2][i] = 0   # value is steady
        else:
            df2[column2][i] = -1  # value is down
Target(df_change,'ETH-USD',df_y,'ETH-y')
print(df_y['ETH-y'].value_counts())
Data.drop(index=Data.index[0], axis=0, inplace=True) #drop first row to have same values
X = Data
y = df_y
## split my train val and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify = y)
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler().fit(X_train)
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
#reshaping for 3D array
X_train = np.reshape(X_train,(1169,1,1))
X_test = np.reshape(X_test,(293,1,1))
from keras.models import Sequential
from keras.layers import Dense, LSTM
model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
model.add(LSTM(32, activation='relu', return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(y_train.shape[1]))
model.compile(optimizer='adam', loss='mse')
model.summary()
history = model.fit(X_train, y_train, epochs=10, batch_size=16, validation_split=0.1, verbose=1)
pred = model.predict(X_test)
pred = sc.inverse_transform(pred)
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
This original work is presented here
How do I go about plotting the confusion matrix for a CNN model?
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
##Need to put this block of code in for cuDNN to initialize properly
import tensorflow as tf
config = tf.compat.v1.ConfigProto(gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
# device_count = {'GPU': 1}
)
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)
#------------------------------------------------------------------------------------------------------------------
num_rows = 40
num_columns = 174
num_channels = 1
x_train = x_train.reshape(x_train.shape[0], num_rows, num_columns, num_channels)
x_test = x_test.reshape(x_test.shape[0], num_rows, num_columns, num_channels)
num_labels = yy.shape[1]
filter_size = 2
# Construct model
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(num_labels, activation='softmax'))
then trained as:
from keras.callbacks import ModelCheckpoint
from datetime import datetime
#num_epochs = 12
#num_batch_size = 128
num_epochs = 72
num_batch_size = 256
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.basic_cnn.hdf5',
verbose=1, save_best_only=True)
model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test), callbacks=[checkpointer], verbose=1)
I have been trying a few things, one of which is:
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 4))
plot_confusion_matrix=(model(),x_test, y_test)
plt.plot(plot_confusion_matrix)
but I cannot get the confusion matrix to plot.
I also looked at tf.math.confusion_matrix(), but what are labels and predictions as defined for the CNN model above? The confusion matrix is for a multi-class classification.
Is
y_true = np.argmax(y_test, 1)?
and
y_pred = model.predict_classes(x_test)?
labels: 1-D Tensor of real labels for the classification task.
predictions: 1-D Tensor of predictions for a given classification.
As the official documentation says, labels are the ground-truth classes and predictions are the model's outputs, and both have to be 1-D tensors: each entry in labels is the ground truth for one instance, and the value at the same index in predictions holds its predicted class.
So what you need to do is get the predicted class and the true class for each instance. In your code you passed x_test and y_test, which are not the elements supposed to be passed; instead, use model.predict to get the predicted labels.
# model.predict returns softmax probabilities and y_test is one-hot encoded,
# so take the argmax of both to get 1-D class indices
y_predict = np.argmax(model.predict(x_test), axis=1)
y_true = np.argmax(y_test, axis=1)
res = tf.math.confusion_matrix(y_true, y_predict)
This res is now a 2-D matrix. To plot it, you can use scikit-learn's plot_confusion_matrix:
plot_confusion_matrix(classifier, X_test, y_test,
                      display_labels=class_names,
                      cmap=plt.cm.Blues,
                      normalize=normalize)
Here put classifier = model, not the call model().
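Since plot_confusion_matrix expects a scikit-learn estimator, plotting res directly with matplotlib may be easier. A minimal sketch, assuming eager execution (TF 2.x) and that class_names holds your label names:

import matplotlib.pyplot as plt
import numpy as np

cm = res.numpy()  # convert the confusion-matrix tensor to a NumPy array
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cm, cmap=plt.cm.Blues)
ax.set_xticks(np.arange(len(class_names)))
ax.set_yticks(np.arange(len(class_names)))
ax.set_xticklabels(class_names, rotation=45, ha="right")
ax.set_yticklabels(class_names)
# write the count into each cell
for i in range(cm.shape[0]):
    for j in range(cm.shape[1]):
        ax.text(j, i, cm[i, j], ha="center", va="center")
ax.set_xlabel("Predicted label")
ax.set_ylabel("True label")
plt.show()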
Hope this helps; here are some more resources.
Here you can see the multiclass classification confusion matrix technique.
Multiclass plot github function
Another custom plot function
I have finished a PyTorch MLP model for the MNIST dataset, but got two different results: 0.90+ accuracy when using MNIST dataset from PyTorch, but ~0.10 accuracy when using MNIST dataset from Keras.
Below is my code with dependency: PyTorch 0.3.0.post4, keras 2.1.3, tensorflow backend 1.4.1 gpu version.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch as pt
import torchvision as ptv
from keras.datasets import mnist
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
# training data from PyTorch
train_set = ptv.datasets.MNIST("./data/mnist/train", train=True, transform=ptv.transforms.ToTensor(), download=True)
test_set = ptv.datasets.MNIST("./data/mnist/test", train=False, transform=ptv.transforms.ToTensor(), download=True)
train_dataset = DataLoader(train_set, batch_size=100, shuffle=True)
test_dataset = DataLoader(test_set, batch_size=10000, shuffle=True)
class MLP(pt.nn.Module):
    """The multi-layer perceptron."""
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = pt.nn.Linear(784, 512)
        self.fc2 = pt.nn.Linear(512, 128)
        self.fc3 = pt.nn.Linear(128, 10)
        self.use_gpu = True

    def forward(self, din):
        din = din.view(-1, 28 * 28)
        dout = F.relu(self.fc1(din))
        dout = F.relu(self.fc2(dout))
        # return F.softmax(self.fc3(dout))
        return self.fc3(dout)
model = MLP().cuda()
print(model)
# loss func and optim
optimizer = pt.optim.SGD(model.parameters(), lr=1)
criterion = pt.nn.CrossEntropyLoss().cuda()
def evaluate_acc(pred, label):
    pred = pred.cpu().data.numpy()
    label = label.cpu().data.numpy()
    test_np = (np.argmax(pred, 1) == label)
    test_np = np.float32(test_np)
    return np.mean(test_np)

def evaluate_loader(loader):
    print("evaluating ...")
    accuracy_list = []
    for i, (inputs, labels) in enumerate(loader):
        inputs = pt.autograd.Variable(inputs).cuda()
        labels = pt.autograd.Variable(labels).cuda()
        outputs = model(inputs)
        accuracy_list.append(evaluate_acc(outputs, labels))
    print(sum(accuracy_list) / len(accuracy_list))
def training(d, epochs):
    for x in range(epochs):
        for i, data in enumerate(d):
            optimizer.zero_grad()
            (inputs, labels) = data
            inputs = pt.autograd.Variable(inputs).cuda()
            labels = pt.autograd.Variable(labels).cuda()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            if i % 200 == 0:
                print(i, ":", evaluate_acc(outputs, labels))
# Training MLP for 4 epochs with MNIST dataset from PyTorch
training(train_dataset, 4)
# The accuracy is ~0.96.
evaluate_loader(test_dataset)
print("###########################################################")
def load_mnist():
    (x, y), (x_test, y_test) = mnist.load_data()
    x = x.reshape((-1, 1, 28, 28)).astype(np.float32)
    x_test = x_test.reshape((-1, 1, 28, 28)).astype(np.float32)
    y = y.astype(np.int64)
    y_test = y_test.astype(np.int64)
    print("x.shape", x.shape, "y.shape", y.shape,
          "\nx_test.shape", x_test.shape, "y_test.shape", y_test.shape)
    return x, y, x_test, y_test
class TMPDataset(Dataset):
    """Dataset for loading the Keras MNIST data."""
    def __init__(self, a, b):
        self.x = a
        self.y = b

    def __getitem__(self, item):
        return self.x[item], self.y[item]

    def __len__(self):
        return len(self.y)
x_train, y_train, x_test, y_test = load_mnist()
# Create dataloader for MNIST dataset from Keras.
test_loader = DataLoader(TMPDataset(x_test, y_test), num_workers=1, batch_size=10000)
train_loader = DataLoader(TMPDataset(x_train, y_train), shuffle=True, batch_size=100)
# Evaluate the performance of the MLP trained on the PyTorch dataset; the accuracy is ~0.96.
evaluate_loader(test_loader)
evaluate_loader(train_loader)
model = MLP().cuda()
print(model)
optimizer = pt.optim.SGD(model.parameters(), lr=1)
criterion = pt.nn.CrossEntropyLoss().cuda()
# Train now on MNIST dataset from Keras.
training(train_loader, 4)
# Evaluate the trained model on the Keras MNIST dataset; the resulting performance is ~0.10...
evaluate_loader(test_loader)
evaluate_loader(train_loader)
I checked some samples from the Keras MNIST dataset and found no error.
I am wondering what is wrong with the datasets?
The code runs without error; run it to see the results.
The MNIST data coming from Keras are not normalized; following the Keras MNIST MLP example, you should do this manually, i.e. you should include the following in your load_mnist() function:
x /= 255
x_test /= 255
Not sure about PyTorch, but it would seem that the MNIST data from their own utility functions come already normalized (as is the case with Tensorflow - see the third point in my answer here).
A 10% accuracy (i.e. equivalent to random guessing) in the case of non-normalized input data is perfectly consistent with this explanation.
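A minimal sketch of the corrected loader from the question, with only the normalization added:

def load_mnist():
    (x, y), (x_test, y_test) = mnist.load_data()
    x = x.reshape((-1, 1, 28, 28)).astype(np.float32)
    x_test = x_test.reshape((-1, 1, 28, 28)).astype(np.float32)
    # scale pixels from [0, 255] to [0, 1], matching torchvision's ToTensor()
    x /= 255
    x_test /= 255
    y = y.astype(np.int64)
    y_test = y_test.astype(np.int64)
    return x, y, x_test, y_test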
I am new to machine learning, and I am trying to use Keras to perform regression tasks. I have implemented this code, based on this example.
X = df[['full_sq','floor','build_year','num_room','sub_area_2','sub_area_3','state_2.0','state_3.0','state_4.0']]
y = df['price_doc']
X = np.asarray(X)
y = np.asarray(y)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=.2)
def baseline_model():
    model = Sequential()
    model.add(Dense(13, input_dim=9, kernel_initializer='normal',
                    activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X_train, Y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
prediction = estimator.predict(X_test)
accuracy_score(Y_test, prediction)
When I run the code I get this error:
AttributeError: 'KerasRegressor' object has no attribute 'model'
How could I correctly 'insert' the model in KerasRegressor?
You have to fit the estimator again after cross_val_score in order to evaluate on new data, because cross_val_score clones the estimator internally and does not leave you with a fitted model:
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X_train, Y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
estimator.fit(X, y)
prediction = estimator.predict(X_test)
accuracy_score(Y_test, prediction)
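One caveat: accuracy_score expects discrete class labels, so with a regressor's continuous predictions you would normally use an error metric instead; a minimal sketch:

from sklearn.metrics import mean_squared_error

mse = mean_squared_error(Y_test, prediction)
print("Test MSE: %.2f" % mse)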
Working Test version:
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, KFold
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
seed = 1
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
def baseline_model():
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
estimator.fit(X, y)
prediction = estimator.predict(X)
accuracy_score(y, prediction)
To evaluate your system's performance, you can calculate the error as follows.
You also do not need to call KFold and cross_val_score.
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, KFold
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
seed = 1
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
def baseline_model():
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
estimator.fit(X, y)
prediction = estimator.predict(X)
train_error = np.abs(y - prediction)
mean_error = np.mean(train_error)
min_error = np.min(train_error)
max_error = np.max(train_error)
std_error = np.std(train_error)
Instead of KerasRegressor, you can use the model itself directly.
These two snippets of the code give the exact same results:
estimator = KerasRegressor(build_fn=baseline_model)
estimator.fit(X, y, nb_epoch=100, batch_size=100, verbose=False, shuffle=False)
prediction = estimator.predict(X)
model = baseline_model()
model.fit(X, y, nb_epoch=100, batch_size=100, verbose=False, shuffle=False)
prediction = model.predict(X)
Please note that the shuffle argument of the fit() function needs to be False for both KerasRegressor and the model. Moreover, to fix the initial state and obtain reproducible results, you need to add these lines of code at the beginning of your script:
import numpy as np
import tensorflow as tf
from keras import backend as K

session = K.get_session()
init_op = tf.group(tf.tables_initializer(), tf.global_variables_initializer(), tf.local_variables_initializer())
session.run(init_op)
np.random.seed(1)
tf.set_random_seed(1)
You should train the model on X_train and y_train. You cannot train the model on all of X and y, because then you have no held-out data left for testing: training belongs on the training split, and testing/prediction on X_test.
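In terms of the question's variables, that means something like:

estimator.fit(X_train, Y_train)
prediction = estimator.predict(X_test)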
I am trying to feed a huge sparse matrix to a Keras model. As the dataset doesn't fit into RAM, the way around it is to train the model on data generated batch-by-batch by a generator.
To test this approach and make sure my solution works fine, I slightly modified Keras's simple MLP on the Reuters newswire topic classification task. So, the idea is to compare the original and edited models. I just convert numpy.ndarray into scipy.sparse.csr.csr_matrix and feed it to the model.
But the model crashes at some point and I need a hand to figure out the reason.
Here is the original model, with my additions below.
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 32
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=max_words, test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
nb_classes = np.max(y_train)+1
print(nb_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(nb_words=max_words)
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print('Building model...')
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
                    nb_epoch=nb_epoch, batch_size=batch_size,
                    verbose=1)  # , validation_split=0.1)
score = model.evaluate(X_test, Y_test,
                       batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
It outputs:
Loading data...
8982 train sequences
2246 test sequences
46 classes
Vectorizing sequence data...
X_train shape: (8982, 1000)
X_test shape: (2246, 1000)
Convert class vector to binary class matrix (for use with categorical_crossentropy)
Y_train shape: (8982, 46)
Y_test shape: (2246, 46)
Building model...
Epoch 1/5
8982/8982 [==============================] - 5s - loss: 1.3932 - acc: 0.6906
Epoch 2/5
8982/8982 [==============================] - 4s - loss: 0.7522 - acc: 0.8234
Epoch 3/5
8982/8982 [==============================] - 5s - loss: 0.5407 - acc: 0.8681
Epoch 4/5
8982/8982 [==============================] - 5s - loss: 0.4160 - acc: 0.8980
Epoch 5/5
8982/8982 [==============================] - 5s - loss: 0.3338 - acc: 0.9136
Test score: 1.01453569163
Test accuracy: 0.797417631398
Finally, here is my part
from scipy import sparse

X_train_sparse = sparse.csr_matrix(X_train)

def batch_generator(X, y, batch_size):
    n_batches_for_epoch = X.shape[0]//batch_size
    for i in range(n_batches_for_epoch):
        index_batch = range(X.shape[0])[batch_size*i:batch_size*(i+1)]
        X_batch = X[index_batch, :].todense()
        y_batch = y[index_batch, :]
        yield (np.array(X_batch), y_batch)
model.fit_generator(generator=batch_generator(X_train_sparse, Y_train, batch_size),
nb_epoch=nb_epoch,
samples_per_epoch=X_train_sparse.shape[0])
The crash:
Exception Traceback (most recent call last)
<ipython-input-120-6722a4f77425> in <module>()
1 model.fit_generator(generator=batch_generator(X_trainSparse, Y_train, batch_size),
2 nb_epoch=nb_epoch,
----> 3 samples_per_epoch=X_trainSparse.shape[0])
/home/kk/miniconda2/envs/tensorflow/lib/python2.7/site-packages/keras/models.pyc in fit_generator(self, generator, samples_per_epoch, nb_epoch, verbose, callbacks, validation_data, nb_val_samples, class_weight, max_q_size, **kwargs)
648 nb_val_samples=nb_val_samples,
649 class_weight=class_weight,
--> 650 max_q_size=max_q_size)
651
652 def evaluate_generator(self, generator, val_samples, max_q_size=10, **kwargs):
/home/kk/miniconda2/envs/tensorflow/lib/python2.7/site-packages/keras/engine/training.pyc in fit_generator(self, generator, samples_per_epoch, nb_epoch, verbose, callbacks, validation_data, nb_val_samples, class_weight, max_q_size)
1356 raise Exception('output of generator should be a tuple '
1357 '(x, y, sample_weight) '
-> 1358 'or (x, y). Found: ' + str(generator_output))
1359 if len(generator_output) == 2:
1360 x, y = generator_output
Exception: output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None
I believe the problem is due to a wrong setup of samples_per_epoch. I'd truly appreciate it if someone could comment on this.
Here is my solution. The actual problem is not samples_per_epoch: the generator in the question is a plain for loop, so it is exhausted after the first epoch and starts returning None, while Keras expects a generator that yields batches indefinitely.
def batch_generator(X, y, batch_size):
    number_of_batches = X.shape[0]//batch_size
    counter = 0
    shuffle_index = np.arange(np.shape(y)[0])
    np.random.shuffle(shuffle_index)
    X = X[shuffle_index, :]
    y = y[shuffle_index]
    while 1:  # loop forever; Keras generators must never terminate
        index_batch = shuffle_index[batch_size*counter:batch_size*(counter+1)]
        X_batch = X[index_batch, :].todense()
        y_batch = y[index_batch]
        counter += 1
        yield (np.array(X_batch), y_batch)
        if counter >= number_of_batches:  # epoch finished: reshuffle and start over
            np.random.shuffle(shuffle_index)
            counter = 0
In my case, X is a sparse matrix and y is an array.
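For reference, this generator plugs into the same fit_generator call as in the question (Keras 1.x argument names):

model.fit_generator(generator=batch_generator(X_train_sparse, Y_train, batch_size),
                    nb_epoch=nb_epoch,
                    samples_per_epoch=X_train_sparse.shape[0])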
If you can use Lasagne instead of Keras, I've written a small MLP class with the following features:
- supports both dense and sparse matrices
- supports dropout and hidden layers
- supports complete probability distributions instead of one-hot labels, so it supports multilabel training
- supports a scikit-learn-like API (fit, predict, accuracy, etc.)
- is very easy to configure and modify