Plot Confusion Matrix from CNN Model - machine-learning

How do I go about plotting the confusion matrix for a CNN model?
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
##Need to put this block of code in for cuDNN to initialize properly
import tensorflow as tf
config = tf.compat.v1.ConfigProto(gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
# device_count = {'GPU': 1}
)
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)
#------------------------------------------------------------------------------------------------------------------
num_rows = 40
num_columns = 174
num_channels = 1
x_train = x_train.reshape(x_train.shape[0], num_rows, num_columns, num_channels)
x_test = x_test.reshape(x_test.shape[0], num_rows, num_columns, num_channels)
num_labels = yy.shape[1]
filter_size = 2
# Construct model
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(num_labels, activation='softmax'))
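Note that the snippet above does not show a compile step; before calling model.fit, something along these lines is needed (a sketch, assuming one-hot-encoded labels to match the softmax output):
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=Adam())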
The model is then trained as:
from keras.callbacks import ModelCheckpoint
from datetime import datetime
#num_epochs = 12
#num_batch_size = 128
num_epochs = 72
num_batch_size = 256
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.basic_cnn.hdf5',
verbose=1, save_best_only=True)
model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test), callbacks=[checkpointer], verbose=1)
I have been trying a few things, one of which is:
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 4))
plot_confusion_matrix=(model(),x_test, y_test)
plt.plot(plot_confusion_matrix)
but I cannot get the confusion matrix to plot.
I also looked at tf.math.confusion_matrix(), but what are the labels and predictions for the CNN model defined above?
The confusion matrix is for a multi-class classification problem.
Is
y_true = np.argmax(y_test, 1)??
and
y_pred = model.predict_classes(x_test)??

labels: 1-D Tensor of real labels for the classification task.
predictions: 1-D Tensor of predictions for a given classification.
As the official documentation says, both labels and predictions have to be 1-D tensors: labels holds the ground-truth class of each instance, and the value at the same index in predictions holds the class predicted for that instance.
So what you need to do is get the predictions and the labels for each instance. In your code you passed x_test and y_test, which are not what tf.math.confusion_matrix expects; instead, use model.predict to get the predicted labels.
y_predict = np.argmax(model.predict(x_test), axis=1)  # predicted class indices
y_true = np.argmax(y_test, axis=1)                     # ground-truth class indices (y_test is one-hot)
res = tf.math.confusion_matrix(y_true, y_predict)
This res is now a 2-D matrix. To plot it you can use:
plot_confusion_matrix(classifier, X_test, y_test,
                      display_labels=class_names,
                      cmap=plt.cm.Blues,
                      normalize=normalize)
Here pass classifier = model (the fitted model object), not a call like model().
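For reference, a minimal end-to-end sketch (assuming y_test is one-hot encoded, to match the softmax output, and that scikit-learn is installed):
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# class indices for ground truth and predictions
y_true = np.argmax(y_test, axis=1)
y_pred = np.argmax(model.predict(x_test), axis=1)

# build and plot the confusion matrix
cm = confusion_matrix(y_true, y_pred)
ConfusionMatrixDisplay(confusion_matrix=cm).plot(cmap=plt.cm.Blues)
plt.show()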
Hope this helps; here are some more resources:
Here you can see the multiclass classification confusion matrix technique.
Multiclass plot GitHub function
Another custom plot function

Related

Stacking classifiers (sklearn and keras models) via StackingCVClassifier problem

I am kind of new to using the mlxtend package as well as the Keras package, so please bear with me. I have been trying to combine the predictions of various models, i.e., Random Forest, Logistic Regression, and a Neural Network model, using StackingCVClassifier. I am trying to stack these classifiers so that they operate on different feature subsets. The code is as follows.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras import layers
from keras.constraints import maxnorm
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.pipeline import make_pipeline
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.neural_network import MLPClassifier
X, y = make_classification()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
# defining neural network model
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.add(Dense(units=1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
# using KerasClassifier on the neural network model
NN_clf=KerasClassifier(build_fn=create_model, epochs=5, batch_size= 5)
NN_clf._estimator_type = "classifier"
# stacking of classifiers that operate on different feature subsets
pipeline1 = make_pipeline(ColumnSelector(cols=(np.arange(0, 5, 1))), LogisticRegression())
pipeline2 = make_pipeline(ColumnSelector(cols=(np.arange(5, 10, 1))), RandomForestClassifier())
pipeline3 = make_pipeline(ColumnSelector(cols=(np.arange(10, 20, 1))), NN_clf)
# final stacking
clf = StackingCVClassifier(classifiers=[pipeline1, pipeline2, pipeline3], meta_classifier=MLPClassifier())
clf.fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_val, y_val))
However, I am getting this error:
ValueError Traceback (most recent call last)
<ipython-input-11-ef342536824f> in <module>
42 # final stacking
43 clf = StackingCVClassifier(classifiers=[pipeline1, pipeline2, pipeline3], meta_classifier=MLPClassifier())
---> 44 clf.fit(X_train, y_train)
45
46 print("Stacking model score: %.3f" % clf.score(X_val, y_val))
~\anaconda3\lib\site-packages\mlxtend\classifier\stacking_cv_classification.py in fit(self, X, y, groups, sample_weight)
282 meta_features = prediction
283 else:
--> 284 meta_features = np.column_stack((meta_features, prediction))
285
286 if self.store_train_meta_features:
~\anaconda3\lib\site-packages\numpy\core\overrides.py in column_stack(*args, **kwargs)
~\anaconda3\lib\site-packages\numpy\lib\shape_base.py in column_stack(tup)
654 arr = array(arr, copy=False, subok=True, ndmin=2).T
655 arrays.append(arr)
--> 656 return _nx.concatenate(arrays, 1)
657
658
~\anaconda3\lib\site-packages\numpy\core\overrides.py in concatenate(*args, **kwargs)
ValueError: all the input arrays must have same number of dimensions, but the array at index 0 has 2 dimension(s) and the array at index 1 has 3 dimension(s)
Please help me. Thanks!
The error is happening because you are combining predictions from traditional ML models and a DL model.
The ML models produce predictions with a shape like (80, 1), whereas the DL model predicts with a shape like (80, 1, 1), so there is a mismatch when all the predictions are stacked together.
A common workaround is to strip the extra dimension from the DL predictions so that they become (80, 1) instead of (80, 1, 1).
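For illustration, the shape fix in isolation (a standalone sketch using the example shapes above):
import numpy as np

dl_pred = np.zeros((80, 1, 1))   # shape produced by the Keras wrapper
ml_pred = np.zeros((80, 1))      # shape produced by the sklearn models

dl_pred = dl_pred.squeeze(axis=1) if len(dl_pred.shape) > 2 else dl_pred
print(dl_pred.shape)                               # (80, 1)
print(np.column_stack((ml_pred, dl_pred)).shape)   # (80, 2)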
So, open the .py file located at:
anaconda3\lib\site-packages\mlxtend\classifier\stacking_cv_classification.py
Around lines 280 and 356, outside of the if block, add this line:
prediction = prediction.squeeze(axis=1) if len(prediction.shape)>2 else prediction
So, it will look something like this:
...
...
...
if not self.use_probas:
    prediction = prediction[:, np.newaxis]
elif self.drop_proba_col == "last":
    prediction = prediction[:, :-1]
elif self.drop_proba_col == "first":
    prediction = prediction[:, 1:]
prediction = prediction.squeeze(axis=1) if len(prediction.shape) > 2 else prediction
if meta_features is None:
    meta_features = prediction
else:
    meta_features = np.column_stack((meta_features, prediction))
...
...
...
for model in self.clfs_:
    if not self.use_probas:
        prediction = model.predict(X)[:, np.newaxis]
    else:
        if self.drop_proba_col == "last":
            prediction = model.predict_proba(X)[:, :-1]
        elif self.drop_proba_col == "first":
            prediction = model.predict_proba(X)[:, 1:]
        else:
            prediction = model.predict_proba(X)
    prediction = prediction.squeeze(axis=1) if len(prediction.shape) > 2 else prediction
    per_model_preds.append(prediction)
...
...
...
Prakash's answer raises really good points.
If you want to get this running without too many changes, you can roll your own version of a scikit-learn BaseEstimator/ClassifierMixin object, or wrap the model in the recommended KerasClassifier object.
For example, you can roll your own estimator like this:
class MyKerasModel(BaseEstimator, ClassifierMixin):
    def fit(self, X, y):
        model = keras.Sequential()
        model.add(layers.Input(shape=X.shape[1]))
        model.add(layers.Dense(10, input_dim=10, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Flatten())
        model.add(layers.Dense(units=1, activation='sigmoid'))
        optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
        model.fit(X, y)
        self.model = model
        return self

    def predict(self, X):
        return (self.model.predict(X) > 0.5).flatten()
And putting all the pieces together allows you to stack the predictions:
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from mlxtend.classifier import StackingCVClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
X, y = make_classification()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
class MyKerasModel(BaseEstimator, ClassifierMixin):
    def fit(self, X, y):
        model = keras.Sequential()
        model.add(layers.Input(shape=X.shape[1]))
        model.add(layers.Dense(10, input_dim=10, activation='relu'))
        model.add(layers.Dropout(0.2))
        model.add(layers.Flatten())
        model.add(layers.Dense(units=1, activation='sigmoid'))
        optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
        model.fit(X, y)
        self.model = model
        return self

    def predict(self, X):
        return (self.model.predict(X) > 0.5).flatten()

clf = StackingCVClassifier(
    classifiers=[RandomForestClassifier(), LogisticRegression(), MyKerasModel()],
    meta_classifier=MLPClassifier(),
).fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_val, y_val))
Output:
2/2 [==============================] - 0s 11ms/step - loss: 0.8580 - auc: 0.5050 - accuracy: 0.5500
2/2 [==============================] - 0s 1ms/step
2/2 [==============================] - 0s 4ms/step - loss: 0.6955 - auc_1: 0.5777 - accuracy: 0.5750
2/2 [==============================] - 0s 1ms/step
3/3 [==============================] - 0s 3ms/step - loss: 0.7655 - auc_2: 0.6037 - accuracy: 0.6125
Stacking model score: 1.000

LSTM multiclass classifier : why my predictions have nothing to do with my target set?

I am trying to design an LSTM model for forecasting price movement.
I have issues with the results I obtain for my predictions. I did not normalize my target set y (neither train nor test), only X, because it's a classification (-1, 0, 1), but the predictions I obtain are floats.
Maybe I did not normalize the right sets. My code is below:
Many thanks for your help, and feel free to comment on my other lines of code too; I am a beginner.
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from datetime import datetime as dt
from pandas_datareader import data as pdr
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
startdate=dt(2018,3,31)
enddate=dt(2022,3,31)
tickers = ['ETH-USD']
Data=pdr.get_data_yahoo(tickers,start=startdate, end=enddate)['Adj Close']
df_change = Data.apply(lambda x: np.log(x) - np.log(x.shift(1)))
df_change.drop(index=df_change.index[0], axis=0, inplace=True)
df_change = df_change*100
pd.options.mode.chained_assignment = None #to not display the error of copy dataframe
df_y = df_change.copy()
df_y.columns = ['ETH-y']
def Target(df, column, df2, column2):
    for i in range(len(df)):
        if df[column].iloc[i] > 0:
            df2[column2][i] = 1   # value is up compared to the previous day
        elif -0.5 < df[column].iloc[i] < 0.5:
            df2[column2][i] = 0   # value is steady
        else:
            df2[column2][i] = -1  # value is down
Target(df_change,'ETH-USD',df_y,'ETH-y')
print(df_y['ETH-y'].value_counts())
Data.drop(index=Data.index[0], axis=0, inplace=True) #drop first row to have same values
X = Data
y = df_y
## split my train val and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify = y)
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler().fit(X_train)
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
#reshaping for 3D array
X_train = np.reshape(X_train,(1169,1,1))
X_test = np.reshape(X_test,(293,1,1))
from keras.models import Sequential
from keras.layers import Dense, LSTM
model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
model.add(LSTM(32, activation='relu', return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(y_train.shape[1]))
model.compile(optimizer='adam', loss='mse')
model.summary()
history = model.fit(X_train, y_train, epochs=10, batch_size=16, validation_split=0.1, verbose=1)
pred = model.predict(X_test)
pred = sc.inverse_transform(pred)
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
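For context, the float predictions come from the Dense(y_train.shape[1]) output (a single linear unit) trained with mse, which is a regression setup. A classification head would return the three classes instead; a minimal sketch reusing the variables above, with the label mapping -1/0/1 -> 0/1/2 as an assumption:
from tensorflow.keras.utils import to_categorical

# map the labels (-1, 0, 1) to class indices (0, 1, 2) and one-hot encode them
y_train_cls = to_categorical(np.ravel(y_train) + 1, num_classes=3)
y_test_cls = to_categorical(np.ravel(y_test) + 1, num_classes=3)

clf = Sequential()
clf.add(LSTM(64, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
clf.add(LSTM(32, activation='relu', return_sequences=False))
clf.add(Dropout(0.2))
clf.add(Dense(3, activation='softmax'))   # one output per class
clf.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
clf.fit(X_train, y_train_cls, epochs=10, batch_size=16, validation_split=0.1, verbose=1)

# predicted class indices mapped back to -1 / 0 / 1; no inverse_transform is needed
pred_classes = np.argmax(clf.predict(X_test), axis=1) - 1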

How to draw ROC and PR curves for a classification model trained with KFold CV

How should I draw the ROC and PR curves for this NN model which I am training with 10 fold cross-validation?
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import KFold
from numpy import loadtxt
import numpy as np
import pandas as pd
from google.colab import files
uploaded = files.upload()
dataset = loadtxt('mod_dfn.csv', delimiter=',')
X = dataset[:,0:25]
y = dataset[:,25]
kfold = KFold(n_splits=10, shuffle=True)
fold_no = 1
for train, test in kfold.split(X, y):
    model = Sequential()
    model.add(Dense(12, input_dim=25, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')
    history = model.fit(X[train], y[train], batch_size=10, epochs=150, verbose=0)
    scores = model.evaluate(X[test], y[test], verbose=0)
    print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
    acc_per_fold.append(scores[1] * 100)
    loss_per_fold.append(scores[0])
    fold_no = fold_no + 1
You can use RocCurveDisplay and PrecisionRecallDisplay for this purpose.
Try adding these lines just before the last line of your code (fold_no = fold_no + 1), and see if it works for you.
from sklearn import metrics

pred = model.predict(X[test])
# ROC curve
fpr, tpr, thresholds = metrics.roc_curve(y[test], pred)
roc_auc = metrics.auc(fpr, tpr)
roc_display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
roc_display.plot()
roc_display.figure_.savefig(f'./out/ROC_curve_for_fold#{fold_no}.jpeg')
# PR Curve
precision, recall, _ = metrics.precision_recall_curve(y[test], pred)
pr_display = metrics.PrecisionRecallDisplay(precision=precision, recall=recall)
pr_display.plot()
pr_display.figure_.savefig(f'./out/PR_curve_for_fold#{fold_no}.jpeg')
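If you are on scikit-learn 1.0 or newer, the from_predictions helpers build the same plots in a single call each (a sketch under that version assumption):
from sklearn.metrics import RocCurveDisplay, PrecisionRecallDisplay

roc_display = RocCurveDisplay.from_predictions(y[test], pred.ravel())   # ravel to a 1-D score vector
roc_display.figure_.savefig(f'./out/ROC_curve_for_fold#{fold_no}.jpeg')
pr_display = PrecisionRecallDisplay.from_predictions(y[test], pred.ravel())
pr_display.figure_.savefig(f'./out/PR_curve_for_fold#{fold_no}.jpeg')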

label_binarize Does not fit for sklearn Naive Bayes classifier showing bad input shape

I was trying to create a ROC curve for a multiclass problem using Naive Bayes, but it ends with
ValueError: bad input shape.
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.naive_bayes import BernoulliNB
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = BernoulliNB(alpha=1.0, binarize=6, class_prior=None, fit_prior=True)
y_score = classifier.fit(X_train, y_train).predict(X_test)
raise ValueError("bad input shape {0}".format(shape))
ValueError: bad input shape (75, 6)
The error comes from binarizing the y variable; the estimator can work with the original class labels directly.
Remove the following lines,
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
You are good to go!
To get the predicted probabilities for roc_curve, use the following:
classifier.fit(X_train, y_train)
y_score = classifier.predict_proba(X_test)
y_score.shape
# (75, 3)
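From there, the usual one-vs-rest pattern gives a per-class ROC curve; a minimal sketch that binarizes only the ground-truth labels used for scoring (reusing y_test and y_score from above):
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

# binarize only the test labels, and only for metric computation
y_test_bin = label_binarize(y_test, classes=[0, 1, 2])

plt.figure()
for i in range(y_test_bin.shape[1]):
    fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_score[:, i])
    plt.plot(fpr, tpr, label=f'class {i} (AUC = {auc(fpr, tpr):.2f})')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()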

Keras - How to perform a prediction using KerasRegressor?

I am new to machine learning, and I am trying to use Keras to perform regression tasks. I have implemented this code, based on this example.
X = df[['full_sq','floor','build_year','num_room','sub_area_2','sub_area_3','state_2.0','state_3.0','state_4.0']]
y = df['price_doc']
X = np.asarray(X)
y = np.asarray(y)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=.2)
def baseline_model():
    model = Sequential()
    model.add(Dense(13, input_dim=9, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X_train, Y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
prediction = estimator.predict(X_test)
accuracy_score(Y_test, prediction)
When I run the code I get this error:
AttributeError: 'KerasRegressor' object has no attribute 'model'
How could I correctly 'insert' the model in KerasRegressor?
You have to fit the estimator again after cross_val_score in order to predict on new data, because cross_val_score does not leave the estimator fitted:
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X_train, Y_train, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
estimator.fit(X, y)
prediction = estimator.predict(X_test)
accuracy_score(Y_test, prediction)
Working Test version:
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, KFold
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
seed = 1
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
def baseline_model():
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
kfold = KFold(n_splits=10, random_state=seed)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
estimator.fit(X, y)
prediction = estimator.predict(X)
accuracy_score(y, prediction)  # note: accuracy_score expects class labels; for a regression target, a metric such as mean_squared_error is more appropriate
To evaluate your system's performance, you can calculate the error as follows.
You also do not need to call KFold and cross_val_score.
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, KFold
from keras.models import Sequential
from sklearn.metrics import accuracy_score
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
seed = 1
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
def baseline_model():
    model = Sequential()
    model.add(Dense(10, input_dim=10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=100, verbose=False)
estimator.fit(X, y)
prediction = estimator.predict(X)
train_error = np.abs(y - prediction)
mean_error = np.mean(train_error)
min_error = np.min(train_error)
max_error = np.max(train_error)
std_error = np.std(train_error)
Instead of KerasRegressor, you can use the model directly.
These two snippets of code give the exact same results:
estimator = KerasRegressor(build_fn=baseline_model)
estimator.fit(X, y, nb_epoch=100, batch_size=100, verbose=False, shuffle=False)
prediction = estimator.predict(X)
model = baseline_model()
model.fit(X, y, nb_epoch=100, batch_size=100, verbose=False, shuffle=False)
prediction = model.predict(X)
Please note that the shuffle argument of the fit() function needs to be False for both KerasRegressor and the model. Moreover, to have a fixed initial state and obtain reproducible results, you need to add these lines of code at the beginning of your script:
import numpy as np
import tensorflow as tf
from keras import backend as K

session = K.get_session()
init_op = tf.group(tf.tables_initializer(), tf.global_variables_initializer(), tf.local_variables_initializer())
session.run(init_op)
np.random.seed(1)
tf.set_random_seed(1)
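Note that the snippet above uses TF1-style APIs; on TensorFlow 2.x the equivalent seeding is roughly the following (a sketch):
import numpy as np
import tensorflow as tf

np.random.seed(1)
tf.random.set_seed(1)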
You should train the model on X_train and Y_train.
You cannot train the model on all of X and y unless you have extra data held out for testing.
Training should happen on the training split, and testing/prediction should happen on X_test.
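In other words, something along these lines (a sketch reusing the names from the question):
estimator.fit(X_train, Y_train)          # fit only on the training split
prediction = estimator.predict(X_test)   # predict on the held-out test split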
