I would love some assistance plotting a confusion matrix from my model. My code is below:
import os
import glob
from sklearn.model_selection import train_test_split
import shutil
from tensorflow.keras import callbacks
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from my_utils import create_generators
from CNN_models import amazon_model
import tensorflow as tf
import matplotlib.pyplot as plt
if __name__ == "__main__":
    path_to_train = "data\\train"
    path_to_val = "data\\val"
    path_to_test = "data\\test"
    batch_size = 128
    epochs = 5
    lr = 0.0001

    train_generator, val_generator, test_generator = create_generators(batch_size, path_to_train, path_to_val, path_to_test)
    nbr_classes = train_generator.num_classes

    TRAIN = True
    TEST = False

    if TRAIN:
        path_to_save_model = './Models'
        ckpt_saver = ModelCheckpoint(
            path_to_save_model,
            monitor="val_accuracy",
            mode='max',
            save_best_only=True,
            save_freq='epoch',
            verbose=1
        )
        early_stop = EarlyStopping(monitor="val_accuracy", patience=5)
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
        csv_logger = tf.keras.callbacks.CSVLogger('first_model_training.log', separator=",", append=False)

        model = amazon_model(nbr_classes)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr, amsgrad=True)
        model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                      metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])

        history = model.fit(train_generator,
                            epochs=epochs,
                            batch_size=batch_size,
                            validation_data=val_generator,
                            callbacks=[ckpt_saver, early_stop, tensorboard_callback, csv_logger]
                            )

        acc = history.history['accuracy']
        print(acc)

        model.save("first_model.h5")

        from matplotlib.pyplot import figure
        figure(figsize=(8, 6))
        plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig('./plots/accuracy', dpi=200)
        plt.show()

        figure(figsize=(8, 6))
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')  # second series is validation loss, not test
        plt.savefig('./plots/loss', dpi=200)
        plt.show()

    if TEST:
        model = tf.keras.models.load_model('./Models')
        model.summary()

        print("Evaluating validation set: ")
        model.evaluate(val_generator)

        print("Evaluating test set: ")
        model.evaluate(test_generator)
Sorry if it is a bit of a newbie question, but I would love to know what I need to add to the above code to make it plot a confusion matrix for my model after it runs.
I'm able to plot the graphs of both accuracy and loss for a few epochs, but I want to include a confusion matrix before running for more epochs. Here are the plots already obtained:
[image: accuracy plot]
[image: loss plot]
Try this
The basic concept is to get prediction results from your model using X_test and then to compare these predictions to the real y_test results.
# 'Fake' and 'Real' are the class labels for your classification use case,
# where Fake == 0 and Real == 1. Keep this label ordering when building the matrix.
('Fake', 'Real') == (0, 1)
# Data handling
import pandas as pd
# Exploratory Data Analysis & Visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# Model improvement and Evaluation
from sklearn import metrics
from sklearn.metrics import confusion_matrix
# Plotting confusion matrix
matrix = pd.DataFrame(metrics.confusion_matrix(y_test, y_prediction),
                      index=('Fake', 'Real'),
                      columns=('Fake', 'Real'))
print(matrix)
# Visualising confusion matrix
plt.figure(figsize=(16, 14), facecolor='white')
heatmap = sns.heatmap(matrix, annot=True, annot_kws={'size': 20}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=18, weight='bold')
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=18, weight='bold')
plt.title('Confusion Matrix\n', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.show()
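Since your code feeds data through Keras generators rather than X/y arrays, here is a minimal sketch of how you might obtain y_test and y_prediction from test_generator before building the matrix. It assumes create_generators returns directory-based Keras iterators created with shuffle=False, so that test_generator.classes lines up with the prediction order:

import numpy as np
from sklearn.metrics import confusion_matrix

# Predicted class indices from the model's per-class probabilities.
probabilities = model.predict(test_generator)   # shape: (n_samples, nbr_classes)
y_prediction = np.argmax(probabilities, axis=1)

# True class indices, available on directory-based Keras generators.
y_test = test_generator.classes

print(confusion_matrix(y_test, y_prediction))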
Related
I am using the CheXpert dataset (found here on Kaggle) to build a CNN model that can predict disease conditions (e.g. cardiomegaly, pleural effusion, atelectasis, etc.) from chest x-ray images. I am using PyTorch Lightning and my code is attached to this question. I have tried several architectures and I don't seem to get the models to perform well. I performed the overfit test (in which I try to overfit a model on one batch of data) and the models were able to overfit the single batch, showing that they are capable of fitting the data. However, regardless of the architecture I use, there is quite a difference between the training loss (which can get as low as 0.2) and the validation loss (which can get as low as 0.49). On sensitivity and precision (the metrics I am interested in), the models perform terribly during validation. After leaving the models to train for more epochs, I also observed that the loss values start to increase. I will appreciate any help or suggestions to solve this problem. Thank you.
This is my code:
import torch
import torch.nn as nn
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.metrics import roc_auc_score
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
import numpy as np
import time
import pandas as pd
import gc
import random
from chexpertloader import ChestXrayDataset
# from ipykernel import kernelapp as app
from torch.utils.tensorboard import SummaryWriter
import torchmetrics
from torchmetrics import AUROC
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import confusion_matrix
seed = 123
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
backbones = {
    'efficientnetb0': models.efficientnet_b0(weights='IMAGENET1K_V1'),
    'efficientnetb1': models.efficientnet_b1(weights='IMAGENET1K_V1'),
    'efficientnetb2': models.efficientnet_b2(weights='IMAGENET1K_V1'),
    'efficientnetb3': models.efficientnet_b3(weights='IMAGENET1K_V1'),
    'efficientnetb4': models.efficientnet_b4(weights='IMAGENET1K_V1'),
    'efficientnetb5': models.efficientnet_b5(weights='IMAGENET1K_V1'),
    'efficientnetb6': models.efficientnet_b6(weights='IMAGENET1K_V1'),
    'efficientnetb7': models.efficientnet_b7(weights='IMAGENET1K_V1'),
    'densenet121': models.densenet121(weights='IMAGENET1K_V1'),
    'densenet161': models.densenet161(weights='IMAGENET1K_V1'),
    'densenet169': models.densenet169(weights='IMAGENET1K_V1'),
    'densenet201': models.densenet201(weights='IMAGENET1K_V1'),
    'resnet50': models.resnet50(weights='IMAGENET1K_V1'),
    'efficientnetV2_m': models.efficientnet_v2_m(weights='IMAGENET1K_V1')
}
class LitEfficientNet(pl.LightningModule):
    def __init__(self, arch, num_classes, lr):
        super(LitEfficientNet, self).__init__()
        self.arch = arch
        self.lr = lr
        self.sizes = {
            'efficientnetb0': (256, 224), 'efficientnetb1': (256, 240), 'efficientnetb2': (288, 288), 'efficientnetb3': (320, 300),
            'efficientnetb4': (384, 380), 'efficientnetb5': (489, 456), 'efficientnetb6': (561, 528), 'efficientnetb7': (633, 600),
            'densenet121': (256, 256), 'densenet161': (256, 256), 'densenet169': (256, 256), 'densenet201': (256, 256),
            'resnet50': (224, 224), 'efficientnetV2_m': (384, 384)
        }
        self.batch_sizes = {
            'efficientnetb0': 64, 'efficientnetb1': 64, 'efficientnetb2': 64, 'efficientnetb3': 32,
            'efficientnetb4': 20, 'efficientnetb5': 7, 'efficientnetb6': 5, 'efficientnetb7': 2,
            'densenet121': 64, 'densenet161': 32, 'densenet169': 32, 'densenet201': 32, 'resnet50': 32,
            'efficientnetV2_m': 16
        }
        self.model = backbones[arch]
        if 'densenet' in arch:
            self.model.classifier = nn.Sequential(
                nn.Linear(self.model.classifier.in_features, 2048),
                nn.ReLU(),
                nn.Dropout(p=0.6),
                nn.Linear(2048, 512),
                nn.ReLU(),
                nn.Dropout(p=0.2),
                nn.Linear(512, num_classes),
            )
        elif 'resnet' in arch:
            self.model.fc = nn.Linear(self.model.fc.in_features, num_classes)
        elif 'efficientnet' in arch:
            self.model.classifier = nn.Sequential(
                nn.Dropout(p=self.model.classifier[0].p, inplace=True),
                nn.Linear(self.model.classifier[1].in_features, num_classes),
            )

    def forward(self, x):
        y_pred = self.model(x)
        return y_pred

    def training_step(self, batch, batch_idx):
        images, labels = batch
        # Forward pass
        outputs = self.model(images)
        classes = {0: 'Cardiomegaly', 1: 'Edema', 2: 'Atelectasis',
                   3: 'Pleural Effusion', 4: 'Lung Opacity'}
        loss_fn = nn.BCEWithLogitsLoss()
        loss = loss_fn(outputs, labels)
        self.log('train_loss', loss, sync_dist=True)
        return loss

    def train_dataloader(self):
        train_csv = pd.read_csv('CheXpert-v1.0-small/train.csv')
        train_csv.fillna(0, inplace=True)
        train_dataset = ChestXrayDataset("CheXpert-v1.0-small/train", train_csv, self.sizes[self.arch], True)
        # Data loader (note: shuffle=False means the training set is seen in the same order every epoch)
        train_loader = torch.utils.data.DataLoader(
            dataset=train_dataset, batch_size=self.batch_sizes[self.arch], num_workers=8, shuffle=False
        )
        return train_loader

    def validation_step(self, batch, batch_idx):
        images, labels = batch
        outputs = self.model(images)
        classes = {0: 'Cardiomegaly', 1: 'Edema', 2: 'Atelectasis',
                   3: 'Pleural Effusion', 4: 'Lung Opacity'}
        loss_fn = nn.BCEWithLogitsLoss()
        loss = loss_fn(outputs, labels)
        self.log('val_loss', loss, sync_dist=True)
        return loss

    def val_dataloader(self):
        validation_csv = pd.read_csv('CheXpert-v1.0-small/valid.csv')
        validation_csv.fillna(0, inplace=True)
        validation_csv = validation_csv.sample(frac=1)
        validation_dataset = ChestXrayDataset("CheXpert-v1.0-small/valid", validation_csv, self.sizes[self.arch], True)
        # Data loader
        validation_loader = torch.utils.data.DataLoader(
            dataset=validation_dataset, batch_size=self.batch_sizes[self.arch], num_workers=8, shuffle=False
        )
        return validation_loader

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        return optimizer
if __name__ == '__main__':
    archs = ['efficientnetV2_m']
    learning_rates = [0.001]
    num_classes = 5

    for i in range(len(learning_rates)):
        arch = archs[0]
        learning_rate = learning_rates[i]
        logger = TensorBoardLogger(f"tb_logs_binary/{arch}", name=f"{arch}_{learning_rate}_ppv_npv_sensitive")
        model = LitEfficientNet(arch, num_classes, learning_rate)
        trainer = Trainer(
            log_every_n_steps=1411,
            logger=logger,
            accelerator='gpu',
            devices=-1,
            # devices=1,
            # overfit_batches=10,
            max_epochs=50,
            val_check_interval=0.1,
            deterministic=True,
            fast_dev_run=False)
        trainer.fit(model)

        del model, trainer
        gc.collect()
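Two things stand out in the code rather than in the architectures. First, train_dataloader passes shuffle=False, so the model sees the CheXpert rows in the same order every epoch; shuffling the training set is usually worth trying. Second, ReduceLROnPlateau is imported but never used. As a hedged sketch (an assumption about what was intended, not a guaranteed fix for the train/validation gap), configure_optimizers could wire it in using Lightning's scheduler dictionary:

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        # Decay the learning rate when the monitored metric stops improving.
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3)
        return {
            'optimizer': optimizer,
            'lr_scheduler': {
                'scheduler': scheduler,
                'monitor': 'val_loss',  # must match the key logged in validation_step
            },
        }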
How to go about plotting the decision boundaries for a Random Forest analysis with 10 classes?
I get the error:
ValueError: X has 2 features, but RandomForestClassifier is expecting
240 features as input.
Can you help me get the decision boundaries for the 10 classes if possible? Thanks for your time!
Here is my code:
from sklearn.datasets import make_classification
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
f, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))

# Generate noisy data
num_trainsamples = 500
num_testsamples = 50
X_train, y_train = make_classification(n_samples=num_trainsamples,
                                       n_features=240,
                                       n_informative=9,
                                       n_redundant=0,
                                       n_repeated=0,
                                       n_classes=10,
                                       n_clusters_per_class=1,
                                       class_sep=9,
                                       flip_y=0.2,
                                       # weights=[0.5,0.5],
                                       random_state=17)

X_test, y_test = make_classification(n_samples=num_testsamples,
                                     n_features=240,  # must match the training data, not the sample count
                                     n_informative=9,
                                     n_redundant=0,
                                     n_repeated=0,
                                     n_classes=10,
                                     n_clusters_per_class=1,
                                     class_sep=10,
                                     flip_y=0.2,
                                     # weights=[0.5,0.5],
                                     random_state=17)

model = RandomForestClassifier()
parameter_space = {
    'n_estimators': [10, 50, 100],
    'criterion': ['gini', 'entropy'],
    'max_depth': np.linspace(10, 50, 11).astype(int),  # max_depth must be an integer
}
clf = GridSearchCV(model, parameter_space, cv=5, scoring="accuracy", verbose=True)  # model
my_model = clf.fit(X_train, y_train)

# define bounds of the domain
min1, max1 = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
min2, max2 = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
# define the x and y scale
x1grid = np.arange(min1, max1, 0.1)
x2grid = np.arange(min2, max2, 0.1)
# create all of the lines and rows of the grid
xx, yy = np.meshgrid(x1grid, x2grid)
# flatten each grid to a vector
r1, r2 = xx.flatten(), yy.flatten()
r1, r2 = r1.reshape((len(r1), 1)), r2.reshape((len(r2), 1))
# horizontal stack vectors to create x1,x2 input for the model
grid = np.hstack((r1, r2))
yhat = clf.predict(grid)  # this line raises the ValueError: grid has 2 features, the model expects 240
# reshape the predictions back into a grid
zz = yhat.reshape(xx.shape)
# plot the grid of x, y and z values as a surface
plt.contourf(xx, yy, zz, cmap='Paired')
# create scatter plot for samples from each class
for class_value in range(10):  # was range(2); there are 10 classes
    # get row indexes for samples with this class
    row_ix = np.where(y_train == class_value)  # y was undefined; use y_train
    # create scatter of these samples
    plt.scatter(X_train[row_ix, 0], X_train[row_ix, 1], cmap='Paired')
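The ValueError comes from the mismatch flagged above: the forest was fitted on 240 features, but the meshgrid only supplies the first two. A common workaround, sketched below under the assumption that an approximate picture is acceptable, is to project the data to 2D (e.g. with PCA) and fit a separate classifier on that projection purely for visualisation. The boundaries drawn then belong to the 2D surrogate model, not to the original 240-feature forest:

from sklearn.decomposition import PCA

# Project the 240-D data down to 2 components for plotting purposes only.
pca = PCA(n_components=2)
X_train_2d = pca.fit_transform(X_train)

# Fit a separate forest on the 2-D projection; its boundaries are drawable.
viz_model = RandomForestClassifier(n_estimators=100, random_state=17)
viz_model.fit(X_train_2d, y_train)

min1, max1 = X_train_2d[:, 0].min() - 1, X_train_2d[:, 0].max() + 1
min2, max2 = X_train_2d[:, 1].min() - 1, X_train_2d[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(min1, max1, 0.1), np.arange(min2, max2, 0.1))
grid = np.c_[xx.ravel(), yy.ravel()]  # 2 features, matching the surrogate model

zz = viz_model.predict(grid).reshape(xx.shape)
plt.contourf(xx, yy, zz, cmap='Paired', alpha=0.5)
for class_value in range(10):
    row_ix = np.where(y_train == class_value)
    plt.scatter(X_train_2d[row_ix, 0], X_train_2d[row_ix, 1], label=str(class_value))
plt.legend()
plt.show()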
I am trying to design an LSTM model for forecasting price movement.
I have issues with the results I obtain for my predictions. I did not normalize my target set y (neither train nor test), only X, because it's a classification problem (-1, 0, 1), but the predictions I obtain are floats.
Maybe I did not normalize the right sets. My code is below:
Many thanks for your help, and feel free to comment on my other lines of code too; I am a beginner.
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from datetime import datetime as dt
from pandas_datareader import data as pdr
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
startdate=dt(2018,3,31)
enddate=dt(2022,3,31)
tickers = ['ETH-USD']
Data=pdr.get_data_yahoo(tickers,start=startdate, end=enddate)['Adj Close']
df_change = Data.apply(lambda x: np.log(x) - np.log(x.shift(1)))
df_change.drop(index=df_change.index[0], axis=0, inplace=True)
df_change = df_change*100
pd.options.mode.chained_assignment = None  # suppress pandas' chained-assignment warning

df_y = df_change.copy()
df_y.columns = ['ETH-y']

def Target(df, column, df2, column2):
    for i in range(len(df)):
        # Check the steady band first: in the original order, any positive
        # value (even one inside the band, e.g. 0.3) was labelled 1.
        if -0.5 < df[column].iloc[i] < 0.5:
            df2[column2][i] = 0   # value is steady
        elif df[column].iloc[i] > 0:
            df2[column2][i] = 1   # value is up compared to the previous day
        else:
            df2[column2][i] = -1  # value is down

Target(df_change, 'ETH-USD', df_y, 'ETH-y')
print(df_y['ETH-y'].value_counts())

Data.drop(index=Data.index[0], axis=0, inplace=True)  # drop the first row so X and y have the same length

X = Data
y = df_y
## split my train val and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify = y)
sc = MinMaxScaler().fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)  # transform only: the scaler must be fitted on the training set alone

# reshape to the 3D array (samples, timesteps, features) that LSTM expects
X_train = np.reshape(X_train, (X_train.shape[0], 1, 1))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 1))

model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
model.add(LSTM(32, activation='relu', return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(y_train.shape[1]))
model.compile(optimizer='adam', loss='mse')
model.summary()

history = model.fit(X_train, y_train, epochs=10, batch_size=16, validation_split=0.1, verbose=1)
pred = model.predict(X_test)
# Note: y was never scaled, so sc.inverse_transform(pred) would apply the X scaler
# to the predictions, which is not meaningful; that line has been removed.
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
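The floats are expected: with a single linear output unit and an mse loss, the network is doing regression, not classification. A hedged sketch of a classification setup for the three classes (-1, 0, 1), mapping them to one-hot vectors and using a softmax output with categorical cross-entropy (layer sizes are illustrative, not tuned):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.utils import to_categorical

# Map the labels {-1, 0, 1} to class indices {0, 1, 2}, then one-hot encode.
y_train_cat = to_categorical(np.asarray(y_train).ravel() + 1, num_classes=3)
y_test_cat = to_categorical(np.asarray(y_test).ravel() + 1, num_classes=3)

model = Sequential()
model.add(LSTM(64, input_shape=(X_train.shape[1], X_train.shape[2]), return_sequences=True))
model.add(LSTM(32))
model.add(Dropout(0.2))
model.add(Dense(3, activation='softmax'))  # one probability per class
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(X_train, y_train_cat, epochs=10, batch_size=16, validation_split=0.1)

# Predicted class indices, mapped back to {-1, 0, 1}.
pred_classes = np.argmax(model.predict(X_test), axis=1) - 1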
I'm trying to capture long-term dependencies with an LSTM by creating a unit pulse signal every 62 points.
The idea is to go back 62 time steps and copy the value for the next time step, so as to predict the pulse, but the LSTM is not doing this...
import sys
import os
import numpy as np
import math
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Flatten, Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
import tensorflow as tf
from tensorflow.python.client import device_lib
K.clear_session()  # clear any previous sessions (so layer names restart from 0)
print(K.tensorflow_backend._get_available_gpus())
print(device_lib.list_local_devices())
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 4} )
sess = tf.Session(config=config)
K.set_session(sess)
# hyper-parameters
params = {
    "batch_size": 20,
    "epochs": 1000,
    "time_steps": 70,
}
OUTPUT_PATH = "/home/..."
TIME_STEPS = params["time_steps"]
BATCH_SIZE = params["batch_size"]
def generate_impulse(dim):
    arr = np.zeros(dim)
    frequency = 62
    for i in range(0, len(arr)):
        if i % frequency == 0:
            arr[i] = 1
    return arr
y = generate_impulse(1300)
plt.figure(figsize=(20,5))
plt.plot(y)
plt.title('unit impulse')
plt.ylabel('y')
plt.xlabel('x')
plt.show()
[image: the generated unit-impulse dataset]
def create_timeseries(arr):
    # Build a univariate time series for single-step prediction:
    # take TIME_STEPS values as input and predict the value at TIME_STEPS+i.
    dim_0 = len(arr) - TIME_STEPS
    x = np.zeros((dim_0, TIME_STEPS))
    y = np.zeros((dim_0,))
    for i in range(dim_0):
        x[i] = arr[i:TIME_STEPS+i]  # index TIME_STEPS+i excluded
        y[i] = arr[TIME_STEPS+i]
        # print(x[i], y[i])
    print("length of time-series i/o", x.shape, y.shape)
    return x, y
x_ts, y_ts = create_timeseries(y)
len_train = int(len(x_ts)*80/100)
len_val = int(len(x_ts)*10/100)

# TRAINING SET: 80%
x_train = x_ts[0:len_train]
y_train = y_ts[0:len_train]

# VALIDATION SET: 10%
x_val = x_ts[len_train:len_train+len_val]
y_val = y_ts[len_train:len_train+len_val]

# TEST SET: 10%
x_test = x_ts[len_train+len_val:]
y_test = y_ts[len_train+len_val:]

x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_val = x_val.reshape((x_val.shape[0], x_val.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
def create_model():
    model = Sequential()
    model.add(LSTM(1, input_shape=(TIME_STEPS, 1)))
    model.compile(optimizer='adam', loss='mse')
    return model
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
patience=50, min_delta=0.0001)
model = create_model()
history = model.fit(x_train, y_train, epochs=params["epochs"], verbose=2, batch_size=BATCH_SIZE, shuffle=False,
validation_data=(x_val, y_val), callbacks=[es])
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('MSE LOSS')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
[image: MSE loss plot]
y_pred = model.predict(x_test, batch_size=BATCH_SIZE)
y_pred = y_pred.flatten()
error = mean_squared_error(y_test, y_pred)
plt.figure(figsize=(20,5))
plt.plot(y_pred)
plt.plot(y_test)
plt.title('PREDICTION ON TEST SET')
plt.ylabel('sin(x)')
plt.xlabel('x')
plt.legend(['Prediction', 'Real'], loc='upper left')
plt.show()
[image: prediction on the test set]
The training set gives me the same results (it is the same signal...). I tried other LSTM models with more neurons, but it doesn't work anyway.
You might consider training for more epochs. I created a simplified model and training set based on what I believe is the core of your idea:
from keras.models import Sequential
from keras.layers import LSTM
import numpy as np
TIME_STEPS=10
x_train = np.array([ [ [1],[0],[0],[0],[0],[0],[0],[0],[0],[0] ],
[ [0],[1],[0],[0],[0],[0],[0],[0],[0],[0] ],
[ [0],[0],[1],[0],[0],[0],[0],[0],[0],[0] ],
[ [0],[0],[0],[1],[0],[0],[0],[0],[0],[0] ],
[ [0],[0],[0],[0],[1],[0],[0],[0],[0],[0] ],
[ [0],[0],[0],[0],[0],[1],[0],[0],[0],[0] ],
[ [0],[0],[0],[0],[0],[0],[1],[0],[0],[0] ],
[ [0],[0],[0],[0],[0],[0],[0],[1],[0],[0] ],
[ [0],[0],[0],[0],[0],[0],[0],[0],[1],[0] ],
[ [0],[0],[0],[0],[0],[0],[0],[0],[0],[1] ]])
y_train = np.array([[1],[0],[0],[0],[0],[0],[0],[0],[0],[0]])
print(x_train.shape)
print(y_train.shape)
model = Sequential()
model.add(LSTM(1, input_shape=(TIME_STEPS,1)))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
model.fit(x_train, y_train, epochs=10000, verbose=0)
After training, I get the following predictions:
model.predict(x_train)
array([[ 0.9870746 ],
[ 0.00665453],
[-0.00303702],
[ 0.00697759],
[-0.02432432],
[-0.00701594],
[ 0.01387464],
[ 0.02281112],
[ 0.00439195],
[-0.04109564]], dtype=float32)
I'm not sure if it solves your problem completely, but it might give you a direction to investigate. I hope this helps.
I have plotted a few ROC curves to calculate the AUC, but my ROC curves don't actually plot like curves. I have attached the images for better understanding. If anyone can tell me what is wrong there, I will be obliged. This is one kind of plot I am getting:
This is another type:
However, I am not getting a curve like this one.
This is the link to my dataset:
https://drive.google.com/open?id=1luj8d863_IOA36cQTo772GEWgUsrXlbJ
I would be thankful if anyone can help me understand the problem, if there is one, or, if my curves are correct, explain why they are not actually curve-like in structure.
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from keras.layers import Dense, Input
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Model,Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split, cross_val_score, KFold
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from imblearn.over_sampling import SMOTE
seed = 7
np.random.seed(seed)
dataset = pd.read_csv('dataset/prostate.csv')
labels = dataset.values[:,-1]
features_set = dataset.iloc[:,0:12600]
oversampler = SMOTE(random_state=0)
oversampler_feature_set, oversampler_labels = oversampler.fit_sample(features_set,labels)
feature_df = pd.DataFrame(oversampler_feature_set)
labels_df = pd.DataFrame(oversampler_labels)
scalar = MinMaxScaler()
scaled_data = scalar.fit_transform(feature_df)
pca = PCA(n_components=30)
pca_data = pd.DataFrame(pca.fit_transform(scaled_data))
recreated_df = pd.concat([pca_data,labels_df], axis=1)
train, test = train_test_split(recreated_df,test_size=0.2)
X_train = train.values[:,0:30]
Y_train = train.values[:,-1]
X_test = test.values[:,0:30]
y_test = test.values[:,-1]
def my_model():
    model = Sequential()
    model.add(Dense(20, input_dim=30, activation='sigmoid'))
    model.add(Dense(10, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
estimator = KerasClassifier(build_fn=my_model, epochs=1000, batch_size=10, shuffle=True,verbose=1)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator,X_train,Y_train, cv=kfold)
results.mean()
estimator.fit(X_train,Y_train)
y_pred = estimator.predict(X_test).ravel()
# Note: roc_curve returns (fpr, tpr, thresholds), not (sensitivity, specificity);
# the x-axis of a ROC curve is the false positive rate (1 - specificity).
fpr, tpr, thresholds_keras = roc_curve(y_test, y_pred, pos_label=2)
auc_keras = auc(fpr, tpr)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr, label='Keras (area = {:.3f})'.format(auc_keras))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate (sensitivity)')
plt.title('Prostate')
plt.legend(loc='best')
plt.show()
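A likely reason the plot collapses into straight segments: estimator.predict returns hard class labels, so roc_curve sees only a single effective threshold and draws one corner point. A sketch using probability scores instead, via the scikit-learn KerasClassifier wrapper's predict_proba (keep pos_label=2 only if your labels really are coded 1/2):

# Probability of the positive class gives roc_curve many thresholds to sweep,
# which is what produces an actual curve.
y_score = estimator.predict_proba(X_test)[:, 1]

fpr, tpr, thresholds = roc_curve(y_test, y_score, pos_label=2)
roc_auc = auc(fpr, tpr)

plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr, label='Keras (area = {:.3f})'.format(roc_auc))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc='best')
plt.show()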