Setting up hyperparameters for LSTM time-series forecasting

This is our code. Can you tell us whether an LSTM is suitable here, and how we can check whether the prediction is accurate? The code reproduces the values in the CSV itself well, but we are unsure about the forecasting part: it does forecast the future, but unreliably.
This is our data. It has missing dates as well, and it ends at 1-Dec-2021.
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from keras.preprocessing.sequence import TimeseriesGenerator
filename = "china cotton import concatinated.csv"
df = pd.read_csv(filename)
print(df.info())
df['date'] = pd.to_datetime(df['date'])
# use the dates as the index but keep the 'date' column, which is used again below
df.set_index('date', drop=False, inplace=True)
df.drop(columns=['CottonChina importFC Index MUS Cents/Lb', 'CottonChina importFC Index LUS Cents/Lb', 'CottonChinadomestic3128BUSCents/Lb', 'CottonChina domestic2227BUS Cents/Lb','CottonChina domestic2129BUS Cents/Lb','CottonChina importUSD1 year = 100','CottonChina domesticUSD1 year = 100'], inplace=True)
close_data = df['CottonChina importFC Index SUS Cents/Lb'].values
close_data = close_data.reshape((-1, 1))
split_percent = 0.80
split = int(split_percent*len(close_data))
close_train = close_data[:split]
close_test = close_data[split:]
date_train = df['date'][:split]
date_test = df['date'][split:]
print(len(close_train))
print(len(close_test))
look_back = 15
train_generator = TimeseriesGenerator(close_train, close_train, length=look_back, batch_size=20)
test_generator = TimeseriesGenerator(close_test, close_test, length=look_back, batch_size=1)

from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
model.add(
    LSTM(10,
         activation='relu',
         # return_sequences must stay False (the default): Dense(1) should see
         # only the last hidden state, not one output per time step
         input_shape=(look_back, 1))
)
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

# the original code never trained the model before predicting
num_epochs = 25
model.fit(train_generator, epochs=num_epochs, verbose=1)

# fit/predict accept generators directly; fit_generator/predict_generator are deprecated aliases
prediction = model.predict(test_generator)

close_train = close_train.reshape((-1))
close_test = close_test.reshape((-1))
prediction = prediction.reshape((-1))
"""trace1 = go.Scatter(
x = date_train,
y = close_train,
mode = 'lines',
name = 'Data'
)
trace2 = go.Scatter(
x = date_test,
y = prediction,
mode = 'lines',
name = 'Prediction'
)
trace3 = go.Scatter(
x = date_test,
y = close_test,
mode='lines',
name = 'Ground Truth'
)
layout = go.Layout(
title = "Google Stock",
xaxis = {'title' : "Date"},
yaxis = {'title' : "Close"}
) """
"""fig = go.Figure(data=[trace1, trace2, trace3], layout=layout)
fig.show()"""
close_data = close_data.reshape((-1))

def predict(num_prediction, model):
    # seed the forecast with the last look_back observed values,
    # then feed each new prediction back in as the next input
    prediction_list = close_data[-look_back:]
    for _ in range(num_prediction):
        x = prediction_list[-look_back:]
        x = x.reshape((1, look_back, 1))
        out = model.predict(x)[0][0]
        prediction_list = np.append(prediction_list, out)
    prediction_list = prediction_list[look_back-1:]
    return prediction_list

def predict_dates(num_prediction):
    last_date = df['date'].values[-1]
    prediction_dates = pd.date_range(last_date, periods=num_prediction+1).tolist()
    return prediction_dates

num_prediction = 30
forecast = predict(num_prediction, model)
forecast_dates = predict_dates(num_prediction)
trace1 = go.Scatter(
    x=date_train,
    y=close_train,
    mode='lines',
    name='Data'
)
trace2 = go.Scatter(
    x=forecast_dates,
    y=forecast,
    mode='lines',
    name='Prediction'
)
trace3 = go.Scatter(
    x=date_test,
    y=close_test,
    mode='lines',
    name='Ground Truth'
)
layout = go.Layout(
    title="Future Prediction",
    xaxis={'title': "Date"},
    yaxis={'title': "Close"}
)
fig = go.Figure(data=[trace1, trace2, trace3], layout=layout)
fig.write_html('first_figure.html', auto_open=True)
This is the graph plotted after running the code. The forecast contains negative prices, and the values are small compared to the train and test data.
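Two checks are worth adding before tuning hyperparameters: train on scaled inputs (an unscaled price series plus a relu layer can easily produce out-of-range forecasts like the negative values above), and score the test window numerically. A minimal sketch of both, assuming the variables defined above (close_train, close_test, look_back, model); MinMaxScaler and the epoch count are illustrative choices, and ideally you would rebuild a fresh model before fitting on the scaled series:

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.preprocessing.sequence import TimeseriesGenerator

# scale with training statistics only, so no test information leaks in
scaler = MinMaxScaler()
close_train_s = scaler.fit_transform(close_train.reshape(-1, 1))
close_test_s = scaler.transform(close_test.reshape(-1, 1))

train_gen = TimeseriesGenerator(close_train_s, close_train_s, length=look_back, batch_size=20)
test_gen = TimeseriesGenerator(close_test_s, close_test_s, length=look_back, batch_size=1)

model.fit(train_gen, epochs=25, verbose=0)

# invert the scaling before scoring in the original units (US cents/lb)
pred = scaler.inverse_transform(model.predict(test_gen)).reshape(-1)
truth = close_test.reshape(-1)[look_back:]  # the generator emits targets starting at index look_back

rmse = np.sqrt(np.mean((pred - truth) ** 2))
mae = np.mean(np.abs(pred - truth))
print(f"test RMSE: {rmse:.2f}  test MAE: {mae:.2f}")

A model whose test RMSE is no better than a naive last-value forecast is not yet forecasting reliably, whatever the in-sample fit looks like.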

Related

How to deal with a big dataset when using PyG?

I am a beginner learning to use torch_geometric to build my GNN models. I followed the PyG node-classification example and built my own dataset; however, when I try to run the code on my GPU it tells me it has run out of memory. Maybe my dataset is too large to fit in GPU memory? I don't know. I share a machine with 8 A100s with my classmates. Could you please give me some suggestions? Thank you!
from torch_geometric.nn import GATConv,GCNConv
from torch_geometric.data import Dataset,DataLoader,HeteroData,Data
import torch.nn as nn
from torch_geometric.nn import DataParallel
from torch_geometric.loader import DataListLoader
import torch.nn.functional as F
import torch
import pandas as pd
from transformers import BertTokenizer,BertModel
import pickle
import time
from tqdm import tqdm
from numba import jit
import json
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
plt.grid(True)
plt.grid(color='gray', linestyle='--', linewidth=1, alpha=0.3)
begin = time.time()
punctuation = "!#$%&'\(\)-*+,-./:;<=>?#\\\[\]^_`{|}~():;,。【】·、“”‘’《》\"%……——·"

def dataCleanifier(s):
    # str.replace returns a new string, so the result must be reassigned
    for i in punctuation:
        s = s.replace(i, " ")
    s = s.replace(" ", " ")
    s = s.replace("\n", "")
    return s
class BertClassifier(nn.Module):
    def __init__(self, bertType: str, max_length, tag_size):
        super(BertClassifier, self).__init__()
        self.bertType = bertType
        self.tokenizer = BertTokenizer.from_pretrained(self.bertType)
        self.encoder = BertModel.from_pretrained(self.bertType)
        self.outputDim = self.encoder.pooler.dense.out_features
        self.max_length = max_length
        self.tag_size = tag_size
        self.dropout = nn.Dropout(0.1)
        self.activation = nn.LeakyReLU(0.1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, 512, (k, self.outputDim)) for k in (2, 3, 4)])
        self.fc_cnn = nn.Linear(512 * len((2, 3, 4)), self.tag_size)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        x = self.tokenizer.batch_encode_plus(x, return_tensors="pt", max_length=self.max_length, truncation=True, padding="max_length")
        attention = x["attention_mask"]
        x = x["input_ids"]
        x = x.cuda(2)
        x = self.encoder(x, attention_mask=attention.cuda(2))['last_hidden_state'][:]
        x = x.unsqueeze(1)
        encoded = torch.cat([self.conv_and_pool(x, conv) for conv in self.convs], 1)
        x = self.fc_cnn(encoded)
        x = self.activation(x)
        # x = F.softmax(x, dim=1)
        return x, encoded

class ContrastiveLoss(nn.Module):
    def __init__(self):
        super(ContrastiveLoss, self).__init__()

    def forward(self, representations, label, y_hat):
        n = label.shape[0]
        T = 0.5
        similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)
        mask = torch.ones_like(similarity_matrix) * (label.expand(n, n).eq(label.expand(n, n).t()))
        mask_no_sim = torch.ones_like(mask) - mask
        mask_dui_jiao_0 = torch.ones(n, n) - torch.eye(n, n)  # zero out the diagonal
        similarity_matrix = torch.exp(similarity_matrix / T)
        similarity_matrix = similarity_matrix * mask_dui_jiao_0
        sim = mask * similarity_matrix
        no_sim = similarity_matrix - sim
        no_sim_sum = torch.sum(no_sim, dim=1)
        no_sim_sum_expend = no_sim_sum.repeat(n, 1).T
        sim_sum = sim + no_sim_sum_expend
        loss = torch.div(sim, sim_sum)
        loss = mask_no_sim + loss + torch.eye(n, n)
        # now compute the loss over the batch
        loss = -torch.log(loss)  # take -log
        loss = torch.sum(torch.sum(loss, dim=1)) / (2 * n) + nn.CrossEntropyLoss()(y_hat, label)
        return loss
class GAT(nn.Module):
    def __init__(self, hidden_channels) -> None:
        super().__init__()
        self.conv1 = GATConv(data.num_features, hidden_channels)
        self.conv2 = GATConv(hidden_channels, 9)
        self.activation = nn.ReLU()

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = self.activation(x)
        # x = F.dropout(x, p=0.2)
        x = self.conv2(x, edge_index)
        return x
x = None
y = None
edge_index = None
train_mask = None
with open("X.pkl", "rb") as f1:
    x = pickle.load(f1)
with open("Y.pkl", "rb") as f2:
    y = pickle.load(f2)
y = y.long()
with open("edge_index.pkl", "rb") as f3:
    edge_index = pickle.load(f3)
# print(edge_index.shape)
with open("train_mask.pkl", "rb") as f4:
    train_mask = pickle.load(f4)

data = Data(x=x, y=y, edge_index=edge_index)
data.train_mask = train_mask

model = GAT(hidden_channels=32)
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = lr_scheduler.StepLR(optimizer, 100, 0.8)
criterion = ContrastiveLoss()

def train():
    model.train()
    optimizer.zero_grad()  # Clear gradients.
    out = model(data.x, data.edge_index)  # Perform a single forward pass.
    loss = criterion(data.x[data.train_mask], data.y[data.train_mask], out[data.train_mask])  # Compute the loss solely based on the training nodes.
    loss.backward()  # Derive gradients.
    optimizer.step()  # Update parameters based on gradients.
    return loss

def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)  # Use the class with highest probability.
    test_correct = pred[data.train_mask] == data.y[data.train_mask]  # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.train_mask.sum())  # Derive ratio of correct predictions.
    return test_acc

accs = []
for epoch in range(1, 1025):
    loss = train()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}', end=" ")
    acc = test()
    print("acc:", acc)
    accs.append(acc)
    scheduler.step()

plt.plot(range(len(accs)), accs)
print(time.time() - begin)
with open("./accs_gat_GCL.pkl", "wb") as f1:
    pickle.dump(accs, f1)
plt.savefig("./res_GAT_GCL.png", dpi=600)
I have tried to use DataParallel to spread the model and dataset over multiple GPUs, but failed.
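If the graph itself is too large for one GPU, the standard fix is neighbor-sampled mini-batches rather than full-batch training, so only a small subgraph sits on the GPU at a time. A minimal sketch with PyG's NeighborLoader, assuming the data, GAT model, optimizer, and device defined above; the fan-outs and batch size are illustrative, and plain cross-entropy stands in for the custom ContrastiveLoss:

from torch_geometric.loader import NeighborLoader

# sample a fixed number of neighbors per layer around each seed node,
# instead of pushing the whole graph through the model at once
loader = NeighborLoader(
    data,
    num_neighbors=[15, 10],  # one fan-out per GAT layer
    batch_size=1024,
    input_nodes=data.train_mask,
)

def train_minibatch():
    model.train()
    total_loss = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index)
        # only the first batch_size nodes are seed nodes; the rest are sampled context
        loss = F.cross_entropy(out[:batch.batch_size], batch.y[:batch.batch_size])
        loss.backward()
        optimizer.step()
        total_loss += float(loss)
    return total_loss / len(loader)

This also composes with multi-GPU training more cleanly than DataParallel, but single-GPU mini-batching alone is usually enough to get past the OOM.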

I have been training a decoder-based transformer for word generation, but it keeps generating the same words over and over again

I have been trying to create a decoder-based transformer for text generation, and the text it generates is the same no matter the input sequence.
The following is some of my code; the preprocessing code was removed.
def process_batch(ds):
    ds = tokenizer(ds)
    # pad short sentences to max len using the [PAD] id
    # and add the special tokens [START] and [END]
    ds_start_end_packer = StartEndPacker(
        sequence_length=MAX_SEQUENCE_LENGTH + 1,
        start_value=tokenizer.token_to_id("[START]"),
        end_value=tokenizer.token_to_id("[END]"),
        pad_value=tokenizer.token_to_id("[PAD]")
    )
    ds = ds_start_end_packer(ds)
    return ({"decoder_inputs": ds[:, :-1]}, ds[:, 1:])

def make_ds(seq):
    dataset = tf.data.Dataset.from_tensor_slices(seq)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.map(process_batch, num_parallel_calls=tf.data.AUTOTUNE)
    return dataset.shuffle(128).prefetch(32).cache()

train_ds = make_ds(train_seq)
val_ds = make_ds(val_seq)
This is the decoder section; I was using keras_nlp. It has two decoder layers.
decoder_inputs = Input(shape=(None,), dtype="int64", name="decoder_inputs")
x = TokenAndPositionEmbedding(
    vocabulary_size=VOCAB_SIZE,
    sequence_length=MAX_SEQUENCE_LENGTH,
    embedding_dim=EMBED_DIM,
    mask_zero=True
)(decoder_inputs)
x = TransformerDecoder(
    intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(x)
x = TransformerDecoder(
    intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(x)
x = Dropout(0.5)(x)
decoder_output = Dense(VOCAB_SIZE, activation="softmax")(x)
decoder = Model([decoder_inputs], decoder_output)
decoder_outputs = decoder([decoder_inputs])
transformer = Model(inputs=decoder_inputs, outputs=decoder_outputs, name="transformer")
# transformer.load_weights("/content/my-drive/MyDrive/projects/Olsen/weights-improvement-07-0.41.hdf5")
transformer.compile("adam", loss="sparse_categorical_crossentropy", metrics=['accuracy'])
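One common reason a decoder like this emits the same words regardless of input is greedy decoding (taking the argmax at every step). A minimal sampling sketch, assuming the trained transformer and tokenizer above; the helper names and the temperature value are illustrative, not from the original post:

import numpy as np

def sample_token(probs, temperature=0.8):
    # rescale the softmax output and sample, instead of always taking argmax;
    # temperature < 1 is more conservative, > 1 more diverse
    logits = np.log(probs + 1e-9) / temperature
    exp = np.exp(logits - np.max(logits))
    p = exp / exp.sum()
    return int(np.random.choice(len(p), p=p))

def generate(prompt_ids, max_new_tokens=40):
    ids = list(prompt_ids)
    end_id = tokenizer.token_to_id("[END]")
    for _ in range(max_new_tokens):
        # distribution over the next token, given everything generated so far
        probs = transformer.predict(np.array([ids]), verbose=0)[0, len(ids) - 1]
        next_id = sample_token(probs)
        if next_id == end_id:
            break
        ids.append(next_id)
    return ids

If sampling still produces one fixed continuation, check that the model was actually trained (the snippet above only compiles it) and that any loaded weights match the current tokenizer.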

Struggling with normalization values when training a model

I am trying to train an adversarial patch located at the bottom left corner of the image to cause a misclassification. Currently, I am using these parameters to normalize the CIFAR10 dataset.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
This results in images with maximum and minimum values of roughly 2.55 and -2.55 respectively. However, I'm not sure how to work with this range when training my patch: I struggle with converting the patch from the range (0,1) to (-2.55,2.55). Any help is appreciated!
My code for training is below (I don't think it's training properly for now):
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import torchattacks
import random
dictionary = {
    '0': 'airplane',
    '1': 'automobile',
    '2': 'bird',
    '3': 'cat',
    '4': 'deer',
    '5': 'dog',
    '6': 'frog',
    '7': 'horse',
    '8': 'ship',
    '9': 'truck',
}
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
# transform1 = transforms.Compose([transforms.ToTensor()])
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.201)
inv_normalize = transforms.Normalize(
    mean=[-0.4914/0.2023, -0.4822/0.1994, -0.4465/0.201],
    std=[1/0.2023, 1/0.1994, 1/0.201])
batch_size = 1
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=False, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=True, num_workers=2)
model = torch.hub.load("chenyaofo/pytorch-cifar-models", "cifar10_resnet20", pretrained=True)
model = model.cuda()
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
patch = np.random.rand(3, 32, 32)
model.eval()
def mask_generation(mask_type='rectangle', patch=patch, image_size=(3, 7, 7)):
    applied_patch = np.zeros(image_size)  # 0,1
    # patch = torch.tensor(patch)
    # padding = (3,3,3,3)
    # patch = F.pad(patch, padding)
    if mask_type == 'rectangle':
        rotation_angle = 0
        for i in range(patch.shape[0]):
            patch[i] = np.rot90(patch[i], rotation_angle)
        x_location, y_location = 25, 0
        for i in range(patch.shape[0]):
            applied_patch[:, x_location:x_location + patch.shape[1], y_location:y_location + patch.shape[2]] = patch
    mask = applied_patch.copy()
    mask[mask != 0] = 1.0
    return patch, applied_patch, mask, x_location, y_location, rotation_angle
def patch_attack(image, applied_patch, mask, target, probability_threshold, model, lr, max_iteration):
    applied_patch = torch.from_numpy(applied_patch)
    mask = torch.from_numpy(mask)
    image = inv_normalize(image)
    target_probability, count = 0, 0
    perturbated_image = torch.mul(mask.type(torch.FloatTensor), applied_patch.type(torch.FloatTensor)) + torch.mul((1 - mask.type(torch.FloatTensor)), image.type(torch.FloatTensor))
    perturbated_image = normalize(perturbated_image)
    while target_probability < probability_threshold and count < max_iteration:
        count += 1
        # Optimize the patch
        perturbated_image = Variable(perturbated_image.data, requires_grad=True)
        per_image = perturbated_image.cuda()
        output = model(per_image)
        target_log_softmax = torch.nn.functional.log_softmax(output, dim=1)[0][target]
        target_log_softmax.backward()
        patch_grad = perturbated_image.grad.clone().cpu()
        applied_patch = (lr * patch_grad) + applied_patch.type(torch.FloatTensor)
        applied_patch = torch.clamp(applied_patch, 0, 1)
        perturbated_image.grad.data.zero_()
        # Test the patch
        perturbated_image = torch.mul(mask.type(torch.FloatTensor), applied_patch.type(torch.FloatTensor)) + torch.mul((1 - mask.type(torch.FloatTensor)), image.type(torch.FloatTensor))
        perturbated_image = normalize(perturbated_image)
        perturbated_image = perturbated_image.cuda()
        output = model(perturbated_image)
        target_probability = torch.nn.functional.softmax(output, dim=1).data[0][target]
    perturbated_image = perturbated_image.detach().cpu().numpy()
    applied_patch = applied_patch.cpu().numpy()
    return perturbated_image, applied_patch
def test_patch(patch_type, target, patch, test_loader, model):
    test_total, test_actual_total, test_success = 0, 0, 0
    for (image, label) in test_loader:
        test_total += label.shape[0]
        assert image.shape[0] == 1, 'Only one picture should be loaded each time.'
        image = image.cuda()  # roughly in [-2.55, 2.55] after normalization
        label = label.cuda()
        output = model(image)
        _, predicted = torch.max(output.data, 1)
        if predicted[0] != label and predicted[0].data.cpu().numpy() != target:
            test_actual_total += 1
            # mask_generation returns six values, including rotation_angle
            patch, applied_patch, mask, x_location, y_location, rotation_angle = mask_generation('rectangle', patch, (3, 32, 32))
            applied_patch = torch.from_numpy(applied_patch)
            mask = torch.from_numpy(mask)
            mask = normalize(mask)
            applied_patch = normalize(applied_patch)
            perturbated_image = torch.mul(mask.type(torch.FloatTensor), applied_patch.type(torch.FloatTensor)) + torch.mul((1 - mask.type(torch.FloatTensor)), image.type(torch.FloatTensor))
            perturbated_image = perturbated_image.cuda()
            output = model(perturbated_image)
            _, predicted = torch.max(output.data, 1)
            if predicted[0].data.cpu().numpy() == target:
                test_success += 1
    return test_success / test_actual_total
# training parameters
epochs = 1
target = 0
probability_threshold = 0.99
lr = 1/255
max_iteration = 1
runs = 0
for epoch in range(epochs):
    train_total, train_actual_total, train_success = 0, 0, 0
    for (image, label) in trainloader:
        runs += 1
        assert image.shape[0] == 1
        image = image.cuda()
        label = label.cuda()
        train_total += label.shape[0]
        output = model(image)
        _, predicted = torch.max(output.data, 1)
        if predicted[0] != label or predicted[0].data.cpu().numpy() != target:
            train_actual_total += 1
            patch, applied_patch, mask, x_location, y_location, rotation_angle = mask_generation('rectangle', patch, (3, 32, 32))
            perturbated_image, applied_patch = patch_attack(image, applied_patch, mask, target, probability_threshold, model, lr, max_iteration)
            perturbated_image = torch.from_numpy(perturbated_image).cuda()
            output = model(perturbated_image)
            _, predicted = torch.max(output.data, 1)
            if predicted[0].data.cpu().numpy() == target:
                train_success += 1
            patch = applied_patch[0][:, x_location:x_location + patch.shape[1], y_location:y_location + patch.shape[2]]
            patch = np.array(patch)
To convert a number x in the range [0,1] to the range [-2.55,2.55]:
Multiply by (size of final range) / (size of original range), in this case 5.1/1.0.
Then add (min of final range) - (min of starting range) to the result, in this case -2.55 - 0 = -2.55.
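As a quick sketch of that arithmetic (the function names are illustrative), mapping a patch between the two ranges and back:

import numpy as np

def to_normalized(x, lo=-2.55, hi=2.55):
    # [0,1] -> [lo,hi]: multiply by the size of the target range, then shift by its minimum
    return x * (hi - lo) + lo

def to_unit(x, lo=-2.55, hi=2.55):
    # inverse mapping [lo,hi] -> [0,1]
    return (x - lo) / (hi - lo)

patch01 = np.random.rand(3, 32, 32)
patch_norm = to_normalized(patch01)
assert np.allclose(to_unit(patch_norm), patch01)

Note that Normalize is per-channel ((x - mean) / std), so the true bounds differ slightly per channel; ±2.55 is an approximation. Keeping the patch in [0,1] and applying the same normalize/inv_normalize pair defined above is the more exact route.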

NLP model's accuracy stuck at 0.5098 while training

I built a two-layer LSTM model (Keras) for a movie review dataset from Kaggle: Dataset
While training the model, every epoch gave the same accuracy of 0.5098.
I thought it might not be learning long-distance dependencies, so instead of LSTM I used a bidirectional LSTM. Still, the model's training accuracy was 0.5098 for every epoch. I trained the model for 8 hours / 35 epochs on CPU, then stopped training.
Code:
import pandas as pd
from sentiment_utils import *
import keras
import keras.backend as k
import numpy as np

def read_glove_vectors(path):
    with open(path, encoding='utf8') as f:
        words = set()
        word_to_vec_map = {}
        for line in f:
            line = line.strip().split()
            cur_word = line[0]
            words.add(cur_word)
            word_to_vec_map[cur_word] = np.array(line[1:], dtype=np.float64)
    words_to_index = {}
    index_to_words = {}
    i = 1
    for w in sorted(words):
        words_to_index[w] = i
        index_to_words[i] = w
        i = i + 1
    return words_to_index, index_to_words, word_to_vec_map

def sentance_to_indices(X_train, words_to_index, maxLen, dash_index_list, keys):
    m = X_train.shape[0]
    X_indices = np.zeros((m, maxLen))
    for i in range(m):
        if i in dash_index_list:
            continue
        sentance_words = X_train[i].lower().strip().split()
        j = 0
        for word in sentance_words:
            if word in keys:
                X_indices[i, j] = words_to_index[word]
                j += 1
    return X_indices

def pretrained_embedding_layer(word_to_vec_map, words_to_index):
    emb_dim = word_to_vec_map['pen'].shape[0]
    vocab_size = len(words_to_index) + 1
    emb_matrix = np.zeros((vocab_size, emb_dim))
    for word, index in words_to_index.items():
        emb_matrix[index, :] = word_to_vec_map[word]
    emb_layer = keras.layers.embeddings.Embedding(vocab_size, emb_dim, trainable=False)
    emb_layer.build((None,))
    emb_layer.set_weights([emb_matrix])
    return emb_layer

def get_model(input_shape, word_to_vec_map, words_to_index):
    sentance_indices = keras.layers.Input(shape=input_shape, dtype='int32')
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, words_to_index)
    embeddings = embedding_layer(sentance_indices)
    X = keras.layers.Bidirectional(keras.layers.LSTM(128, return_sequences=True))(embeddings)
    X = keras.layers.Dropout(0.5)(X)
    X = keras.layers.Bidirectional(keras.layers.LSTM(128, return_sequences=True))(X)
    X = keras.layers.Dropout(0.5)(X)
    X = keras.layers.Bidirectional(keras.layers.LSTM(128, return_sequences=False))(X)
    X = keras.layers.Dropout(0.5)(X)
    X = keras.layers.Dense(5)(X)
    X = keras.layers.Activation('softmax')(X)
    model = keras.models.Model(sentance_indices, X)
    return model

train_data = pd.read_table('train.tsv')
X_train = train_data.iloc[:, 2]
Y_train = train_data.iloc[:, 3]

from sklearn.preprocessing import OneHotEncoder
# Y_train is a pandas Series; reshape its underlying array for the encoder
Y_train = Y_train.values.reshape(Y_train.shape[0], 1)
ohe = OneHotEncoder(categorical_features=[0])
Y_train = ohe.fit_transform(Y_train).toarray()

maxLen = len(max(X_train, key=len).split())
words_to_index, index_to_words, word_to_vec_map = read_glove_vectors("glove/glove.6B.50d.txt")
m = X_train.shape[0]

model = get_model((maxLen,), word_to_vec_map, words_to_index)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

dash_index_list = []
for i in range(m):
    if '-' in X_train[i]:
        dash_index_list.append(i)

keys = list(word_to_vec_map.keys())

X_train_indices = sentance_to_indices(X_train, words_to_index, maxLen, dash_index_list, keys)
model.fit(X_train_indices, Y_train, epochs=50, batch_size=32, shuffle=True)
I think the way you defined the model architecture doesn't make sense. Try looking at this example on IMDB movie reviews with LSTM in the Keras GitHub repo: Trains an LSTM model on the IMDB sentiment classification task.
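A minimal sketch in the spirit of that Keras example, adapted to the 5-class labels above (the layer sizes and epoch count are illustrative, not from the example):

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

# shallow single-LSTM baseline with trainable embeddings: confirm training
# accuracy moves off 0.5098 before stacking bidirectional layers
baseline = Sequential([
    Embedding(input_dim=len(words_to_index) + 1, output_dim=128, input_length=maxLen),
    LSTM(128, dropout=0.2, recurrent_dropout=0.2),
    Dense(5, activation='softmax'),
])
baseline.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
baseline.fit(X_train_indices, Y_train, epochs=5, batch_size=32, validation_split=0.1)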

Keras saved model predicting different values on different session

I have trained a named entity recognition model. After saving it and loading it back, it gives correct predictions within the same IPython session, but whenever I close the session and open it again, the loaded model predicts randomly. Can you help me with that?
I have saved the model in HDF5 format using:
model.save("filename")
And I am loading it using:
model = load_model("filename")
Here is my full code:
import pandas as pd
import numpy as np
import os
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from keras.models import Model, Input, load_model
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from nltk import pos_tag, word_tokenize, sent_tokenize

# use a raw string so the backslashes in the Windows path are not treated as escapes
data = pd.read_csv(r"E:\ml tut\entity recognition\exdataset.csv", encoding="latin1")
data = data.fillna(method="ffill")

words = list(set(data["Word"].values))
words.append("ENDPAD")
n_words = len(words)

tags = list(set(data["Tag"].values))
n_tags = len(tags)

class SentenceGetter(object):
    def __init__(self, data):
        self.n_sent = 1
        self.data = data
        self.empty = False
        agg_func = lambda s: [((w, p), t) for w, p, t in
                              zip(s["Word"].values.tolist(),
                                  s["POS"].values.tolist(),
                                  s["Tag"].values.tolist())]
        self.grouped = self.data.groupby("Sentence #").apply(agg_func)
        self.sentences = [s for s in self.grouped]

    def get_next(self):
        try:
            s = self.grouped["Sentence: {}".format(self.n_sent)]
            self.n_sent += 1
            return s
        except:
            return None

getter = SentenceGetter(data)
sent = getter.get_next()
print(sent)
sentences = getter.sentences

max_len = 50
word2idx = {w: i for i, w in enumerate(words)}
tag2idx = {t: i for i, t in enumerate(tags)}

input = Input(shape=(max_len,))
model = Embedding(input_dim=n_words, output_dim=50, input_length=max_len)(input)
model = Dropout(0.1)(model)
model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(model)

if os.path.exists('my_model.h5'):
    print("loading model")
    model = load_model('my_model.h5')
else:
    print("training model")
    X = [[word2idx[w[0][0]] for w in s] for s in sentences]
    X = pad_sequences(maxlen=max_len, sequences=X, padding="post", value=n_words - 1)
    y = [[tag2idx[w[1]] for w in s] for s in sentences]
    y = pad_sequences(maxlen=max_len, sequences=y, padding="post", value=tag2idx["O"])
    y = [to_categorical(i, num_classes=n_tags) for i in y]
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.1)
    model = Model(input, out)
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(X_tr, np.array(y_tr), batch_size=32, epochs=5, validation_split=0.1, verbose=1)
    model.save('my_model.h5')

my_input = "Albert Einstein is a great guy,he lives in berlin, Germany."
print("--------------")
test_sentence = word_tokenize(my_input)
x_test_sent = pad_sequences(sequences=[[word2idx.get(w, 0) for w in test_sentence]],
                            padding="post", value=0, maxlen=max_len)
i = 0
p = model.predict(np.array([x_test_sent[i]]))
p = np.argmax(p, axis=-1)
print("{:15}||{}".format("Word", "Prediction"))
print(30 * "=")
for w, pred in zip(test_sentence, p[0]):
    if w != 0:
        print("{:15}: {}".format(w, tags[pred]))
Please save your tags (tags = list(set(data["Tag"].values))) with pickle while generating your model; this will solve your problem. Because words and tags are built from a set, their iteration order changes from session to session, so the word2idx and tags rebuilt in a new session no longer match the indices the saved model was trained with. Therefore you need to save the following:
1. tags
2. model
3. word2idx
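A minimal sketch of the fix with pickle (the file names are illustrative), matching the spots in the script above where tags and word2idx are built:

import pickle

# in the training session, right after building the mappings:
with open("tags.pkl", "wb") as f:
    pickle.dump(tags, f)
with open("word2idx.pkl", "wb") as f:
    pickle.dump(word2idx, f)

# in a new session, load them back instead of rebuilding them from set(),
# alongside model = load_model('my_model.h5'):
with open("tags.pkl", "rb") as f:
    tags = pickle.load(f)
with open("word2idx.pkl", "rb") as f:
    word2idx = pickle.load(f)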
