Using torchtext for inference - machine-learning

I wonder what the right way is to use torchtext for inference.
Let's assume I've trained the model and dumped all Fields with built vocabularies. It seems the next step is to use torchtext.data.Example to load one single example. Somehow I should numericalize it by using the loaded Fields and create an Iterator.
I would appreciate any simple examples of using torchtext for inference.

For a trained model and vocabulary (which is part of the text Field; you don't have to save the whole class):
def read_vocab(path):
    # read vocabulary pkl
    import pickle
    pkl_file = open(path, 'rb')
    vocab = pickle.load(pkl_file)
    pkl_file.close()
    return vocab

def load_model_and_vocab():
    import torch
    import os.path
    my_path = os.path.abspath(os.path.dirname(__file__))
    vocab_path = os.path.join(my_path, vocab_file)
    weights_path = os.path.join(my_path, WEIGHTS)
    vocab = read_vocab(vocab_path)
    model = classifier(vocab_size=len(vocab))
    model.load_state_dict(torch.load(weights_path))
    model.eval()
    return model, vocab

def predict(model, vocab, sentence):
    tokenized = [w.text.lower() for w in nlp(sentence)]  # tokenize the sentence
    indexed = [vocab.stoi[t] for t in tokenized]          # convert to integer sequence
    length = [len(indexed)]                               # compute no. of words
    tensor = torch.LongTensor(indexed).to('cpu')          # convert to tensor
    tensor = tensor.unsqueeze(1).T                        # reshape in form of (batch, no. of words)
    length_tensor = torch.LongTensor(length)              # convert to tensor
    prediction = model(tensor, length_tensor)             # prediction
    return round(1 - prediction.item())
"classifier" is the class I defined for my model.
For saving the vocabulary pkl:
def save_vocab(vocab):
    import pickle
    output = open('vocab.pkl', 'wb')
    pickle.dump(vocab, output)
    output.close()
And for saving the model after training you can use:
torch.save(model.state_dict(), 'saved_weights.pt')
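(Putting the pieces together, inference might then look roughly like this; the spaCy en_core_web_sm tokenizer, the file names, and the TEXT field name are assumptions on my side, not part of the original answer:)
import spacy
import torch

nlp = spacy.load('en_core_web_sm')   # tokenizer used inside predict()
vocab_file = 'vocab.pkl'             # names assumed here; use whatever you saved
WEIGHTS = 'saved_weights.pt'

# after training: persist the Field's vocabulary and the model weights
save_vocab(TEXT.vocab)               # TEXT is the torchtext Field built on the training data
torch.save(model.state_dict(), WEIGHTS)

# at inference time
model, vocab = load_model_and_vocab()
print(predict(model, vocab, "This was a surprisingly good movie"))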
Tell me if it worked for you!

Related

Integrate the ImageDataGenerator in own customized fit_generator

I want to fit a Siamese CNN with multiple inputs that are stored in memory and have no label (just an arbitrary dummy label). Therefore, I had to write my own data generator function for using a CNN model in Keras.
My data generator has the following form:
import numpy as np
import keras

class DataGenerator(keras.utils.Sequence):
    def __init__(self, train_data, train_triplets, batch_size=32, dim=(128,128), n_channels=3, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        # Added
        self.train_data = train_data
        self.train_triplets = train_triplets
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        n_row = self.train_triplets.shape[0]
        return int(np.floor(n_row / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        # print(index)
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Find list of IDs
        list_IDs_temp = self.train_triplets.iloc[indexes,]
        # Generate data
        [anchor, positive, negative] = self.__data_generation(list_IDs_temp)
        y_train = np.random.randint(2, size=(1,2,self.batch_size)).T
        return [anchor, positive, negative], y_train

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        n_row = self.train_triplets.shape[0]
        self.indexes = np.arange(n_row)
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'
        # anchor, positive and negative: (n_samples, *dim, n_channels)
        # Initialization
        anchor = np.zeros((self.batch_size, *self.dim, self.n_channels))
        positive = np.zeros((self.batch_size, *self.dim, self.n_channels))
        negative = np.zeros((self.batch_size, *self.dim, self.n_channels))
        nrow_temp = list_IDs_temp.shape[0]
        # Generate data
        for i in range(nrow_temp):
            list_ind = list_IDs_temp.iloc[i,]
            anchor[i] = self.train_data[list_ind[0]]
            positive[i] = self.train_data[list_ind[1]]
            negative[i] = self.train_data[list_ind[2]]
        return [anchor, positive, negative]
where train_data is a list of all images and train_triplets is a data frame containing image indices used to build each input triplet of images.
Now I want to do some data augmentation for each mini-batch supplied to my CNN. I have tried to integrate the ImageDataGenerator of Keras, but I couldn't make it work in my code. Is it somehow possible to do it? I am not very experienced with Python and would appreciate any help.
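(For reference, a Sequence like this is typically plugged into training roughly as follows; the model name and loss are placeholders, not the asker's actual code:)
train_gen = DataGenerator(train_data, train_triplets, batch_size=32)
siamese_model.compile(optimizer='adam', loss=triplet_loss)  # triplet_loss defined elsewhere
siamese_model.fit(train_gen, epochs=10)                     # fit_generator(train_gen, ...) on older Keras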
Does this article answer your question?
To put it in a nutshell, Keras's ImageDataGenerator lacks flexibility when it comes to personalized batch generators, and the easiest way to still get data augmentation is simply to switch to another augmentation tool (like the albumentations library described in the previous article, though you could also use imgaug).
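(As an illustration of that route, augmentation could be applied per sample inside __data_generation; this is a sketch assuming albumentations is installed, not tested against the asker's data:)
import albumentations as A

augment = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.RandomBrightnessContrast(p=0.2),
])

# inside DataGenerator.__data_generation, replace the plain assignments with e.g.:
#     anchor[i]   = augment(image=self.train_data[list_ind[0]])['image']
#     positive[i] = augment(image=self.train_data[list_ind[1]])['image']
#     negative[i] = augment(image=self.train_data[list_ind[2]])['image']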
I just want to warn you that I encountered several issues with albumentations (which I described in this question on GitHub, but so far I have had no answers), so maybe using imgaug is a better idea.
Hope this helps, good luck with your model!

Unable to get any entity after training a blank spaCy model

I have been working on legal data to recognize custom entities within a legal document. I am training an empty spaCy model on my cleaned training dataset (JSON format, as described in the spaCy documentation). I have tried to remove punctuation, special characters, brackets etc. from the training dataset. I labeled the new entity with the name 'dictkey' and recorded its start and end index in the JSON in order to create the training dataset.
Below are the links to the training dataset and the code I am using. Can you please have a look at the attached training dataset and tell me whether there is any issue or further cleaning required?
https://techmailer.online/TRAIN_DATA3json.txt
https://techmailer.online/train_ner%20-%20Copy.py
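(For reference, a single entry in spaCy v2's offset-based training format would look something like this; the text and offsets below are made up, not taken from the linked dataset:)
TRAIN_DATA = [
    ("Agreement dated 12 March 2018 between Acme Corp and Beta LLC",
     {"entities": [(0, 9, "dictkey")]}),   # (start_char, end_char, label)
]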
from __future__ import unicode_literals, print_function
import plac
import random
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
import re
#import createtrainingdataset_updated_v2 as traindataset

# training data
#TRAIN_DATA = [
#    ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
#    ("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
#]

with open('/Users/modis1/Desktop/24-01/TRAIN_DATA3json.txt', 'r', encoding='utf-8') as openfile:
    TRAIN_DATA = openfile.read()

@plac.annotations(
    model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int),
)
def main(model=None, output_dir='/Users/A-GUPTA50/Desktop/ShubhamModi/NewSavedModel/', n_iter=100):
    """Load the model, set up the pipeline and train the entity recognizer."""
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")
    # create the built-in pipeline components and add them to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if "ner" not in nlp.pipe_names:
        ner = nlp.create_pipe("ner")
        nlp.add_pipe(ner, last=True)
    # otherwise, get it so we can add labels
    else:
        ner = nlp.get_pipe("ner")
    # add labels
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get("entities"):
            ner.add_label(ent[2])
    # get names of other pipes to disable them during training
    pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):  # only train NER
        # reset and initialize the weights randomly - but only if we're
        # training a new model
        if model is None:
            nlp.begin_training()
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)
            losses = {}
            # batch up the examples using spaCy's minibatch
            batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(
                    texts,  # batch of texts
                    annotations,  # batch of annotations
                    drop=0.5,  # dropout - make it harder to memorise data
                    losses=losses,
                )
            print("Losses", losses)
    # test the trained model
    for text, _ in TRAIN_DATA:
        # text = re.sub(r'\\u\d{4,}','', text.rstrip())
        doc = nlp(text)
        print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
        print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
    # save model to output directory
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)
        # test the saved model
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        for text, _ in TRAIN_DATA:
            doc = nlp2(text)
            print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
            print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])

if __name__ == "__main__":
    plac.call(main)

How to get feature_names after encoding text avg_word to vec?

I am performing analysis on the donor_choose data set. I created a GloVe words file for the essays and encoded the essays using average word2vec.
Now I want to get the feature names. How do I do that?
I performed BOW on the essays and extracted the feature names too, through the vectorizer and get_feature_names(). But I couldn't do the same with average word2vec, where we are not using any fitted model but only the vector of each word.
"""Encoding Essay- Bow"""
vectorizer = CountVectorizer()
vectorizer.fit(essay_train)
clean_essay_bow_X_train = vectorizer.transform(essay_train)
clean_essay_bow_X_test = vectorizer.transform(essay_test)
for i in vectorizer.get_feature_names():
feature_names_bow.append(i)
"""Encoding Essay- avgw2v"""
import pickle
with open('glove_vectors', 'rb') as f:
model = pickle.load(f)
glove_words = set(model.keys())
def avgvectorizer(data):
avw2v_data = []
for sentance in tqdm(data.values):
vector=np.zeros(300)
cnt_words=0;
for word in sentance.split():
if word in glove_words:
vector+=model[word]
cnt_words+=1
if cnt_words!=0:
vector/=cnt_words
avw2v_data.append(vector)
return avw2v_data
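(For context, the function above would be applied like this; essay_train and essay_test as pandas Series of essay strings are assumed:)
# 300-dimensional average-GloVe representation for each essay
avg_w2v_X_train = avgvectorizer(essay_train)
avg_w2v_X_test = avgvectorizer(essay_test)
print(len(avg_w2v_X_train), len(avg_w2v_X_train[0]))  # n_essays, 300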

How do I convert my machine learning model into a REST Node API

This is my code; how can I convert the model into a Node REST API? I have created the training set and saved the model. Can anyone help me with the API part? I have tried but was not successful.
training = []
output = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag of words array
    for w in words:
        bag.append(1) if w in pattern_words else bag.append(0)
    # output is a '0' for each tag and '1' for current tag
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])

# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training)

# create train and test lists
train_x = list(training[:,0])
train_y = list(training[:,1])
tf.reset_default_graph()
# Build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
# Define model and setup tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=4000, batch_size=8, show_metric=True)
# saving the model
model.save('model.tflearn')
# save all of our data structures
import pickle
pickle.dump({'words': words, 'classes': classes, 'train_x': train_x, 'train_y': train_y}, open("training_data", "wb"))

import pickle
data = pickle.load(open("training_data", "rb"))
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']

# import our chat-bot intents file
import json
with open('D:\\android\\ad.json') as json_data:
    intents = json.load(json_data)
def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return (np.array(bag))
ERROR_THRESHOLD = 0.25

# classifying the inputs
def classify(sentence):
    # generate probabilities from the model
    results = model.predict([bow(sentence, words)])[0]
    # filter out predictions below a threshold
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], r[1]))
    # return tuple of intent and probability
    return return_list
def response(sentence, userID='123', show_details=False):
    results = classify(sentence)
    # if we have a classification then find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # a random response from the intent
                    return print(random.choice(i['response']))
There are multiple ways you can do this. You can use server frameworks like Flask or Django. I will present a simple example using Flask (note this is just an abstract prototype):
Create a model class:
# import libraries

class Model():
    def __init__(self):
        self.model = load()

    def inference(self, inputs):
        return self.model.predict(inputs)

Note, this is just a prototype; the functions are implemented by you.
Create a REST endpoint:
from flask import Flask, request, jsonify
from model import Model

app = Flask("__main__")
model = Model()

@app.route("/inference", methods=["POST"])
def inference():
    data = request.get_json()
    results = model.inference(data["inputs"])
    return jsonify(
        {"result": results}
    )
Then you can use curl to test the endpoint, and you can also use axios or fetch to send POST requests to it. Don't forget to enable CORS if the front end is served from a different origin.
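(For example, a quick smoke test from Python; the local URL and payload shape are assumptions matching the endpoint above, and the server is assumed to be running on port 5000:)
import requests

resp = requests.post(
    "http://localhost:5000/inference",
    json={"inputs": "what are your opening hours"},
)
print(resp.json())  # {"result": ...}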
Thank you
According to the Tflearn documentation, the library remains compatible with Tensorflow.
Google released TensorFlow.js, which works both as a browser-based and a NodeJS JavaScript library.
Tensorflow models can be loaded into TensorFlow.js as described in the link:
https://js.tensorflow.org/tutorials/import-saved-model.html
For reference, the model needs to be converted to the TF.js format:
- You need to install TensorFlow.js in your Python environment first:
pip install tensorflowjs
- Convert an existing TensorFlow model to the TensorFlow.js web format:
tensorflowjs_converter \
--input_format=tf_saved_model \
--output_node_names='Some/Model/Name' \
--saved_model_tags=serve \
/my/saved_model \
/my/web_model
Load the saved model in NodeJS environment:
const model = await tf.loadModel('file:///mypath/mymodel.json');

Labeling Images using Inception Getting ValueError: GraphDef cannot be larger than 2GB

I am using the TensorFlow for Poets code lab to guide me as I retrain the Inception v3 CNN to classify a list of images. I have successfully trained the model, and it works when I use the given code to classify individual images. But when I try to use it on a large batch of images, I get the error "GraphDef cannot be larger than 2GB". Please advise.
import pandas as pd
import os, sys
import tensorflow as tf

test_images = pd.read_csv('test_images.csv')
testid = test_images['Id']
listx = list(range(4320))
predlist = []
output = pd.DataFrame({'Id': listx})
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

for x in listx:
    path = 'test/' + str(x+1) + '.jpg'
    # change this as you see fit
    image_path = path
    # Read in the image_data
    image_data = tf.gfile.FastGFile(image_path, 'rb').read()
    # Loads label file, strips off carriage return
    label_lines = [line.rstrip() for line
                   in tf.gfile.GFile("retrained_labels.txt")]
    # Unpersists graph from file
    with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        # Feed the image_data as input to the graph and get first prediction
        with tf.Graph().as_default():
            softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
            predictions = sess.run(softmax_tensor, \
                                   {'DecodeJpeg/contents:0': image_data})
            # Sort to show labels of first prediction in order of confidence
            top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
            # print('the top result is' + label_lines[node_id])
            flag = 0
            for node_id in top_k:
                while flag == 0:
                    human_string = label_lines[node_id]
                    score = predictions[0][node_id]
                    predlist.append(int(human_string[:3]))
                    print('%s' % (human_string))
                    flag = 1  # we only want the top prediction

output['Prediction'] = predlist
output.to_csv('outputtest.csv')
One way this error can be solved is by placing
with tf.Graph().as_default():
right after the for statement, at the top of the loop body.
This is the piece of code that worked for me while trying to read bulk images:
for filename in os.listdir(image_path):
    with tf.Graph().as_default():
        # Read in the image_data
        image_data = tf.gfile.FastGFile(image_path + filename, 'rb').read()
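(A slightly fuller sketch of that restructuring, following the same idea of a fresh graph per image; names are taken from the question's code and this is untested:)
for filename in os.listdir(image_path):
    with tf.Graph().as_default():
        # Read in the image_data
        image_data = tf.gfile.FastGFile(image_path + filename, 'rb').read()
        # Unpersist the graph from file inside the fresh default graph
        with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        with tf.Session() as sess:
            softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
            print(label_lines[top_k[0]])  # top prediction only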
