I was trying to deploy an ML model using Node.js with the help of the child_process package. While running __predict(), it takes too long and exits with a code 1 error.
Here is all the related code for diagnosing the issue:
Model Python code:
import keras
import time

start = time.time()
encoder = keras.models.load_model('enc', compile=False)
decoder = keras.models.load_model('dec', compile=False)

import numpy as np
from flask import Flask, request, jsonify, render_template
import tensorflow as tf
import pickle
import string
import re
from keras_preprocessing.sequence import pad_sequences

def initialize_hidden_state():
    return tf.zeros((1, 1024))

eng_tokenizer, hin_tokenizer = pickle.load(open('tokenizer.pkl', 'rb'))

def clean(text):
    text = text.lower()
    special_char = set(string.punctuation + '।')  # Set of all special characters (includes the Devanagari danda)
    # Remove all the special characters
    text = ''.join(word for word in text if word not in special_char)
    seq = eng_tokenizer.texts_to_sequences([text])
    seq = pad_sequences(seq, maxlen=23, padding='post')
    return seq

def __predict(data):
    # Get the data from the POST request.
    # data = request.get_json(force=True)
    clean_input = clean(data)
    # Make prediction using the models loaded from disk as per the data.
    hidden_enc = initialize_hidden_state()
    enc_out, enc_hidden = encoder(clean_input, hidden_enc)
    result = ''
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims(hin_tokenizer.texts_to_sequences(['<Start>'])[0], 0)
    # ------------------------------------------------------------------
    for t in range(25):
        predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        predicted_id = tf.argmax(predictions[0]).numpy()
        x = hin_tokenizer.sequences_to_texts([[predicted_id]])[0]
        if x == 'end':
            break
        result += x + ' '
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    CLEANR = re.compile(r"([A-Za-z])", re.DOTALL)
    result = re.sub(CLEANR, '', result)
    return result

# import json
# with open('data.json', 'r') as openfile:
#     json_object = json.load(openfile).get('data')

data = __predict("file")
end = time.time()
# print(start-end)
data1 = data + "abcd"
print(data1)
# print("abcd")
# dictionary = {
#     "data": data,
# }
# json_object = json.dumps(dictionary, indent=2)
# with open("result.json", "w") as outfile:
#     outfile.write(json_object)
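For reference, a minimal sketch (not part of the original script, just an illustration of the commented-out JSON idea) of emitting the prediction as JSON over stdout for the Node.js parent; json.dumps escapes non-ASCII characters as \uXXXX by default, so only ASCII bytes travel through the pipe:

import json
import sys

# Sketch: wrap the prediction in JSON and write it to stdout for the parent process.
# json.dumps (with the default ensure_ascii=True) escapes the Devanagari output,
# so the pipe only carries ASCII bytes.
payload = json.dumps({"data": data}, indent=2)
sys.stdout.write(payload + "\n")
sys.stdout.flush()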
When I print("abcd") or print(start-end), I get output and the process ends with code 0. But when I print the prediction (data), there is no output and the process ends with code 1.
Here is the child_process code:
const express = require('express') // assuming an Express app, given app.get and res.send
const { spawn } = require('child_process')

const app = express()

app.get('/', (req, res) => {
  let dataToSend
  let largeDataSet = []
  // spawn a new child process to call the python script
  const python = spawn('python', ['app.py'])
  // console.log(python);
  // collect data from the script
  python.stdout.on('data', function (data) {
    console.log('Pipe data from python script ...')
    // dataToSend = data;
    largeDataSet.push(data)
  })
  // the close event fires once all stdio streams of the child process are closed
  python.on('close', (code) => {
    console.log(`child process close all stdio with code ${code}`)
    // send data to browser
    // largeDataSet = []
    console.log(largeDataSet.join(''))
    res.send(largeDataSet.join(''))
  })
})
Here is the error:
child process close all stdio with code 1
Please help; I have tried to understand the problem but could not get anywhere with it.
Thanks in advance!
I am trying to loop the chi-square test over my columns, but the outcome is not shown as required, that is, as a DataFrame: all the columns end up in a single row.
Please help.
# Import the function
from scipy.stats import chi2_contingency

chi2_check = []
for i in df_clean.select_dtypes(['object']):
    if chi2_contingency(pd.crosstab(df_clean['Final_Comments'], df_clean[i]))[1] < 0.05:
        chi2_check.append('Reject Null Hypothesis')
    else:
        chi2_check.append('Fail to Reject Null Hypothesis')

res = pd.DataFrame(data=[df_clean.select_dtypes(['object']), chi2_check]).T
res.columns = ['Column', 'Hypothesis']
print(res)
res.columns
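For comparison, a minimal sketch (assuming the same df_clean DataFrame with a 'Final_Comments' column as above) that collects one (column, decision) pair per object column and builds the result frame from that list, giving one row per column:

import pandas as pd
from scipy.stats import chi2_contingency

results = []
for col in df_clean.select_dtypes(['object']).columns:
    # p-value of the chi-square test between 'Final_Comments' and this column
    p_value = chi2_contingency(pd.crosstab(df_clean['Final_Comments'], df_clean[col]))[1]
    decision = 'Reject Null Hypothesis' if p_value < 0.05 else 'Fail to Reject Null Hypothesis'
    results.append((col, decision))

res = pd.DataFrame(results, columns=['Column', 'Hypothesis'])
print(res)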
My code is:
import streamlit as st
import pickle
import string
from nltk.corpus import stopwords
import nltk
from nltk.stem.porter import PorterStemmer

ps = PorterStemmer()

def transform_text(text):
    text = text.lower()
    text = nltk.word_tokenize(text)
    y = []
    for i in text:
        if i.isalnum():
            y.append(i)
    text = y[:]
    y.clear()
    for i in text:
        if i not in stopwords.words('english') and i not in string.punctuation:
            y.append(i)
    text = y[:]
    y.clear()
    for i in text:
        y.append(ps.stem(i))
    return " ".join(y)

tfidf = pickle.load(open('vectorizer.pkl', 'rb'))
model = pickle.load(open('model.pkl', 'rb'))

st.title("Email/SMS Spam Classifier")
input_sms = st.text_area("Enter the message")

if st.button('Predict'):
    # 1. preprocess
    transformed_sms = transform_text(input_sms)
    # 2. vectorize
    vector_input = tfidf.transform([transformed_sms])
    # 3. predict
    result = model.predict(vector_input)[0]
    # 4. display
    if result == 1:
        st.header("Spam")
    else:
        st.header("Not Spam")
Code for a single raster file:
import geopandas as gpd
#import os
import rasterio
import scipy.sparse as sparse
import pandas as pd
import numpy as np

# Create an empty pandas dataframe called 'table'
table = pd.DataFrame(index=np.arange(0, 1))

# Read the points shapefile using GeoPandas
stations = gpd.read_file(r'E:/anakonda/Shape files/AAQ_st1/AAQ_ST1.shp')
stations['lon'] = stations['geometry'].x
stations['lat'] = stations['geometry'].y

Matrix = pd.DataFrame()

# Iterate through the rasters and save the data as individual arrays to a Matrix
dataset = rasterio.open(r'E:/anakonda/LST_day/MOD11A1.006_LST_Day_1km_doy2019082_aid0001.tif')
data_array = dataset.read(1)
data_array_sparse = sparse.coo_matrix(data_array, shape=(351, 545))

for records_date in Matrix.columns.tolist():
    a = Matrix
    LST_day_value = a.loc[int(row)][int(col)]
    table[records_date] = LST_day_value

transpose_mat = table.T
transpose_mat.rename(columns={0: 'LST_Day(Kel)'}, inplace=True)
transpose_mat.to_csv(r'E:/anakonda/LST_day' + '\\' + station_name + '.csv')
Error code lines:
LST_day_value = a.loc[int(row)][int(col)]
transpose_mat.to_csv(r'E:/anakonda/LST_day'+'\'+station_name+'.csv')
Errors Shown:
Undefined Name 'row' (pyflakes E)
Undefined Name 'col' (pyflakes E)
NameError: name 'transpose_mat' is not defined
I'm using the above code to create a raster time series for MODIS LST data. The code ran well until transposing the matrix; the error is shown below the code. I'm new to Python, so kindly help me with this issue.
import os
import geopandas as gpd
import rasterio
import scipy.sparse as sparse
import pandas as pd
import numpy as np

# Create an empty pandas dataframe called 'table'
table = pd.DataFrame(index=np.arange(0, 1))

# Read the points shapefile using GeoPandas
stations = gpd.read_file(r'E:/anakonda/Shape files/AAQ_st1/AAQ_ST1.shp')
stations['lon'] = stations['geometry'].x
stations['lat'] = stations['geometry'].y

Matrix = pd.DataFrame()

# Iterate through the rasters and save the data as individual arrays to a Matrix
for files in os.listdir(r'E:/anakonda/LST_Night'):
    if files[-4:] == '.tif':
        dataset = rasterio.open(r'E:/anakonda/LST_Night' + '\\' + files)
        data_array = dataset.read(1)
        data_array_sparse = sparse.coo_matrix(data_array, shape=(351, 545))
        data = files[:-20]
        Matrix[data] = data_array_sparse.toarray().tolist()
        print('Processing is done for the raster: ' + files[:-20])

# Iterate through the stations and get the corresponding row and column for the related x, y coordinates
for index, row in stations.iterrows():
    station_name = str(row['Station'])
    lon = float(row['lon'])
    lat = float(row['lat'])
    x, y = (lon, lat)
    row, col = dataset.index(x, y)
    print('Processing: ' + station_name)
    # Pick the LST value from each stored raster array and record it into the previously created 'table'
    for records_date in Matrix.columns.tolist():
        a = Matrix[records_date]
        LST_Night_value = a.loc[int(row)][int(col)]
        table[records_date] = LST_Night_value
    transpose_mat = table.T
    transpose_mat.rename(columns={0: 'LstNight(Kel)'}, inplace=True)
    transpose_mat.to_csv(r'E:/anakonda/LST_Night' + '\\' + station_name + '.csv')
This is the error shown:
File "C:\Anaconda\envs\timeseries\lib\site-packages\pandas\core\indexes\range.py", line 357, in get_loc
    raise KeyError(key) from err
KeyError: 2278
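For what it's worth, here is a minimal sketch (an illustrative guess, not a confirmed fix) of guarding the lookup so that a station whose coordinates fall outside the raster does not index past its 351 x 545 shape; row_idx and col_idx are hypothetical names for the values returned by dataset.index():

# Hypothetical guard inside the station loop
row_idx, col_idx = dataset.index(x, y)
if 0 <= row_idx < data_array.shape[0] and 0 <= col_idx < data_array.shape[1]:
    LST_Night_value = a.loc[int(row_idx)][int(col_idx)]
    table[records_date] = LST_Night_value
else:
    print('Station ' + station_name + ' falls outside the raster extent')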
I'm trying to deploy my model, which was built using a Pipeline, via Flask; however, I'm facing the following AttributeError:
Can't get attribute 'FeatureSelector' on <module '__main__' from 'app.py'>
Here is my code for model.py:
(After loading the necessary libraries and reading the data, I have defined the classes for my pipeline.)
class FeatureSelector(BaseEstimator, TransformerMixin):
    # Class constructor
    def __init__(self, feature_names):
        self._feature_names = feature_names

    # Return self, nothing else to do here
    def fit(self, X, y=None):
        return self

    # Method that describes what we need this transformer to do
    def transform(self, X, y=None):
        return X[self._feature_names]

LE = LabelEncoder()

class CategoricalTransformer(BaseEstimator, TransformerMixin):
    # Class constructor method that takes in a list of values as its argument
    def __init__(self, cat_cols=['Response', 'EmploymentStatus', 'Number of Open Complaints',
                                 'Number of Policies', 'Policy Type', 'Renew Offer Type',
                                 'Vehicle Class']):
        self._cat_cols = cat_cols

    # Return self, nothing else to do here
    def fit(self, X, y=None):
        return self

    # Transformer method we wrote for this transformer
    def transform(self, X, y=None):
        if self._cat_cols:
            for i in X[self._cat_cols]:
                X[i] = LE.fit_transform(X[i])
        return X.values

class NumericalTransformer(BaseEstimator, TransformerMixin):
    # Class constructor
    def __init__(self, MPA_log=True):
        self._MPA_log = MPA_log

    # Return self, nothing else to do here
    def fit(self, X, y=None):
        return self

    # Custom transform method we wrote that creates the aforementioned feature and drops the redundant one
    def transform(self, X, y=None):
        if self._MPA_log:
            X.loc[:, 'MPA_log'] = np.log(X['Monthly Premium Auto'])
            X = X.drop(['Monthly Premium Auto'], axis=1)
        return X.values
I have created separate pipelines for the numerical and categorical features. They have been combined using a FeatureUnion into the full pipeline.
full_pipeline = FeatureUnion( transformer_list = [ ( 'categorical_pipeline', categorical_pipeline ), ( 'numerical_pipeline', numerical_pipeline ) ] )
X = df.drop('Customer Lifetime Value', axis = 1)
y = df['Customer Lifetime Value']
y = np.log(y) #Transforming the y variable
full_pipeline_RF = Pipeline( steps = [('full_pipeline', full_pipeline),('model',
RandomForestRegressor(max_depth=21, min_samples_leaf= 8, random_state=0))])
full_pipeline_RF.fit(X, y)
# Saving model to disk
pickle.dump(full_pipeline_RF, open('model.pkl','wb'))
# Loading model to compare the results
model = pickle.load(open('model.pkl','rb'))
The model is loaded and called in the app.py file with the following code:
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle

app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))

@app.route('/')
def home():
    return render_template('index.html')

@app.route('/predict', methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI
    '''
    int_features = [float(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(np.exp(prediction[0]), 2)
    return render_template('index.html', prediction_text='Customer Lifetime Value $ {}'.format(output))

if __name__ == "__main__":
    app.run(debug=True)
The code works fine in Jupyter. Even when running in Spyder, it doesn't throw any errors. Please help me with this code; I'm stuck only on the execution bit.
This was actually simple. All I had to do was declare, in my app.py, the classes created for the pipeline.
These are new classes customised for this case, hence they need to be present in app.py as well.
Simply write pass after every class definition.
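For illustration, a minimal sketch of that idea in app.py, shown for one class (the other two follow the same pattern). Note that this repeats the full class body from model.py rather than a bare pass stub, since the unpickled pipeline still calls transform() at prediction time:

import pickle
from sklearn.base import BaseEstimator, TransformerMixin

# The pipeline was pickled while these classes lived in __main__, so app.py
# must provide classes with the same names before unpickling model.pkl.
class FeatureSelector(BaseEstimator, TransformerMixin):
    def __init__(self, feature_names):
        self._feature_names = feature_names
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        return X[self._feature_names]

# CategoricalTransformer and NumericalTransformer would be repeated here in the same way.

model = pickle.load(open('model.pkl', 'rb'))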
I'm using google-cloud-dataflow / Cloud Composer for CSV to Avro, and everything works in my local environment. When trying to read the .avsc file that contains the Avro schema from a Cloud Storage bucket, I keep getting:
IOError: [Errno 2] No such file or directory: 'gs://my-bucket/xxx.avsc'
Code:
from __future__ import absolute_import
import argparse
import logging
import ntpath
import re

import avro.schema
import apache_beam as beam
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import SetupOptions
from datetime import datetime


class RowTransformer(object):
    def __init__(self, delimiter, header, filename):
        self.delimiter = delimiter
        self.keys = re.split(',', header)
        self.filename = filename

    def parse(self, row):
        self.load_dt = datetime.utcnow()
        split_row = row.split(self.delimiter)
        # Need to cast anything that is not a string into the proper type
        split_row[8] = float('0' if not split_row[8] else split_row[8])
        split_row[9] = float('0' if not split_row[9] else split_row[9])
        split_row[10] = float('0' if not split_row[10] else split_row[10])
        split_row[11] = float('0' if not split_row[11] else split_row[11])
        split_row[12] = float('0' if not split_row[12] else split_row[12])
        split_row[13] = float('0' if not split_row[13] else split_row[13])
        split_row[14] = float('0' if not split_row[14] else split_row[14])
        split_row[15] = float('0' if not split_row[15] else split_row[15])
        split_row[16] = float('0' if not split_row[16] else split_row[16])
        split_row[17] = float('0' if not split_row[17] else split_row[17])
        split_row[18] = str('0' if not split_row[18] else split_row[18])
        split_row[19] = str('0' if not split_row[19] else split_row[19])
        split_row.append(self.filename)
        split_row.append(self.load_dt.strftime('%Y-%m-%d %H:%M:%S.%f'))
        decode_row = [i.decode('UTF-8') if isinstance(i, basestring) else i for i in split_row]
        row = dict(zip(self.keys, decode_row))
        return row


def run(argv=None):
    """The main function which creates the pipeline and runs it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', dest='input', required=False,
                        help='Input file to read. This can be a local file or '
                             'a file in a Google Storage Bucket.',
                        default='gs://my-bucket/receive/xxx.txt')
    parser.add_argument('--output', dest='output', required=False,
                        help='Output Avro to Cloud Storage',
                        default='gs://my-bucket/')
    parser.add_argument('--schema', dest='schema', required=False,
                        help='Avro Schema',
                        default='gs://my-bucket/xxx.avsc')
    parser.add_argument('--delimiter', dest='delimiter', required=False,
                        help='Delimiter to split input records.',
                        default='|')
    parser.add_argument('--fields', dest='fields', required=False,
                        help='list of field names expected',
                        default='Col1,Col2...etc')
    known_args, pipeline_args = parser.parse_known_args(argv)

    row_transformer = RowTransformer(delimiter=known_args.delimiter,
                                     header=known_args.fields,
                                     filename=ntpath.basename(known_args.input))

    p_opts = pipeline_options.PipelineOptions(pipeline_args)
    with beam.Pipeline(options=p_opts) as pipeline:
        schema_file = avro.schema.parse(open(known_args.schema, "rb").read())
        rows = pipeline | "Read from text file" >> beam.io.ReadFromText(known_args.input, skip_header_lines=1)
        dict_records = rows | "Convert to Avro" >> beam.Map(lambda r: row_transformer.parse(r))
        dict_records | "Write to Cloud Storage as Avro" >> beam.io.avroio.WriteToAvro(known_args.output, schema=schema_file)


run()
You need to use the apache_beam.io.gcp.gcsio module to open the schema file: the built-in open() only reads local files, so it cannot open a gs:// path. See https://beam.apache.org/documentation/sdks/pydoc/2.6.0/apache_beam.io.gcp.gcsio.html
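For example, a minimal sketch (an assumption built on the module linked above; the gs://my-bucket/xxx.avsc path is taken from the question) of reading the schema through GcsIO instead of the built-in open():

import avro.schema
from apache_beam.io.gcp.gcsio import GcsIO

# GcsIO understands gs:// paths, unlike the built-in open()
schema_handle = GcsIO().open('gs://my-bucket/xxx.avsc', 'rb')
schema_file = avro.schema.parse(schema_handle.read())
schema_handle.close()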