I get the error below when trying to run the code that follows. I am working through a tutorial based on this page: https://towardsdatascience.com/sentiment-analysis-with-python-part-1-5ce197074184
File "reviewsML.py", line 58, in <module>
X_train, X_val, y_train, y_val = train_test_split(X, target, train_size = 0.50)
….
ValueError: Found input variables with inconsistent numbers of samples: [25707, 25000]
Here is the relevant part of the code:
import codecs
import re

reviews_train = []
for line in codecs.open('movie_data/full_train.txt', 'r', 'utf-8'):
    reviews_train.append(line.strip())

reviews_test = []
for line in codecs.open('movie_data/full_test.txt', 'r', 'utf-8'):
    reviews_test.append(line.strip())

REPLACE_NO_SPACE = re.compile("[.;:!\'?,\"()\[\]]")
REPLACE_WITH_SPACE = re.compile("(<br\s*/><br\s*/>)|(\-)|(\/)")

def preprocess_reviews(reviews):
    reviews = [REPLACE_NO_SPACE.sub("", line.lower()) for line in reviews]
    reviews = [REPLACE_WITH_SPACE.sub(" ", line) for line in reviews]
    return reviews

reviews_train_clean = preprocess_reviews(reviews_train)
reviews_test_clean = preprocess_reviews(reviews_test)
print(len(reviews_train_clean))

from sklearn.feature_extraction.text import CountVectorizer
# construction of the classifier: hyperparameter C adjusts the regularization
cv = CountVectorizer(binary=True)
cv.fit(reviews_train_clean)
X = cv.transform(reviews_train_clean)      # sparse document-term matrix for the training reviews
X_test = cv.transform(reviews_test_clean)

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

target = [1 if i < 12500 else 0 for i in range(25000)]
X_train, X_val, y_train, y_val = train_test_split(X, target, train_size = 0.75)

for c in [0.01, 0.05, 0.25, 0.5, 1]:
    lr = LogisticRegression(C=c)
    lr.fit(X_train, y_train)
    print("Accuracy for C=%s: %s"
          % (c, accuracy_score(y_val, lr.predict(X_val))))
Do you know what I am doing wrong? I tried printing X.shape[0] and it gives me 25707, but I do not know why, because the original files contain 25,000 reviews each for the train and the test sets.
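Since X is built directly from reviews_train_clean, the 25707 means the train file yields 707 more lines than the 25,000 labels built in target. A quick way to narrow it down (a diagnostic sketch, assuming the same file path as above) is to count the raw and empty lines before vectorizing:
# Diagnostic sketch: count raw vs. empty lines in the training file.
import codecs
raw_lines = [line.strip() for line in codecs.open('movie_data/full_train.txt', 'r', 'utf-8')]
print(len(raw_lines))                            # total lines read (reported as 25707 here, expected 25000)
print(sum(1 for line in raw_lines if not line))  # how many of them are empty
# If the line count does not match the 25000 labels in target, X and target are
# out of sync, which is exactly the inconsistent-samples error shown above.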
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
x = np.linspace(-5.0, 5.0, 100)
y = np.sqrt(10**2 - x**2)
y=np.hstack([y,-y])
x=np.hstack([x,-x])
x1 = np.linspace(-5.0, 5.0, 100)
y1 = np.sqrt(5**2 - x1**2)
y1=np.hstack([y1,-y1])
x1=np.hstack([x1,-x1])
plt.scatter(y,x)
plt.scatter(y1,x1)
# print(plt.show())
import pandas as pd
df1 =pd.DataFrame(np.vstack([y,x]).T,columns=['X1','X2'])
df1['Y']=0
df2 =pd.DataFrame(np.vstack([y1,x1]).T,columns=['X1','X2'])
df2['Y']=1
df1.merge(df2)
# We need to find components for the Polynomial Kernel
#X1,X2,X1_square,X2_square,X1*X2
df1['X1_Square']= df1['X1']**2
df1['X2_Square']= df1['X2']**2
df1['X1*X2'] = (df1['X1'] *df1['X2'])
# print(df1.head())
### Independent and Dependent features
X = df1[['X1','X2','X1_Square','X2_Square','X1*X2']]
y = df1['Y']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.25,
random_state = 0)
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
classifier = SVC(kernel="linear")
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
accuracy_score(y_test, y_pred)
ValueError: The number of classes has to be greater than one; got 1 class
I don't know how to resolve this error. Maybe there is an error in the merge of the two data frames, or I need to append df1 and df2, but I tried that and it doesn't work.
The error occurs because y ends up containing only the single class 0: df1.merge(df2) does not combine the rows of the two data frames (and its result is never assigned), so the line y = df1['Y'] still reads labels from df1 alone.
You can replace the df1.merge(df2) line with this:
df1 = pd.concat([df1,df2])
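For completeness, here is a minimal sketch of the corrected flow. It reuses the x, y, x1, y1 arrays from the question, and simply computes the polynomial-style features after the concatenation so that both classes get them:
# Minimal sketch of the corrected flow (x, y, x1, y1 come from the question's code above).
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

df1 = pd.DataFrame(np.vstack([y, x]).T, columns=['X1', 'X2'])
df1['Y'] = 0
df2 = pd.DataFrame(np.vstack([y1, x1]).T, columns=['X1', 'X2'])
df2['Y'] = 1

df = pd.concat([df1, df2])            # stack both classes, instead of df1.merge(df2)

# polynomial-kernel style features, now computed for both classes
df['X1_Square'] = df['X1'] ** 2
df['X2_Square'] = df['X2'] ** 2
df['X1*X2'] = df['X1'] * df['X2']

X = df[['X1', 'X2', 'X1_Square', 'X2_Square', 'X1*X2']]
y_label = df['Y']                     # now contains both 0 and 1

X_train, X_test, y_train, y_test = train_test_split(X, y_label, test_size=0.25, random_state=0)
classifier = SVC(kernel="linear")
classifier.fit(X_train, y_train)
print(accuracy_score(y_test, classifier.predict(X_test)))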
I am trying to fit a ridge regression model to my data using a pipeline and GridSearchCV.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
X = transformed_data.iloc[:, :-1]
y = transformed_data['class']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1)
params = {}
params['ridge__alpha'] = np.arange(0, 100, 1).tolist()
t = [('labelenc',LabelEncoder() , [0]), ('stand', StandardScaler(), [1,2,3,4,5,6]), ('poly'),PolynomialFeatures(degree=2),[1,2,3,4,5,6] ]
transformer = ColumnTransformer(transformers=t)
pipe = Pipeline(steps=[('t', transformer), ('m',Ridge())])
#grid_ridge2_r2 = GridSearchCV(pipe, params, cv=10, scoring='r2', n_jobs=-1)
#results_ridge2_r2 = grid_ridge2_r2.fit(X_train,y_train)
grid_ridge2_rmse = GridSearchCV(pipe, params, cv=10, scoring='neg_root_mean_squared_error', n_jobs=-1)
results_ridge2_rmse = grid_ridge2_rmse.fit(X_train,y_train)
I keep getting
ValueError: too many values to unpack (expected 3)
in the last line grid_ridge2_rmse.fit(X_train,y_train). My intuition is that there is something wrong with how I am splitting the dataset.
There are a few errors in your pipeline.
The ValueError itself comes from the transformer list: ('poly'), PolynomialFeatures(degree=2), [1,2,3,4,5,6] are three separate entries of t instead of one ('poly', PolynomialFeatures(degree=2), [1,2,3,4,5,6]) tuple, and ColumnTransformer expects every entry to unpack into exactly (name, transformer, columns).
Next, LabelEncoder cannot be used inside a scikit-learn pipeline, as it is meant to transform y, not X. Assuming that you want to encode a categorical feature, it should be replaced by OrdinalEncoder.
Then, to set a grid parameter it has to be named with the convention <step>__<hyperparameter>. Setting the ridge alpha in your case should therefore be m__alpha.
The pipeline parameters can be seen using pipe.get_params().
I would do as follows:
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PolynomialFeatures, OrdinalEncoder, StandardScaler
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
import numpy as np
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1)
params = {'m__alpha' : np.arange(0, 100, 1).tolist()}
t = [
('labelenc',OrdinalEncoder() , [0]),
('stand', StandardScaler(), [1,2,3,4,5,6]),
('poly', PolynomialFeatures(degree=2), [1,2,3,4,5,6])
]
transformer = ColumnTransformer(transformers=t)
pipe = Pipeline(steps=[('t', transformer), ('m',Ridge())])
grid_ridge2_rmse = GridSearchCV(pipe, params, cv=10, scoring='neg_root_mean_squared_error', n_jobs=-1)
results_ridge2_rmse = grid_ridge2_rmse.fit(X_train,y_train)
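Once the search has finished, the best alpha and its cross-validated score can be read back from the fitted search object (a small usage sketch, using the names above):
print(results_ridge2_rmse.best_params_)   # e.g. {'m__alpha': ...}
print(results_ridge2_rmse.best_score_)    # best mean CV score (negative RMSE, so closer to 0 is better)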
I am doing a project on landslide classification. I have dropped null values and the unwanted columns, but I am getting both a training and a testing accuracy of 1.
import pandas as pd
from sklearn.utils import shuffle   # shuffle(df) below returns a shuffled copy of the frame

df = pd.read_csv("full_dataset_v1.csv")
df.head()
# filter by severity. na is for non-landslide data
df = df[df['severity'].isin(["medium", "small", "large", "very_large", "na"])]
df = shuffle(df)
df.reset_index(inplace=True, drop=True)
print(len(df))
X = df.copy()
df_col_length = len(df.columns)
X.drop(X.columns[[0]], axis = 1, inplace = True)
def generate_labels(binary = False):
    y = []
    idx_to_severity = ["large", "medium", "na", "small", "very_large"]
    for severity in X.severity:
        y.append(idx_to_severity.index(severity))
    X.drop(X.columns[[-1]], axis = 1, inplace = True)
    print(y.count(1))
    return y
y = generate_labels(False)
X.drop(X.columns[[0,1]],axis = 1, inplace = True)
df = X
def cat(string):
    df[string] = df[string].astype('category')
cat('country')
cat('type')
cat('trigger')
cat('location')
cat('severity')
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
def label_encode(string):
    le.fit(df[string])
    df[string] = le.transform(df[string])
label_encode('country')
label_encode('type')
label_encode('trigger')
label_encode('location')
label_encode('severity')
df.dropna(axis='columns')
df.fillna(X.mean(), inplace=True)
df.head()
X = df
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33,random_state=0)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
print("Train accuracy",model.score(X_train,y_train))
print("Test acuracy",model.score(X_test,y_test))
The size of the dataset is 4396 rows x 193 columns. Is my code wrong, or is there something I can do to correct my accuracy?
The accuracy you got can be trusted in the sense that your model is not trained on the test data. This is the reason we perform train_test_split: to see how our model performs on unseen data, i.e. the test set. I hope you followed the concept.
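If you want extra reassurance that the perfect score is not an artifact of one particular split, a quick sanity check (a sketch, reusing the X and y built above) is to score the same kind of model with k-fold cross-validation:
# Sanity-check sketch: cross-validated accuracy on the same features and labels as above.
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
scores = cross_val_score(pipe, X, y, cv=5, scoring='accuracy')
print(scores, scores.mean())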
I use pretrained embedding vectors for my Keras model. Before I added them everything worked, and now I get this error:
ValueError: Error when checking input: expected embedding_1_input to have shape (4,) but got array with shape (1,)
Maybe somebody can help me with what I am doing wrong here. I am not sure whether my calls to model.fit and model.evaluate are correct; maybe the problem is there?
import csv
import numpy as np
np.random.seed(42)
from keras.models import Sequential, Model
from keras.layers import *
from random import shuffle
from sklearn.model_selection import train_test_split
from keras import optimizers
from keras.callbacks import EarlyStopping
from itertools import groupby
from numpy import asarray
from numpy import zeros
from numpy import array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# function makes a list of antonyms and synonyms from the files
def preprocessing(filename):
    list_words = []
    with open(filename) as tsv:
        for line in csv.reader(tsv, dialect="excel-tab"):
            list_words.append([line[0], line[1]])
    return list_words

# function makes a list of not-relevant pairs by mixing synonyms and antonyms
def notrelevant(filename, filename2):
    list_words = []
    with open(filename) as tsv:
        with open(filename2) as tsv2:
            for lines in zip(csv.reader(tsv, dialect="excel-tab"), csv.reader(tsv2, dialect="excel-tab")):
                list_words.append([lines[0][0], lines[1][1]])
    return list_words
antonyms_list = preprocessing("antonyms.tsv")
synonyms_list = preprocessing("synonyms.tsv")
notrelevant_list = notrelevant("antonyms.tsv", "synonyms.tsv")
# function combines all antonyms, synonyms, and not-relevant pairs in one labelled list and shuffles it
def data_prepare(ant, syn, nrel):
    data = []
    for elem1, elem2 in ant:
        data.append([[elem1, elem2], "Antonyms"])
    for elem1, elem2 in syn:
        data.append([[elem1, elem2], "Synonyms"])
    for elem1, elem2 in nrel:
        data.append([[elem1, elem2], "Not relevant"])
    shuffle(data)
    return data

data_with_labels_shuffled = data_prepare(antonyms_list, synonyms_list, notrelevant_list)

def label_to_onehot(labels):
    mapping = {label: i for i, label in enumerate(set(labels))}
    one_hot = np.empty((len(labels), 3))
    for i, label in enumerate(labels):
        entry = [0] * len(mapping)
        entry[mapping[label]] = 1
        one_hot[i] = entry
    return one_hot
def words_to_ids(labels):
    vocabulary = []
    word_to_id = {}
    ids = []
    for word1, word2 in labels:
        vocabulary.append(word1)
        vocabulary.append(word2)
    counter = 0
    for word in vocabulary:
        if word not in word_to_id:
            word_to_id[word] = counter
            counter += 1
    for word1, word2 in labels:
        ids.append([word_to_id[word1], word_to_id[word2]])
    return ids

def split_data(datas):
    data = np.array(datas)
    X, y = data[:, 0], data[:, 1]
    # split the data to get 60% train and 40% test
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
    y_train = label_to_onehot(y_train)
    X_dev, X_test, y_dev, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=42)
    y_dev = label_to_onehot(y_dev)
    y_test = label_to_onehot(y_test)
    return X_train, y_train, X_dev, y_dev, X_test, y_test
X_train, y_train, X_dev, y_dev, X_test, y_test = split_data(data_with_labels_shuffled)
# prepare tokenizer
t = Tokenizer()
t.fit_on_texts(X_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(X_train)
# load the whole embedding into memory
embeddings_index = dict()
f = open('glove.6B.50d.txt')
for line in f:
    values = line.split()
    word = values[0]
    coefs = asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 50))
for word, i in t.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
VOCABSIZE = len(data_with_labels_shuffled)
EMBSIZE = 50
HIDDENSIZE = 50
KERNELSIZE = 5
MAXEPOCHS = 5
model = Sequential()
model.add(Embedding(vocab_size, 50, weights=[embedding_matrix],
input_length=4, trainable=False))
model.add(Dropout(0.25))
model.add(Bidirectional(GRU(units = HIDDENSIZE // 2)))
#model.add(Flatten())
model.add(Dense(units = 3, activation = "softmax"))
model.compile(loss='categorical_crossentropy', optimizer="adam",
metrics=['accuracy'])
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=0, mode='min')
model.fit (X_train, y_train,
batch_size=64,
callbacks = [earlystop],
epochs=100,
validation_data=(X_dev, y_dev),
verbose=1)
scores = model.evaluate(X_test, y_test, batch_size=64)
print("Accuracy is: %.2f%%" %(scores[1] * 100))
I think the problem is that you should pass encoded_docs to your model.fit() function instead of X_train since encoded_docs contains the tokenization of your training data and X_train still only contains a list of words. Moreover, you have to make sure that the input_length parameter of your Embedding layer matches the length of these tokenized training examples that you have created in encoded_docs.
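A minimal sketch of what that could look like, reusing the names from the question (the max_len and padded_* variables are illustrative additions, not from the original code):
# Sketch: encode and pad the word pairs so their length matches the Embedding input_length.
from keras.preprocessing.sequence import pad_sequences

encoded_train = t.texts_to_sequences(X_train)
encoded_dev = t.texts_to_sequences(X_dev)
encoded_test = t.texts_to_sequences(X_test)

max_len = max(len(seq) for seq in encoded_train)     # 2 for word pairs
padded_train = pad_sequences(encoded_train, maxlen=max_len, padding='post')
padded_dev = pad_sequences(encoded_dev, maxlen=max_len, padding='post')
padded_test = pad_sequences(encoded_test, maxlen=max_len, padding='post')

# rebuild the model so input_length matches max_len instead of the hard-coded 4
model = Sequential()
model.add(Embedding(vocab_size, 50, weights=[embedding_matrix],
                    input_length=max_len, trainable=False))
model.add(Dropout(0.25))
model.add(Bidirectional(GRU(units=HIDDENSIZE // 2)))
model.add(Dense(units=3, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])

model.fit(padded_train, y_train,
          batch_size=64,
          callbacks=[earlystop],
          epochs=100,
          validation_data=(padded_dev, y_dev),
          verbose=1)
scores = model.evaluate(padded_test, y_test, batch_size=64)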
I was solving assignment 2 (link) of Andrej Karpathy's course on neural networks. The programming environment is an IPython notebook. When I try to load the CIFAR-10 data I repeatedly get a memory error. I tried to google a solution but nothing worked. Please help me here.
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
Here is the load_CIFAR10 code from cs231n/data_utils.py:

import cPickle as pickle
import numpy as np
import os

def load_CIFAR_batch(filename):
    """ load single batch of cifar """
    with open(filename, 'rb') as f:
        datadict = pickle.load(f)
        X = datadict['data']
        Y = datadict['labels']
        X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y

def load_CIFAR10(ROOT):
    """ load all of cifar """
    xs = []
    ys = []
    for b in range(1, 6):
        f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)
        ys.append(Y)
    Xtr = np.concatenate(xs)
    Ytr = np.concatenate(ys)
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
If someone is facing the same issue on Windows, please install the x64 (64-bit) Python distribution. Memory usage of a 32-bit (x86) Python process is capped at 2 GB.
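A quick way to check which build you are running (a small verification sketch, not part of the original answer):
# Prints 64 for a 64-bit interpreter, 32 for a 32-bit one.
import struct
print(struct.calcsize("P") * 8)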