Singleton array cannot be considered a valid collection - machine-learning

I want to perform hyperparameter tuning for Kernel PCA. My code raised a "Singleton array cannot be considered a valid collection" error during train_test_split.
Someone asked a similar question ("TypeError: Singleton array cannot be considered a valid collection" using sklearn train_test_split), but the answer is not applicable here because I still get the error after specifying test_size.
import pandas as pd
import numpy as np
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
X = meth_clin_sub_nt_2_kipan.iloc[:,7:-1]
y_type = meth_clin_sub_nt_2_kipan["type"]
# Color by meth_clin_sub_nt_2_kipan["type"]
X_train, X_test, y_train, y_test = train_test_split(X_kpca, y_type, test_size=0.3, random_state=30)
## Kernel PCA
kpca = KernelPCA()
kpca.fit(X_train, y_train)
## Parameters
param_grid = {
    'n_components': list(range(1, 9)),
    'kernel': ('linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'),
    'degree': list(range(1, 9)),
    'tol': [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
    'fit_inverse_transform': (bool, False),
    'eigen_solver': ('auto', 'dense', 'arpack', 'randomized'),
    'alpha': [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
}
gs = GridSearchCV(X_kpca, param_grid, cv=10)
gs.fit(X_train, y_train)
Traceback:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/tmp/ipykernel_16/3046291160.py in <module>
1 # Color by meth_clin_sub_nt_2_kipan["type"]
----> 2 X_train, X_test, y_train, y_test = train_test_split(X_kpca, y_type, test_size=0.3)
3
4 ## Kernel PCA
5 kpca = KernelPCA()
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_split.py in train_test_split(test_size, train_size, random_state, shuffle, stratify, *arrays)
2415 raise ValueError("At least one array required as input")
2416
-> 2417 arrays = indexable(*arrays)
2418
2419 n_samples = _num_samples(arrays[0])
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in indexable(*iterables)
376
377 result = [_make_indexable(X) for X in iterables]
--> 378 check_consistent_length(*result)
379 return result
380
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
327 """
328
--> 329 lengths = [_num_samples(X) for X in arrays if X is not None]
330 uniques = np.unique(lengths)
331 if len(uniques) > 1:
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in <listcomp>(.0)
327 """
328
--> 329 lengths = [_num_samples(X) for X in arrays if X is not None]
330 uniques = np.unique(lengths)
331 if len(uniques) > 1:
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in _num_samples(x)
268 if len(x.shape) == 0:
269 raise TypeError(
--> 270 "Singleton array %r cannot be considered a valid collection." % x
271 )
272 # Check that shape is returning an integer or default to len
TypeError: Singleton array array(KernelPCA(gamma=15, kernel='rbf', n_components=2, random_state=42),
dtype=object) cannot be considered a valid collection.
Sample data:
`meth_clin_sub_nt_2_kipan.iloc[0:3,-10:].to_dict()`
{'cg09560533': {'TCGA-2K-A9WE-01A': 0.900095504652308,
'TCGA-2Z-A9J1-01A': 0.81104132439858,
'TCGA-2Z-A9J2-01A': 0.457870839535175},
'cg09560549': {'TCGA-2K-A9WE-01A': 0.731219716727976,
'TCGA-2Z-A9J1-01A': 0.816886151477418,
'TCGA-2Z-A9J2-01A': 0.804368893288968},
'cg09560590': {'TCGA-2K-A9WE-01A': 0.331126978536942,
'TCGA-2Z-A9J1-01A': 0.606999048224196,
'TCGA-2Z-A9J2-01A': 0.638951186043284},
'cg09560599': {'TCGA-2K-A9WE-01A': 0.0166606663293514,
'TCGA-2Z-A9J1-01A': 0.0304682590716158,
'TCGA-2Z-A9J2-01A': 0.0188827320665545},
'cg09560658': {'TCGA-2K-A9WE-01A': 0.92977749464931,
'TCGA-2Z-A9J1-01A': 0.684140609221568,
'TCGA-2Z-A9J2-01A': 0.934064932323703},
'cg09560763': {'TCGA-2K-A9WE-01A': 0.821841061291043,
'TCGA-2Z-A9J1-01A': 0.287309064005432,
'TCGA-2Z-A9J2-01A': 0.393975491255026},
'cg09560811': {'TCGA-2K-A9WE-01A': 0.927202214991965,
'TCGA-2Z-A9J1-01A': 0.932595863242732,
'TCGA-2Z-A9J2-01A': 0.949126686353227},
'cg09560911': {'TCGA-2K-A9WE-01A': 0.018890114367182,
'TCGA-2Z-A9J1-01A': 0.0182049878300791,
'TCGA-2Z-A9J2-01A': 0.0272930258041942},
'cg09560953': {'TCGA-2K-A9WE-01A': 0.844136240512521,
'TCGA-2Z-A9J1-01A': 0.253988367060448,
'TCGA-2Z-A9J2-01A': 0.898586638943748},
'type': {'TCGA-2K-A9WE-01A': 'kirp',
'TCGA-2Z-A9J1-01A': 'kirp',
'TCGA-2Z-A9J2-01A': 'kirp'}}
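Judging from the traceback, the object passed to train_test_split is a fitted KernelPCA estimator (X_kpca), not a feature matrix, which is why _num_samples sees a zero-dimensional object array. A minimal corrected sketch, assuming the goal is to tune KernelPCA on the raw features X defined above (the SVC classifier is an arbitrary stand-in added here, since KernelPCA alone has no score method for GridSearchCV to optimize):
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

# Split the raw feature matrix, not the KernelPCA object
X_train, X_test, y_train, y_test = train_test_split(
    X, y_type, test_size=0.3, random_state=30)

# GridSearchCV expects an estimator as its first argument, so tune
# KernelPCA inside a pipeline with a downstream classifier
pipe = Pipeline([('kpca', KernelPCA()), ('clf', SVC())])
param_grid = {
    'kpca__n_components': list(range(1, 9)),
    'kpca__kernel': ('linear', 'poly', 'rbf', 'sigmoid', 'cosine'),
}
gs = GridSearchCV(pipe, param_grid, cv=10)
gs.fit(X_train, y_train)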

Related

ValueError: Input X contains NaN

I'm trying to classify my traffic using SVM, as below:
import pandas as pd # for process the DataSet
import matplotlib.pyplot as plt
ds = pd.read_csv("dataset_sdn.csv") # read the dataset into ds
ds.fillna(0)
ds
X = ds.iloc[: , [4,5,6,7,8,9,10,11,12,13,14,17,18,19,20,21]] # Input Features
Y = ds.iloc[:, 22] # OutPut
print (X)
print (Y)
from sklearn.model_selection import train_test_split
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size=0.25, random_state=0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_Train = sc_X.fit_transform(X_Train)
X_Test = sc_X.transform(X_Test)
from sklearn.svm import SVC
classifier = SVC (kernel='linear', random_state=0)
classifier.fit(X_Train, Y_Train)
Y_pred = classifier.predict(X_Test)
Here, in this last step, I get an error message:
ValueError Traceback (most recent call last)
Input In [43], in <cell line: 3>()
1 from sklearn.svm import SVC
2 classifier = SVC (kernel='linear', random_state=0)
----> 3 classifier.fit(X_Train, Y_Train)
5 # The output predect
6 Y_pred = classifier.predict(X_Test)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\svm\_base.py:173,
in BaseLibSVM.fit(self, X, y, sample_weight)
171 check_consistent_length(X, y)
172 else:
--> 173 X, y = self._validate_data(
174 X,
175 y,
176 dtype=np.float64,
177 order="C",
178 accept_sparse="csr",
179 accept_large_sparse=False,
180 )
182 y = self._validate_targets(y)
184 sample_weight = np.asarray(
185 [] if sample_weight is None else sample_weight, dtype=np.float64
186 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\base.py:596,
in BaseEstimator._validate_data(self, X, y, reset, validate_separately, **check_params)
594 y = check_array(y, input_name="y", **check_y_params)
595 else:
--> 596 X, y = check_X_y(X, y, **check_params)
597 out = X, y
599 if not no_val_X and check_params.get("ensure_2d", True):
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\validation.py:1074,
in check_X_y(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)
1069 estimator_name = _check_estimator_name(estimator)
1070 raise ValueError(
1071 f"{estimator_name} requires y to be passed, but the target y is None"
1072 )
--> 1074 X = check_array(
1075 X,
1076 accept_sparse=accept_sparse,
1077 accept_large_sparse=accept_large_sparse,
1078 dtype=dtype,
1079 order=order,
1080 copy=copy,
1081 force_all_finite=force_all_finite,
1082 ensure_2d=ensure_2d,
1083 allow_nd=allow_nd,
1084 ensure_min_samples=ensure_min_samples,
1085 ensure_min_features=ensure_min_features,
1086 estimator=estimator,
1087 input_name="X",
1088 )
1090 y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator)
1092 check_consistent_length(X, y)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\validation.py:899,
in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)
893 raise ValueError(
894 "Found array with dim %d. %s expected <= 2."
895 % (array.ndim, estimator_name)
896 )
898 if force_all_finite:
--> 899 _assert_all_finite(
900 array,
901 input_name=input_name,
902 estimator_name=estimator_name,
903 allow_nan=force_all_finite == "allow-nan",
904 )
906 if ensure_min_samples > 0:
907 n_samples = _num_samples(array)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\validation.py:146,
in _assert_all_finite(X, allow_nan, msg_dtype, estimator_name, input_name)
124 if (
125 not allow_nan
126 and estimator_name (...)
130 # Improve the error message on how to handle missing values in
131 # scikit-learn.
132 msg_err += (
133 f"\n{estimator_name} does not accept missing values"
134 " encoded as NaN natively. For supervised learning, you might want" (...)
144 "#estimators-that-handle-nan-values"
145 )
--> 146 raise ValueError(msg_err)
148 # for object dtype data, we only check for NaNs (GH-13254)
149 elif X.dtype == np.dtype("object") and not allow_nan:
ValueError: Input X contains NaN. SVC does not accept missing values
encoded as NaN natively. For supervised learning, you might want to
consider sklearn.ensemble.HistGradientBoostingClassifier and Regressor
which accept missing values encoded as NaNs natively. Alternatively,
it is possible to preprocess the data, for instance by using an
imputer transformer in a pipeline or drop samples with missing values.
See https://scikit-learn.org/stable/modules/impute.html You can find a
list of all estimators that handle NaN values at the following page:
https://scikit-learn.org/stable/modules/impute.html#estimators-that-handle-nan-values
So, please, any advice to solve this error? There isn't any NaN value in the dataset as far as I can tell.
You are not replacing the old dataframe with the new one: fillna returns a new dataframe by default and does not modify ds in place.
Use this:
ds = ds.fillna(0)
OR
ds.fillna(0, inplace=True)
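As a quick sanity check (an added sketch, not part of the original answer), confirm that the reassignment actually removed the NaN values before fitting:
ds = ds.fillna(0)
print(ds.isna().sum().sum())  # should print 0 once the NaNs are filled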

Simple classification using scikit-learn not working

This is the code that I used to solve a classification problem pertaining to credit card fraud detection:
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
df = pd.read_csv(r'C:\Users\SVISHWANATH\Downloads\creditcard.csv')
f = df.drop(['Class'], axis = 1)
g = df.Class
g.values.reshape(-1,1)
X_train, X_test, y_train, y_test = train_test_split(f, g, stratify = g)
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, y_train)
knn.predict(y_test)
For some reason, even though I call reshape, the above code results in an error. This is the error:
ValueError Traceback (most recent call last)
<ipython-input-37-d24a7d3e9bd3> in <module>
12 knn = KNeighborsClassifier(n_neighbors = 5)
13 knn.fit(X_train, y_train)
---> 14 knn.predict(y_test)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\neighbors\_classification.py in predict(self, X)
171 Class labels for each data sample.
172 """
--> 173 X = check_array(X, accept_sparse='csr')
174
175 neigh_dist, neigh_ind = self.kneighbors(X)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\utils\validation.py in inner_f(*args, **kwargs)
71 FutureWarning)
72 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 73 return f(**kwargs)
74 return inner_f
75
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\utils\validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator)
622 "Reshape your data either using array.reshape(-1, 1) if "
623 "your data has a single feature or array.reshape(1, -1) "
--> 624 "if it contains a single sample.".format(array))
625
626 # in the future np.flexible dtypes will be handled like object dtypes
ValueError: Expected 2D array, got 1D array instead:
array=[0 0 0 ... 0 0 0].
Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.
y_test are the results you're trying to predict (i.e. classes). You need to predict from the available data, i.e. data you would have when trying to classify, which would be everything else except the classes: in your case that is X_test, so you need to change knn.predict(y_test) to knn.predict(X_test). You can then use y_test to compare your predictions and see how accurate they are.
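In code, the fix plus an accuracy check might look like this (accuracy_score is an illustrative addition, not part of the original answer):
from sklearn.metrics import accuracy_score

y_pred = knn.predict(X_test)           # predict from the features, not the labels
print(accuracy_score(y_test, y_pred))  # compare predictions against the true classes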

How to use Recursive Feature elimination?

I am new to ML and have been trying feature selection with the RFE approach. My dataset has 5K records and it's a binary classification problem. This is the code that I am following, based on an online tutorial:
#no of features
nof_list=np.arange(1,13)
high_score=0
#Variable to store the optimum features
nof=0
score_list =[]
for n in range(len(nof_list)):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
    model = RandomForestClassifier()
    rfe = RFE(model, nof_list[n])
    X_train_rfe = rfe.fit_transform(X_train, y_train)
    X_test_rfe = rfe.transform(X_test)
    model.fit(X_train_rfe, y_train)
    score = model.score(X_test_rfe, y_test)
    score_list.append(score)
    if(score > high_score):
        high_score = score
        nof = nof_list[n]
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
I encounter the below error. Can someone please help?
TypeError Traceback (most recent call last)
<ipython-input-332-a23dfb331001> in <module>
9 model = RandomForestClassifier()
10 rfe = RFE(model,nof_list[n])
---> 11 X_train_rfe = rfe.fit_transform(X_train,y_train)
12 X_test_rfe = rfe.transform(X_test)
13 model.fit(X_train_rfe,y_train)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\base.py in fit_transform(self, X, y, **fit_params)
554 Training set.
555
--> 556 y : numpy array of shape [n_samples]
557 Target values.
558
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\feature_selection\_base.py in transform(self, X)
75 X = check_array(X, dtype=None, accept_sparse='csr',
76 force_all_finite=not tags.get('allow_nan', True))
---> 77 mask = self.get_support()
78 if not mask.any():
79 warn("No features were selected: either the data is"
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\feature_selection\_base.py in get_support(self, indices)
44 values are indices into the input feature vector.
45 """
---> 46 mask = self._get_support_mask()
47 return mask if not indices else np.where(mask)[0]
48
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\feature_selection\_rfe.py in _get_support_mask(self)
269
270 def _get_support_mask(self):
--> 271 check_is_fitted(self)
272 return self.support_
273
TypeError: check_is_fitted() missing 1 required positional argument: 'attributes'
What is your sklearn version?
The following (using artificial data) should work fine:
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
X = np.random.rand(100,20)
y = np.ones((X.shape[0]))
#no of features
nof_list=np.arange(1,13)
high_score=0
#Variable to store the optimum features
nof=0
score_list =[]
for n in range(len(nof_list)):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
    model = RandomForestClassifier()
    rfe = RFE(model, nof_list[n])
    X_train_rfe = rfe.fit_transform(X_train, y_train)
    X_test_rfe = rfe.transform(X_test)
    model.fit(X_train_rfe, y_train)
    score = model.score(X_test_rfe, y_test)
    score_list.append(score)
    if(score > high_score):
        high_score = score
        nof = nof_list[n]
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
Optimum number of features: 1
Score with 1 features: 1.000000
Versions tested:
sklearn.__version__
'0.20.4'
sklearn.__version__
'0.21.3'
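One likely cause of the check_is_fitted error is a mismatched or stale scikit-learn installation, so checking the version is a good first step. Note also that on recent releases (roughly 1.0 onward) most estimator parameters are keyword-only, so the keyword form of the RFE call is safer across versions (a hedged suggestion, not part of the original answer):
import sklearn
print(sklearn.__version__)  # confirm which release is actually in use

rfe = RFE(model, n_features_to_select=nof_list[n])  # keyword form works on old and new releases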

Error in making train - test sets from iris data by sklearn.train_test_split()

I'm trying to use a simple command, train_test_split, on the iris dataset and use SVM for prediction, but when I use "fit" as follows:
dat_iris = datasets.load_iris()
x1 = dat_iris.data[:,2]
y1 = dat_iris.target
x_train,y_train,x_test,y_test = train_test_split(x1, y1, test_size = 0.3,
random_state=0)
svm_model = SVC(kernel='linear',C=1.0, random_state=0)
svm_model.fit(x_train,y_train)
y_pred = svm_model.predict(x_train)
but the following error appears:
ValueError Traceback (most recent call last)
<ipython-input-245-120527f222b3> in <module>()
7
8 svm_model = SVC(kernel='linear',C=1.0, random_state=0)
----> 9 svm_model.fit(x_train,y_train)
10 y_pred = svm_model.predict(x_train)
11 metrics.classification_report(y_pred, y_train)
~/anaconda3/lib/python3.6/site-packages/sklearn/svm/base.py in fit(self, X, y, sample_weight)
147 self._sparse = sparse and not callable(self.kernel)
148
--> 149 X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr')
150 y = self._validate_targets(y)
151
~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_X_y(X, y, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, warn_on_dtype, estimator)
550 y = y.astype(np.float64)
551
--> 552 check_consistent_length(X, y)
553
554 return X, y
~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
171 if len(uniques) > 1:
172 raise ValueError("Found input variables with inconsistent numbers of"
--> 173 " samples: %r" % [int(l) for l in lengths])
174
175
ValueError: Found input variables with inconsistent numbers of samples: [105, 45]
This may arise because of the size of the target or inputs; how can I resolve this problem?
You mixed up the order of the return values.
It should be:
X_train, X_test, y_train, y_test = train_test_split(x1, y1, test_size = 0.3,
random_state=0)
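Note also that x1 = dat_iris.data[:,2] selects a single column and is therefore one-dimensional; SVC expects a 2D feature matrix, so a reshape will likely be needed as well (an observation added here, not part of the original answer):
x1 = dat_iris.data[:, 2].reshape(-1, 1)  # single feature as a 2D column
X_train, X_test, y_train, y_test = train_test_split(
    x1, y1, test_size=0.3, random_state=0)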

Tensorflow BiRNN dynamic dimension size for input data

I have an issue with constructing input data for a BiRNN network.
I'm creating a License Plate detection system like the one described here: https://arxiv.org/pdf/1601.05610v1.pdf
I have got to the "4.2.3 Sequence Labelling" part, where I need to train a BiRNN with a dataset of shape (total_count_of_images, None, 256); None because it is the length of the image, which is different for every picture in the data set.
Let's say I have 3000 images. Then the shape would look like:
train.shape : (3000,) but really it is (3000, None, 256) !?
So I got example code from
https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb
I'm struggling even with starting to train my RNN. I don't understand how I need to construct the input data/model, input placeholders, variables etc. to achieve any training process.
As far as I know, everything should work. My code:
reset_graph()
'''
Dataset : (10000, 784)
Labels : (10000, 10)
To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''
# Parameters
learning_rate = 0.001
training_iters = 100 # 100000
display_step = 10
batch_size = 40
# Network Parameters
n_input = 256 # data input size / 256-D
n_steps = 256 # timesteps
n_hidden = 200 # hidden layer num of features
n_classes = 36 # total classes (0-9 digits and a-z letters)
# tf Graph input
x = tf.placeholder("float", [batch_size, None , n_input], name='input_placeholder')
y = tf.placeholder("float", [batch_size, None, n_classes], name='labels_placeholder')
# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    print('Input x', x.get_shape().as_list())
    print('weights[\'out\']', weights['out'].get_shape().as_list())
    print('biases[\'out\']', biases['out'].get_shape().as_list())
    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Permuting batch_size and n_steps
    #x = tf.transpose(x, [1, 0, 2])
    #print('Transposed x',x.get_shape().as_list())
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_steps])
    print('Reshaped x', x.get_shape().as_list())
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_input, x)
    print(len(x), 'of [ ', x[0], ' ] kinds')
    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    # Backward direction cell
    lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    # Get lstm cell output
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
    print(len(outputs), 'of [ ', outputs[0], ' ] kinds')
    # Linear activation, using rnn inner loop last output
    ret = tf.matmul(outputs[-1], weights['out']) + biases['out']
    print('ret', ret.get_shape().as_list())
    return ret
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
OUTPUT :
Input x [40, None, 256]
weights['out'] [400, 36]
biases['out'] [36]
Reshaped x [None, 256]
256 of [ Tensor("split:0", shape=(?, 256), dtype=float32) ] kinds
256 of [ Tensor("concat:0", shape=(?, 400), dtype=float32) ] kinds
ret [None, 36]
Everything looks right up to there.
Problems start at the session part:
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    batch_data = batch_gen(batch_size)
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = next(batch_data)
        print(batch_x.shape)
        print(batch_y.shape)
        #m[:,0, None, None].shape
        # Run optimization op (backprop)
        print('Optimizer')
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            print('Display')
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
There I get the following error:
(40,)
(40,)
Optimizer
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-96-a53814db8181> in <module>()
14 #Run optimization op (backprop)
15 print('Optimizer')
---> 16 sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
17
18 if step % display_step == 0:
/home/nauris/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
715 try:
716 result = self._run(None, fetches, feed_dict, options_ptr,
--> 717 run_metadata_ptr)
718 if run_metadata:
719 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/nauris/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
886 ' to a larger type (e.g. int64).')
887
--> 888 np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
889
890 if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
/home/nauris/anaconda3/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
480
481 """
--> 482 return array(a, dtype, copy=False, order=order)
483
484 def asanyarray(a, dtype=None, order=None):
ValueError: setting an array element with a sequence.
Any help would be highly appreciated. Thanks everyone in advance.
I realized that the error occurs because you cannot feed a numpy ndarray with inconsistent dimensions, such as (3000, None, 256) in my case. I haven't found a solution yet.
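A common workaround (an added sketch, not from the original post) is to pad each batch to its longest sequence and keep the true lengths, which can then be passed to a dynamic RNN (for example via the sequence_length argument of tf.nn.bidirectional_dynamic_rnn):
import numpy as np

def pad_batch(sequences, n_input=256):
    # sequences: list of arrays of shape (length_i, n_input)
    max_len = max(s.shape[0] for s in sequences)
    batch = np.zeros((len(sequences), max_len, n_input), dtype=np.float32)
    lengths = np.array([s.shape[0] for s in sequences], dtype=np.int32)
    for i, s in enumerate(sequences):
        batch[i, :s.shape[0], :] = s  # copy the real frames, leave the rest zero
    return batch, lengths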
