I used a pre-trained VGG16 as the CNN part to extract image features (that part is not trained) and defined a decoder class, which is the only part the model trains. I don't understand how resources get exhausted just training the decoder, which I think is nowhere near as complex as VGG16. I am attaching all the relevant code below.
Here is the code for VGG16:
image_model = tf.keras.applications.VGG16(include_top=False, weights='imagenet')
image_model.trainable = False
new_input = image_model.input # Any arbitrary shapes with 3 channels
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
# https://www.tensorflow.org/tutorials/text/image_captioning
class VGG16_Encoder(tf.keras.Model):
    # This encoder passes the features through a fully connected layer
    def __init__(self, cnn_model):
        super(VGG16_Encoder, self).__init__()
        # shape after fc : (batch_size, 49, embedding_dim)
        self.conv_base = cnn_model
        #self.fc = tf.keras.layers.Dense(embedding_dim)
        #self.dropout = tf.keras.layers.Dropout(0.5, noise_shape=None, seed=None)

    def call(self, x):
        #x = self.fc(x)
        #x = tf.nn.relu(x)
        x = self.conv_base(x)
        # Note: BATCH_SIZE is hard-coded here, so every batch must be full.
        x = tf.reshape(x, (BATCH_SIZE, 49, 512))
        return x
Here is the code for the decoder:
def rnn_type(units):
    # If you have a GPU, we recommend using CuDNNGRU (about a 3x speedup over GRU);
    # the code automatically does that.
    if tf.test.is_gpu_available():
        return tf.compat.v1.keras.layers.CuDNNGRU(units,
                                                  return_sequences=True,
                                                  return_state=True,
                                                  recurrent_initializer='glorot_uniform')
    else:
        return tf.keras.layers.GRU(units,
                                   return_sequences=True,
                                   return_state=True,
                                   recurrent_activation='sigmoid',
                                   recurrent_initializer='glorot_uniform')
'''The encoder output (i.e. 'features'), the hidden state (initialized to 0) (i.e. 'hidden'), and
the decoder input (which is the start token) (i.e. 'x') are passed to the decoder.'''
class Rnn_Local_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, vocab_size):
        super().__init__()
        self.units = units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.dropout = tf.keras.layers.Dropout(0.5, noise_shape=None, seed=None)
        # All arguments below are the Keras defaults; kept explicit as in the original.
        self.batchnormalization = tf.keras.layers.BatchNormalization(
            axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
            beta_initializer='zeros', gamma_initializer='ones',
            moving_mean_initializer='zeros', moving_variance_initializer='ones',
            beta_regularizer=None, gamma_regularizer=None,
            beta_constraint=None, gamma_constraint=None)
        self.fc2 = tf.keras.layers.Dense(vocab_size)
        # Implementing the attention mechanism
        self.U_attn = tf.keras.layers.Dense(units)
        self.W_attn = tf.keras.layers.Dense(units)
        self.V_attn = tf.keras.layers.Dense(1)
    #_________________________________________________________________________________

    def call(self, x, features, hidden):
        # features : (batch_size, 49, 512)  (output from the encoder)
        # hidden   : (batch_size, hidden_size) <==> (64, 512)
        # hidden_with_time_axis : (batch_size, 1, hidden_size) <==> (64, 1, 512)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # Attention function:
        '''e_tj = f(s_(t-1), h_j)
           e_tj = V_attn^T * tanh(U_attn * h_j + W_attn * s_(t-1))'''
        score = self.V_attn(tf.nn.tanh(self.U_attn(features) + self.W_attn(hidden_with_time_axis)))
        # self.U_attn(features)                : (64, 49, 512)
        # self.W_attn(hidden_with_time_axis)   : (64, 1, 512)
        # tf.nn.tanh(...)                      : (64, 49, 512)
        # score = self.V_attn(tf.nn.tanh(...)) : (64, 49, 1)
        # The last axis is 1 because V_attn projects each location to a single score.
        # Then turn the scores into probabilities with a softmax:
        '''attention_weights(alpha_tj) = softmax(e_tj)'''
        attention_weights = tf.nn.softmax(score, axis=1)
        # attention_weights : (64, 49, 1)
        # Weight the different locations of the image:
        '''C_t = sum over j of (attention_weights * VGG16 features)'''
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)
        # context_vector shape after the sum : (64, 512)
        x = self.embedding(x)
        # x shape after passing through embedding : (64, 1, embedding_dim)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # x shape after concatenation : (64, 1, embedding_dim + 512)
        # Passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # output shape : (batch_size, max_length, hidden_size)
        x = self.fc1(output)
        # x : (batch_size * max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))
        # Adding Dropout and BatchNorm layers
        x = self.dropout(x)
        x = self.batchnormalization(x)
        # Final projection to the vocabulary: (64, 8329)
        x = self.fc2(x)
        return x, state, attention_weights
    #_________________________________________________________________________________

    def reset_state(self, batch_size):
        return tf.zeros((batch_size, self.units))
encoder = VGG16_Encoder(image_features_extract_model)
decoder = Rnn_Local_Decoder(embedding_dim, units, vocab_size)
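As a sanity check on the wiring, here is a minimal shape-check sketch; the values BATCH_SIZE = 64, embedding_dim = 256, units = 512, vocab_size = 8329 and the 224x224 input size are assumptions taken from the shape comments above, not from the original post:
import tensorflow as tf

BATCH_SIZE, embedding_dim, units, vocab_size = 64, 256, 512, 8329  # assumed values

dummy_images = tf.random.uniform((BATCH_SIZE, 224, 224, 3))  # 224x224 gives 7x7 = 49 locations
features = encoder(dummy_images)                             # (64, 49, 512)
hidden = decoder.reset_state(batch_size=BATCH_SIZE)          # (64, 512)
dec_input = tf.zeros((BATCH_SIZE, 1), dtype=tf.int32)        # one start token per image
preds, state, attn = decoder(dec_input, features, hidden)
print(preds.shape, state.shape, attn.shape)                  # (64, 8329) (64, 512) (64, 49, 1)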
Here is the training code:
def train_step(img_tensor, target):
    loss = 0
    # Initializing the hidden state for each batch,
    # because the captions are not related from image to image.
    hidden = decoder.reset_state(batch_size=target.shape[0])
    # Note: BATCH_SIZE is hard-coded here; this assumes every batch is full.
    dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * BATCH_SIZE, 1)
    features = encoder(img_tensor)
    with tf.GradientTape() as tape:
        for i in range(1, max_len):
            # Passing the features through the decoder
            predictions, hidden, _ = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # Using teacher forcing
            dec_input = tf.expand_dims(target[:, i], 1)
    total_loss = (loss / int(target.shape[1]))
    trainable_variables = decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    return loss, total_loss
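For context, this is how the step function is typically driven (a sketch in the style of the TF image-captioning tutorial linked above; dataset is a hypothetical tf.data.Dataset yielding (img_tensor, target) batches, and EPOCHS is an assumed value):
EPOCHS = 20  # assumed value

for epoch in range(EPOCHS):
    total = 0.0
    for (batch, (img_tensor, target)) in enumerate(dataset):  # hypothetical dataset
        batch_loss, t_loss = train_step(img_tensor, target)
        total += t_loss
    print('Epoch {} Loss {:.6f}'.format(epoch + 1, float(total) / (batch + 1)))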
Here is the error:
ResourceExhaustedError: Graph execution error:
Detected at node 'gradient_tape/rnn__local__decoder_1/dense_6/MatMul_3/MatMul_1' defined at (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.8/dist-packages/traitlets/config/application.py", line 992, in launch_instance
app.start()
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelapp.py", line 612, in start
self.io_loop.start()
File "/usr/local/lib/python3.8/dist-packages/tornado/platform/asyncio.py", line 149, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.8/asyncio/base_events.py", line 570, in run_forever
self._run_once()
File "/usr/lib/python3.8/asyncio/base_events.py", line 1859, in _run_once
handle._run()
File "/usr/lib/python3.8/asyncio/events.py", line 81, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 690, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 787, in inner
self.run()
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 748, in run
yielded = self.gen.send(value)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py", line 381, in dispatch_queue
yield self.process_one()
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 225, in wrapper
runner = Runner(result, future, yielded)
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 714, in __init__
self.run()
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 748, in run
yielded = self.gen.send(value)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py", line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelbase.py", line 543, in execute_request
self.do_execute(
File "/usr/local/lib/python3.8/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/ipkernel.py", line 306, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.8/dist-packages/ipykernel/zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py", line 2854, in run_cell
result = self._run_cell(
File "/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py", line 2881, in _run_cell
return runner(coro)
File "/usr/local/lib/python3.8/dist-packages/IPython/core/async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py", line 3057, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py", line 3249, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "/usr/local/lib/python3.8/dist-packages/IPython/core/interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-82-94347d84883d>", line 11, in <module>
batch_loss, t_loss = train_step(img_tensor, target)
File "<ipython-input-63-9f15c0ea6d9d>", line 30, in train_step
gradients = tape.gradient(loss, trainable_variables)
Node: 'gradient_tape/rnn__local__decoder_1/dense_6/MatMul_3/MatMul_1'
Sorry for posting so much code, but I feel all of it is necessary to sort out this issue.
Thanks in advance!
I tried reducing the data from 40,000 images to just 500, but the same error remained. I also tried reducing the batch size and the decoder's embedding dim (512 → 128), all in vain.
Kindly help me fix this issue.
I'm getting the following error when trying to extract features. y contains the labels, so I'm not sure why I'm getting the error.
Code:
for cls in os.listdir(path):
    for sound in tqdm(os.listdir(os.path.join(path, cls))):
        wav = librosa.load(os.path.join(path, cls, sound), sr=16000)[0].astype(np.float32)
        tmp_samples.append(wav)
        tmp_labels.append(cls)

X_train, y_train, X_test, y_test = train_test_split(tmp_samples, tmp_labels, test_size=0.60, shuffle=True)

for x, y in zip(X_test, y_test):
    extract_features(x, y, model, plain_samples, plain_labels)

def extract_features(wav, cls, model, plain_samples, plain_labels):
    for feature in model(wav)[1]:
        plain_samples.append(feature)
        plain_labels.append(cls)
Error:
Traceback (most recent call last):
File "optunaCopy.py", line 481, in <module>
main(sys.argv[1:])
File "optunaCopy.py", line 397, in main
X_train, y_train , X_test , y_test,X_valid,y_valid = create_dataset(path)
File "optunaCopy.py", line 93, in create_dataset
extract_features(x, y, model, plain_samples , plain_labels )
File "optunaCopy.py", line 321, in extract_features
for feature in model(wav)[1]:
File "C:\Users\XXX\anaconda3\envs\yamnet\lib\site-packages\keras\engine\base_layer.py", line 1020, in __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
File "C:\Users\XXX\anaconda3\envs\yamnet\lib\site-packages\keras\engine\input_spec.py", line 196, in assert_input_compatibility
raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))
TypeError: Inputs to a layer should be tensors. Got:hi
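For reference, scikit-learn's train_test_split returns the splits grouped per input array: first both splits of the samples, then both splits of the labels. A minimal sketch of the conventional unpacking, which keeps features and labels from swapping:
from sklearn.model_selection import train_test_split

# Returned order is: train/test of the 1st array, then train/test of the 2nd.
X_train, X_test, y_train, y_test = train_test_split(
    tmp_samples, tmp_labels, test_size=0.60, shuffle=True)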
I am trying to write a machine learning program. The idea is to train a model (defined in q_model) with RMSProp. I report here a really simplified version of my code, which is not working.
import tensorflow as tf
import numpy as np
#--------------------------------------
# Model definition
#--------------------------------------
# Let's use a simple nn for the Q value function
W = tf.Variable(tf.random_normal([3,10],dtype=tf.float64), name='W')
b = tf.Variable(tf.random_normal([10],dtype=tf.float64), name='b')
def q_model(X, A):
    input = tf.concat((X, A), axis=1)
    return tf.reduce_sum(tf.nn.relu(tf.matmul(input, W) + b), axis=1)
#--------------------------------------
# Model and model initializer
#--------------------------------------
optimizer = tf.train.RMSPropOptimizer(0.9)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
#--------------------------------------
# Learning
#--------------------------------------
x = np.matrix(np.random.uniform((0.,0.),(1.,1.), (1000,2)))
a = np.matrix(np.random.uniform((0),(1), 1000)).T
y = np.matrix(np.random.uniform((0),(1), 1000)).T
y_batch = tf.placeholder("float64", shape=(None, 1), name='y')
x_batch = tf.placeholder("float64", shape=(None, 2), name='x')
a_batch = tf.placeholder("float64", shape=(None, 1), name='a')
error = tf.reduce_sum(tf.square(y_batch - q_model(x_batch,a_batch))) / 100.
train = optimizer.minimize(error)
indx = range(1000)
for i in range(100):
    # batches
    np.random.shuffle(indx)
    indx = indx[:100]
    print sess.run({'train': train}, feed_dict={'x:0': x[indx], 'a:0': a[indx], 'y:0': y[indx]})
The error is:
Traceback (most recent call last):
File "/home/samuele/Projects/GBFQI/test/tf_test.py", line 45, in <module>
print sess.run({'train':train}, feed_dict={'x:0':x[indx],'a:0':a[indx],'y:0':y[indx]})
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 789, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 997, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1132, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1152, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value b/RMSProp
[[Node: RMSProp/update_b/ApplyRMSProp = ApplyRMSProp[T=DT_DOUBLE, _class=["loc:#b"], use_locking=false, _device="/job:localhost/replica:0/task:0/cpu:0"](b, b/RMSProp, b/RMSProp_1, RMSProp/update_b/Cast, RMSProp/update_b/Cast_1, RMSProp/update_b/Cast_2, RMSProp/update_b/Cast_3, gradients/add_grad/tuple/control_dependency_1)]]
Caused by op u'RMSProp/update_b/ApplyRMSProp', defined at:
File "/home/samuele/Projects/GBFQI/test/tf_test.py", line 38, in <module>
train = optimizer.minimize(error)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 325, in minimize
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 456, in apply_gradients
update_ops.append(processor.update_op(self, grad))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 97, in update_op
return optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/rmsprop.py", line 140, in _apply_dense
use_locking=self._use_locking).op
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/gen_training_ops.py", line 449, in apply_rms_prop
use_locking=use_locking, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1269, in __init__
self._traceback = _extract_stack()
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value b/RMSProp
[[Node: RMSProp/update_b/ApplyRMSProp = ApplyRMSProp[T=DT_DOUBLE, _class=["loc:#b"], use_locking=false, _device="/job:localhost/replica:0/task:0/cpu:0"](b, b/RMSProp, b/RMSProp_1, RMSProp/update_b/Cast, RMSProp/update_b/Cast_1, RMSProp/update_b/Cast_2, RMSProp/update_b/Cast_3, gradients/add_grad/tuple/control_dependency_1)]]
I can't explain this error to myself, since the model is initialized; in fact, if I run
print sess.run(q_model(x,a))
the model works as expected without raising any error.
EDIT:
My question is different from this question. I was already aware of
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
but I didn't know that it also has to be run after the optimization op is created.
You need to put this piece of code:
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
after having created these tensors:
y_batch = tf.placeholder("float64", shape=(None, 1), name='y')
x_batch = tf.placeholder("float64", shape=(None, 2), name='x')
a_batch = tf.placeholder("float64", shape=(None, 1), name='a')
error = tf.reduce_sum(tf.square(y_batch - q_model(x_batch,a_batch))) / 100.
train = optimizer.minimize(error)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
Otherwise the hidden variables added to the graph when calling optimizer.minimize won't be initialised.
Meanwhile, the call to print sess.run(q_model(x,a)) works because the variables used by that part of the graph have all been initialised.
BTW: use tf.global_variables_initializer rather than the deprecated tf.initialize_all_variables.
EDIT:
To perform a selective initialisation, you could do something like that:
with tf.variable_scope("to_be_initialised"):
    train = optimizer.minimize(error)

sess.run(tf.variables_initializer(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='to_be_initialised')))
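To see which variables are still pending initialisation at any point, TF1 offers tf.report_uninitialized_variables; a quick diagnostic sketch:
# Prints the names of variables that have not been initialised yet,
# e.g. the RMSProp slot variables created by optimizer.minimize.
print sess.run(tf.report_uninitialized_variables())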
This is my first post. I've been trying to combine features with FeatureUnion and Pipeline, but when I add a tf-idf + SVD pipeline the test fails with a 'dimension mismatch' error. My task is to create a regression model to predict search relevance. Code and errors are reported below. Is there something wrong in my code?
df = read_tsv_data(input_file)
df = tokenize(df)
df_train, df_test = train_test_split(df, test_size = 0.2, random_state=2016)
x_train = df_train['sq'].values
y_train = df_train['relevance'].values
x_test = df_test['sq'].values
y_test = df_test['relevance'].values
# char ngrams
char_ngrams = CountVectorizer(ngram_range=(2,5), analyzer='char_wb', encoding='utf-8')
# TFIDF word ngrams
tfidf_word_ngrams = TfidfVectorizer(ngram_range=(1, 4), analyzer='word', encoding='utf-8')
# SVD
svd = TruncatedSVD(n_components=100, random_state = 2016)
# SVR
svr_lin = SVR(kernel='linear', C=0.01)
pipeline = Pipeline([
    ('feature_union', FeatureUnion(
        transformer_list=[
            ('char_ngrams', char_ngrams),
            ('char_ngrams_svd_pipeline', make_pipeline(char_ngrams, svd)),
            ('tfidf_word_ngrams', tfidf_word_ngrams),
            ('tfidf_word_ngrams_svd', make_pipeline(tfidf_word_ngrams, svd))
        ]
    )),
    ('svr_lin', svr_lin)
])
model = pipeline.fit(x_train, y_train)
y_pred = model.predict(x_test)
When adding the pipeline below to the FeatureUnion list:
('tfidf_word_ngrams_svd', make_pipeline(tfidf_word_ngrams, svd))
The exception below is generated:
2016-07-31 10:34:08,712 : Testing ... Test Shape: (400,) - Training Shape: (1600,)
Traceback (most recent call last):
File "src/model/end_to_end_pipeline.py", line 236, in <module>
main()
File "src/model/end_to_end_pipeline.py", line 233, in main
process_data(input_file, output_file)
File "src/model/end_to_end_pipeline.py", line 175, in process_data
y_pred = model.predict(x_test)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/utils/metaestimators.py", line 37, in <lambda>
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/pipeline.py", line 203, in predict
Xt = transform.transform(Xt)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/pipeline.py", line 523, in transform
for name, trans in self.transformer_list)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py", line 800, in __call__
while self.dispatch_one_batch(iterator):
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py", line 658, in dispatch_one_batch
self._dispatch(tasks)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py", line 566, in _dispatch
job = ImmediateComputeBatch(batch)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py", line 180, in __init__
self.results = batch()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py", line 72, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/pipeline.py", line 399, in _transform_one
return transformer.transform(X)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/utils/metaestimators.py", line 37, in <lambda>
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/pipeline.py", line 291, in transform
Xt = transform.transform(Xt)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/decomposition/truncated_svd.py", line 201, in transform
return safe_sparse_dot(X, self.components_.T)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/utils/extmath.py", line 179, in safe_sparse_dot
ret = a * b
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/sparse/base.py", line 389, in __mul__
raise ValueError('dimension mismatch')
ValueError: dimension mismatch
What if you change the second svd usage to a new svd?
from sklearn.base import clone

transformer_list = [
    ('char_ngrams', char_ngrams),
    ('char_ngrams_svd_pipeline', make_pipeline(char_ngrams, svd)),
    ('tfidf_word_ngrams', tfidf_word_ngrams),
    ('tfidf_word_ngrams_svd', make_pipeline(tfidf_word_ngrams, clone(svd)))
]
It seems your problem occurs because you're using the same object twice. It is fitted first on the CountVectorizer's output and then on the TfidfVectorizer's output (or vice versa), and when you call predict on the whole pipeline, that single svd object cannot handle the CountVectorizer's output, because it was last fitted on the TfidfVectorizer's output (or again, vice versa).
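For illustration, a short sketch of what clone does: it builds a new, unfitted estimator with the same parameters, so each branch of the FeatureUnion gets its own SVD:
from sklearn.base import clone
from sklearn.decomposition import TruncatedSVD

svd = TruncatedSVD(n_components=100, random_state=2016)
svd_copy = clone(svd)  # same hyper-parameters, but no fitted state

print(svd_copy.get_params()['n_components'])  # 100
print(svd is svd_copy)                        # False: a distinct object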
I get the above unexpected error when trying to run this code:
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 24 10:38:04 2016

@author: andrea
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from pylab import *
import argparse
import mlp

# Basic model parameters as external flags.
tf.app.flags.FLAGS = tf.python.platform.flags._FlagValues()
tf.app.flags._global_parser = argparse.ArgumentParser()
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 20, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 1000, 'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_integer('num_samples', 100000, 'Total number of samples. Needed by the reader')
flags.DEFINE_string('training_set_file', 'godzilla_dataset_size625', 'Training set file')
flags.DEFINE_string('test_set_file', 'godzilla_testset_size625', 'Test set file')
flags.DEFINE_string('test_size', 1000, 'Test set size')

def placeholder_inputs(batch_size):
    images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mlp.NUM_INPUT))
    labels_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mlp.NUM_OUTPUT))
    return images_placeholder, labels_placeholder

def fill_feed_dict(data_set_file, images_pl, labels_pl):
    for l in range(int(FLAGS.num_samples / FLAGS.batch_size)):
        data_set = genfromtxt("../dataset/" + data_set_file, skip_header=l * FLAGS.batch_size, max_rows=FLAGS.batch_size)
        data_set = reshape(data_set, [FLAGS.batch_size, mlp.NUM_INPUT + mlp.NUM_OUTPUT])
        images = data_set[:, :mlp.NUM_INPUT]
        labels_feed = reshape(data_set[:, mlp.NUM_INPUT:], [FLAGS.batch_size, mlp.NUM_OUTPUT])
        images_feed = reshape(images, [FLAGS.batch_size, mlp.NUM_INPUT])
        feed_dict = {
            images_pl: images_feed,
            labels_pl: labels_feed,
        }
        yield feed_dict

def reader(data_set_file, images_pl, labels_pl):
    data_set = loadtxt("../dataset/" + data_set_file)
    images = data_set[:, :mlp.NUM_INPUT]
    labels_feed = reshape(data_set[:, mlp.NUM_INPUT:], [data_set.shape[0], mlp.NUM_OUTPUT])
    images_feed = reshape(images, [data_set.shape[0], mlp.NUM_INPUT])
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict, labels_pl

def run_training():
    tot_training_loss = []
    tot_test_loss = []
    tf.reset_default_graph()
    with tf.Graph().as_default() as g:
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        test_images_pl, test_labels_pl = placeholder_inputs(FLAGS.test_size)
        logits = mlp.inference(images_placeholder)
        test_pred = mlp.inference(test_images_pl, reuse=True)
        loss = mlp.loss(logits, labels_placeholder)
        test_loss = mlp.loss(test_pred, test_labels_pl)
        train_op = mlp.training(loss, FLAGS.learning_rate)
        #summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        sess = tf.Session()
        #summary_writer = tf.train.SummaryWriter("./", sess.graph)
        sess.run(init)
        test_feed, test_labels_placeholder = reader(FLAGS.test_set_file, test_images_pl, test_labels_pl)
        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_gen = fill_feed_dict(FLAGS.training_set_file, images_placeholder, labels_placeholder)
            i = 1
            for feed_dict in feed_gen:
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
                _, test_loss_val = sess.run([test_pred, test_loss], feed_dict=test_feed)
                tot_training_loss.append(loss_value)
                tot_test_loss.append(test_loss_val)
                #if i % 10 == 0:
                #    print('%d minibatches analyzed...' % i)
                i += 1
            if step % 1 == 0:
                duration = time.time() - start_time
                print('Epoch %d (%.3f sec):\n training loss = %f \n test loss = %f ' % (step, duration, loss_value, test_loss_val))
        predictions = sess.run(test_pred, feed_dict=test_feed)
        savetxt("predictions", predictions)
        savetxt("training_loss", tot_training_loss)
        savetxt("test_loss", tot_test_loss)
        plot(tot_training_loss)
        plot(tot_test_loss)
        figure()
        scatter(test_feed[test_labels_placeholder], predictions)
        #plot([.4, .6], [.4, .6])

run_training()
#if __name__ == '__main__':
#    tf.app.run()
This is mlp:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import tensorflow as tf

NUM_OUTPUT = 1
NUM_INPUT = 625
NUM_HIDDEN = 5

def inference(images, reuse=None):
    with tf.variable_scope('hidden1', reuse=reuse):
        weights = tf.get_variable(name='weights', shape=[NUM_INPUT, NUM_HIDDEN], initializer=tf.contrib.layers.xavier_initializer())
        weight_decay = tf.mul(tf.nn.l2_loss(weights), 0.00001, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
        biases = tf.Variable(tf.constant(0.0, name='biases', shape=[NUM_HIDDEN]))
        hidden1_output = tf.nn.relu(tf.matmul(images, weights) + biases, name='hidden1')
    with tf.variable_scope('output', reuse=reuse):
        weights = tf.get_variable(name='weights', shape=[NUM_HIDDEN, NUM_OUTPUT], initializer=tf.contrib.layers.xavier_initializer())
        weight_decay = tf.mul(tf.nn.l2_loss(weights), 0.00001, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
        biases = tf.Variable(tf.constant(0.0, name='biases', shape=[NUM_OUTPUT]))
        output = tf.nn.relu(tf.matmul(hidden1_output, weights) + biases, name='output')
    return output

def loss(outputs, labels):
    rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(labels, outputs))), name="rmse")
    tf.add_to_collection('losses', rmse)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')

def training(loss, learning_rate):
    tf.scalar_summary(loss.op.name, loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
Here is the error:
Traceback (most recent call last):
File "<ipython-input-1-f16dfed3b99b>", line 1, in <module>
runfile('/home/andrea/test/python/main_mlp_yield.py', wdir='/home/andrea/test/python')
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 714, in runfile
execfile(filename, namespace)
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
builtins.execfile(filename, *where)
File "/home/andrea/test/python/main_mlp_yield.py", line 127, in <module>
run_training()
File "/home/andrea/test/python/main_mlp_yield.py", line 105, in run_training
_, test_loss_val = sess.run([test_pred, test_loss], feed_dict=test_feed)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 372, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 636, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 708, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 728, in _do_call
raise type(e)(node_def, op, message)
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [1000,625]
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[1000,625], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op u'Placeholder', defined at:
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/start_ipython_kernel.py", line 205, in <module>
__ipythonkernel__.start()
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelapp.py", line 442, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/ioloop.py", line 162, in start
super(ZMQIOLoop, self).start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 883, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 276, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 228, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 391, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/ipkernel.py", line 199, in do_execute
shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2723, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2831, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-1-f16dfed3b99b>", line 1, in <module>
runfile('/home/andrea/test/python/main_mlp_yield.py', wdir='/home/andrea/test/python')
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 714, in runfile
execfile(filename, namespace)
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
builtins.execfile(filename, *where)
File "/home/andrea/test/python/main_mlp_yield.py", line 127, in <module>
run_training()
File "/home/andrea/test/python/main_mlp_yield.py", line 79, in run_training
images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
File "/home/andrea/test/python/main_mlp_yield.py", line 37, in placeholder_inputs
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mlp.NUM_INPUT))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 895, in placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 1238, in _placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 704, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2260, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1230, in __init__
self._traceback = _extract_stack()
I really don't understand why. It looks to me that I'm feeding all the placeholders before using them. I also removed the "merge_all_summaries", since this problem is similar to others (this and this), but that didn't help.
EDIT: training data: 100000 samples x 625 features
test data: 1000 samples x 625 features
num. output: 1
I think the problem is in this code:
def loss(outputs, labels):
    rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(labels, outputs))), name="rmse")
    tf.add_to_collection('losses', rmse)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
You're adding up all the losses from the 'losses' collection, including both your training and test losses. In particular, in this code:
loss = mlp.loss(logits, labels_placeholder)
test_loss = mlp.loss(test_pred, test_labels_pl)
the first call to mlp.loss adds the training losses to the 'losses' collection, and the second call then incorporates those values into its result. So when you try to compute test_loss, TensorFlow complains that you didn't feed all of the inputs (the training placeholders).
Perhaps you meant something like this?
def loss(outputs, labels):
    rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(labels, outputs))), name="rmse")
    return rmse
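One caveat: mlp.inference also registers weight-decay terms in the 'losses' collection, and with the simplified loss above they no longer reach the optimizer. A sketch of re-adding them to the training objective only, assuming the fixed loss and the names from run_training (collect the decay terms before building the test graph, since the reuse=True call registers duplicate decay ops over the same shared weights):
# RMSE on the training batch ...
train_rmse = mlp.loss(logits, labels_placeholder)
# ... plus the L2 weight-decay terms mlp.inference put in 'losses'.
loss = tf.add_n([train_rmse] + tf.get_collection('losses'), name='total_loss')
# Test loss stays a plain RMSE, with no collection involved.
test_loss = mlp.loss(test_pred, test_labels_pl)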
I hope that helps!