I want to set up a convolutional neural network in TensorFlow r1.2 with the following structure:
def construct_nn2(input_layer, net2={}):
    input_layer1 = tf.nn.dilation2d(
        input=input_layer,
        strides=[1, 1, 1, 1],
        rates=[1, 1, 1, 1],
        filter=[1.0, 1.0, 1],
        padding='SAME',
        name='dil'
    )
    net2['conv11'] = tf.layers.conv2d(
        inputs=input_layer1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.tanh,
        name='conv11'
    )
    net2['conv12'] = tf.layers.conv2d(
        inputs=net2['conv11'],
        filters=64,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.tanh,
        name='conv12'
    )
    net2['logits'] = tf.layers.conv2d(
        inputs=net2['conv12'],
        filters=1,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.sigmoid,
        name='logits'
    )
    return net2['logits']
The dilation layer produces the following error:
Traceback (most recent call last):
File "/home/test/Dropbox/occlusion_thesis/occ_small _2_add/main.py", line 137, in <module>
tn_prediction = construct_nn2(t_img)
File "/home/test/Dropbox/occlusion_thesis/occ_small _2_add/main.py", line 18, in construct_nn2
name='dil'
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 860, in dilation2d
name=name)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2508, in create_op
set_shapes_for_outputs(ret)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1873, in set_shapes_for_outputs
shapes = shape_func(op)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1823, in call_with_requiring
return call_cpp_shape_fn(op, require_shape_fn=True)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 610, in call_cpp_shape_fn
debug_python_shape_fn, require_shape_fn)
File "/home/test/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 676, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Shape must be rank 3 but is rank 1 for 'dil' (op: 'Dilation2D') with input shapes: [1,248,360,128], [3].
What do I have to change so that the error is not raised? I have read the dilation2d documentation, but I still have no clue what I need to change to get this layer to work at least without errors.
Edit:
The filter must be a rank-3 tensor, [filter_height, filter_width, depth], whose depth matches the number of input channels.
input_layer1 = tf.nn.dilation2d(
    input=input_layer,
    strides=[1, 1, 1, 1],
    rates=[1, 1, 1, 1],
    filter=tf.squeeze(input_layer, axis=0),
    padding='SAME',
    name='dil'
)
With the filter reworked like this, the graph at least builds and runs without errors.
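For comparison, a more conventional structuring element would be a small rank-3 tensor of shape [filter_height, filter_width, depth] whose depth matches the input's channel count (128 here, according to the error message). A minimal sketch, assuming the input really has 128 channels:

# hypothetical 3x3 structuring element; depth must equal the input's channel count (128)
dil_filter = tf.Variable(tf.zeros([3, 3, 128]), name='dil_filter')
input_layer1 = tf.nn.dilation2d(
    input=input_layer,
    filter=dil_filter,
    strides=[1, 1, 1, 1],
    rates=[1, 1, 1, 1],
    padding='SAME',
    name='dil'
)

With an all-zero filter, grayscale dilation reduces to a running 3x3 maximum over each channel; since it is a tf.Variable, the structuring element can also be learned during training.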
Related
I encountered the error below while trying to fit an encoder-decoder model built with ConvLSTM2D. x_train has shape (31567, 7, 210, 203, 1), i.e. (batch_size, frame_length, H, W, C).
The encoder part works when executed in isolation, but the error occurs when I add the decoder part; the problem seems to be in the decoder's input, although I am not sure.
I tried reshaping encoder_state_c_1 and encoder_state_h_1 to 5D before passing them to the decoder ConvLSTM2D, but that does not help either.
Please find the code and error here:
MODEL
def define_models_1_moving_1(framelength, n_filter, filter_size):
    # define training encoder
    encoder_inputs = Input(name="encoder_input",
                           shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3], x_train.shape[4]))
    encoder_1 = ConvLSTM2D(name="encoder_ConvLSTM",
                           filters=n_filter, kernel_size=filter_size, padding='same',
                           return_sequences=True, return_state=True,
                           kernel_regularizer=l2(0.0005), recurrent_regularizer=l2(0.0005),
                           bias_regularizer=l2(0.0005))
                           # input_shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3], x_train.shape[4]))
    encoder_outputs_1, encoder_state_h_1, encoder_state_c_1 = encoder_1(encoder_inputs)

    # define training decoder
    decoder_inputs = Input(name="decoder_input",
                           shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3], x_train.shape[4]))
    decoder_1 = ConvLSTM2D(name="decoder_ConvLSTM",
                           filters=n_filter, kernel_size=filter_size, padding='same',
                           return_sequences=True, return_state=True,
                           kernel_regularizer=l2(0.0005), recurrent_regularizer=l2(0.0005),
                           bias_regularizer=l2(0.0005))
    decoder_outputs_1, _, _ = decoder_1([decoder_inputs, encoder_state_h_1, encoder_state_c_1])  #### This line is giving Error

    model = Model([encoder_inputs, decoder_inputs], decoder_outputs_1)
    return model
Error
Traceback (most recent call last):
File "D:\Chintan\Dataset\model.py", line 155, in
training_history = train_1_moving_1.fit(
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\Admin\AppData\Local\Temp_autograph_generated_filernuwcygs.py", line 15, in tf__train_function
retval = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
ValueError: in user code:
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1249, in train_function *
return step_function(self, iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1233, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1222, in run_step **
outputs = model.train_step(data)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1023, in train_step
y_pred = self(x, training=True)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\input_spec.py", line 216, in assert_input_compatibility
raise ValueError(
ValueError: Layer "model_120" expects 2 input(s), but it received 1 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None, None, None) dtype=float32>]
I run the code below on TensorFlow 1.0 using Python 3.5:
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
sess = tf.Session()
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))
I got this error
print(sess.run(adder_node, {a:3, b:4.5}))
Traceback (most recent call last):
File "C:\Users\xxxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1068, in _run
allow_operation=False)
File "C:\Users\xxxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py", line 2708, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "C:\Users\xxxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py", line 2787, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("Placeholder_3:0", dtype=float32) is not an element of this graph.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "", line 1, in
File "C:\Users\xxxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 895, in run
run_metadata_ptr)
File "C:\Users\xxxx\AppData\Local\Continuum\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1071, in _run
+ e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder_3:0", dtype=float32) is not an element of this graph.
Please help me debug this; I am not sure where the error is coming from.
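For what it's worth, this error usually means the placeholders being fed belong to a different graph than the one the session was created for, which typically happens when the graph-building lines are re-run in an interactive session (each re-run creates new Placeholder ops such as Placeholder_3) while an older Session is still being used. A minimal sketch that avoids the mismatch by rebuilding the graph and the session together:

import tensorflow as tf

tf.reset_default_graph()      # discard any previously built graph
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b

with tf.Session() as sess:    # this session is bound to the same (current) default graph as a and b
    print(sess.run(adder_node, {a: 3, b: 4.5}))
    print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))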
Here is the f(x) I want to implement with TensorFlow:
input x = (x1,x2,x3,x4,x5,x6,x7,x8,x9)
define f(x) = f1(x1,x2,x3,x4,x5) + f2(x5,x6,x7,x8,x9)
where
f1(x1,x2,x3,x4,x5) = 1 if (x1,x2,x3,x4,x5) = (0,0,0,0,0), g1(x1,x2,x3,x4,x5) otherwise
f2(x5,x6,x7,x8,x9) = 1 if (x5,x6,x7,x8,x9) = (0,0,0,0,0), g2(x5,x6,x7,x8,x9) otherwise
This is my TensorFlow code:
import tensorflow as tf
import numpy as np

ph = tf.placeholder(dtype=tf.float32, shape=[None, 9])
x1 = tf.slice(ph, [0, 0], [-1, 5])
x2 = tf.slice(ph, [0, 4], [-1, 5])
fixed1 = tf.placeholder(dtype=tf.float32, shape=[1, 5])
fixed2 = tf.placeholder(dtype=tf.float32, shape=[1, 5])

# MLP 1
w1 = tf.Variable(tf.ones([5, 1]))
g1 = tf.matmul(x1, w1)

# MLP 2
w2 = tf.Variable(-tf.ones([5, 1]))
g2 = tf.matmul(x2, w2)

check1 = tf.reduce_all(tf.equal(x1, fixed1), axis=1, keep_dims=True)
check2 = tf.reduce_all(tf.equal(x2, fixed2), axis=1, keep_dims=True)

#### with Problem
f1 = tf.cond(check1,
             lambda: tf.constant([2], dtype=tf.float32), lambda: g1)
f2 = tf.cond(check2,
             lambda: tf.constant([1], dtype=tf.float32), lambda: g2)
####

f = tf.add(f1, f2)

x = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0, 1],
              [1, 0, 0, 0, 0, 0, 0, 0, 0],
              [2, 0, 0, 0, 0, 0, 0, 0, 0],
              [9, 0, 0, 0, 0, 0, 0, 0, 0]])
fixed = np.array([[0, 0, 0, 0, 0]])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('(1)\n', sess.run(check1, feed_dict={ph: x, fixed1: fixed, fixed2: fixed}))
    print('(2)\n', sess.run(check2, feed_dict={ph: x, fixed1: fixed, fixed2: fixed}))
    print('(3)\n', sess.run(f, feed_dict={ph: x, fixed1: fixed, fixed2: fixed}))
    print('(4)\n', sess.run(f1, feed_dict={ph: x, fixed1: fixed, fixed2: fixed}))
    print('(5)\n', sess.run(f2, feed_dict={ph: x, fixed1: fixed, fixed2: fixed}))
In this case,
check1 is [[ True], [ True], [False], [False], [False]] with shape (5, 1)
check2 is [[ True], [False], [ True], [ True], [ True]] with shape (5, 1)
I expect the result of f to be [[3], [1], [2], [3], [10]],
but it seems that tf.cond() cannot handle a boolean tensor of shape (5, 1) as its predicate.
Could you please advise how to implement f(x) with TensorFlow?
This is the error message I received:
Traceback (most recent call last):
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 670, in _call_cpp_shape_fn_impl
    status)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\contextlib.py", line 66, in __exit__
    next(self.gen)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 469, in raise_exception_on_not_ok_status
    pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: Shape must be rank 0 but is rank 2 for 'cond/Switch' (op: 'Switch') with input shapes: [?,1], [?,1].

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:/Users/hong/Dropbox/MLILAB/Research/GM-MLP/code/tensorflow_cond.py", line 23, in
    lambda: tf.constant([2], dtype=tf.float32), lambda: g1)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 1765, in cond
    p_2, p_1 = switch(pred, pred)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 318, in switch
    return gen_control_flow_ops._switch(data, pred, name=name)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_control_flow_ops.py", line 368, in _switch
    result = _op_def_lib.apply_op("Switch", data=data, pred=pred, name=name)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 759, in apply_op
    op_def=op_def)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2242, in create_op
    set_shapes_for_outputs(ret)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1617, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1568, in call_with_requiring
    return call_cpp_shape_fn(op, require_shape_fn=True)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 610, in call_cpp_shape_fn
    debug_python_shape_fn, require_shape_fn)
  File "C:\Users\hong\AppData\Local\Continuum\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 675, in _call_cpp_shape_fn_impl
    raise ValueError(err.message)
ValueError: Shape must be rank 0 but is rank 2 for 'cond/Switch' (op: 'Switch') with input shapes: [?,1], [?,1].
Process finished with exit code 1
I think you need tf.where, not tf.cond.
See the answer to this question: How to use tf.cond for batch processing
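For illustration, here is a minimal sketch of what the tf.where version of the block marked "#### with Problem" could look like. Unlike tf.cond, whose predicate must be a scalar, tf.where selects elementwise, so the (?, 1)-shaped check1 and check2 masks can be used directly (the constants 2 and 1 mirror the tf.cond branches in the question):

# elementwise selection: where check1 is True take the constant 2, otherwise g1
f1 = tf.where(check1, 2.0 * tf.ones_like(g1), g1)
f2 = tf.where(check2, 1.0 * tf.ones_like(g2), g2)
f = tf.add(f1, f2)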
I'm trying to use the Keras scikit-learn wrapper to make random search over parameters easier. I wrote example code here where:
I generate an artificial dataset:
I am using make_moons from scikit-learn:
from sklearn.datasets import make_moons
dataset = make_moons(1000)
Model builder definition:
I define the build_fn function needed:
def build_fn(nr_of_layers=2,
             first_layer_size=10,
             layers_slope_coeff=0.8,
             dropout=0.5,
             activation="relu",
             weight_l2=0.01,
             act_l2=0.01,
             input_dim=2):
    result_model = Sequential()
    result_model.add(Dense(first_layer_size,
                           input_dim=input_dim,
                           activation=activation,
                           W_regularizer=l2(weight_l2),
                           activity_regularizer=activity_l2(act_l2)))
    current_layer_size = int(first_layer_size * layers_slope_coeff) + 1
    for index_of_layer in range(nr_of_layers - 1):
        result_model.add(BatchNormalization())
        result_model.add(Dropout(dropout))
        result_model.add(Dense(current_layer_size,
                               W_regularizer=l2(weight_l2),
                               activation=activation,
                               activity_regularizer=activity_l2(act_l2)))
        current_layer_size = int(current_layer_size * layers_slope_coeff) + 1
    result_model.add(Dense(1,
                           activation="sigmoid",
                           W_regularizer=l2(weight_l2)))
    result_model.compile(optimizer="rmsprop", metrics=["accuracy"], loss="binary_crossentropy")
    return result_model

NeuralNet = KerasClassifier(build_fn)
Parameter grid definition:
Then I defined a parameter grid:
param_grid = {
    "nr_of_layers": [2, 3, 4, 5],
    "first_layer_size": [5, 10, 15],
    "layers_slope_coeff": [0.4, 0.6, 0.8],
    "dropout": [0.3, 0.5, 0.8],
    "weight_l2": [0.01, 0.001, 0.0001],
    "verbose": [0],
    "batch_size": [1],
    "nb_epoch": [30]
}
RandomizedSearchCV phase:
I defined a RandomizedSearchCV object and fitted it with the values from the artificial dataset:
random_search = RandomizedSearchCV(NeuralNet,
                                   param_distributions=param_grid,
                                   verbose=2, n_iter=1, scoring="roc_auc")
random_search.fit(dataset[0], dataset[1])
What I got (after running this code in the console) is:
Traceback (most recent call last):
File "C:\Anaconda2\lib\site-packages\IPython\core\interactiveshell.py", line 2885, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-3-c5bdbc2770b7>", line 2, in <module>
random_search.fit(dataset[0], dataset[1])
File "C:\Anaconda2\lib\site-packages\sklearn\grid_search.py", line 996, in fit
return self._fit(X, y, sampled_params)
File "C:\Anaconda2\lib\site-packages\sklearn\grid_search.py", line 553, in _fit
for parameters in parameter_iterable
File "C:\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 800, in __call__
while self.dispatch_one_batch(iterator):
File "C:\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 658, in dispatch_one_batch
self._dispatch(tasks)
File "C:\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 566, in _dispatch
job = ImmediateComputeBatch(batch)
File "C:\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 180, in __init__
self.results = batch()
File "C:\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 72, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "C:\Anaconda2\lib\site-packages\sklearn\cross_validation.py", line 1550, in _fit_and_score
test_score = _score(estimator, X_test, y_test, scorer)
File "C:\Anaconda2\lib\site-packages\sklearn\cross_validation.py", line 1606, in _score
score = scorer(estimator, X_test, y_test)
File "C:\Anaconda2\lib\site-packages\sklearn\metrics\scorer.py", line 175, in __call__
y_pred = y_pred[:, 1]
IndexError: index 1 is out of bounds for axis 1 with size 1
This code works fine when I use the accuracy metric instead of scoring="roc_auc". Can anyone explain to me what's wrong? Has anyone had a similar problem?
There is a bug in KerasClassifier that is causing this issue. I have opened an issue for it on the repo: https://github.com/fchollet/keras/issues/2864
The fix is also in there. In the meantime, you can define your own KerasClassifier as a temporary workaround:
class FixedKerasClassifier(KerasClassifier):
    def predict_proba(self, X, **kwargs):
        kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
        probs = self.model.predict_proba(X, **kwargs)
        if probs.shape[1] == 1:
            probs = np.hstack([1 - probs, probs])
        return probs
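The underlying issue is that the roc_auc scorer calls predict_proba and then indexes column 1 (y_pred[:, 1] in the traceback), while a model with a single sigmoid output returns probabilities of shape (n_samples, 1); stacking 1 - probs next to probs gives the scorer the two-column array it expects. The workaround class can then replace KerasClassifier in the search, roughly like this (reusing build_fn and param_grid from the question):

NeuralNet = FixedKerasClassifier(build_fn)
random_search = RandomizedSearchCV(NeuralNet,
                                   param_distributions=param_grid,
                                   verbose=2, n_iter=1, scoring="roc_auc")
random_search.fit(dataset[0], dataset[1])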
I am creating a convolutional autoencoder in TensorFlow. I got this exact error:
tensorflow.python.framework.errors.InvalidArgumentError: Conv2DBackpropInput: Number of rows of out_backprop doesn't match computed: actual = 8, computed = 12
[[Node: conv2d_transpose = Conv2DBackpropInput[T=DT_FLOAT, data_format="NHWC", padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/cpu:0"](conv2d_transpose/output_shape, Variable_1/read, MaxPool_1)]]
Relevant code:
l1d = tf.nn.relu(tf.nn.conv2d_transpose(l1da, w2, [10, 12, 12, 32], strides=[1, 1, 1, 1], padding='SAME'))
where
w2 = tf.Variable(tf.random_normal([5, 5, 32, 64], stddev=0.01))
I checked the shape of the input to conv2d_transpose, i.e. l1da, and it is correct (10x8x8x64). The batch size is 10, the input to this layer has the form 8x8x64, and the output is supposed to be 12x12x32.
What am I missing?
Found the error. Padding should be 'VALID', not 'SAME'.
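For reference: with stride 1, conv2d_transpose with 'SAME' padding keeps the spatial size (8x8 in, 8x8 out), whereas 'VALID' padding with a 5x5 kernel gives output = input + kernel - 1 = 8 + 5 - 1 = 12, which matches the requested output_shape. A minimal sketch of the corrected call from the question:

# 'VALID' padding so the 8x8 input deconvolves to the requested 12x12 output
l1d = tf.nn.relu(tf.nn.conv2d_transpose(l1da, w2, output_shape=[10, 12, 12, 32],
                                        strides=[1, 1, 1, 1], padding='VALID'))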