load_from_checkpoint fails after transfer learning a LightningModule

I am trying to transfer-learn a LightningModule. The relevant part of the code is this:
class DeepFilteringTransferLearning(pl.LightningModule):
    def __init__(self, chk_path=None):
        super().__init__()
        # init class members
        self.prediction = []
        self.label = []
        self.loss = MSELoss()
        # init pretrained model
        self.chk_path = chk_path
        model = DeepFiltering.load_from_checkpoint(chk_path)
        backbone = model.sequential
        layers = list(backbone.children())[:-1]
        self.groundModel = Sequential(*layers)
        # use the pretrained model the same way to regress Lshall and neq
        self.regressor = nn.Linear(64, 2)

    def forward(self, x):
        self.groundModel.eval()
        with torch.no_grad():
            groundOut = self.groundModel(x)
        yPred = self.regressor(groundOut)
        return yPred
I save my model in a separate main file, the relevant part of which is:
# callbacks
callbacks = [
    ModelCheckpoint(
        dirpath="checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl1ConvTransferLearned",
        save_top_k=5,
        monitor="val_loss",
    ),
]
# trainer
trainer = pl.Trainer(gpus=[1, 2], strategy="dp", max_epochs=150, logger=wandb_logger,
                     callbacks=callbacks, precision=32, deterministic=True)
trainer.fit(model, train_dataloaders=trainDl, val_dataloaders=valDl)
Afterwards I try to load the model from the checkpoint like this:
chk_patH = "path/to/transfer_learned/model"
standardizedL2RegularizedL1 = DeepFilteringTransferLearning("path/to/model/trying/to/use/for/transfer_learning").load_from_checkpoint(chk_patH)
I got the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _check_seekable(f)
307 try:
--> 308 f.seek(f.tell())
309 return True
AttributeError: 'NoneType' object has no attribute 'seek'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-6-13f5fd0c7b85> in <module>
1 chk_patH = "checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl1/epoch=4-step=349.ckpt"
----> 2 standardizedL2RegularizedL1 = DeepFilteringTransferLearning("checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl2/epoch=145-step=10219.ckpt").load_from_checkpoint(chk_patH)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in load_from_checkpoint(cls, checkpoint_path, map_location, hparams_file, strict, **kwargs)
154 checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].update(kwargs)
155
--> 156 model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
157 return model
158
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in _load_model_state(cls, checkpoint, strict, **cls_kwargs_new)
196 _cls_kwargs = {k: v for k, v in _cls_kwargs.items() if k in cls_init_args_name}
197
--> 198 model = cls(**_cls_kwargs)
199
200 # give model a chance to load something
~/whistlerProject/gitHub/whistler/mathe/gwInspired/deepFilteringTransferLearning.py in __init__(self, chk_path)
34 #init pretrained model
35 self.chk_path = chk_path
---> 36 model = DeepFiltering.load_from_checkpoint(chk_path)
37 backbone = model.sequential
38 layers = list(backbone.children())[:-1]
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in load_from_checkpoint(cls, checkpoint_path, map_location, hparams_file, strict, **kwargs)
132 checkpoint = pl_load(checkpoint_path, map_location=map_location)
133 else:
--> 134 checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
135
136 if hparams_file is not None:
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/utilities/cloud_io.py in load(path_or_url, map_location)
31 if not isinstance(path_or_url, (str, Path)):
32 # any sort of BytesIO or similiar
---> 33 return torch.load(path_or_url, map_location=map_location)
34 if str(path_or_url).startswith("http"):
35 return torch.hub.load_state_dict_from_url(str(path_or_url), map_location=map_location)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
579 pickle_load_args['encoding'] = 'utf-8'
580
--> 581 with _open_file_like(f, 'rb') as opened_file:
582 if _is_zipfile(opened_file):
583 # The zipfile reader is going to advance the current file position.
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _open_file_like(name_or_buffer, mode)
233 return _open_buffer_writer(name_or_buffer)
234 elif 'r' in mode:
--> 235 return _open_buffer_reader(name_or_buffer)
236 else:
237 raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in __init__(self, buffer)
218 def __init__(self, buffer):
219 super(_open_buffer_reader, self).__init__(buffer)
--> 220 _check_seekable(buffer)
221
222
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _check_seekable(f)
309 return True
310 except (io.UnsupportedOperation, AttributeError) as e:
--> 311 raise_err_msg(["seek", "tell"], e)
312 return False
313
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in raise_err_msg(patterns, e)
302 + " Please pre-load the data into a buffer like io.BytesIO and"
303 + " try to load from it instead.")
--> 304 raise type(e)(msg)
305 raise e
306
AttributeError: 'NoneType' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead.
which I can't resolve.
I tried to do this according to the available tutorials on the official PyTorch Lightning page here, but I can't figure out what I am missing.
Could somebody point me in the right direction?
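One detail visible in the traceback itself (the model = cls(**_cls_kwargs) frame): load_from_checkpoint re-instantiates the class from the hyperparameters stored in the checkpoint, so unless chk_path was saved as a hyperparameter it falls back to its default of None, and DeepFiltering.load_from_checkpoint(None) ends up calling torch.load(None), which raises exactly this 'NoneType' seek error. A minimal sketch of the usual Lightning pattern for persisting constructor arguments (abbreviated class body, not the asker's actual code):

class DeepFilteringTransferLearning(pl.LightningModule):
    def __init__(self, chk_path=None):
        super().__init__()
        # writes chk_path into self.hparams and into every saved checkpoint,
        # so load_from_checkpoint can later rebuild the module from it
        self.save_hyperparameters()
        ...

# load_from_checkpoint is a classmethod; calling it on a freshly constructed
# instance discards that instance and builds a new one from the checkpoint
model = DeepFilteringTransferLearning.load_from_checkpoint(chk_patH)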

Related

Dtype issue using transformer function from sklearn

I've been receiving the following error after running these lines:
transformer = preprocessing.FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
scaler = preprocessing.StandardScaler()
X1_t = transformer.fit_transform(X_t)
Error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In [103], line 3
1 transformer = preprocessing.FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
2 scaler = preprocessing.StandardScaler()
----> 3 X1_t = transformer.fit_transform(X_t)
4 X2_t = scaler.fit_transform(X1_t)
5 print(X2_t.shape)
File /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sklearn/base.py:867, in TransformerMixin.fit_transform(self, X, y, **fit_params)
863 # non-optimized default implementation; override when a better
864 # method is possible for a given clustering algorithm
865 if y is None:
866 # fit method of arity 1 (unsupervised transformation)
--> 867 return self.fit(X, **fit_params).transform(X)
868 else:
869 # fit method of arity 2 (supervised transformation)
870 return self.fit(X, y, **fit_params).transform(X)
File /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py:195, in FunctionTransformer.fit(self, X, y)
193 X = self._check_input(X, reset=True)
194 if self.check_inverse and not (self.func is None or self.inverse_func is None):
--> 195 self._check_inverse_transform(X)
196 return self
File /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py:160, in FunctionTransformer._check_inverse_transform(self, X)
157 idx_selected = slice(None, None, max(1, X.shape[0] // 100))
158 X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
--> 160 if not np.issubdtype(X.dtype, np.number):
161 raise ValueError(
162 "'check_inverse' is only supported when all the elements in `X` is"
163 " numerical."
164 )
166 if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
File /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pandas/core/generic.py:5575, in NDFrame.__getattr__(self, name)
5568 if (
5569 name not in self._internal_names_set
5570 and name not in self._metadata
5571 and name not in self._accessors
5572 and self._info_axis._can_hold_identifiers_and_holds_name(name)
5573 ):
5574 return self[name]
-> 5575 return object.__getattribute__(self, name)
AttributeError: 'DataFrame' object has no attribute 'dtype'
I was able to run this code before, but I had to reinstall Jupyter Notebook, and after reinstalling it and downloading all the libraries again I started getting this issue. My hypothesis is that it is related to the combination of versions of Jupyter and the libraries (pandas, sklearn), but I don't remember which versions I previously had.
Any idea?
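The traceback shows _check_inverse_transform reaching for X.dtype, which a pandas DataFrame does not have (only Series and ndarrays do), so two workarounds consistent with that frame are to hand the transformer a NumPy array, or to skip the inverse check. A minimal sketch, assuming X_t is a DataFrame of numeric columns:

import numpy as np
from sklearn import preprocessing

transformer = preprocessing.FunctionTransformer(func=np.log1p, inverse_func=np.expm1)

# Option 1: pass the underlying array so the .dtype lookup succeeds
X1_t = transformer.fit_transform(X_t.to_numpy())

# Option 2: disable the round-trip check that triggers the failing code path
transformer = preprocessing.FunctionTransformer(
    func=np.log1p, inverse_func=np.expm1, check_inverse=False)
X1_t = transformer.fit_transform(X_t)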

DeepChem GraphConvodel (GNN) training TypeError

I am a beginner with GNNs and I was trying out code for predicting drug toxicity using DeepChem's Tox21 dataset. It has a training set of 12 thousand compounds and a test set of 650 compounds. I need help debugging and rectifying this error: "TypeError: 'NoneType' object is not subscriptable", which I get at the end.
Here is the code snippet:
model = GraphConvModel(len(tox21_tasks),
                       batch_size=32,
                       mode='classification')
print("Fitting the model")
model.fit(train_dataset, nb_epoch=10)
And here is my error:
TypeError Traceback (most recent call last)
<ipython-input-5-8088249b7fd6> in <module>
4 mode='classification')
5 print("Fitting the model")
----> 6 model.fit(train_dataset, nb_epoch=10)
~\anaconda3\lib\site-packages\deepchem\models\keras_model.py in fit(self, dataset, nb_epoch, max_checkpoints_to_keep, checkpoint_interval, deterministic, restore, variables, loss, callbacks, all_losses)
322 dataset, epochs=nb_epoch,
323 deterministic=deterministic), max_checkpoints_to_keep,
--> 324 checkpoint_interval, restore, variables, loss, callbacks, all_losses)
325
326 def fit_generator(self,
~\anaconda3\lib\site-packages\deepchem\models\keras_model.py in fit_generator(self, generator, max_checkpoints_to_keep, checkpoint_interval, restore, variables, loss, callbacks, all_losses)
407 inputs = inputs[0]
408
--> 409 batch_loss = apply_gradient_for_batch(inputs, labels, weights, loss)
410 current_step = self._global_step.numpy()
411
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
455
456 tracing_count = self._get_tracing_count()
--> 457 result = self._call(*args, **kwds)
458 if tracing_count == self._get_tracing_count():
459 self._call_counter.called_without_tracing()
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\def_function.py in _call(self, *args, **kwds)
501 # This is the first call of __call__, so we have to initialize.
502 initializer_map = object_identity.ObjectIdentityDictionary()
--> 503 self._initialize(args, kwds, add_initializers_to=initializer_map)
504 finally:
505 # At this point we know that the initialization is complete (or less
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
406 self._concrete_stateful_fn = (
407 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 408 *args, **kwds))
409
410 def invalid_creator_scope(*unused_args, **unused_kwds):
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846 if self.input_signature:
1847 args, kwargs = None, None
-> 1848 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849 return graph_function
1850
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py in _maybe_define_function(self, args, kwargs)
2148 graph_function = self._function_cache.primary.get(cache_key, None)
2149 if graph_function is None:
-> 2150 graph_function = self._create_graph_function(args, kwargs)
2151 self._function_cache.primary[cache_key] = graph_function
2152 return graph_function, args, kwargs
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041 capture_by_value=self._capture_by_value),
2042 self._function_attributes,
2043 # Tell the ConcreteFunction to clean up its graph once it goes out of
~\anaconda3\lib\site-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\anaconda3\lib\site-packages\tensorflow_core\python\eager\def_function.py in wrapped_fn(*args, **kwds)
356 # __wrapped__ allows AutoGraph to swap in a converted function. We give
357 # the function a weak reference to itself to avoid a reference cycle.
--> 358 return weak_wrapped_fn().__wrapped__(*args, **kwds)
359 weak_wrapped_fn = weakref.ref(wrapped_fn)
360
~\anaconda3\lib\site-packages\tensorflow_core\python\framework\func_graph.py in wrapper(*args, **kwargs)
903 except Exception as e: # pylint:disable=broad-except
904 if hasattr(e, "ag_error_metadata"):
--> 905 raise e.ag_error_metadata.to_exception(e)
906 else:
907 raise
TypeError: in converted code:
relative to C:\Users\Madiha\anaconda3\lib\site-packages:
deepchem\models\keras_model.py:474 apply_gradient_for_batch *
grads = tape.gradient(batch_loss, vars)
tensorflow_core\python\eager\backprop.py:1014 gradient
unconnected_gradients=unconnected_gradients)
tensorflow_core\python\eager\imperative_grad.py:76 imperative_grad
compat.as_str(unconnected_gradients.value))
tensorflow_core\python\eager\backprop.py:138 _gradient_function
return grad_fn(mock_op, *out_grads)
tensorflow_core\python\ops\math_grad.py:455 _UnsortedSegmentMaxGrad
return _UnsortedSegmentMinOrMaxGrad(op, grad)
tensorflow_core\python\ops\math_grad.py:432 _UnsortedSegmentMinOrMaxGrad
_GatherDropNegatives(op.outputs[0], op.inputs[1])
TypeError: 'NoneType' object is not subscriptable
As advice, check some of the examples on the DeepChem website. Here is code which will work:
tasks, datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = datasets
model = dc.models.GraphConvModel(len(tasks),
                                 batch_size=32,
                                 mode='classification')
print("Fitting the model")
model.fit(train_dataset)
Hope it works for you!
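Once training runs, evaluation usually follows the same MoleculeNet pattern; a minimal sketch (the ROC-AUC metric choice is an assumption, not part of the original answer):

import numpy as np
import deepchem as dc

# Mean ROC-AUC across the twelve Tox21 classification tasks
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
print("Train score:", model.evaluate(train_dataset, [metric], transformers))
print("Test score:", model.evaluate(test_dataset, [metric], transformers))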

ValueError: Unknown fields ['image']

I am trying to deploy Dask Gateway integrated with JupyterHub, which is why I decided to give the DaskHub Helm chart a try.
I followed the instructions at https://docs.dask.org/en/latest/setup/kubernetes-helm.html#helm-install-dask-for-mulitple-users. JupyterHub is working fine, but when I try to create a new Dask cluster via the UI or through:
from dask_gateway import GatewayCluster
cluster = GatewayCluster()
I get this error:
ValueError Traceback (most recent call last)
<ipython-input-3-36809e239298> in <module>
1 from dask_gateway import GatewayCluster
----> 2 cluster = GatewayCluster()
3
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in __init__(self, address, proxy_address, public_address, auth, cluster_options, shutdown_on_close, asynchronous, loop, **kwargs)
816 shutdown_on_close=shutdown_on_close,
817 asynchronous=asynchronous,
--> 818 loop=loop,
819 )
820
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _init_internal(self, address, proxy_address, public_address, auth, cluster_options, cluster_kwargs, shutdown_on_close, asynchronous, loop, name)
912 self.status = "starting"
913 if not self.asynchronous:
--> 914 self.gateway.sync(self._start_internal)
915
916 #property
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in sync(self, func, *args, **kwargs)
337 )
338 try:
--> 339 return future.result()
340 except BaseException:
341 future.cancel()
/srv/conda/envs/notebook/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
433 raise CancelledError()
434 elif self._state == FINISHED:
--> 435 return self.__get_result()
436 else:
437 raise TimeoutError()
/srv/conda/envs/notebook/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _start_internal(self)
926 self._start_task = asyncio.ensure_future(self._start_async())
927 try:
--> 928 await self._start_task
929 except BaseException:
930 # On exception, cleanup
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _start_async(self)
940 self.status = "starting"
941 self.name = await self.gateway._submit(
--> 942 cluster_options=self._cluster_options, **self._cluster_kwargs
943 )
944 # Connect to cluster
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _submit(self, cluster_options, **kwargs)
529 options = self._config_cluster_options()
530 options.update(kwargs)
--> 531 resp = await self._request("POST", url, json={"cluster_options": options})
532 data = await resp.json()
533 return data["name"]
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _request(self, method, url, json)
407
408 if resp.status in {404, 422}:
--> 409 raise ValueError(msg)
410 elif resp.status == 409:
411 raise GatewayClusterError(msg)
ValueError: Unknown fields ['image']
Any help will be greatly appreciated.
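The 422 response in the traceback means the gateway server rejected the request body: the client sent a cluster option ("image") that the server's option schema does not declare. One way to reconcile the two sides is to expose that option in the gateway server configuration; a minimal sketch (field name, handler, and default image are assumptions, typically injected through the chart's gateway.extraConfig):

from dask_gateway_server.options import Options, String

def options_handler(options):
    # Map the user-facing "image" option onto the backend's settings
    return {"image": options.image}

c.Backend.cluster_options = Options(
    String("image", default="daskgateway/dask-gateway:latest", label="Image"),
    handler=options_handler,
)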

Problem with evaluation function in tensorflow federated

I was trying to reimplement the GitHub tutorial with my own CNN-based Keras model, but I got an error when evaluating.
from __future__ import absolute_import, division, print_function

import collections
from six.moves import range
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.util import nest  # missing from the original snippet; provides map_structure
from tensorflow_federated import python as tff

emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
example_dataset = emnist_train.create_tf_dataset_for_client(
    emnist_train.client_ids[0])

NUM_EPOCHS = 10
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500

def preprocess(dataset):
    def element_fn(element):
        return collections.OrderedDict([
            ('x', tf.reshape(element['pixels'], [-1])),
            ('y', tf.reshape(element['label'], [1])),
        ])
    return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
        SHUFFLE_BUFFER).batch(BATCH_SIZE)

preprocessed_example_dataset = preprocess(example_dataset)
sample_batch = nest.map_structure(
    lambda x: x.numpy(), iter(preprocessed_example_dataset).next())

def make_federated_data(client_data, client_ids):
    return [preprocess(client_data.create_tf_dataset_for_client(x))
            for x in client_ids]

NUM_CLIENTS = 3
sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]
federated_train_data = make_federated_data(emnist_train, sample_clients)
len(federated_train_data), federated_train_data[0]

def create_compiled_keras_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
        tf.keras.layers.Conv2D(32, kernel_size=(5, 5), activation="relu", padding="same", strides=1),
        tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
        tf.keras.layers.Conv2D(64, kernel_size=(5, 5), activation="relu", padding="same", strides=1),
        tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation="relu"),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])

    def loss_fn(y_true, y_pred):
        return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
            y_true, y_pred))

    model.compile(
        loss=loss_fn,
        optimizer=gradient_descent.SGD(learning_rate=0.02),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model

def model_fn():
    keras_model = create_compiled_keras_model()
    return tff.learning.from_compiled_keras_model(keras_model, sample_batch)

iterative_process = tff.learning.build_federated_averaging_process(model_fn)
state = iterative_process.initialize()

for round_num in range(1, 10):
    state, metrics = iterative_process.next(state, federated_train_data)
    print('round {:2d}, metrics={}'.format(round_num, metrics))

## Evaluation of the model
# This function doesn't work
evaluation = tff.learning.build_federated_evaluation(model_fn)
federated_test_data = make_federated_data(emnist_test, sample_clients)
test_metrics = evaluation(state.model, federated_test_data)
I expect the evaluation of the test data, but the actual output is the following error:
---------------------------------------------------------------------------
_FallbackException Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
482 "Tout", Tout, "f", f, "config", config, "config_proto", config_proto,
--> 483 "executor_type", executor_type)
484 return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-23-6e9c77f70201> in <module>()
----> 1 evaluation = tff.learning.build_federated_evaluation(model_fn)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in build_federated_evaluation(model_fn)
83 #tff.federated_computation(
84 tff.FederatedType(model_weights_type, tff.SERVER, all_equal=True),
---> 85 tff.FederatedType(tff.SequenceType(batch_type), tff.CLIENTS))
86 def server_eval(server_model_weights, federated_dataset):
87 client_outputs = tff.federated_map(
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in <lambda>(fn)
406 args = (args,)
407 arg_type = computation_types.to_type(args[0])
--> 408 return lambda fn: _wrap(fn, arg_type, self._wrapper_fn)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in _wrap(fn, parameter_type, wrapper_fn)
94 function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type),
95 parameter_type,
---> 96 name=fn_name)
97 py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction,
98 'value returned by the wrapper')
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper_instances.py in _federated_computation_wrapper_fn(target_fn, parameter_type, name)
52 parameter_type,
53 ctx_stack,
---> 54 suggested_name=name))
55 return computation_impl.ComputationImpl(target_lambda.proto, ctx_stack)
56
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/federated_computation_utils.py in zero_or_one_arg_fn_to_building_block(fn, parameter_name, parameter_type, context_stack, suggested_name)
73 value_impl.ValueImpl(
74 computation_building_blocks.Reference(
---> 75 parameter_name, parameter_type), context_stack))
76 else:
77 result = fn()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in <lambda>(arg)
551 # and to force any parameter bindings to be resolved now.
552 # pylint: disable=unnecessary-lambda,undefined-variable
--> 553 return (lambda fn, at, kt: lambda arg: _unpack_and_call(fn, at, kt, arg))(
554 fn, arg_types, kwarg_types)
555 # pylint: enable=unnecessary-lambda,undefined-variable
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in _unpack_and_call(fn, arg_types, kwarg_types, arg)
545 name, str(expected_type), str(actual_type)))
546 kwargs[name] = element_value
--> 547 return fn(*args, **kwargs)
548
549 # Deliberate wrapping to isolate the caller from the underlying function
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in server_eval(server_model_weights, federated_dataset)
88 client_eval,
89 [tff.federated_broadcast(server_model_weights), federated_dataset])
---> 90 return model.federated_output_computation(client_outputs.local_outputs)
91
92 return server_eval
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
531 #property
532 def federated_output_computation(self):
--> 533 return self._model.federated_output_computation
534
535
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
406 def federated_output_computation(self):
407 metric_variable_type_dict = nest.map_structure(tf.TensorSpec.from_tensor,
--> 408 self.report_local_outputs())
409 federated_local_outputs_type = tff.FederatedType(
410 metric_variable_type_dict, tff.CLIENTS, all_equal=False)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
314 if not self._created_variables:
315 # If we did not create any variables the trace we have is good enough.
--> 316 return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds) # pylint: disable=protected-access
317
318 def fn_with_cond(*inner_args, **inner_kwds):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _filtered_call(self, args, kwargs)
382 """
383 return self._call_flat(
--> 384 (t for t in nest.flatten((args, kwargs))
385 if isinstance(
386 t, (ops.Tensor, resource_variable_ops.ResourceVariable))))
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args)
431 # Only need to override the gradient in graph mode and when we have outputs.
432 if context.executing_eagerly() or not self.outputs:
--> 433 outputs = self._inference_function.call(ctx, args)
434 else:
435 if not self._gradient_name:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args)
267 executing_eagerly=executing_eagerly,
268 config=function_call_options.config_proto_serialized,
--> 269 executor_type=function_call_options.executor_type)
270
271 if executing_eagerly:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/functional_ops.py in partitioned_call(args, f, tout, executing_eagerly, config, executor_type)
1081 outputs = gen_functional_ops.stateful_partitioned_call(
1082 args=args, Tout=tout, f=f, config_proto=config,
-> 1083 executor_type=executor_type)
1084 else:
1085 outputs = gen_functional_ops.partitioned_call(
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
487 return stateful_partitioned_call_eager_fallback(
488 args, Tout=Tout, f=f, config=config, config_proto=config_proto,
--> 489 executor_type=executor_type, name=name, ctx=_ctx)
490 except _core._SymbolicException:
491 pass # Add nodes to the TensorFlow graph.
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call_eager_fallback(args, Tout, f, config, config_proto, executor_type, name, ctx)
548 executor_type = ""
549 executor_type = _execute.make_str(executor_type, "executor_type")
--> 550 _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
551 _inputs_flat = list(args)
552 _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f, "config", config,
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in convert_to_mixed_eager_tensors(values, ctx)
207 def convert_to_mixed_eager_tensors(values, ctx):
208 v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209 types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
210 return types, v
211
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in <listcomp>(.0)
207 def convert_to_mixed_eager_tensors(values, ctx):
208 v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209 types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
210 return types, v
211
AttributeError: 'Tensor' object has no attribute '_datatype_enum'
Nuria: this should just have been fixed earlier today. If you do not want to wait for the next release (coming soon), I would recommend that you simply build a local pip package from source. You can find instructions in the install guide.
As a followup here: TFF 0.4.0 has just been released, which contains this bugfix.
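A quick way to confirm the fixed release is in the environment (assuming the package exposes __version__, as later releases do):

import tensorflow_federated as tff
print(tff.__version__)  # expect 0.4.0 or later for this fix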

neo4django with Neo4j 2.0

I'm just starting out with Neo4j and I'd like to use 2.0 (I have 2.0.1 Community installed). I see that neo4django was only tested against Neo4j 1.8.2-1.9.4, but have people gotten it working with 2.x? I installed the Gremlin plugin, but I can't create or query through neo4django.
create:
In [8]: NeoProfile.objects.create(profile_id=1234)
[INFO] requests.packages.urllib3.connectionpool#214: Resetting dropped connection: localhost
---------------------------------------------------------------------------
StatusException Traceback (most recent call last)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/core/management/commands/shell.pyc in <module>()
----> 1 NeoProfile.objects.create(profile_id=1234)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/manager.pyc in create(self, **kwargs)
41
42 def create(self, **kwargs):
---> 43 return self.get_query_set().create(**kwargs)
44
45 def filter(self, *args, **kwargs):
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in create(self, **kwargs)
1295 if 'id' in kwargs or 'pk' in kwargs:
1296 raise FieldError("Neo4j doesn't allow node ids to be assigned.")
-> 1297 return super(NodeQuerySet, self).create(**kwargs)
1298
1299 #TODO would be awesome if this were transactional
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in create(self, **kwargs)
375 obj = self.model(**kwargs)
376 self._for_write = True
--> 377 obj.save(force_insert=True, using=self.db)
378 return obj
379
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in save(self, using, **kwargs)
315
316 def save(self, using=DEFAULT_DB_ALIAS, **kwargs):
--> 317 return super(NodeModel, self).save(using=using, **kwargs)
318
319 #alters_data
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/base.pyc in save(self, force_insert, force_update, using)
461 if force_insert and force_update:
462 raise ValueError("Cannot force both insert and updating in model saving.")
--> 463 self.save_base(using=using, force_insert=force_insert, force_update=force_update)
464
465 save.alters_data = True
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in save_base(self, raw, cls, origin, force_insert, force_update, using, *args, **kwargs)
331
332 is_new = self.id is None
--> 333 self._save_neo4j_node(using)
334 self._save_properties(self, self.__node, is_new)
335 self._save_neo4j_relationships(self, self.__node)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _save_neo4j_node(self, using)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in trans_method(func, *args, **kw)
95 #TODO this is where generalized transaction support will go,
96 #when it's ready in neo4jrestclient
---> 97 ret = func(*args, **kw)
98 #tx.commit()
99 return ret
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _save_neo4j_node(self, using)
359 self.__node = conn.gremlin_tx(script, types=type_hier_props,
360 indexName=self.index_name(),
--> 361 typesToIndex=type_names_to_index)
362 return self.__node
363
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in gremlin_tx(self, script, **params)
177 will be wrapped in a transaction.
178 """
--> 179 return self.gremlin(script, tx=True, **params)
180
181 def cypher(self, query, **params):
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in gremlin(self, script, tx, raw, **params)
166 try:
167 return send_script(include_unloaded_libraries(lib_script),
--> 168 params)
169 except LibraryCouldNotLoad:
170 if i == 0:
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in send_script(s, params)
151 if raw:
152 execute_kwargs['returns'] = RETURNS_RAW
--> 153 script_rv = ext.execute_script(s, params=params, **execute_kwargs)
154 if isinstance(script_rv, basestring):
155 if LIBRARY_ERROR_REGEX.match(script_rv):
/Users/atomos/workspace/Project-Vitamin/src/neo4j-rest-client/neo4jrestclient/client.py in __call__(self, *args, **kwargs)
2313 except (ValueError, AttributeError, KeyError, TypeError):
2314 pass
-> 2315 raise StatusException(response.status_code, msg)
2316
2317 def __repr__(self):
StatusException: Code [400]: Bad Request. Bad request syntax or unsupported method.
Invalid data sent: javax.script.ScriptException: groovy.lang.MissingMethodException: No signature of method: groovy.lang.MissingMethodException.setMaxBufferSize() is applicable for argument types: () values: []
query:
In [9]: NeoProfile.objects.filter(profile_id=1234)
Out[9]: ---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/core/management/commands/shell.pyc in <module>()
----> 1 NeoProfile.objects.filter(profile_id=1234)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/displayhook.pyc in __call__(self, result)
236 self.start_displayhook()
237 self.write_output_prompt()
--> 238 format_dict = self.compute_format_data(result)
239 self.write_format_data(format_dict)
240 self.update_user_ns(result)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/displayhook.pyc in compute_format_data(self, result)
148 MIME type representation of the object.
149 """
--> 150 return self.shell.display_formatter.format(result)
151
152 def write_format_data(self, format_dict):
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/formatters.pyc in format(self, obj, include, exclude)
124 continue
125 try:
--> 126 data = formatter(obj)
127 except:
128 # FIXME: log the exception
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/formatters.pyc in __call__(self, obj)
445 type_pprinters=self.type_printers,
446 deferred_pprinters=self.deferred_printers)
--> 447 printer.pretty(obj)
448 printer.flush()
449 return stream.getvalue()
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/lib/pretty.pyc in pretty(self, obj)
358 if callable(meth):
359 return meth(obj, self, cycle)
--> 360 return _default_pprint(obj, self, cycle)
361 finally:
362 self.end_group()
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/lib/pretty.pyc in _default_pprint(obj, p, cycle)
478 if getattr(klass, '__repr__', None) not in _baseclass_reprs:
479 # A user-provided repr.
--> 480 p.text(repr(obj))
481 return
482 p.begin_group(1, '<')
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in __repr__(self)
70
71 def __repr__(self):
---> 72 data = list(self[:REPR_OUTPUT_SIZE + 1])
73 if len(data) > REPR_OUTPUT_SIZE:
74 data[-1] = "...(remaining elements truncated)..."
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in __len__(self)
85 self._result_cache = list(self.iterator())
86 elif self._iter:
---> 87 self._result_cache.extend(self._iter)
88 if self._prefetch_related_lookups and not self._prefetch_done:
89 self._prefetch_related_objects()
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in iterator(self)
1274 using = self.db
1275 if not self.query.can_filter():
-> 1276 for model in self.query.execute(using):
1277 yield model
1278 else:
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in execute(self, using)
1161 conn = connections[using]
1162
-> 1163 groovy, params = self.as_groovy(using)
1164
1165 raw_result_set = conn.gremlin_tx(groovy, **params) if groovy is not None else []
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in as_groovy(self, using)
925 # add the typeNodeId param, either for type verification or initial
926 # type tree traversal
--> 927 cypher_params['typeNodeId'] = self.model._type_node(using).id
928
929 type_restriction_expr = """
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _type_node(cls, using)
411 return cls.__type_node_memoized(using)
412 else:
--> 413 return cls.__type_node_classmethod(using)
414
415 #classmethod
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in __type_node(cls, using)
394 script_rv = conn.gremlin_tx(script, types=type_hier_props)
395 except Exception, e:
--> 396 raise RuntimeError(error_message, e)
397 if not hasattr(script_rv, 'properties'):
398 raise RuntimeError(error_message + '\n\n%s' % script_rv)
RuntimeError: ('The type node for class NeoProfile could not be created in the database.', StatusException())
My model is incredibly complex:
class NeoProfile(neomodels.NodeModel):
    profile_id = neomodels.IntegerProperty(indexed=True)
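Since both failing calls go through neo4django's Gremlin extension, it can help to first verify that the underlying REST client talks to the 2.0.1 server at all; a minimal sketch using plain Cypher instead of Gremlin (the localhost URL is an assumption):

from neo4jrestclient.client import GraphDatabase

# Default REST endpoint of a local Neo4j 2.x server
gdb = GraphDatabase("http://localhost:7474/db/data/")

# A plain Cypher round trip exercises the server without the Gremlin plugin
results = gdb.query("CREATE (n {profile_id: 1234}) RETURN id(n)")
print(results[0])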
