I am trying to save a fitted scikit-learn pipeline on Databricks using dill. However, I encounter the following issue:
import dill
f = open("/dbfs/HR_pipe.p", mode='wb')
dill.dump(fitted,f)
where:
fitted = main_pipeline.fit(X_train, y_train)
Below you can find the error traceback. I am using scikit-learn version 0.24.2, and the fitted object is a sklearn Pipeline.
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<command-97102145541341> in <module>
1 import dill
2 f = open("/dbfs/HR_pipe.p", mode='wb')
----> 3 dill.dump(fitted,f)
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in dump(obj, file, protocol, byref, fmode, recurse, **kwds)
334 _kwds = kwds.copy()
335 _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse))
--> 336 Pickler(file, protocol, **_kwds).dump(obj)
337 return
338
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in dump(self, obj)
618 raise PicklingError(msg)
619 else:
--> 620 StockPickler.dump(self, obj)
621 return
622 dump.__doc__ = StockPickler.dump.__doc__
/usr/lib/python3.7/pickle.py in dump(self, obj)
435 if self.proto >= 4:
436 self.framer.start_framing()
--> 437 self.save(obj)
438 self.write(STOP)
439 self.framer.end_framing()
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
547
548 # Save the reduce() output and finally memoize the object
--> 549 self.save_reduce(obj=obj, *rv)
550
551 def persistent_id(self, obj):
/usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj)
660
661 if state is not None:
--> 662 save(state)
663 write(BUILD)
664
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj)
1249 # we only care about session the first pass thru
1250 pickler._first_pass = False
-> 1251 StockPickler.save_dict(pickler, obj)
1252 log.info("# D2")
1253 return
/usr/lib/python3.7/pickle.py in save_dict(self, obj)
857
858 self.memoize(obj)
--> 859 self._batch_setitems(obj.items())
860
861 dispatch[dict] = save_dict
/usr/lib/python3.7/pickle.py in _batch_setitems(self, items)
883 for k, v in tmp:
884 save(k)
--> 885 save(v)
886 write(SETITEMS)
887 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/usr/lib/python3.7/pickle.py in save_list(self, obj)
817
818 self.memoize(obj)
--> 819 self._batch_appends(obj)
820
821 dispatch[list] = save_list
/usr/lib/python3.7/pickle.py in _batch_appends(self, items)
841 write(MARK)
842 for x in tmp:
--> 843 save(x)
844 write(APPENDS)
845 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/usr/lib/python3.7/pickle.py in save_tuple(self, obj)
772 if n <= 3 and self.proto >= 2:
773 for element in obj:
--> 774 save(element)
775 # Subtle. Same as in the big comment below.
776 if id(obj) in memo:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
547
548 # Save the reduce() output and finally memoize the object
--> 549 self.save_reduce(obj=obj, *rv)
550
551 def persistent_id(self, obj):
/usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj)
631 "args[0] from __newobj__ args has the wrong class")
632 args = args[1:]
--> 633 save(cls)
634 save(args)
635 write(NEWOBJ)
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in save_type(pickler, obj, postproc_list)
1838 _save_with_postproc(pickler, (_create_type, (
1839 type(obj), obj.__name__, obj.__bases__, _dict
-> 1840 )), obj=obj, postproc_list=postproc_list)
1841 log.info("# %s" % _t)
1842 else:
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in _save_with_postproc(pickler, reduction, is_pickler_dill, obj, postproc_list)
1152 if source:
1153 pickler.write(pickler.get(pickler.memo[id(dest)][0]))
-> 1154 pickler._batch_setitems(iter(source.items()))
1155 else:
1156 # Updating with an empty dictionary. Same as doing nothing.
/usr/lib/python3.7/pickle.py in _batch_setitems(self, items)
883 for k, v in tmp:
884 save(k)
--> 885 save(v)
886 write(SETITEMS)
887 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
522 reduce = getattr(obj, "__reduce_ex__", None)
523 if reduce is not None:
--> 524 rv = reduce(self.proto)
525 else:
526 reduce = getattr(obj, "__reduce__", None)
/databricks/spark/python/pyspark/context.py in __getnewargs__(self)
354 # This method is called when attempting to pickle SparkContext, which is always an error:
355 raise Exception(
--> 356 "It appears that you are attempting to reference SparkContext from a broadcast "
357 "variable, action, or transformation. SparkContext can only be used on the driver, "
358 "not in code that it run on workers. For more information, see SPARK-5063."
Exception: It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063.
Do you happen to know how to solve this issue?
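For reference, one way to narrow this down (a debugging sketch, not a confirmed fix): the SPARK-5063 message means some object inside the fitted pipeline is holding a reference to the SparkContext, so serializing each step on its own usually points at the culprit.

import dill

# Attempt to pickle each fitted step individually; the step that raises the
# SPARK-5063 exception is the one keeping a reference to SparkContext.
for name, step in fitted.named_steps.items():
    try:
        dill.dumps(step)
        print(f"{name}: picklable")
    except Exception as exc:
        print(f"{name}: not picklable -> {exc}")

Once the offending step is identified (often a custom transformer that captured `spark` or a Spark DataFrame), that reference can be dropped or replaced before dumping.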
I am unable to figure out why my BERT model doesn't get past the training command. I am using pytorch-lightning. I am running the code on an AWS EC2 (p3.2xlarge) instance and it does show me the available GPU, but I can't really figure out the device-side error. Could someone please point me in the right direction? I really appreciate your time and consideration.
PS: The results are after setting CUDA_LAUNCH_BLOCKING=1.
trainer = pl.Trainer(
    logger=logger,
    checkpoint_callback=checkpoint_callback,
    callbacks=[early_stopping_callback],
    max_epochs=N_EPOCHS,
    gpus=1,
    progress_bar_refresh_rate=30,
)
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
In [155]:
trainer.fit(model, data_module)
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-155-7b6b8391c42e> in <module>
----> 1 trainer.fit(model, data_module)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1143
1144 self._call_configure_sharded_model() # allow user to setup in model sharded environment
-> 1145 self.accelerator.setup(self)
1146
1147 # ----------------------------
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/gpu.py in setup(self, trainer)
44 def setup(self, trainer: "pl.Trainer") -> None:
45 self.set_nvidia_flags(trainer.local_rank)
---> 46 return super().setup(trainer)
47
48 def on_train_start(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in setup(self, trainer)
89 trainer: the trainer instance
90 """
---> 91 self.setup_training_type_plugin()
92 if not self.training_type_plugin.setup_optimizers_in_pre_dispatch:
93 self.setup_optimizers(trainer)
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in setup_training_type_plugin(self)
361 def setup_training_type_plugin(self) -> None:
362 """Attaches the training type plugin to the accelerator."""
--> 363 self.training_type_plugin.setup()
364
365 def setup_precision_plugin(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/single_device.py in setup(self)
69
70 def setup(self) -> None:
---> 71 self.model_to_device()
72
73 #property
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/single_device.py in model_to_device(self)
66
67 def model_to_device(self) -> None:
---> 68 self._model.to(self.root_device)
69
70 def setup(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/mixins/device_dtype_mixin.py in to(self, *args, **kwargs)
109 out = torch._C._nn._parse_to(*args, **kwargs)
110 self.__update_properties(device=out[0], dtype=out[1])
--> 111 return super().to(*args, **kwargs)
112
113 def cuda(self, device: Optional[Union[torch.device, int]] = None) -> "DeviceDtypeModuleMixin":
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in to(self, *args, **kwargs)
897 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
898
--> 899 return self._apply(convert)
900
901 def register_backward_hook(
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
591 # `with torch.no_grad():`
592 with torch.no_grad():
--> 593 param_applied = fn(param)
594 should_use_set_data = compute_should_use_set_data(param, param_applied)
595 if should_use_set_data:
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in convert(t)
895 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
896 non_blocking, memory_format=convert_to_format)
--> 897 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
898
899 return self._apply(convert)
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Restarting the machine returned this:
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
Missing logger folder: lightning_logs/nara-comments
| Name | Type | Params
-----------------------------------------
0 | bert | BertModel | 108 M
1 | classifier | Linear | 288 K
2 | criterion | BCELoss | 0
-----------------------------------------
108 M Trainable params
0 Non-trainable params
108 M Total params
434.395 Total estimated model params size (MB)
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/utilities/data.py:60: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 4540. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.
"Trying to infer the `batch_size` from an ambiguous collection. The batch size we"
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/utilities/data.py:60: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 4374. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.
"Trying to infer the `batch_size` from an ambiguous collection. The batch size we"
Global seed set to 42
Epoch 0: 0%
0/397 [00:00<?, ?it/s]
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/closure.py:36: LightningDeprecationWarning: One of the returned values {'predictions', 'labels'} has a `grad_fn`. We will detach it automatically but this behaviour will change in v1.6. Please detach it manually: `return {'loss': ..., 'something': something.detach()}`
f"One of the returned values {set(extra.keys())} has a `grad_fn`. We will detach it automatically"
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-48-7b6b8391c42e> in <module>
----> 1 trainer.fit(model, data_module)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1197
1198 # dispatch `start_training` or `start_evaluating` or `start_predicting`
-> 1199 self._dispatch()
1200
1201 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _dispatch(self)
1277 self.training_type_plugin.start_predicting(self)
1278 else:
-> 1279 self.training_type_plugin.start_training(self)
1280
1281 def run_stage(self):
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
200 def start_training(self, trainer: "pl.Trainer") -> None:
201 # double dispatch to initiate the training loop
--> 202 self._results = trainer.run_stage()
203
204 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
1287 if self.predicting:
1288 return self._run_predict()
-> 1289 return self._run_train()
1290
1291 def _pre_training_routine(self):
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1317 self.fit_loop.trainer = self
1318 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
-> 1319 self.fit_loop.run()
1320
1321 def _run_evaluate(self) -> _EVALUATE_OUTPUT:
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/fit_loop.py in advance(self)
232
233 with self.trainer.profiler.profile("run_training_epoch"):
--> 234 self.epoch_loop.run(data_fetcher)
235
236 # the global step is manually decreased here due to backwards compatibility with existing loggers
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/epoch/training_epoch_loop.py in advance(self, *args, **kwargs)
191
192 with self.trainer.profiler.profile("run_training_batch"):
--> 193 batch_output = self.batch_loop.run(batch, batch_idx)
194
195 self.batch_progress.increment_processed()
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/batch/training_batch_loop.py in advance(self, batch, batch_idx)
86 if self.trainer.lightning_module.automatic_optimization:
87 optimizers = _get_active_optimizers(self.trainer.optimizers, self.trainer.optimizer_frequencies, batch_idx)
---> 88 outputs = self.optimizer_loop.run(split_batch, optimizers, batch_idx)
89 else:
90 outputs = self.manual_loop.run(split_batch, batch_idx)
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in advance(self, batch, *args, **kwargs)
217 self._batch_idx,
218 self._optimizers[self.optim_progress.optimizer_position],
--> 219 self.optimizer_idx,
220 )
221 if result.loss is not None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in _run_optimization(self, split_batch, batch_idx, optimizer, opt_idx)
264 # gradient update with accumulated gradients
265 else:
--> 266 self._optimizer_step(optimizer, opt_idx, batch_idx, closure)
267
268 result = closure.consume_result()
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in _optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
384 on_tpu=(self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE),
385 using_native_amp=(self.trainer.amp_backend is not None and self.trainer.amp_backend == AMPType.NATIVE),
--> 386 using_lbfgs=is_lbfgs,
387 )
388
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1650
1651 """
-> 1652 optimizer.step(closure=optimizer_closure)
1653
1654 def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, **kwargs)
162 assert trainer is not None
163 with trainer.profiler.profile(profiler_action):
--> 164 trainer.accelerator.optimizer_step(self._optimizer, self._optimizer_idx, closure, **kwargs)
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in optimizer_step(self, optimizer, opt_idx, closure, model, **kwargs)
337 """
338 model = model or self.lightning_module
--> 339 self.precision_plugin.optimizer_step(model, optimizer, opt_idx, closure, **kwargs)
340
341 def optimizer_zero_grad(self, current_epoch: int, batch_idx: int, optimizer: Optimizer, opt_idx: int) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in optimizer_step(self, model, optimizer, optimizer_idx, closure, **kwargs)
161 if isinstance(model, pl.LightningModule):
162 closure = partial(self._wrap_closure, model, optimizer, optimizer_idx, closure)
--> 163 optimizer.step(closure=closure, **kwargs)
164
165 def _track_grad_norm(self, trainer: "pl.Trainer") -> None:
~/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py in wrapper(*args, **kwargs)
63 instance._step_count += 1
64 wrapped = func.__get__(instance, cls)
---> 65 return wrapped(*args, **kwargs)
66
67 # Note that the returned function here is no longer a bound method,
~/.local/lib/python3.6/site-packages/torch/optim/optimizer.py in wrapper(*args, **kwargs)
86 profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__)
87 with torch.autograd.profiler.record_function(profile_name):
---> 88 return func(*args, **kwargs)
89 return wrapper
90
~/.local/lib/python3.6/site-packages/transformers/optimization.py in step(self, closure)
330 loss = None
331 if closure is not None:
--> 332 loss = closure()
333
334 for group in self.param_groups:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in _wrap_closure(self, model, optimizer, optimizer_idx, closure)
146 consistent with the ``PrecisionPlugin`` subclasses that cannot pass ``optimizer.step(closure)`` directly.
147 """
--> 148 closure_result = closure()
149 self._after_closure(model, optimizer, optimizer_idx)
150 return closure_result
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in __call__(self, *args, **kwargs)
158
159 def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 160 self._result = self.closure(*args, **kwargs)
161 return self._result.loss
162
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in closure(self, *args, **kwargs)
153 if self._backward_fn is not None and step_output.closure_loss is not None:
154 with self._profiler.profile("backward"):
--> 155 self._backward_fn(step_output.closure_loss)
156
157 return step_output
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in backward_fn(loss)
325
326 def backward_fn(loss: Tensor) -> None:
--> 327 self.trainer.accelerator.backward(loss, optimizer, opt_idx)
328
329 # check if model weights are nan
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, *args, **kwargs)
312 closure_loss = self.precision_plugin.pre_backward(self.lightning_module, closure_loss)
313
--> 314 self.precision_plugin.backward(self.lightning_module, closure_loss, *args, **kwargs)
315
316 closure_loss = self.precision_plugin.post_backward(self.lightning_module, closure_loss)
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in backward(self, model, closure_loss, optimizer, *args, **kwargs)
89 # do backward pass
90 if model is not None and isinstance(model, pl.LightningModule):
---> 91 model.backward(closure_loss, optimizer, *args, **kwargs)
92 else:
93 self._run_backward(closure_loss, *args, **kwargs)
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1432 loss.backward()
1433 """
-> 1434 loss.backward(*args, **kwargs)
1435
1436 def toggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer], optimizer_idx: int) -> None:
~/.local/lib/python3.6/site-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
305 create_graph=create_graph,
306 inputs=inputs)
--> 307 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
308
309 def register_hook(self, hook):
~/.local/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
154 Variable._execution_engine.run_backward(
155 tensors, grad_tensors_, retain_graph, create_graph, inputs,
--> 156 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
157
158
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
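As a debugging aid (not a confirmed diagnosis): with a BCELoss criterion, a device-side assert is very often caused by predictions or labels falling outside [0, 1]. Running a single batch on the CPU makes the underlying failure readable, since the GPU only reports the generic assert. A minimal sketch:

import pytorch_lightning as pl

# Run one training batch on the CPU: the same failure then raises a readable
# Python exception (e.g. BCELoss complaining about values outside [0, 1])
# instead of the opaque "device-side assert triggered".
cpu_trainer = pl.Trainer(gpus=0, max_epochs=1, limit_train_batches=1)
cpu_trainer.fit(model, data_module)

Two common culprits with this kind of setup are classifier outputs that never pass through a sigmoid before BCELoss, and label tensors that are not floats in [0, 1].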
I've recently watched a YouTube video (Data School) where the presenter used only 3 columns from the Titanic dataset and made a pipeline. I wanted to add more columns to get better accuracy, so I added Age and Fare.
I think it's probably because of the values of Age and Fare that I'm getting this error when I run cross_val_score:
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

columns_trans = make_column_transformer(
    (OneHotEncoder(), ['Sex', 'Embarked']),
    remainder='passthrough')
logreg = LogisticRegression(solver='lbfgs')
pipe = make_pipeline(columns_trans, logreg)
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py:552: FitFailedWarning: Estimator fit failed. The score on this train-test partition for these parameters will be set to nan.
If I remove Age and Fare, everything works fine. I was wondering if the Column Transformer or the make_pipeline had a problem with values like that.
I also tried scaling the values of Fare and Age; cross_val_score then returned a score, but pipe.predict() failed with this error:
ValueError: Input contains NaN, infinity or a value too large for dtype('float64')
Traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/tmp/ipykernel_119/4279568460.py in <module>
----> 1 cross_val_score(pipe, X, y, cv=5, scoring='accuracy', error_score="raise").mean()
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
70 FutureWarning)
71 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 72 return f(**kwargs)
73 return inner_f
74
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
404 fit_params=fit_params,
405 pre_dispatch=pre_dispatch,
--> 406 error_score=error_score)
407 return cv_results['test_score']
408
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
70 FutureWarning)
71 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 72 return f(**kwargs)
73 return inner_f
74
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
246 return_times=True, return_estimator=return_estimator,
247 error_score=error_score)
--> 248 for train, test in cv.split(X, y, groups))
249
250 zipped_scores = list(zip(*scores))
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1039 # remaining jobs.
1040 self._iterating = False
-> 1041 if self.dispatch_one_batch(iterator):
1042 self._iterating = self._original_iterator is not None
1043
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in <listcomp>(.0)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, error_score)
529 estimator.fit(X_train, **fit_params)
530 else:
--> 531 estimator.fit(X_train, y_train, **fit_params)
532
533 except Exception as e:
/opt/conda/lib/python3.7/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
333 if self._final_estimator != 'passthrough':
334 fit_params_last_step = fit_params_steps[self.steps[-1][0]]
--> 335 self._final_estimator.fit(Xt, y, **fit_params_last_step)
336
337 return self
/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py in fit(self, X, y, sample_weight)
1415 penalty=penalty, max_squared_sum=max_squared_sum,
1416 sample_weight=sample_weight)
-> 1417 for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
1418
1419 fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1039 # remaining jobs.
1040 self._iterating = False
-> 1041 if self.dispatch_one_batch(iterator):
1042 self._iterating = self._original_iterator is not None
1043
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in <listcomp>(.0)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py in _logistic_regression_path(X, y, pos_class, Cs, fit_intercept, max_iter, tol, verbose, solver, coef, class_weight, dual, penalty, intercept_scaling, multi_class, random_state, check_input, max_squared_sum, sample_weight, l1_ratio)
762 n_iter_i = _check_optimize_result(
763 solver, opt_res, max_iter,
--> 764 extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
765 w0, loss = opt_res.x, opt_res.fun
766 elif solver == 'newton-cg':
/opt/conda/lib/python3.7/site-packages/sklearn/utils/optimize.py in _check_optimize_result(solver, result, max_iter, extra_warning_msg)
241 " https://scikit-learn.org/stable/modules/"
242 "preprocessing.html"
--> 243 ).format(solver, result.status, result.message.decode("latin1"))
244 if extra_warning_msg is not None:
245 warning_msg += "\n" + extra_warning_msg
AttributeError: 'str' object has no attribute 'decode'
I solved this error by changing solver=lbfgs to solver=liblinear in LogisticRegression()
logreg = LogisticRegression(solver='lbfgs')
to
logreg = LogisticRegression(solver='liblinear')
And for the following error:
ValueError: Input contains NaN, infinity or a value too large for dtype('float64')
It's best to check if your test data contains any null values or strings.
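Building on that: in the Titanic data, Age (and occasionally Fare in the test set) contains missing values, which is what usually produces the NaN error above. A sketch of handling this inside the pipeline itself, assuming the column names from the Kaggle CSV and the same X and y as above:

from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

columns_trans = make_column_transformer(
    (OneHotEncoder(handle_unknown='ignore'), ['Sex', 'Embarked']),
    (SimpleImputer(strategy='median'), ['Age', 'Fare']),  # fill NaNs instead of passing them through
    remainder='passthrough')

pipe = make_pipeline(columns_trans, LogisticRegression(solver='liblinear'))
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()

Because the imputer runs inside the pipeline, the same filling is applied during cross-validation and again at pipe.predict() time.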
I am trying to deploy Dask Gateway integrated with JupyterHub, which is why I decided to give the DaskHub chart a try.
After following the instructions at https://docs.dask.org/en/latest/setup/kubernetes-helm.html#helm-install-dask-for-mulitple-users, JupyterHub is working fine, but when I try to create a new Dask cluster via the UI or through:
from dask_gateway import GatewayCluster
cluster = GatewayCluster()
I get this error:
ValueError Traceback (most recent call last)
<ipython-input-3-36809e239298> in <module>
1 from dask_gateway import GatewayCluster
----> 2 cluster = GatewayCluster()
3
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in __init__(self, address, proxy_address, public_address, auth, cluster_options, shutdown_on_close, asynchronous, loop, **kwargs)
816 shutdown_on_close=shutdown_on_close,
817 asynchronous=asynchronous,
--> 818 loop=loop,
819 )
820
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _init_internal(self, address, proxy_address, public_address, auth, cluster_options, cluster_kwargs, shutdown_on_close, asynchronous, loop, name)
912 self.status = "starting"
913 if not self.asynchronous:
--> 914 self.gateway.sync(self._start_internal)
915
916 #property
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in sync(self, func, *args, **kwargs)
337 )
338 try:
--> 339 return future.result()
340 except BaseException:
341 future.cancel()
/srv/conda/envs/notebook/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
433 raise CancelledError()
434 elif self._state == FINISHED:
--> 435 return self.__get_result()
436 else:
437 raise TimeoutError()
/srv/conda/envs/notebook/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _start_internal(self)
926 self._start_task = asyncio.ensure_future(self._start_async())
927 try:
--> 928 await self._start_task
929 except BaseException:
930 # On exception, cleanup
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _start_async(self)
940 self.status = "starting"
941 self.name = await self.gateway._submit(
--> 942 cluster_options=self._cluster_options, **self._cluster_kwargs
943 )
944 # Connect to cluster
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _submit(self, cluster_options, **kwargs)
529 options = self._config_cluster_options()
530 options.update(kwargs)
--> 531 resp = await self._request("POST", url, json={"cluster_options": options})
532 data = await resp.json()
533 return data["name"]
/srv/conda/envs/notebook/lib/python3.7/site-packages/dask_gateway/client.py in _request(self, method, url, json)
407
408 if resp.status in {404, 422}:
--> 409 raise ValueError(msg)
410 elif resp.status == 409:
411 raise GatewayClusterError(msg)
ValueError: Unknown fields ['image']
Any help will be greatly appreciated.
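Not a confirmed fix, but the "Unknown fields ['image']" response means the client is sending a cluster option (image) that the gateway server is not configured to accept. A quick way to see which options the server actually exposes from the notebook:

from dask_gateway import Gateway

gateway = Gateway()
options = gateway.cluster_options()   # options the server advertises
print(options)                        # 'image' must be listed here before it can be set

# Create a cluster using only server-known options:
cluster = gateway.new_cluster(options)

If image is not listed, it has to be declared on the server side of the Helm deployment (the gateway's cluster options in its extraConfig) before clients can set it.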
I was trying to reimplement the GitHub tutorial with my own CNN-based Keras model, but I got an error when evaluating.
from __future__ import absolute_import, division, print_function
import collections
from six.moves import range
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated import python as tff
nest = tf.contrib.framework.nest  # needed: `nest` is used below but was never defined; the TF 1.x tutorial aliased it this way
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
example_dataset = emnist_train.create_tf_dataset_for_client(
    emnist_train.client_ids[0])

NUM_EPOCHS = 10
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500

def preprocess(dataset):
    def element_fn(element):
        return collections.OrderedDict([
            ('x', tf.reshape(element['pixels'], [-1])),
            ('y', tf.reshape(element['label'], [1])),
        ])
    return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
        SHUFFLE_BUFFER).batch(BATCH_SIZE)

preprocessed_example_dataset = preprocess(example_dataset)

sample_batch = nest.map_structure(
    lambda x: x.numpy(), iter(preprocessed_example_dataset).next())

def make_federated_data(client_data, client_ids):
    return [preprocess(client_data.create_tf_dataset_for_client(x))
            for x in client_ids]

NUM_CLIENTS = 3
sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]
federated_train_data = make_federated_data(emnist_train, sample_clients)
len(federated_train_data), federated_train_data[0]

def create_compiled_keras_model():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
        tf.keras.layers.Conv2D(32, kernel_size=(5, 5), activation="relu", padding="same", strides=1),
        tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
        tf.keras.layers.Conv2D(64, kernel_size=(5, 5), activation="relu", padding="same", strides=1),
        tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation="relu"),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])

    def loss_fn(y_true, y_pred):
        return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
            y_true, y_pred))

    model.compile(
        loss=loss_fn,
        optimizer=gradient_descent.SGD(learning_rate=0.02),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model

def model_fn():
    keras_model = create_compiled_keras_model()
    return tff.learning.from_compiled_keras_model(keras_model, sample_batch)

iterative_process = tff.learning.build_federated_averaging_process(model_fn)
state = iterative_process.initialize()

for round_num in range(1, 10):
    state, metrics = iterative_process.next(state, federated_train_data)
    print('round {:2d}, metrics={}'.format(round_num, metrics))

## Evaluation of the model
# This function doesn't work
evaluation = tff.learning.build_federated_evaluation(model_fn)
federated_test_data = make_federated_data(emnist_test, sample_clients)
test_metrics = evaluation(state.model, federated_test_data)
I expected to get the evaluation on the test data, but the actual output is the following error:
---------------------------------------------------------------------------
_FallbackException Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
482 "Tout", Tout, "f", f, "config", config, "config_proto", config_proto,
--> 483 "executor_type", executor_type)
484 return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-23-6e9c77f70201> in <module>()
----> 1 evaluation = tff.learning.build_federated_evaluation(model_fn)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in build_federated_evaluation(model_fn)
83 #tff.federated_computation(
84 tff.FederatedType(model_weights_type, tff.SERVER, all_equal=True),
---> 85 tff.FederatedType(tff.SequenceType(batch_type), tff.CLIENTS))
86 def server_eval(server_model_weights, federated_dataset):
87 client_outputs = tff.federated_map(
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in <lambda>(fn)
406 args = (args,)
407 arg_type = computation_types.to_type(args[0])
--> 408 return lambda fn: _wrap(fn, arg_type, self._wrapper_fn)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in _wrap(fn, parameter_type, wrapper_fn)
94 function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type),
95 parameter_type,
---> 96 name=fn_name)
97 py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction,
98 'value returned by the wrapper')
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper_instances.py in _federated_computation_wrapper_fn(target_fn, parameter_type, name)
52 parameter_type,
53 ctx_stack,
---> 54 suggested_name=name))
55 return computation_impl.ComputationImpl(target_lambda.proto, ctx_stack)
56
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/federated_computation_utils.py in zero_or_one_arg_fn_to_building_block(fn, parameter_name, parameter_type, context_stack, suggested_name)
73 value_impl.ValueImpl(
74 computation_building_blocks.Reference(
---> 75 parameter_name, parameter_type), context_stack))
76 else:
77 result = fn()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in <lambda>(arg)
551 # and to force any parameter bindings to be resolved now.
552 # pylint: disable=unnecessary-lambda,undefined-variable
--> 553 return (lambda fn, at, kt: lambda arg: _unpack_and_call(fn, at, kt, arg))(
554 fn, arg_types, kwarg_types)
555 # pylint: enable=unnecessary-lambda,undefined-variable
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in _unpack_and_call(fn, arg_types, kwarg_types, arg)
545 name, str(expected_type), str(actual_type)))
546 kwargs[name] = element_value
--> 547 return fn(*args, **kwargs)
548
549 # Deliberate wrapping to isolate the caller from the underlying function
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in server_eval(server_model_weights, federated_dataset)
88 client_eval,
89 [tff.federated_broadcast(server_model_weights), federated_dataset])
---> 90 return model.federated_output_computation(client_outputs.local_outputs)
91
92 return server_eval
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
531 #property
532 def federated_output_computation(self):
--> 533 return self._model.federated_output_computation
534
535
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
406 def federated_output_computation(self):
407 metric_variable_type_dict = nest.map_structure(tf.TensorSpec.from_tensor,
--> 408 self.report_local_outputs())
409 federated_local_outputs_type = tff.FederatedType(
410 metric_variable_type_dict, tff.CLIENTS, all_equal=False)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
314 if not self._created_variables:
315 # If we did not create any variables the trace we have is good enough.
--> 316 return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds) # pylint: disable=protected-access
317
318 def fn_with_cond(*inner_args, **inner_kwds):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _filtered_call(self, args, kwargs)
382 """
383 return self._call_flat(
--> 384 (t for t in nest.flatten((args, kwargs))
385 if isinstance(
386 t, (ops.Tensor, resource_variable_ops.ResourceVariable))))
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args)
431 # Only need to override the gradient in graph mode and when we have outputs.
432 if context.executing_eagerly() or not self.outputs:
--> 433 outputs = self._inference_function.call(ctx, args)
434 else:
435 if not self._gradient_name:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args)
267 executing_eagerly=executing_eagerly,
268 config=function_call_options.config_proto_serialized,
--> 269 executor_type=function_call_options.executor_type)
270
271 if executing_eagerly:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/functional_ops.py in partitioned_call(args, f, tout, executing_eagerly, config, executor_type)
1081 outputs = gen_functional_ops.stateful_partitioned_call(
1082 args=args, Tout=tout, f=f, config_proto=config,
-> 1083 executor_type=executor_type)
1084 else:
1085 outputs = gen_functional_ops.partitioned_call(
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
487 return stateful_partitioned_call_eager_fallback(
488 args, Tout=Tout, f=f, config=config, config_proto=config_proto,
--> 489 executor_type=executor_type, name=name, ctx=_ctx)
490 except _core._SymbolicException:
491 pass # Add nodes to the TensorFlow graph.
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call_eager_fallback(args, Tout, f, config, config_proto, executor_type, name, ctx)
548 executor_type = ""
549 executor_type = _execute.make_str(executor_type, "executor_type")
--> 550 _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
551 _inputs_flat = list(args)
552 _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f, "config", config,
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in convert_to_mixed_eager_tensors(values, ctx)
207 def convert_to_mixed_eager_tensors(values, ctx):
208 v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209 types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
210 return types, v
211
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in <listcomp>(.0)
207 def convert_to_mixed_eager_tensors(values, ctx):
208 v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209 types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
210 return types, v
211
AttributeError: 'Tensor' object has no attribute '_datatype_enum'
Nuria: this should just have been fixed earlier today. If you do not want to wait for the next release (coming soon), I would recommend that you simply build a local pip package from source. You can find instructions in the install guide.
As a followup here: TFF 0.4.0 has just been released, which contains this bugfix.
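For completeness, a quick sanity check after upgrading or rebuilding, assuming the package exposes __version__ as recent TFF releases do:

import tensorflow_federated as tff

# The evaluation bugfix shipped in the 0.4.0 release, so the installed
# version should be at least that.
print(tff.__version__)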