I am trying to pickle a scikit-learn pipeline on Databricks. However, I encounter the following issue:
import dill
f = open("/dbfs/HR_pipe.p", mode='wb')
dill.dump(fitted,f)
where:
fitted = main_pipeline.fit(X_train, y_train)
Below you can find the error traceback. I am using scikit-learn version 0.24.2, and the fitted object is an sklearn Pipeline.
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<command-97102145541341> in <module>
1 import dill
2 f = open("/dbfs/HR_pipe.p", mode='wb')
----> 3 dill.dump(fitted,f)
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in dump(obj, file, protocol, byref, fmode, recurse, **kwds)
334 _kwds = kwds.copy()
335 _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse))
--> 336 Pickler(file, protocol, **_kwds).dump(obj)
337 return
338
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in dump(self, obj)
618 raise PicklingError(msg)
619 else:
--> 620 StockPickler.dump(self, obj)
621 return
622 dump.__doc__ = StockPickler.dump.__doc__
/usr/lib/python3.7/pickle.py in dump(self, obj)
435 if self.proto >= 4:
436 self.framer.start_framing()
--> 437 self.save(obj)
438 self.write(STOP)
439 self.framer.end_framing()
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
547
548 # Save the reduce() output and finally memoize the object
--> 549 self.save_reduce(obj=obj, *rv)
550
551 def persistent_id(self, obj):
/usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj)
660
661 if state is not None:
--> 662 save(state)
663 write(BUILD)
664
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in save_module_dict(pickler, obj)
1249 # we only care about session the first pass thru
1250 pickler._first_pass = False
-> 1251 StockPickler.save_dict(pickler, obj)
1252 log.info("# D2")
1253 return
/usr/lib/python3.7/pickle.py in save_dict(self, obj)
857
858 self.memoize(obj)
--> 859 self._batch_setitems(obj.items())
860
861 dispatch[dict] = save_dict
/usr/lib/python3.7/pickle.py in _batch_setitems(self, items)
883 for k, v in tmp:
884 save(k)
--> 885 save(v)
886 write(SETITEMS)
887 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/usr/lib/python3.7/pickle.py in save_list(self, obj)
817
818 self.memoize(obj)
--> 819 self._batch_appends(obj)
820
821 dispatch[list] = save_list
/usr/lib/python3.7/pickle.py in _batch_appends(self, items)
841 write(MARK)
842 for x in tmp:
--> 843 save(x)
844 write(APPENDS)
845 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/usr/lib/python3.7/pickle.py in save_tuple(self, obj)
772 if n <= 3 and self.proto >= 2:
773 for element in obj:
--> 774 save(element)
775 # Subtle. Same as in the big comment below.
776 if id(obj) in memo:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
547
548 # Save the reduce() output and finally memoize the object
--> 549 self.save_reduce(obj=obj, *rv)
550
551 def persistent_id(self, obj):
/usr/lib/python3.7/pickle.py in save_reduce(self, func, args, state, listitems, dictitems, obj)
631 "args[0] from __newobj__ args has the wrong class")
632 args = args[1:]
--> 633 save(cls)
634 save(args)
635 write(NEWOBJ)
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
502 f = self.dispatch.get(t)
503 if f is not None:
--> 504 f(self, obj) # Call unbound method with explicit self
505 return
506
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in save_type(pickler, obj, postproc_list)
1838 _save_with_postproc(pickler, (_create_type, (
1839 type(obj), obj.__name__, obj.__bases__, _dict
-> 1840 )), obj=obj, postproc_list=postproc_list)
1841 log.info("# %s" % _t)
1842 else:
/local_disk0/.ephemeral_nfs/envs/pythonEnv-1f8e9f3e-114d-4f9b-915f-25ba440655c4/lib/python3.7/site-packages/dill/_dill.py in _save_with_postproc(pickler, reduction, is_pickler_dill, obj, postproc_list)
1152 if source:
1153 pickler.write(pickler.get(pickler.memo[id(dest)][0]))
-> 1154 pickler._batch_setitems(iter(source.items()))
1155 else:
1156 # Updating with an empty dictionary. Same as doing nothing.
/usr/lib/python3.7/pickle.py in _batch_setitems(self, items)
883 for k, v in tmp:
884 save(k)
--> 885 save(v)
886 write(SETITEMS)
887 elif n:
/usr/lib/python3.7/pickle.py in save(self, obj, save_persistent_id)
522 reduce = getattr(obj, "__reduce_ex__", None)
523 if reduce is not None:
--> 524 rv = reduce(self.proto)
525 else:
526 reduce = getattr(obj, "__reduce__", None)
/databricks/spark/python/pyspark/context.py in __getnewargs__(self)
354 # This method is called when attempting to pickle SparkContext, which is always an error:
355 raise Exception(
--> 356 "It appears that you are attempting to reference SparkContext from a broadcast "
357 "variable, action, or transformation. SparkContext can only be used on the driver, "
358 "not in code that it run on workers. For more information, see SPARK-5063."
Exception: It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063.
Do you happen to know how to solve this issue?
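For what it's worth, the save_module_dict frames in the traceback show dill walking a module's globals, which is how the notebook's SparkContext can end up in the dump. A minimal sketch of a workaround, assuming the fitted Pipeline itself holds no Spark objects, is to serialize with the standard pickle module, which only touches the estimator:
import pickle

# Plain pickle serializes just the Pipeline object, without dill's
# module-level session handling that pulled the SparkContext into the dump.
with open("/dbfs/HR_pipe.p", mode="wb") as f:
    pickle.dump(fitted, f)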
I am unable to figure out why my BERT model doesn't get past the training command. I am using pytorch-lightning. I am running the code on AWS EC2 (p3.2xlarge), and it does show me the available GPU, but I can't really figure out the device-side error. Could someone please point me in the right direction? I really appreciate your time and consideration.
PS: The results below are after setting CUDA_LAUNCH_BLOCKING=1.
trainer = pl.Trainer(
    logger=logger,
    checkpoint_callback=checkpoint_callback,
    callbacks=[early_stopping_callback],
    max_epochs=N_EPOCHS,
    gpus=1,
    progress_bar_refresh_rate=30,
)
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
In [155]:
trainer.fit(model, data_module)
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-155-7b6b8391c42e> in <module>
----> 1 trainer.fit(model, data_module)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1143
1144 self._call_configure_sharded_model() # allow user to setup in model sharded environment
-> 1145 self.accelerator.setup(self)
1146
1147 # ----------------------------
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/gpu.py in setup(self, trainer)
44 def setup(self, trainer: "pl.Trainer") -> None:
45 self.set_nvidia_flags(trainer.local_rank)
---> 46 return super().setup(trainer)
47
48 def on_train_start(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in setup(self, trainer)
89 trainer: the trainer instance
90 """
---> 91 self.setup_training_type_plugin()
92 if not self.training_type_plugin.setup_optimizers_in_pre_dispatch:
93 self.setup_optimizers(trainer)
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in setup_training_type_plugin(self)
361 def setup_training_type_plugin(self) -> None:
362 """Attaches the training type plugin to the accelerator."""
--> 363 self.training_type_plugin.setup()
364
365 def setup_precision_plugin(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/single_device.py in setup(self)
69
70 def setup(self) -> None:
---> 71 self.model_to_device()
72
73 #property
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/single_device.py in model_to_device(self)
66
67 def model_to_device(self) -> None:
---> 68 self._model.to(self.root_device)
69
70 def setup(self) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/mixins/device_dtype_mixin.py in to(self, *args, **kwargs)
109 out = torch._C._nn._parse_to(*args, **kwargs)
110 self.__update_properties(device=out[0], dtype=out[1])
--> 111 return super().to(*args, **kwargs)
112
113 def cuda(self, device: Optional[Union[torch.device, int]] = None) -> "DeviceDtypeModuleMixin":
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in to(self, *args, **kwargs)
897 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
898
--> 899 return self._apply(convert)
900
901 def register_backward_hook(
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
568 def _apply(self, fn):
569 for module in self.children():
--> 570 module._apply(fn)
571
572 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
591 # `with torch.no_grad():`
592 with torch.no_grad():
--> 593 param_applied = fn(param)
594 should_use_set_data = compute_should_use_set_data(param, param_applied)
595 if should_use_set_data:
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in convert(t)
895 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
896 non_blocking, memory_format=convert_to_format)
--> 897 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
898
899 return self._apply(convert)
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Restarting the machine and rerunning the training returned this:
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
Missing logger folder: lightning_logs/nara-comments
| Name | Type | Params
-----------------------------------------
0 | bert | BertModel | 108 M
1 | classifier | Linear | 288 K
2 | criterion | BCELoss | 0
-----------------------------------------
108 M Trainable params
0 Non-trainable params
108 M Total params
434.395 Total estimated model params size (MB)
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/utilities/data.py:60: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 4540. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.
"Trying to infer the `batch_size` from an ambiguous collection. The batch size we"
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/utilities/data.py:60: UserWarning: Trying to infer the `batch_size` from an ambiguous collection. The batch size we found is 4374. To avoid any miscalculations, use `self.log(..., batch_size=batch_size)`.
"Trying to infer the `batch_size` from an ambiguous collection. The batch size we"
Global seed set to 42
Epoch 0: 0%
0/397 [00:00<?, ?it/s]
/home/ubuntu/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/closure.py:36: LightningDeprecationWarning: One of the returned values {'predictions', 'labels'} has a `grad_fn`. We will detach it automatically but this behaviour will change in v1.6. Please detach it manually: `return {'loss': ..., 'something': something.detach()}`
f"One of the returned values {set(extra.keys())} has a `grad_fn`. We will detach it automatically"
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-48-7b6b8391c42e> in <module>
----> 1 trainer.fit(model, data_module)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader, ckpt_path)
739 train_dataloaders = train_dataloader
740 self._call_and_handle_interrupt(
--> 741 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
742 )
743
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
683 """
684 try:
--> 685 return trainer_fn(*args, **kwargs)
686 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
687 except KeyboardInterrupt as exception:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
775 # TODO: ckpt_path only in v1.7
776 ckpt_path = ckpt_path or self.resume_from_checkpoint
--> 777 self._run(model, ckpt_path=ckpt_path)
778
779 assert self.state.stopped
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1197
1198 # dispatch `start_training` or `start_evaluating` or `start_predicting`
-> 1199 self._dispatch()
1200
1201 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _dispatch(self)
1277 self.training_type_plugin.start_predicting(self)
1278 else:
-> 1279 self.training_type_plugin.start_training(self)
1280
1281 def run_stage(self):
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
200 def start_training(self, trainer: "pl.Trainer") -> None:
201 # double dispatch to initiate the training loop
--> 202 self._results = trainer.run_stage()
203
204 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
1287 if self.predicting:
1288 return self._run_predict()
-> 1289 return self._run_train()
1290
1291 def _pre_training_routine(self):
~/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1317 self.fit_loop.trainer = self
1318 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
-> 1319 self.fit_loop.run()
1320
1321 def _run_evaluate(self) -> _EVALUATE_OUTPUT:
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/fit_loop.py in advance(self)
232
233 with self.trainer.profiler.profile("run_training_epoch"):
--> 234 self.epoch_loop.run(data_fetcher)
235
236 # the global step is manually decreased here due to backwards compatibility with existing loggers
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/epoch/training_epoch_loop.py in advance(self, *args, **kwargs)
191
192 with self.trainer.profiler.profile("run_training_batch"):
--> 193 batch_output = self.batch_loop.run(batch, batch_idx)
194
195 self.batch_progress.increment_processed()
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/batch/training_batch_loop.py in advance(self, batch, batch_idx)
86 if self.trainer.lightning_module.automatic_optimization:
87 optimizers = _get_active_optimizers(self.trainer.optimizers, self.trainer.optimizer_frequencies, batch_idx)
---> 88 outputs = self.optimizer_loop.run(split_batch, optimizers, batch_idx)
89 else:
90 outputs = self.manual_loop.run(split_batch, batch_idx)
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/base.py in run(self, *args, **kwargs)
143 try:
144 self.on_advance_start(*args, **kwargs)
--> 145 self.advance(*args, **kwargs)
146 self.on_advance_end()
147 self.restarting = False
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in advance(self, batch, *args, **kwargs)
217 self._batch_idx,
218 self._optimizers[self.optim_progress.optimizer_position],
--> 219 self.optimizer_idx,
220 )
221 if result.loss is not None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in _run_optimization(self, split_batch, batch_idx, optimizer, opt_idx)
264 # gradient update with accumulated gradients
265 else:
--> 266 self._optimizer_step(optimizer, opt_idx, batch_idx, closure)
267
268 result = closure.consume_result()
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in _optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
384 on_tpu=(self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE),
385 using_native_amp=(self.trainer.amp_backend is not None and self.trainer.amp_backend == AMPType.NATIVE),
--> 386 using_lbfgs=is_lbfgs,
387 )
388
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1650
1651 """
-> 1652 optimizer.step(closure=optimizer_closure)
1653
1654 def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, **kwargs)
162 assert trainer is not None
163 with trainer.profiler.profile(profiler_action):
--> 164 trainer.accelerator.optimizer_step(self._optimizer, self._optimizer_idx, closure, **kwargs)
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in optimizer_step(self, optimizer, opt_idx, closure, model, **kwargs)
337 """
338 model = model or self.lightning_module
--> 339 self.precision_plugin.optimizer_step(model, optimizer, opt_idx, closure, **kwargs)
340
341 def optimizer_zero_grad(self, current_epoch: int, batch_idx: int, optimizer: Optimizer, opt_idx: int) -> None:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in optimizer_step(self, model, optimizer, optimizer_idx, closure, **kwargs)
161 if isinstance(model, pl.LightningModule):
162 closure = partial(self._wrap_closure, model, optimizer, optimizer_idx, closure)
--> 163 optimizer.step(closure=closure, **kwargs)
164
165 def _track_grad_norm(self, trainer: "pl.Trainer") -> None:
~/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py in wrapper(*args, **kwargs)
63 instance._step_count += 1
64 wrapped = func.__get__(instance, cls)
---> 65 return wrapped(*args, **kwargs)
66
67 # Note that the returned function here is no longer a bound method,
~/.local/lib/python3.6/site-packages/torch/optim/optimizer.py in wrapper(*args, **kwargs)
86 profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__)
87 with torch.autograd.profiler.record_function(profile_name):
---> 88 return func(*args, **kwargs)
89 return wrapper
90
~/.local/lib/python3.6/site-packages/transformers/optimization.py in step(self, closure)
330 loss = None
331 if closure is not None:
--> 332 loss = closure()
333
334 for group in self.param_groups:
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in _wrap_closure(self, model, optimizer, optimizer_idx, closure)
146 consistent with the ``PrecisionPlugin`` subclasses that cannot pass ``optimizer.step(closure)`` directly.
147 """
--> 148 closure_result = closure()
149 self._after_closure(model, optimizer, optimizer_idx)
150 return closure_result
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in __call__(self, *args, **kwargs)
158
159 def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 160 self._result = self.closure(*args, **kwargs)
161 return self._result.loss
162
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in closure(self, *args, **kwargs)
153 if self._backward_fn is not None and step_output.closure_loss is not None:
154 with self._profiler.profile("backward"):
--> 155 self._backward_fn(step_output.closure_loss)
156
157 return step_output
~/.local/lib/python3.6/site-packages/pytorch_lightning/loops/optimization/optimizer_loop.py in backward_fn(loss)
325
326 def backward_fn(loss: Tensor) -> None:
--> 327 self.trainer.accelerator.backward(loss, optimizer, opt_idx)
328
329 # check if model weights are nan
~/.local/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, *args, **kwargs)
312 closure_loss = self.precision_plugin.pre_backward(self.lightning_module, closure_loss)
313
--> 314 self.precision_plugin.backward(self.lightning_module, closure_loss, *args, **kwargs)
315
316 closure_loss = self.precision_plugin.post_backward(self.lightning_module, closure_loss)
~/.local/lib/python3.6/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in backward(self, model, closure_loss, optimizer, *args, **kwargs)
89 # do backward pass
90 if model is not None and isinstance(model, pl.LightningModule):
---> 91 model.backward(closure_loss, optimizer, *args, **kwargs)
92 else:
93 self._run_backward(closure_loss, *args, **kwargs)
~/.local/lib/python3.6/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1432 loss.backward()
1433 """
-> 1434 loss.backward(*args, **kwargs)
1435
1436 def toggle_optimizer(self, optimizer: Union[Optimizer, LightningOptimizer], optimizer_idx: int) -> None:
~/.local/lib/python3.6/site-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
305 create_graph=create_graph,
306 inputs=inputs)
--> 307 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
308
309 def register_hook(self, hook):
~/.local/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
154 Variable._execution_engine.run_backward(
155 tensors, grad_tensors_, retain_graph, create_graph, inputs,
--> 156 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
157
158
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
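A generic debugging sketch (the data_module access and the batch keys below are assumptions, not taken from the code above): with a BCELoss head, a device-side assert is commonly a target outside [0, 1] or a shape mismatch, and replaying a single batch on the CPU usually turns the opaque CUDA assert into a readable Python error.
import torch

# Inspect one batch on the CPU; BCELoss targets must lie in [0, 1].
batch = next(iter(data_module.train_dataloader()))
labels = batch["labels"].float()  # the "labels" key is an assumption
print(labels.min().item(), labels.max().item())

# Replaying the step on the CPU typically surfaces the real error message
# instead of "device-side assert triggered".
model_cpu = model.cpu()
with torch.no_grad():
    model_cpu.training_step(batch, 0)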
I am trying to transfer-learn a LightningModule. The relevant part of the code is this:
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import MSELoss, Sequential

class DeepFilteringTransferLearning(pl.LightningModule):
    def __init__(self, chk_path=None):
        super().__init__()
        # init class members
        self.prediction = []
        self.label = []
        self.loss = MSELoss()
        # init pretrained model (DeepFiltering is my own LightningModule)
        self.chk_path = chk_path
        model = DeepFiltering.load_from_checkpoint(chk_path)
        backbone = model.sequential
        layers = list(backbone.children())[:-1]
        self.groundModel = Sequential(*layers)
        # use the pretrained model the same way to regress Lshall and neq
        self.regressor = nn.Linear(64, 2)

    def forward(self, x):
        self.groundModel.eval()
        with torch.no_grad():
            groundOut = self.groundModel(x)
        yPred = self.regressor(groundOut)
        return yPred
I save my model in a different main file, the relevant part of which is:
#callbacks
callbacks = [
    ModelCheckpoint(
        dirpath="checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl1ConvTransferLearned",
        save_top_k=5,
        monitor="val_loss",
    ),
]
#trainer
trainer = pl.Trainer(gpus=[1,2],strategy="dp",max_epochs=150,logger=wandb_logger,callbacks=callbacks,precision=32,deterministic=True)
trainer.fit(model,train_dataloaders=trainDl,val_dataloaders=valDl)
Afterwards, I try to load the model from a checkpoint like this:
chk_patH = "path/to/transfer_learned/model"
standardizedL2RegularizedL1 = DeepFilteringTransferLearning("path/to/model/trying/to/use/for/transfer_learning").load_from_checkpoint(chk_patH)
I got the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _check_seekable(f)
307 try:
--> 308 f.seek(f.tell())
309 return True
AttributeError: 'NoneType' object has no attribute 'seek'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-6-13f5fd0c7b85> in <module>
1 chk_patH = "checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl1/epoch=4-step=349.ckpt"
----> 2 standardizedL2RegularizedL1 = DeepFilteringTransferLearning("checkpoints/maxPooling16StandardizedL2RegularizedReproduceableSeeded42Ampl2/epoch=145-step=10219.ckpt").load_from_checkpoint(chk_patH)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in load_from_checkpoint(cls, checkpoint_path, map_location, hparams_file, strict, **kwargs)
154 checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].update(kwargs)
155
--> 156 model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
157 return model
158
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in _load_model_state(cls, checkpoint, strict, **cls_kwargs_new)
196 _cls_kwargs = {k: v for k, v in _cls_kwargs.items() if k in cls_init_args_name}
197
--> 198 model = cls(**_cls_kwargs)
199
200 # give model a chance to load something
~/whistlerProject/gitHub/whistler/mathe/gwInspired/deepFilteringTransferLearning.py in __init__(self, chk_path)
34 #init pretrained model
35 self.chk_path = chk_path
---> 36 model = DeepFiltering.load_from_checkpoint(chk_path)
37 backbone = model.sequential
38 layers = list(backbone.children())[:-1]
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/core/saving.py in load_from_checkpoint(cls, checkpoint_path, map_location, hparams_file, strict, **kwargs)
132 checkpoint = pl_load(checkpoint_path, map_location=map_location)
133 else:
--> 134 checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
135
136 if hparams_file is not None:
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/pytorch_lightning/utilities/cloud_io.py in load(path_or_url, map_location)
31 if not isinstance(path_or_url, (str, Path)):
32 # any sort of BytesIO or similiar
---> 33 return torch.load(path_or_url, map_location=map_location)
34 if str(path_or_url).startswith("http"):
35 return torch.hub.load_state_dict_from_url(str(path_or_url), map_location=map_location)
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
579 pickle_load_args['encoding'] = 'utf-8'
580
--> 581 with _open_file_like(f, 'rb') as opened_file:
582 if _is_zipfile(opened_file):
583 # The zipfile reader is going to advance the current file position.
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _open_file_like(name_or_buffer, mode)
233 return _open_buffer_writer(name_or_buffer)
234 elif 'r' in mode:
--> 235 return _open_buffer_reader(name_or_buffer)
236 else:
237 raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in __init__(self, buffer)
218 def __init__(self, buffer):
219 super(_open_buffer_reader, self).__init__(buffer)
--> 220 _check_seekable(buffer)
221
222
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in _check_seekable(f)
309 return True
310 except (io.UnsupportedOperation, AttributeError) as e:
--> 311 raise_err_msg(["seek", "tell"], e)
312 return False
313
~/anaconda3/envs/skimageTrial/lib/python3.6/site-packages/torch/serialization.py in raise_err_msg(patterns, e)
302 + " Please pre-load the data into a buffer like io.BytesIO and"
303 + " try to load from it instead.")
--> 304 raise type(e)(msg)
305 raise e
306
AttributeError: 'NoneType' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead.
which I can't resolve.
I tried to do this according to the available tutorials on the official PyTorch Lightning page here, but I can't figure out what I am missing.
Could somebody point me in the right direction?
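From the traceback, one plausible fix (a sketch, not verified): load_from_checkpoint is a classmethod that re-instantiates the class from the saved hyperparameters, so calling it on a freshly constructed instance discards that instance and runs __init__ again with the default chk_path=None, which is exactly the None that torch.load then fails to seek on. Passing chk_path as a keyword (or calling self.save_hyperparameters() in __init__ so it is stored in the checkpoint) avoids that:
pretrained_path = "path/to/model/trying/to/use/for/transfer_learning"
standardizedL2RegularizedL1 = DeepFilteringTransferLearning.load_from_checkpoint(
    chk_patH,
    chk_path=pretrained_path,  # forwarded to __init__ instead of defaulting to None
)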
I've recently watched a YouTube video (DataSchool) where the guy used only 3 columns from the Titanic dataset and made a pipeline. I wanted to add more columns to get better accuracy, so I added Age and Fare.
I think it's probably because of the values of Age and Fare that I'm getting this error when I perform cross_val_score:
from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

columns_trans = make_column_transformer(
    (OneHotEncoder(), ['Sex', 'Embarked']),
    remainder='passthrough')
logreg = LogisticRegression(solver='lbfgs')
pipe = make_pipeline(columns_trans, logreg)
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py:552: FitFailedWarning: Estimator fit failed. The score on this train-test partition for these parameters will be set to nan.
If I remove Age and Fare, everything works fine. I was wondering if the Column Transformer or the make_pipeline had a problem with values like that.
I also tried scaling the values of Fare and Age; then it gave a cross_val_score, but it failed in pipe.predict() with this error:
ValueError: Input contains NaN, infinity or a value too large for dtype('float64')
Traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/tmp/ipykernel_119/4279568460.py in <module>
----> 1 cross_val_score(pipe, X, y, cv=5, scoring='accuracy', error_score="raise").mean()
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
70 FutureWarning)
71 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 72 return f(**kwargs)
73 return inner_f
74
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
404 fit_params=fit_params,
405 pre_dispatch=pre_dispatch,
--> 406 error_score=error_score)
407 return cv_results['test_score']
408
/opt/conda/lib/python3.7/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
70 FutureWarning)
71 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 72 return f(**kwargs)
73 return inner_f
74
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
246 return_times=True, return_estimator=return_estimator,
247 error_score=error_score)
--> 248 for train, test in cv.split(X, y, groups))
249
250 zipped_scores = list(zip(*scores))
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1039 # remaining jobs.
1040 self._iterating = False
-> 1041 if self.dispatch_one_batch(iterator):
1042 self._iterating = self._original_iterator is not None
1043
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in <listcomp>(.0)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/sklearn/model_selection/_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, error_score)
529 estimator.fit(X_train, **fit_params)
530 else:
--> 531 estimator.fit(X_train, y_train, **fit_params)
532
533 except Exception as e:
/opt/conda/lib/python3.7/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
333 if self._final_estimator != 'passthrough':
334 fit_params_last_step = fit_params_steps[self.steps[-1][0]]
--> 335 self._final_estimator.fit(Xt, y, **fit_params_last_step)
336
337 return self
/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py in fit(self, X, y, sample_weight)
1415 penalty=penalty, max_squared_sum=max_squared_sum,
1416 sample_weight=sample_weight)
-> 1417 for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
1418
1419 fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1039 # remaining jobs.
1040 self._iterating = False
-> 1041 if self.dispatch_one_batch(iterator):
1042 self._iterating = self._original_iterator is not None
1043
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
/opt/conda/lib/python3.7/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in __call__(self)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/joblib/parallel.py in <listcomp>(.0)
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
262 return [func(*args, **kwargs)
--> 263 for func, args, kwargs in self.items]
264
265 def __reduce__(self):
/opt/conda/lib/python3.7/site-packages/sklearn/linear_model/_logistic.py in _logistic_regression_path(X, y, pos_class, Cs, fit_intercept, max_iter, tol, verbose, solver, coef, class_weight, dual, penalty, intercept_scaling, multi_class, random_state, check_input, max_squared_sum, sample_weight, l1_ratio)
762 n_iter_i = _check_optimize_result(
763 solver, opt_res, max_iter,
--> 764 extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
765 w0, loss = opt_res.x, opt_res.fun
766 elif solver == 'newton-cg':
/opt/conda/lib/python3.7/site-packages/sklearn/utils/optimize.py in _check_optimize_result(solver, result, max_iter, extra_warning_msg)
241 " https://scikit-learn.org/stable/modules/"
242 "preprocessing.html"
--> 243 ).format(solver, result.status, result.message.decode("latin1"))
244 if extra_warning_msg is not None:
245 warning_msg += "\n" + extra_warning_msg
AttributeError: 'str' object has no attribute 'decode'
I solved this error by changing solver=lbfgs to solver=liblinear in LogisticRegression()
logreg = LogisticRegression(solver='lbfgs')
to
logreg = LogisticRegression(solver='liblinear')
And for the following error:
ValueError: Input contains NaN, infinity or a value too large for dtype('float64')
It's best to check if your test data contains any null values or strings.
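A sketch of how the missing values could be handled inside the pipeline itself (column names as in the question; the imputation strategies are assumptions), so that every CV fold and pipe.predict() see clean input:
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

# Impute categoricals before one-hot encoding, and fill numeric gaps with the
# median, so LogisticRegression never sees NaN.
columns_trans = make_column_transformer(
    (make_pipeline(SimpleImputer(strategy='most_frequent'), OneHotEncoder()),
     ['Sex', 'Embarked']),
    (SimpleImputer(strategy='median'), ['Age', 'Fare']),
)
pipe = make_pipeline(columns_trans, LogisticRegression(solver='liblinear'))
cross_val_score(pipe, X, y, cv=5, scoring='accuracy').mean()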
I'm just starting out using Neo4j and I'd like to use 2.0 (I have 2.0.1 community installed). I see that neo4django was only tested against neo4j 1.8.2-1.9.4, but have people gotten it working with 2.x? I installed the gremlin plugin but can't create or query through neo4django.
create:
In [8]: NeoProfile.objects.create(profile_id=1234)
[INFO] requests.packages.urllib3.connectionpool#214: Resetting dropped connection: localhost
---------------------------------------------------------------------------
StatusException Traceback (most recent call last)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/core/management/commands/shell.pyc in <module>()
----> 1 NeoProfile.objects.create(profile_id=1234)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/manager.pyc in create(self, **kwargs)
41
42 def create(self, **kwargs):
---> 43 return self.get_query_set().create(**kwargs)
44
45 def filter(self, *args, **kwargs):
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in create(self, **kwargs)
1295 if 'id' in kwargs or 'pk' in kwargs:
1296 raise FieldError("Neo4j doesn't allow node ids to be assigned.")
-> 1297 return super(NodeQuerySet, self).create(**kwargs)
1298
1299 #TODO would be awesome if this were transactional
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in create(self, **kwargs)
375 obj = self.model(**kwargs)
376 self._for_write = True
--> 377 obj.save(force_insert=True, using=self.db)
378 return obj
379
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in save(self, using, **kwargs)
315
316 def save(self, using=DEFAULT_DB_ALIAS, **kwargs):
--> 317 return super(NodeModel, self).save(using=using, **kwargs)
318
319 #alters_data
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/base.pyc in save(self, force_insert, force_update, using)
461 if force_insert and force_update:
462 raise ValueError("Cannot force both insert and updating in model saving.")
--> 463 self.save_base(using=using, force_insert=force_insert, force_update=force_update)
464
465 save.alters_data = True
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in save_base(self, raw, cls, origin, force_insert, force_update, using, *args, **kwargs)
331
332 is_new = self.id is None
--> 333 self._save_neo4j_node(using)
334 self._save_properties(self, self.__node, is_new)
335 self._save_neo4j_relationships(self, self.__node)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _save_neo4j_node(self, using)
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in trans_method(func, *args, **kw)
95 #TODO this is where generalized transaction support will go,
96 #when it's ready in neo4jrestclient
---> 97 ret = func(*args, **kw)
98 #tx.commit()
99 return ret
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _save_neo4j_node(self, using)
359 self.__node = conn.gremlin_tx(script, types=type_hier_props,
360 indexName=self.index_name(),
--> 361 typesToIndex=type_names_to_index)
362 return self.__node
363
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in gremlin_tx(self, script, **params)
177 will be wrapped in a transaction.
178 """
--> 179 return self.gremlin(script, tx=True, **params)
180
181 def cypher(self, query, **params):
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in gremlin(self, script, tx, raw, **params)
166 try:
167 return send_script(include_unloaded_libraries(lib_script),
--> 168 params)
169 except LibraryCouldNotLoad:
170 if i == 0:
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/neo4jclient.pyc in send_script(s, params)
151 if raw:
152 execute_kwargs['returns'] = RETURNS_RAW
--> 153 script_rv = ext.execute_script(s, params=params, **execute_kwargs)
154 if isinstance(script_rv, basestring):
155 if LIBRARY_ERROR_REGEX.match(script_rv):
/Users/atomos/workspace/Project-Vitamin/src/neo4j-rest-client/neo4jrestclient/client.py in __call__(self, *args, **kwargs)
2313 except (ValueError, AttributeError, KeyError, TypeError):
2314 pass
-> 2315 raise StatusException(response.status_code, msg)
2316
2317 def __repr__(self):
StatusException: Code [400]: Bad Request. Bad request syntax or unsupported method.
Invalid data sent: javax.script.ScriptException: groovy.lang.MissingMethodException: No signature of method: groovy.lang.MissingMethodException.setMaxBufferSize() is applicable for argument types: () values: []
query:
In [9]: NeoProfile.objects.filter(profile_id=1234)
Out[9]: ---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/core/management/commands/shell.pyc in <module>()
----> 1 NeoProfile.objects.filter(profile_id=1234)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/displayhook.pyc in __call__(self, result)
236 self.start_displayhook()
237 self.write_output_prompt()
--> 238 format_dict = self.compute_format_data(result)
239 self.write_format_data(format_dict)
240 self.update_user_ns(result)
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/displayhook.pyc in compute_format_data(self, result)
148 MIME type representation of the object.
149 """
--> 150 return self.shell.display_formatter.format(result)
151
152 def write_format_data(self, format_dict):
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/formatters.pyc in format(self, obj, include, exclude)
124 continue
125 try:
--> 126 data = formatter(obj)
127 except:
128 # FIXME: log the exception
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/core/formatters.pyc in __call__(self, obj)
445 type_pprinters=self.type_printers,
446 deferred_pprinters=self.deferred_printers)
--> 447 printer.pretty(obj)
448 printer.flush()
449 return stream.getvalue()
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/lib/pretty.pyc in pretty(self, obj)
358 if callable(meth):
359 return meth(obj, self, cycle)
--> 360 return _default_pprint(obj, self, cycle)
361 finally:
362 self.end_group()
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/IPython/lib/pretty.pyc in _default_pprint(obj, p, cycle)
478 if getattr(klass, '__repr__', None) not in _baseclass_reprs:
479 # A user-provided repr.
--> 480 p.text(repr(obj))
481 return
482 p.begin_group(1, '<')
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in __repr__(self)
70
71 def __repr__(self):
---> 72 data = list(self[:REPR_OUTPUT_SIZE + 1])
73 if len(data) > REPR_OUTPUT_SIZE:
74 data[-1] = "...(remaining elements truncated)..."
/Users/atomos/workspace/Project-Vitamin/lib/python2.7/site-packages/django/db/models/query.pyc in __len__(self)
85 self._result_cache = list(self.iterator())
86 elif self._iter:
---> 87 self._result_cache.extend(self._iter)
88 if self._prefetch_related_lookups and not self._prefetch_done:
89 self._prefetch_related_objects()
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in iterator(self)
1274 using = self.db
1275 if not self.query.can_filter():
-> 1276 for model in self.query.execute(using):
1277 yield model
1278 else:
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in execute(self, using)
1161 conn = connections[using]
1162
-> 1163 groovy, params = self.as_groovy(using)
1164
1165 raw_result_set = conn.gremlin_tx(groovy, **params) if groovy is not None else []
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/query.pyc in as_groovy(self, using)
925 # add the typeNodeId param, either for type verification or initial
926 # type tree traversal
--> 927 cypher_params['typeNodeId'] = self.model._type_node(using).id
928
929 type_restriction_expr = """
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in _type_node(cls, using)
411 return cls.__type_node_memoized(using)
412 else:
--> 413 return cls.__type_node_classmethod(using)
414
415 #classmethod
/Users/atomos/workspace/Project-Vitamin/src/neo4django/neo4django/db/models/base.pyc in __type_node(cls, using)
394 script_rv = conn.gremlin_tx(script, types=type_hier_props)
395 except Exception, e:
--> 396 raise RuntimeError(error_message, e)
397 if not hasattr(script_rv, 'properties'):
398 raise RuntimeError(error_message + '\n\n%s' % script_rv)
RuntimeError: ('The type node for class NeoProfile could not be created in the database.', StatusException())
My model is incredibly complex:
class NeoProfile(neomodels.NodeModel):
    profile_id = neomodels.IntegerProperty(indexed=True)