Permission error while running GET on Docker

I'm running an MVP on my computer, but every time I try to use an "export" feature, I get a permission error.
It seems to be a problem with my system, since no one else is facing it.
This is the error I see in the Docker logs:
172.18.0.1 - - [21/Jul/2022 16:28:20] "GET /service/export/all HTTP/1.1" 500 -
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2091, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2076, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/usr/local/lib/python3.7/site-packages/flask_cors/extension.py", line 165, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2073, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1518, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/usr/local/lib/python3.7/site-packages/flask_cors/extension.py", line 165, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1516, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1502, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_restful/__init__.py", line 467, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask/views.py", line 84, in view
return current_app.ensure_sync(self.dispatch_request)(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_restful/__init__.py", line 582, in dispatch_request
resp = meth(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flasgger/utils.py", line 273, in wrapper
return function(*args, **kwargs)
File "/home/api/routes/export/exports.py", line 36, in get
os.mkdir(FOLDER_NAME)
PermissionError: [Errno 1] Operation not permitted: './exports'
Does anyone know how to solve this?
Thanks
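The traceback shows os.mkdir failing on './exports', i.e. the process inside the container can't create a directory under its working directory (/home/api) — typically because that directory is owned by root (or is a read-only mount) while the app runs as a non-root user. A minimal sketch of a workaround, reusing the FOLDER_NAME constant from exports.py (the EXPORT_DIR variable is my own addition): create the export folder somewhere the container user can definitely write.

import os
import tempfile

# EXPORT_DIR is a hypothetical override; fall back to a writable temp dir.
FOLDER_NAME = os.environ.get("EXPORT_DIR",
                             os.path.join(tempfile.gettempdir(), "exports"))

# makedirs with exist_ok=True also avoids failing once the folder exists.
os.makedirs(FOLDER_NAME, exist_ok=True)

Alternatively, fixing ownership in the image (e.g. a RUN mkdir -p /home/api/exports && chown <appuser> /home/api/exports step in the Dockerfile, where <appuser> stands for whatever USER the image runs as) would let the original os.mkdir('./exports') succeed.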

Related

Using the Stable Diffusion sampler results in a dimension error

I'm trying to use Stable Diffusion's sampler:
covered_images=diffusion_model_model.get_first_stage_encoding(diffusion_model_model.encode_first_stage(covered_images))
print(covered_images.shape)
destenation_images=diffusion_model_model.encode_first_stage(destenation_images)
The shape of covered_images (from the print, i.e. in latent space) is:
torch.Size([1, 4, 64, 64])
The size of the original picture is:
torch.Size([1, 3, 512, 512])
The error I get is:
Traceback (most recent call last):
File "/home/user/stable-diffusion/VAE/train.py", line 71, in <module>
latenet_covered_images=sampler.sample(S=50,batch_size=BATCH_SIZE,shape=(4,64,64),conditioning=covered_images)
File "/home/user/.local/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/user/stable-diffusion/ldm/models/diffusion/ddim.py", line 96, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "/home/user/.local/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/user/stable-diffusion/ldm/models/diffusion/ddim.py", line 149, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "/home/user/.local/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/home/user/stable-diffusion/ldm/models/diffusion/ddim.py", line 172, in p_sample_ddim
e_t = self.model.apply_model(x, t, c)
File "/home/user/stable-diffusion/ldm/models/diffusion/ddpm.py", line 987, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/models/diffusion/ddpm.py", line 1410, in forward
out = self.diffusion_model(x, t, context=cc)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/modules/diffusionmodules/openaimodel.py", line 732, in forward
h = module(h, emb, context)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/modules/diffusionmodules/openaimodel.py", line 85, in forward
x = layer(x, context)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/modules/attention.py", line 258, in forward
x = block(x, context=context)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/modules/attention.py", line 209, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "/home/user/stable-diffusion/ldm/modules/diffusionmodules/util.py", line 114, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "/home/user/stable-diffusion/ldm/modules/diffusionmodules/util.py", line 127, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "/home/user/stable-diffusion/ldm/modules/attention.py", line 213, in _forward
x = self.attn2(self.norm2(x), context=context) + x
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/stable-diffusion/ldm/modules/attention.py", line 175, in forward
k = self.to_k(context)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (256x64 and 768x320)
I tried playing with the sampler's shape argument, but that didn't change much.
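The shapes in the error (256x64 vs 768x320) point at the cross-attention to_k layer: it expects text conditioning with an embedding size of 768 (the CLIP encoder output), but it receives the 4x64x64 image latent, whose last dimension is 64, because conditioning=covered_images passes a latent where the model expects a prompt embedding. A minimal sketch, assuming the standard CompVis stable-diffusion API, of passing proper text conditioning instead ("a photo" is a placeholder prompt):

# get_learned_conditioning runs the CLIP text encoder over the prompts;
# BATCH_SIZE is assumed to match the batch size used in train.py.
c = diffusion_model_model.get_learned_conditioning(BATCH_SIZE * ["a photo"])
print(c.shape)  # expected: torch.Size([BATCH_SIZE, 77, 768])

samples, intermediates = sampler.sample(S=50, batch_size=BATCH_SIZE,
                                        shape=(4, 64, 64), conditioning=c)

If the goal is to condition on an image rather than text, the model would need an image-conditioning mechanism (e.g. concatenation in the input channels) rather than the text cross-attention path.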

How to upgrade Dockerized Odoo CE to EE

I'm trying to upgrade my local Odoo CE 14.0 to Enterprise. I followed this video tutorial https://www.youtube.com/watch?v=-eCHJAq1QdY and the official steps https://www.odoo.com/documentation/14.0/administration/maintain/enterprise.html
But when I try to install the web_enterprise module, I get this error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/odoo/addons/base/models/ir_http.py", line 237, in _dispatch
result = request.dispatch()
File "/usr/lib/python3/dist-packages/odoo/http.py", line 683, in dispatch
result = self._call_function(**self.params)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 359, in _call_function
return checked_call(self.db, *args, **kwargs)
File "/usr/lib/python3/dist-packages/odoo/service/model.py", line 94, in wrapper
return f(dbname, *args, **kwargs)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 347, in checked_call
result = self.endpoint(*a, **kw)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 912, in __call__
return self.method(*args, **kw)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 531, in response_wrap
response = f(*args, **kw)
File "/usr/lib/python3/dist-packages/odoo/addons/web/controllers/main.py", line 1398, in call_button
action = self._call_kw(model, method, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/addons/web/controllers/main.py", line 1386, in _call_kw
return call_kw(request.env[model], method, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/api.py", line 399, in call_kw
result = _call_kw_multi(method, model, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/api.py", line 386, in _call_kw_multi
result = method(recs, *args, **kwargs)
File "<decorator-gen-71>", line 2, in button_immediate_install
File "/usr/lib/python3/dist-packages/odoo/addons/base/models/ir_module.py", line 74, in check_and_log
return method(self, *args, **kwargs)
File "/usr/lib/python3/dist-packages/odoo/addons/base/models/ir_module.py", line 475, in button_immediate_install
return self._button_immediate_function(type(self).button_install)
File "/usr/lib/python3/dist-packages/odoo/addons/base/models/ir_module.py", line 593, in _button_immediate_function
modules.registry.Registry.new(self._cr.dbname, update_module=True)
File "/usr/lib/python3/dist-packages/odoo/modules/registry.py", line 89, in new
odoo.modules.load_modules(registry._db, force_demo, status, update_module)
File "/usr/lib/python3/dist-packages/odoo/modules/loading.py", line 461, in load_modules
loaded_modules, update_module, models_to_check)
File "/usr/lib/python3/dist-packages/odoo/modules/loading.py", line 349, in load_marked_modules
perform_checks=perform_checks, models_to_check=models_to_check
File "/usr/lib/python3/dist-packages/odoo/modules/loading.py", line 198, in load_module_graph
registry.setup_models(cr)
File "/usr/lib/python3/dist-packages/odoo/modules/registry.py", line 276, in setup_models
model._setup_fields()
File "/usr/lib/python3/dist-packages/odoo/models.py", line 2845, in _setup_fields
field.setup_full(self)
File "/usr/lib/python3/dist-packages/odoo/fields.py", line 401, in setup_full
self._setup_related_full(model)
File "/usr/lib/python3/dist-packages/odoo/fields.py", line 458, in _setup_related_full
field = model.pool[model_name]._fields[name]
Exception
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/odoo/http.py", line 639, in _handle_exception
return super(JsonRequest, self)._handle_exception(exception)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 315, in _handle_exception
raise exception.with_traceback(None) from new_cause
KeyError: 'avatar_128'
odoo.conf:
addons_path = /usr/lib/python3/dist-packages/odoo/addons,/mnt/extra-addons/enterprise,/mnt/extra-addons/custom
Could anybody help me, please?
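For what it's worth, KeyError: 'avatar_128' during field setup usually indicates a version mismatch: avatar_128 is a field that appears in Odoo 15, so an enterprise addons checkout on a newer branch than the 14.0 community core would trigger exactly this failure while web_enterprise's related fields are being set up. Two hedged checks, assuming the paths from the odoo.conf above and with <your_db> as a placeholder for the database name:
git -C /mnt/extra-addons/enterprise checkout 14.0
odoo -d <your_db> -u all --stop-after-init
The second command upgrades all installed modules before retrying the web_enterprise install, which also helps when the database schema is older than the code.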

Running an AWS CDK app locally using SAM

I have a CDK app for which I generate a template.yml file using:
cdk synth --no-staging > ./template.yml
and then run sam local:
sam local start-api
but I get the following error:
Traceback (most recent call last):
File "/usr/local/bin/sam", line 8, in <module>
sys.exit(cli())
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/decorators.py", line 73, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/telemetry/metric.py", line 166, in wrapped
raise exception # pylint: disable=raising-bad-type
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/telemetry/metric.py", line 124, in wrapped
return_value = func(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/utils/version_checker.py", line 41, in wrapped
actual_result = func(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/cli/main.py", line 87, in wrapper
return func(*args, **kwargs)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/commands/local/start_api/cli.py", line 94, in cli
do_cli(
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/commands/local/start_api/cli.py", line 192, in do_cli
service = LocalApiService(lambda_invoke_context=invoke_context, port=port, host=host, static_dir=static_dir)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/commands/local/lib/local_api_service.py", line 37, in __init__
self.api_provider = ApiProvider(lambda_invoke_context.stacks, cwd=self.cwd)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/providers/api_provider.py", line 37, in __init__
self.api = self._extract_api()
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/providers/api_provider.py", line 64, in _extract_api
provider.extract_resources(self.stacks, collector, cwd=self.cwd)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/providers/cfn_api_provider.py", line 73, in extract_resources
self._extract_cfn_gateway_v2_route(stack.stack_path, resources, logical_id, resource, collector)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/providers/cfn_api_provider.py", line 315, in _extract_cfn_gateway_v2_route
method, path = self._parse_route_key(route_key)
File "/usr/local/Cellar/aws-sam-cli/1.37.0/libexec/lib/python3.8/site-packages/samcli/lib/providers/cfn_api_provider.py", line 488, in _parse_route_key
[method, path] = route_key.split()
ValueError: not enough values to unpack (expected 2, got 1)
Any ideas what the issue is?
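The last frame gives it away: SAM CLI's _parse_route_key does route_key.split() and expects every AWS::ApiGatewayV2::Route in the synthesized template to have a "METHOD /path" route key. A catch-all key like "$default" is a single token, so the two-value unpacking fails. A minimal sketch reproducing that line (the "$default" value being my assumption about what cdk synth emitted):

# Mimics samcli's _parse_route_key: "METHOD /path" splits into two values,
# a "$default" catch-all route key does not.
for route_key in ["GET /items", "$default"]:
    try:
        method, path = route_key.split()
        print("ok:", method, path)
    except ValueError as err:
        print("fails:", route_key, "->", err)

If that's the case, defining explicit routes (e.g. GET /items) in the CDK app instead of relying on a $default route, or upgrading aws-sam-cli, may get sam local start-api past this point.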

Running papermill I get a FileNotFoundError

I'm running papermill 2.1.0 in a newly created virtual environment on macOS.
When I execute a simple notebook I get a FileNotFoundError referring to an old file that no longer exists.
papermill 59848931.ipynb 59848931-out.ipynb
I get the following:
Input Notebook: 59848931.ipynb
Output Notebook: 59848931-out.ipynb
Executing: 0%| | 0/2 [00:00<?, ?cell/s]Failed to run command:
['/Users/user/Documents/Development/python/virtual_environments/udemy_tensorflow_venv/bin/python3', '-m', 'ipykernel_launcher', '-f', '/var/folders/p2/jh8vcbv51ks2gzvfx3dw1bd000_wjb/T/tmp3cf56dkh.json', '--HistoryManager.hist_file=:memory:']
PATH='/Users/user/Documents/notebooks/venv/bin:/Users/user/google-cloud-sdk/bin:/Users/user/Downloads/google-cloud-sdk/bin:/anaconda3/bin:/anaconda/bin:/Users/user/homebrew/bin:/Users/user/bin:/Library/Frameworks/R.framework/Versions/Current/Resources/:/usr/local/git/current/bin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/usr/local/go/bin:/usr/local/share/dotnet:/opt/X11/bin:~/.dotnet/tools:/Applications/Wireshark.app/Contents/MacOS:/Users/user/Documents/Development'
with kwargs:
{'stdin': -1, 'stdout': None, 'stderr': None, 'cwd': None, 'start_new_session': True}
Executing: 0%| | 0/2 [00:01<?, ?cell/s]
Traceback (most recent call last):
File "/Users/user/Documents/notebooks/venv/bin/papermill", line 10, in <module>
sys.exit(papermill())
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/cli.py", line 235, in papermill
execution_timeout=execution_timeout,
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/execute.py", line 104, in execute_notebook
**engine_kwargs
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/engines.py", line 49, in execute_notebook_with_engine
return self.get_engine(engine_name).execute_notebook(nb, kernel_name, **kwargs)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/engines.py", line 343, in execute_notebook
cls.execute_managed_notebook(nb_man, kernel_name, log_output=log_output, **kwargs)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/engines.py", line 402, in execute_managed_notebook
return PapermillNotebookClient(nb_man, **final_kwargs).execute()
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/papermill/clientwrap.py", line 36, in execute
with self.setup_kernel(**kwargs):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 81, in __enter__
return next(self.gen)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/nbclient/client.py", line 404, in setup_kernel
self.start_new_kernel_client(**kwargs)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/nbclient/util.py", line 37, in wrapped
result = loop.run_until_complete(coro(self, *args, **kwargs))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 467, in run_until_complete
return future.result()
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/nbclient/client.py", line 375, in async_start_new_kernel_client
await ensure_async(self.km.start_kernel(extra_arguments=self.extra_arguments, **kwargs))
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/nbclient/util.py", line 57, in ensure_async
result = await obj
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/jupyter_client/manager.py", line 542, in start_kernel
self.kernel = await self._launch_kernel(kernel_cmd, **kw)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/jupyter_client/manager.py", line 523, in _launch_kernel
res = launch_kernel(kernel_cmd, **kw)
File "/Users/user/Documents/notebooks/venv/lib/python3.6/site-packages/jupyter_client/launcher.py", line 135, in launch_kernel
proc = Popen(cmd, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/subprocess.py", line 709, in __init__
restore_signals, start_new_session)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/subprocess.py", line 1344, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: '/Users/user/Documents/Development/python/virtual_environments/udemy_tensorflow_venv/bin/python3': '/Users/user/Documents/Development/python/virtual_environments/udemy_tensorflow_venv/bin/python3'
I reinstalled Python 3, papermill, and virtualenv: same issue.
I also reinstalled my IPython kernel:
python3 -m pip install ipykernel
python3 -m ipykernel install --user
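The traceback suggests the registered kernelspec, not papermill itself, is the problem: the python3 kernel still points at the interpreter of the deleted udemy_tensorflow_venv. A minimal diagnostic sketch, assuming jupyter_client is installed in the active environment, to see which interpreter each registered kernel would launch:

from jupyter_client.kernelspec import KernelSpecManager

# Print the executable behind each kernelspec; a stale entry pointing into
# udemy_tensorflow_venv would explain the FileNotFoundError.
for name, info in KernelSpecManager().get_all_specs().items():
    print(name, "->", info["spec"]["argv"][0])

Removing the stale spec (jupyter kernelspec remove <name>, with <name> taken from the listing) and re-running python3 -m ipykernel install --user from the new venv should make papermill launch the right interpreter.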

Could not serialize object: Py4JError: An error occurred while calling o281.__getstate__

I'm a beginner in streaming, trying to work on a predictive-maintenance use case with Structured Streaming, but I'm getting an error while predicting on the (CSV) DataStream.
The following function is called; if I remove the prediction part, it doesn't return any error. The model is preloaded and the DataStreamReader is working too:
def process_row(row):
    """Fit and preprocess"""
    list_feat_col_num = [item[0] for item in row.dtypes
                         if item[1].startswith('int') | item[1].startswith('double')]
    vec_assembler = VectorAssembler(inputCols=list_feat_col_num, outputCol="features")
    row_transformed = vec_assembler.transform(row).select('machineID', 'datetime', 'failure', 'features')
    featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures",
                                   handleInvalid="skip",
                                   maxCategories=10).fit(row_transformed)
    print(row_transformed)
    # error comes from the prediction part
    """predict"""
    rf = RandomForestClassificationModel.load("content/model")
    pipeline_rf_pred = Pipeline(stages=[featureIndexer, rf])
    row_transformed = pipeline_rf_pred.fit(row_transformed)
    prediction = model_rf.transform(row_transformed)
    print(prediction)
    pass
Traceback (most recent call last):
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/serializers.py", line 590, in dumps
return cloudpickle.dumps(obj, 2)
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 863, in dumps
cp.dump(obj)
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 260, in dump
return Pickler.dump(self, obj)
File "/usr/lib/python3.6/pickle.py", line 409, in dump
self.save(obj)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 751, in save_tuple
save(element)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 406, in save_function
self.save_function_tuple(obj)
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 549, in save_function_tuple
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 781, in save_list
self._batch_appends(obj)
File "/usr/lib/python3.6/pickle.py", line 808, in _batch_appends
save(tmp[0])
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 400, in save_function
self.save_function_tuple(obj)
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/cloudpickle.py", line 549, in save_function_tuple
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 781, in save_list
self._batch_appends(obj)
File "/usr/lib/python3.6/pickle.py", line 805, in _batch_appends
save(x)
File "/usr/lib/python3.6/pickle.py", line 521, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python3.6/pickle.py", line 634, in save_reduce
save(state)
File "/usr/lib/python3.6/pickle.py", line 476, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.6/pickle.py", line 821, in save_dict
self._batch_setitems(obj.items())
File "/usr/lib/python3.6/pickle.py", line 847, in _batch_setitems
save(v)
File "/usr/lib/python3.6/pickle.py", line 496, in save
rv = reduce(self.proto)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py", line 1257, in __call__
answer, self.gateway_client, self.target_id, self.name)
File "/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/sql/utils.py", line 63, in deco
return f(*a, **kw)
File "/content/spark-2.4.3-bin-hadoop2.7/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py", line 332, in get_return_value
format(target_id, ".", name, value))
py4j.protocol.Py4JError: An error occurred while calling o281.__getstate__. Trace:
py4j.Py4JException: Method __getstate__([]) does not exist
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:318)
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:326)
at py4j.Gateway.invoke(Gateway.java:274)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
---------------------------------------------------------------------------
Py4JError Traceback (most recent call last)
/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/serializers.py in dumps(self, obj)
589 try:
--> 590 return cloudpickle.dumps(obj, 2)
591 except pickle.PickleError:
44 frames
Py4JError: An error occurred while calling o281.__getstate__. Trace:
py4j.Py4JException: Method __getstate__([]) does not exist
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:318)
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:326)
at py4j.Gateway.invoke(Gateway.java:274)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
PicklingError Traceback (most recent call last)
/content/spark-2.4.3-bin-hadoop2.7/python/pyspark/serializers.py in dumps(self, obj)
598 msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
599 cloudpickle.print_exec(sys.stderr)
--> 600 raise pickle.PicklingError(msg)
601
602
PicklingError: Could not serialize object: Py4JError: An error occurred while calling o281.__getstate__. Trace:
py4j.Py4JException: Method __getstate__([]) does not exist
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:318)
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:326)
at py4j.Gateway.invoke(Gateway.java:274)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
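My reading of the pickle trace (not part of the original post): process_row is shipped to the workers via cloudpickle, and its closure drags JVM-backed objects (the loaded model and pipeline handles) along; py4j proxy objects have no __getstate__, so serialization fails. A hedged sketch of the usual workaround, foreachBatch (available since Spark 2.4): the batch function runs on the driver with a plain DataFrame, so the JVM-backed model never needs to be pickled. Here stream_df is a placeholder for the DataStreamReader's output, and the feature preparation is assumed to have happened already:

from pyspark.ml.classification import RandomForestClassificationModel

# Load the model once, on the driver.
rf = RandomForestClassificationModel.load("content/model")

def process_batch(batch_df, batch_id):
    # batch_df is an ordinary (non-streaming) DataFrame; assume it already
    # carries the assembled/indexed features column the model was trained on.
    rf.transform(batch_df).show()

# stream_df stands in for the streaming DataFrame from the question.
query = stream_df.writeStream.foreachBatch(process_batch).start()
query.awaitTermination()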
