I am getting the below error while building using the Ant tool:
installer.izpack.exe:
[exec] Traceback (most recent call last):
[exec] File "C:\PROGRA~1\IzPack/utils/wrappers/izpack2exe/izpack2exe.py", line 126, in <module>
[exec] main()
[exec] File "C:\PROGRA~1\IzPack/utils/wrappers/izpack2exe/izpack2exe.py", line 123, in main
[exec] create_exe(parse_options())
[exec] File "C:\PROGRA~1\IzPack/utils/wrappers/izpack2exe/izpack2exe.py", line 77, in create_exe
[exec] subprocess.call(p7zcmd, shell=use_shell)
[exec] File "C:\Python27\lib\subprocess.py", line 493, in call
[exec] return Popen(*popenargs, **kwargs).wait()
[exec] File "C:\Python27\lib\subprocess.py", line 679, in __init__
[exec] errread, errwrite)
[exec] File "C:\Python27\lib\subprocess.py", line 896, in _execute_child
[exec] startupinfo)
[exec] WindowsError: [Error 193] %1 is not a valid Win32 application
BUILD FAILED
E:\Java Projects\Spark Projects\EastIT - Copy\build\build.xml:873: exec returned: 1
Below is the code at which the error occurs:
<target name="installer.izpack.exe" depends="installer.izpack" description="build release executable izpack installer">
    <exec executable="python" failonerror="true">
        <arg line="${installer.izpack.dir}/utils/wrappers/izpack2exe/izpack2exe.py"/>
        <arg line="--file=${basedir}/installer/EasyIT-installer.jar"/>
        <arg line="--output=${basedir}/installer/EasyIT-installer.exe"/>
        <arg line="--no-upx"/>
    </exec>
</target>
Can anyone please figure out how to resolve this?
I had the same problem running the izpack2exe.py script manually on a Windows 7 x64 machine. I made some adjustments to the python script, which now looks as follows:
import os
import sys
import subprocess
import shutil
import optparse
import shlex

def parse_options():
    parser = optparse.OptionParser()
    parser.add_option("--file", action="append", dest="file",
                      help="The installer JAR file / files")
    parser.add_option("--output", action="store", dest="output",
                      default="setup.exe",
                      help="The executable file")
    parser.add_option("--with-jdk", action="store", dest="with_jre", default="",
                      help="The bundled JRE to run the exe independently of the system resources.")  # chosen JDK/JRE that may come with the package
    parser.add_option("--with-7z", action="store", dest="p7z",
                      default="7za",
                      help="Path to the 7-Zip executable")
    parser.add_option("--with-upx", action="store", dest="upx",
                      default="upx",
                      help="Path to the UPX executable")
    parser.add_option("--no-upx", action="store_true", dest="no_upx",
                      default=False,
                      help="Do not use UPX to further compress the output")
    parser.add_option("--launch-file", action="store", dest="launch",
                      default="",
                      help="File to launch after extract")
    parser.add_option("--launch-args", action="store", dest="launchargs",
                      default="",
                      help="Arguments for file to launch after extract")
    parser.add_option("--name", action="store", dest="name",
                      default="IzPack",
                      help="Name of package for title bar and prompts")
    parser.add_option("--prompt", action="store_true", dest="prompt",
                      default=False,
                      help="Prompt the user before extraction?")
    (options, args) = parser.parse_args()
    if (options.file is None):
        parser.error("no installer file has been given")
    return options

def create_exe(settings):
    if len(settings.file) > 0:
        filename = os.path.basename(settings.file[0])
    else:
        filename = ''
    if len(settings.with_jre) > 0:
        jdk = os.path.basename(settings.with_jre)  # inside the given JDK/JRE there must be a bin\javaw.exe file
        jdk = jdk + "\\bin\\javaw.exe"
        print(jdk)
        settings.file.append(settings.with_jre)  # the JDK/JRE goes into the package
    else:
        jdk = 'javaw'  # assume java is available on the PATH
    if settings.p7z == '7za':
        p7z = os.path.join(os.path.dirname(sys.argv[0]), '7za')
    else:
        p7z = settings.p7z
    # really no need right now
    # use_shell = sys.platform != 'win32'
    if (os.access('installer.7z', os.F_OK)):
        os.remove('installer.7z')
    files = '" "'.join(settings.file)
    p7zcmd = '7za.exe a -mmt -t7z -mx=9 installer.7z "%s"' % files
    zip_proc = subprocess.Popen(shlex.split(p7zcmd))
    zip_proc.communicate()
    config = open('config.txt', 'w')
    config.write(';!#Install#!UTF-8!\n')
    config.write('Title="%s"\n' % settings.name)
    if settings.prompt:
        config.write('BeginPrompt="Install %s?"\n' % settings.name)
    config.write('Progress="yes"\n')
    if settings.launch == '':
        config.write('ExecuteFile="' + jdk + '"\n')  # the program that will run installer.jar
        config.write('ExecuteParameters="-jar \\\"%s\\\"' % filename)
        if settings.launchargs != '':
            config.write(' %s"\n' % settings.launchargs)
        else:
            config.write('"\n')
    else:
        config.write('ExecuteFile="%s"\n' % settings.launch)
        if settings.launchargs != '':
            config.write('ExecuteParameters="%s"\n' % settings.launchargs)
    config.write(';!#InstallEnd#!\n')
    config.close()
    sfx = os.path.join(os.path.dirname(p7z), '7zS.sfx')
    files = [sfx, 'config.txt', 'installer.7z']
    output = open(settings.output, 'wb')
    for f in files:
        in_file = open(f, 'rb')
        shutil.copyfileobj(in_file, output, 2048)
        in_file.close()
    output.close()
    if (not settings.no_upx):
        if settings.upx == 'upx':
            upx = os.path.join(os.path.dirname(sys.argv[0]), 'upx')
        else:
            upx = settings.upx
        upx = 'upx.exe --ultra-brute "%s"' % settings.output
        upx_proc = subprocess.Popen(shlex.split(upx))
        upx_proc.communicate()
    os.remove('config.txt')
    os.remove('installer.7z')

def main():
    create_exe(parse_options())

if __name__ == "__main__":
    main()
I hard-coded 7za.exe in the p7zcmd variable, which causes no problems since it is on my PATH. I also blocked script execution until the subprocesses were done (7-Zip and UPX respectively), which resolved my problems.
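For reference, with the adjusted script in place, the Ant target from the question effectively runs a command along these lines (with ${installer.izpack.dir} and ${basedir} expanded to the actual paths):

python ${installer.izpack.dir}/utils/wrappers/izpack2exe/izpack2exe.py --file=${basedir}/installer/EasyIT-installer.jar --output=${basedir}/installer/EasyIT-installer.exe --no-upx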
Hope I could help you.
Regards
Related
root@f083f367b874:/app# vim test.py
root@f083f367b874:/app# python test.py
Traceback (most recent call last):
File "test.py", line 13, in
run(playwright)
File "test.py", line 3, in run
browser = playwright.chromium.launch(headless=False)
File "/usr/local/lib/python3.7/site-packages/playwright/sync_api/_generated.py", line 9449, in launch
firefoxUserPrefs=mapping.to_impl(firefox_user_prefs),
File "/usr/local/lib/python3.7/site-packages/playwright/_impl/_sync_base.py", line 103, in _sync
return task.result()
File "/usr/local/lib/python3.7/site-packages/playwright/_impl/_browser_type.py", line 90, in launch
raise e
File "/usr/local/lib/python3.7/site-packages/playwright/_impl/_browser_type.py", line 86, in launch
return from_channel(await self._channel.send("launch", params))
File "/usr/local/lib/python3.7/site-packages/playwright/_impl/_connection.py", line 36, in send
return await self.inner_send(method, params, False)
File "/usr/local/lib/python3.7/site-packages/playwright/_impl/_connection.py", line 54, in inner_send
result = next(iter(done)).result()
playwright._impl._api_types.Error: Host system is missing dependencies!
Missing libraries are:
libnss3.so
libnssutil3.so
libsmime3.so
libnspr4.so
libatk-1.0.so.0
libatk-bridge-2.0.so.0
libcups.so.2
libdrm.so.2
libdbus-1.so.3
libxkbcommon.so.0
libXcomposite.so.1
libXdamage.so.1
libXfixes.so.3
libXrandr.so.2
libgbm.so.1
libasound.so.2
libatspi.so.0
libxshmfence.so.1
How can I solve this problem?
If you're using Playwright for Python, run: playwright install-deps
If you're using vanilla JS Playwright, run: npx playwright install-deps
Looks like you're using the Python version.
If you are in a Nix environment, try appending the missing libraries to your library path with a shell expression along these lines:
# { pkgs ? import <nixpkgs> {} }:
with import <nixpkgs> {};

stdenv.mkDerivation {
  name = "kubelt";
  nativeBuildInputs = [ pkg-config ];
  buildInputs = [
    clojure
    clojure-lsp
    babashka
    leiningen
    nodejs-16_x
    jdk
    docker
    google-chrome
    chromedriver
    act
    docker
    rustup
    libuuid
    act
    wayland
    google-chrome-dev
    firefox-bin
  ];

  APPEND_LIBRARY_PATH = "${lib.makeLibraryPath [ libGL libuuid wayland google-chrome-dev firefox-bin ]}";

  shellHook = ''
    LD=$CC
    export LD_LIBRARY_PATH="$APPEND_LIBRARY_PATH:$LD_LIBRARY_PATH"
  '';
}
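Assuming this is saved as shell.nix in the project directory, running nix-shell from there executes the shellHook, which prepends the libraries listed in APPEND_LIBRARY_PATH to LD_LIBRARY_PATH so the browser can find them.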
I've been trying to deploy a pipeline on Google Cloud Dataflow. It's been quite a challenge so far.
I'm facing an import issue: I realised that ParDo functions require requirements.txt to be present, otherwise they report that the required module can't be found. https://beam.apache.org/documentation/sdks/python-pipeline-dependencies/
So I tried fixing the problem by passing in the requirements.txt file, only to be met with a very incomprehensible error message.
import apache_beam as beam
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
from apache_beam.io.gcp.bigtableio import WriteToBigTable
from apache_beam.runners import DataflowRunner
import apache_beam.runners.interactive.interactive_beam as ib
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import GoogleCloudOptions
import google.auth
from google.cloud.bigtable.row import DirectRow
import datetime
# Setting up the Apache Beam pipeline options.
options = pipeline_options.PipelineOptions(flags=[])
# Sets the project to the default project in your current Google Cloud environment.
_, options.view_as(GoogleCloudOptions).project = google.auth.default()
# Sets the Google Cloud Region in which Cloud Dataflow runs.
options.view_as(GoogleCloudOptions).region = 'us-central1'
# IMPORTANT! Adjust the following to choose a Cloud Storage location.
dataflow_gcs_location = 'gs://tunnel-insight-2-0-dev-291100/dataflow'
# Dataflow Staging Location. This location is used to stage the Dataflow Pipeline and SDK binary.
options.view_as(GoogleCloudOptions).staging_location = '%s/staging' % dataflow_gcs_location
# Sets the pipeline mode to streaming, so we can stream the data from PubSub.
options.view_as(pipeline_options.StandardOptions).streaming = True
# Sets the requirements.txt file
options.view_as(pipeline_options.SetupOptions).requirements_file = "requirements.txt"
# Dataflow Temp Location. This location is used to store temporary files or intermediate results before finally outputting to the sink.
options.view_as(GoogleCloudOptions).temp_location = '%s/temp' % dataflow_gcs_location
# The directory to store the output files of the job.
output_gcs_location = '%s/output' % dataflow_gcs_location
ib.options.recording_duration = '1m'
...
...
pipeline_result = DataflowRunner().run_pipeline(p, options=options)
I've tried to pass requirements using "options.view_as(pipeline_options.SetupOptions).requirements_file = "requirements.txt""
I get this error
---------------------------------------------------------------------------
CalledProcessError Traceback (most recent call last)
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/utils/processes.py in check_output(*args, **kwargs)
90 try:
---> 91 out = subprocess.check_output(*args, **kwargs)
92 except OSError:
/opt/conda/lib/python3.7/subprocess.py in check_output(timeout, *popenargs, **kwargs)
410 return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
--> 411 **kwargs).stdout
412
/opt/conda/lib/python3.7/subprocess.py in run(input, capture_output, timeout, check, *popenargs, **kwargs)
511 raise CalledProcessError(retcode, process.args,
--> 512 output=stdout, stderr=stderr)
513 return CompletedProcess(process.args, retcode, stdout, stderr)
CalledProcessError: Command '['/root/apache-beam-custom/bin/python', '-m', 'pip', 'download', '--dest', '/tmp/dataflow-requirements-cache', '-r', 'requirements.txt', '--exists-action', 'i', '--no-binary', ':all:']' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-12-f018e5c84d08> in <module>
----> 1 pipeline_result = DataflowRunner().run_pipeline(p, options=options)
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/runners/dataflow/dataflow_runner.py in run_pipeline(self, pipeline, options)
491 environments.DockerEnvironment.from_container_image(
492 apiclient.get_container_image_from_options(options),
--> 493 artifacts=environments.python_sdk_dependencies(options)))
494
495 # This has to be performed before pipeline proto is constructed to make sure
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/transforms/environments.py in python_sdk_dependencies(options, tmp_dir)
624 options,
625 tmp_dir,
--> 626 skip_prestaged_dependencies=skip_prestaged_dependencies))
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/runners/portability/stager.py in create_job_resources(options, temp_dir, build_setup_args, populate_requirements_cache, skip_prestaged_dependencies)
178 populate_requirements_cache if populate_requirements_cache else
179 Stager._populate_requirements_cache)(
--> 180 setup_options.requirements_file, requirements_cache_path)
181 for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
182 resources.append((pkg, os.path.basename(pkg)))
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/utils/retry.py in wrapper(*args, **kwargs)
234 while True:
235 try:
--> 236 return fun(*args, **kwargs)
237 except Exception as exn: # pylint: disable=broad-except
238 if not retry_filter(exn):
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/runners/portability/stager.py in _populate_requirements_cache(requirements_file, cache_dir)
569 ]
570 _LOGGER.info('Executing command: %s', cmd_args)
--> 571 processes.check_output(cmd_args, stderr=processes.STDOUT)
572
573 #staticmethod
~/apache-beam-custom/packages/beam/sdks/python/apache_beam/utils/processes.py in check_output(*args, **kwargs)
97 "Full traceback: {} \n Pip install failed for package: {} \
98 \n Output from execution of subprocess: {}" \
---> 99 .format(traceback.format_exc(), args[0][6], error.output))
100 else:
101 raise RuntimeError("Full trace: {}, \
RuntimeError: Full traceback: Traceback (most recent call last):
File "/root/apache-beam-custom/packages/beam/sdks/python/apache_beam/utils/processes.py", line 91, in check_output
out = subprocess.check_output(*args, **kwargs)
File "/opt/conda/lib/python3.7/subprocess.py", line 411, in check_output
**kwargs).stdout
File "/opt/conda/lib/python3.7/subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['/root/apache-beam-custom/bin/python', '-m', 'pip', 'download', '--dest', '/tmp/dataflow-requirements-cache', '-r', 'requirements.txt', '--exists-action', 'i', '--no-binary', ':all:']' returned non-zero exit status 1.
Pip install failed for package: -r
Output from execution of subprocess: b'Obtaining file:///root/apache-beam-custom/packages/beam/sdks/python (from -r requirements.txt (line 3))\n Saved /tmp/dataflow-requirements-cache/apache-beam-2.25.0.zip\nCollecting absl-py==0.11.0\n Downloading absl-py-0.11.0.tar.gz (110 kB)\n Saved /tmp/dataflow-requirements-cache/absl-py-0.11.0.tar.gz\nCollecting argon2-cffi==20.1.0\n Downloading argon2-cffi-20.1.0.tar.gz (1.8 MB)\n Installing build dependencies: started\n Installing build dependencies: finished with status \'error\'\n ERROR: Command errored out with exit status 1:\n command: /root/apache-beam-custom/bin/python /root/apache-beam-custom/lib/python3.7/site-packages/pip install --ignore-installed --no-user --prefix /tmp/pip-build-env-3iuiaex9/overlay --no-warn-script-location --no-binary :all: --only-binary :none: -i https://pypi.org/simple -- \'setuptools>=40.6.0\' wheel \'cffi>=1.0\'\n cwd: None\n Complete output (85 lines):\n Collecting setuptools>=40.6.0\n Downloading setuptools-51.1.1.tar.gz (2.1 MB)\n Collecting wheel\n Downloading wheel-0.36.2.tar.gz (65 kB)\n Collecting cffi>=1.0\n Downloading cffi-1.14.4.tar.gz (471 kB)\n Collecting pycparser\n Downloading pycparser-2.20.tar.gz (161 kB)\n Skipping wheel build for setuptools, due to binaries being disabled for it.\n Skipping wheel build for wheel, due to binaries being disabled for it.\n Skipping wheel build for cffi, due to binaries being disabled for it.\n Skipping wheel build for pycparser, due to binaries being disabled for it.\n Installing collected packages: setuptools, wheel, pycparser, cffi\n Running setup.py install for setuptools: started\n Running setup.py install for setuptools: finished with status \'done\'\n Running setup.py install for wheel: started\n Running setup.py install for wheel: finished with status \'done\'\n Running setup.py install for pycparser: started\n Running setup.py install for pycparser: finished with status \'done\'\n Running setup.py install for cffi: started\n Running setup.py install for cffi: finished with status \'error\'\n ERROR: Command errored out with exit status 1:\n command: /root/apache-beam-custom/bin/python -u -c \'import sys, setuptools, tokenize; sys.argv[0] = \'"\'"\'/tmp/pip-install-6zs5jguv/cffi/setup.py\'"\'"\'; __file__=\'"\'"\'/tmp/pip-install-6zs5jguv/cffi/setup.py\'"\'"\';f=getattr(tokenize, \'"\'"\'open\'"\'"\', open)(__file__);code=f.read().replace(\'"\'"\'\\r\\n\'"\'"\', \'"\'"\'\\n\'"\'"\');f.close();exec(compile(code, __file__, \'"\'"\'exec\'"\'"\'))\' install --record /tmp/pip-record-z8o69lka/install-record.txt --single-version-externally-managed --prefix /tmp/pip-build-env-3iuiaex9/overlay --compile --install-headers /root/apache-beam-custom/include/site/python3.7/cffi\n cwd: /tmp/pip-install-6zs5jguv/cffi/\n Complete output (56 lines):\n Package libffi was not found in the pkg-config search path.\n Perhaps you should add the directory containing `libffi.pc\'\n to the PKG_CONFIG_PATH environment variable\n No package \'libffi\' found\n Package libffi was not found in the pkg-config search path.\n Perhaps you should add the directory containing `libffi.pc\'\n to the PKG_CONFIG_PATH environment variable\n No package \'libffi\' found\n Package libffi was not found in the pkg-config search path.\n Perhaps you should add the directory containing `libffi.pc\'\n to the PKG_CONFIG_PATH environment variable\n No package \'libffi\' found\n Package libffi was not found in the pkg-config search path.\n Perhaps you should add the directory containing `libffi.pc\'\n to the 
PKG_CONFIG_PATH environment variable\n No package \'libffi\' found\n Package libffi was not found in the pkg-config search path.\n Perhaps you should add the directory containing `libffi.pc\'\n to the PKG_CONFIG_PATH environment variable\n No package \'libffi\' found\n running install\n running build\n running build_py\n creating build\n creating build/lib.linux-x86_64-3.7\n creating build/lib.linux-x86_64-3.7/cffi\n copying cffi/setuptools_ext.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/pkgconfig.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/verifier.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/vengine_gen.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/backend_ctypes.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/__init__.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/cffi_opcode.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/error.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/api.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/commontypes.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/ffiplatform.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/lock.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/cparser.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/recompiler.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/vengine_cpy.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/model.py -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/_cffi_include.h -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/parse_c_type.h -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/_embedding.h -> build/lib.linux-x86_64-3.7/cffi\n copying cffi/_cffi_errors.h -> build/lib.linux-x86_64-3.7/cffi\n running build_ext\n building \'_cffi_backend\' extension\n creating build/temp.linux-x86_64-3.7\n creating build/temp.linux-x86_64-3.7/c\n gcc -pthread -B /opt/conda/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -DUSE__THREAD -DHAVE_SYNC_SYNCHRONIZE -I/usr/include/ffi -I/usr/include/libffi -I/root/apache-beam-custom/include -I/opt/conda/include/python3.7m -c c/_cffi_backend.c -o build/temp.linux-x86_64-3.7/c/_cffi_backend.o\n c/_cffi_backend.c:15:10: fatal error: ffi.h: No such file or directory\n #include <ffi.h>\n ^~~~~~~\n compilation terminated.\n error: command \'gcc\' failed with exit status 1\n ----------------------------------------\n ERROR: Command errored out with exit status 1: /root/apache-beam-custom/bin/python -u -c \'import sys, setuptools, tokenize; sys.argv[0] = \'"\'"\'/tmp/pip-install-6zs5jguv/cffi/setup.py\'"\'"\'; __file__=\'"\'"\'/tmp/pip-install-6zs5jguv/cffi/setup.py\'"\'"\';f=getattr(tokenize, \'"\'"\'open\'"\'"\', open)(__file__);code=f.read().replace(\'"\'"\'\\r\\n\'"\'"\', \'"\'"\'\\n\'"\'"\');f.close();exec(compile(code, __file__, \'"\'"\'exec\'"\'"\'))\' install --record /tmp/pip-record-z8o69lka/install-record.txt --single-version-externally-managed --prefix /tmp/pip-build-env-3iuiaex9/overlay --compile --install-headers /root/apache-beam-custom/include/site/python3.7/cffi Check the logs for full command output.\n WARNING: You are using pip version 20.1.1; however, version 20.3.3 is available.\n You should consider upgrading via the \'/root/apache-beam-custom/bin/python -m pip install --upgrade pip\' command.\n ----------------------------------------\nERROR: Command errored out with exit status 1: /root/apache-beam-custom/bin/python /root/apache-beam-custom/lib/python3.7/site-packages/pip install --ignore-installed --no-user --prefix 
/tmp/pip-build-env-3iuiaex9/overlay --no-warn-script-location --no-binary :all: --only-binary :none: -i https://pypi.org/simple -- \'setuptools>=40.6.0\' wheel \'cffi>=1.0\' Check the logs for full command output.\nWARNING: You are using pip version 20.1.1; however, version 20.3.3 is available.\nYou should consider upgrading via the \'/root/apache-beam-custom/bin/python -m pip install --upgrade pip\' command.\n'
Did I do something wrong?
-------------- EDIT---------------------------------------
Ok, I've got my pipeline to work, but I'm still having a problem with my requirements.txt file which I believe I'm passing in correctly.
My pipeline code:
import apache_beam as beam
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
from apache_beam.io.gcp.bigtableio import WriteToBigTable
from apache_beam.runners import DataflowRunner
import apache_beam.runners.interactive.interactive_beam as ib
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import GoogleCloudOptions
import google.auth
from google.cloud.bigtable.row import DirectRow
import datetime
# Setting up the Apache Beam pipeline options.
options = pipeline_options.PipelineOptions(flags=[])
# Sets the project to the default project in your current Google Cloud environment.
_, options.view_as(GoogleCloudOptions).project = google.auth.default()
# Sets the Google Cloud Region in which Cloud Dataflow runs.
options.view_as(GoogleCloudOptions).region = 'us-central1'
# IMPORTANT! Adjust the following to choose a Cloud Storage location.
dataflow_gcs_location = ''
# Dataflow Staging Location. This location is used to stage the Dataflow Pipeline and SDK binary.
options.view_as(GoogleCloudOptions).staging_location = '%s/staging' % dataflow_gcs_location
# Sets the pipeline mode to streaming, so we can stream the data from PubSub.
options.view_as(pipeline_options.StandardOptions).streaming = True
# Sets the requirements.txt file
options.view_as(pipeline_options.SetupOptions).requirements_file = "requirements.txt"
# Dataflow Temp Location. This location is used to store temporary files or intermediate results before finally outputting to the sink.
options.view_as(GoogleCloudOptions).temp_location = '%s/temp' % dataflow_gcs_location
# The directory to store the output files of the job.
output_gcs_location = '%s/output' % dataflow_gcs_location
ib.options.recording_duration = '1m'
# The Google Cloud PubSub topic for this example.
topic = ""
subscription = ""
output_topic = ""
# Info
project_id = ""
bigtable_instance = ""
bigtable_table_id = ""
class CreateRowFn(beam.DoFn):
    def process(self, words):
        from google.cloud.bigtable.row import DirectRow
        import datetime
        direct_row = DirectRow(row_key="phone#4c410523#20190501")
        direct_row.set_cell(
            "stats_summary",
            b"os_build",
            b"android",
            datetime.datetime.now())
        return [direct_row]
p = beam.Pipeline(InteractiveRunner(),options=options)
words = p | "read" >> beam.io.ReadFromPubSub(subscription=subscription)
windowed_words = (words | "window" >> beam.WindowInto(beam.window.FixedWindows(10)))
# Writing to BigTable
test = words | beam.ParDo(CreateRowFn()) | WriteToBigTable(
    project_id=project_id,
    instance_id=bigtable_instance,
    table_id=bigtable_table_id)
pipeline_result = DataflowRunner().run_pipeline(p, options=options)
As you can see in "CreateRowFn", I need to import
from google.cloud.bigtable.row import DirectRow
import datetime
Only then does this work.
I've passed in requirements.txt as options.view_as(pipeline_options.SetupOptions).requirements_file = "requirements.txt" and I can see it on the Dataflow console.
If I remove the import statements, I get "in process NameError: name 'DirectRow' is not defined".
Is there any way to overcome this?
I've found the answer in the FAQ. My mistake was not about how to pass in requirements.txt, but about how to handle NameErrors:
https://cloud.google.com/dataflow/docs/resources/faq
How do I handle NameErrors?
If you're getting a NameError when you execute your pipeline using the Dataflow service but not when you execute locally (i.e. using the DirectRunner), your DoFns may be using values in the global namespace that are not available on the Dataflow worker.
By default, global imports, functions, and variables defined in the main session are not saved during the serialization of a Dataflow job. If, for example, your DoFns are defined in the main file and reference imports and functions in the global namespace, you can set the --save_main_session pipeline option to True. This will cause the state of the global namespace to be pickled and loaded on the Dataflow worker.
Notice that if you have objects in your global namespace that cannot be pickled, you will get a pickling error. If the error is regarding a module that should be available in the Python distribution, you can solve this by importing the module locally, where it is used.
For example, instead of:
import re
…
def myfunc():
    # use re module
use:
def myfunc():
    import re
    # use re module
Alternatively, if your DoFns span multiple files, you should use a different approach to packaging your workflow and managing dependencies.
So the conclusion is:
It is OK to use import statements within the functions.
Google Dataflow workers already have these packages installed: https://cloud.google.com/dataflow/docs/concepts/sdk-worker-dependencies.
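If you prefer not to import inside the DoFn, the --save_main_session option mentioned in the FAQ can be set on the same options object used in the question. A minimal sketch, reusing the pipeline_options module already imported above:

# Pickle the main session so that top-level imports (e.g. DirectRow, datetime)
# are also available on the Dataflow workers.
options.view_as(pipeline_options.SetupOptions).save_main_session = True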
If you are running it from Cloud Composer, you need to add the new packages to the PyPI packages section of the Composer environment.
You can also pass --requirements_file path://requirements.txt as a flag in the command while running it.
I prefer to use the --setup_file path://setup.py flag instead. The format of the setup file is as follows:
import setuptools

REQUIRED_PACKAGES = [
    'joblib==0.15.1',
    'numpy==1.18.5',
    'google',
    'google-cloud',
    'google-cloud-storage',
    'cassandra-driver==3.22.0'
]

PACKAGE_NAME = 'my_package'
PACKAGE_VERSION = '0.0.1'

setuptools.setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description='Search Rank project',
    install_requires=REQUIRED_PACKAGES,
    author="Mohd Faisal",
    packages=setuptools.find_packages()
)
Use the format below for the Dataflow script:
from __future__ import absolute_import

import argparse
import logging

import apache_beam as beam
from apache_beam.options.pipeline_options import (GoogleCloudOptions,
                                                  PipelineOptions,
                                                  SetupOptions,
                                                  StandardOptions,
                                                  WorkerOptions)
from datetime import date


class Userprocess(beam.DoFn):

    def process(self, msg):
        yield "OK"


def run(argv=None):
    logging.info("Parsing dataflow flags... ")
    pipeline_options = PipelineOptions()
    pipeline_options.view_as(SetupOptions).save_main_session = True

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--project',
        required=True,
        help=('project id staging or production '))
    parser.add_argument(
        '--temp_location',
        required=True,
        help=('temp location'))
    parser.add_argument(
        '--job_name',
        required=True,
        help=('job name'))
    known_args, pipeline_args = parser.parse_known_args(argv)

    today = date.today()
    logging.info("Processing Date is " + str(today))

    google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
    google_cloud_options.project = known_args.project
    google_cloud_options.job_name = known_args.job_name
    google_cloud_options.temp_location = known_args.temp_location
    # pipeline_options.view_as(StandardOptions).runner = known_args.runner

    with beam.Pipeline(argv=pipeline_args, options=pipeline_options) as p:
        beam.ParDo(Userprocess())


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    logging.info("Starting dataflow daily pipeline ")
    try:
        run()
    except:
        pass
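For example, a script following this format could be submitted with a command along these lines (the script name, project id and bucket are placeholders, not values from the question):

python my_dataflow_script.py \
    --runner DataflowRunner \
    --project my-project-id \
    --job_name daily-pipeline \
    --temp_location gs://my-bucket/temp \
    --setup_file ./setup.py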
Try running the script locally for errors.
I am trying to use https://github.com/atlassian/dc-app-performance-toolkit on Windows 10.
I have installed Taurus from https://gettaurus.org/install/Installation/
Jira is installed on my system and I can access it using http://localhost:2990/jira
However, when I run bzt jira.yml I get the error below.
14:43:35 INFO: Starting shell command: python util/post_run/jmeter_post_check.py
14:43:36 INFO: Starting shell command: python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
14:43:36 WARNING: Errors for python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl:
Traceback (most recent call last):
File "util/jtl_convertor/jtls-to-csv.py", line 8, in <module>
import pandas
File "C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\pandas\__init__.py", line 11, in <module>
__import__(dependency)
File "C:\Program Files\Taurus\pkgs\numpy\__init__.py", line 138, in <module>
from . import _distributor_init
File "C:\Program Files\Taurus\pkgs\numpy\_distributor_init.py", line 26, in <module>
WinDLL(os.path.abspath(filename))
File "C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\ctypes\__init__.py", line 373, in __init__
self._handle = _dlopen(self._name, mode)
OSError: [WinError 193] %1 is not a valid Win32 application
14:43:36 ERROR: Child Process Error: Test runner selenium (PyTestExecutor) has failed with retcode 1
14:43:36 ERROR: PyTestExecutor STDERR:
Traceback (most recent call last):
File "C:\Program Files\Taurus\pkgs\bzt\resources\pytest_runner.py", line 22, in <module>
import pytest
File "C:\Program Files\Taurus\pkgs\pytest\__init__.py", line 7, in <module>
from _pytest.assertion import register_assert_rewrite
File "C:\Program Files\Taurus\pkgs\_pytest\assertion\__init__.py", line 10, in <module>
from _pytest.assertion import rewrite
File "C:\Program Files\Taurus\pkgs\_pytest\assertion\rewrite.py", line 30, in <module>
from _pytest.assertion import util
File "C:\Program Files\Taurus\pkgs\_pytest\assertion\util.py", line 14, in <module>
import _pytest._code
File "C:\Program Files\Taurus\pkgs\_pytest\_code\__init__.py", line 2, in <module>
from .code import Code
File "C:\Program Files\Taurus\pkgs\_pytest\_code\code.py", line 29, in <module>
import pluggy
File "C:\Program Files\Taurus\pkgs\pluggy\__init__.py", line 16, in <module>
from .manager import PluginManager, PluginValidationError
File "C:\Program Files\Taurus\pkgs\pluggy\manager.py", line 11, in <module>
import importlib_metadata
File "C:\Program Files\Taurus\pkgs\importlib_metadata\__init__.py", line 623, in <module>
__version__ = version(__name__)
File "C:\Program Files\Taurus\pkgs\importlib_metadata\__init__.py", line 585, in version
return distribution(distribution_name).version
File "C:\Program Files\Taurus\pkgs\importlib_metadata\__init__.py", line 558, in distribution
return Distribution.from_name(distribution_name)
File "C:\Program Files\Taurus\pkgs\importlib_metadata\__init__.py", line 215, in from_name
raise PackageNotFoundError(name)
importlib_metadata.PackageNotFoundError: No package metadata was found for importlib_metadata
What am I doing wrong?
The error you're getting is about the missing importlib-metadata package. You need to have all the packages listed in requirements.txt installed in order to run these tests; it seems like you skipped the "Installation and set up" chapter.
So make sure to execute pip install -r requirements.txt prior to launching the Taurus test, as Taurus doesn't take care of dependencies on its own.
More information:
Installing Python Package Dependencies
Navigating your First Steps Using Taurus
I had to remove the Python installation at
C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\
which was installed prior to bzt.
To correct this, I uninstalled Taurus and reinstalled it manually.
I installed Python using python-3.8.5-amd64.exe (the 64-bit installer) and not python-3.8.5.exe (the 32-bit one).
My build environment:
OS: OS X El Capitan 10.11.6
SCons version: v2.5.1.rel_2.5.1:3735:9dc6cee5c168[MODIFIED]
The boost framework was copied to ~/Desktop/iotivity-1.2.1/extlibs/boost/ios/framework
Build command:
scons TARGET_OS=ios TARGET_ARCH=arm64
Build errors:
scons: Reading SConscript files ...
NameError: name 'bIn' is not defined:
File /Users/John/Desktop/iotivity-1.2.1/SConstruct, line 28: SConscript('build_common/SConscript')
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 604: return method(*args, **kw) File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 541: return _SConscript(self.fs, *files, **subst_kw)
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 250: exec file in call_stack[-1].globals
File "/Users/John/Desktop/iotivity-1.2.1/build_common/SConscript", line 426: env.SConscript(target_os + '/SConscript')
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 541: return _SConscript(self.fs, *files, **subst_kw)
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 250: exec file in call_stack[-1].globals
File "/Users/John/Desktop/iotivity-1.2.1/build_common/ios/SConscript", line 8: env.SConscript('../darwin/SConscript')
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 541: return _SConscript(self.fs, *files, **subst_kw)
File "/usr/local/Cellar/scons/2.5.1/libexec/scons-local/SCons/Script/SConscript.py", line 250: exec file in call_stack[-1].globals
File "/Users/John/Desktop/iotivity-1.2.1/build_common/darwin/SConscript", line 29: elif bIn:
The code sequence looks buggy to me, in that you can percolate through the if statement chain ending up at the failing line without ever setting bIn. Might be worth filing a bug on this one.
File mirrored here:
https://github.com/iotivity/iotivity/blob/1.2-rel/build_common/darwin/SConscript
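To illustrate the failure mode described above (a simplified, hypothetical sketch, not the actual SConscript code): bIn is only assigned in some branches of the preceding if/elif chain, so an input that skips those branches reaches the later elif bIn: test with the name never having been defined.

# Hypothetical illustration of the bug pattern; names do not match the real file.
target_arch = 'arm64'

if target_arch == 'i386':
    bIn = True
elif target_arch == 'x86_64':
    bIn = False
# no final else, so any other value leaves bIn unset

if target_arch == 'armv7':
    pass
elif bIn:  # NameError: name 'bIn' is not defined when neither branch above assigned it
    pass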
This is driving me crazy. I have already tried a lot of things, e.g. changing the locale, but I am still getting this error after workspace init and then running catkin_make on a subdirectory.
I am running a recent Arch distribution, ROS Indigo, with Python 2.7 and Python 3.5 installed.
johnny@localhost:~/ros$ catkin_make
Base path: /home/johnny/ros
Source space: /home/johnny/ros/src
Build space: /home/johnny/ros/build
Devel space: /home/johnny/ros/devel
Install space: /home/johnny/ros/install
####
#### Running command: "cmake /home/johnny/ros/src -DCATKIN_DEVEL_PREFIX=/home/johnny/ros/devel -DCMAKE_INSTALL_PREFIX=/home/johnny/ros/install -G Unix Makefiles" in "/home/johnny/ros/build"
####
-- Using CATKIN_DEVEL_PREFIX: /home/johnny/ros/devel
-- Using CMAKE_PREFIX_PATH: /opt/ros/indigo
-- This workspace overlays: /opt/ros/indigo
-- Using PYTHON_EXECUTABLE: /usr/bin/python
-- Using default Python package layout
-- Using empy: /usr/lib/python2.7/site-packages/em.py
-- Using CATKIN_ENABLE_TESTING: ON
-- Call enable_testing()
-- Using CATKIN_TEST_RESULTS_DIR: /home/johnny/ros/build/test_results
-- Found gtest: gtests will be built
CMake Warning at /opt/ros/indigo/share/catkin/cmake/test/nosetests.cmake:96 (message):
nosetests not found, Python tests can not be run (try installing package
'python3-nose')
Call Stack (most recent call first):
/opt/ros/indigo/share/catkin/cmake/all.cmake:147 (include)
/opt/ros/indigo/share/catkin/cmake/catkinConfig.cmake:20 (include)
CMakeLists.txt:52 (find_package)
-- catkin 0.6.18
-- BUILD_SHARED_LIBS is on
/opt/ros/indigo/share/catkin/cmake/em/order_packages.cmake.em:23: error: <class 'UnicodeDecodeError'>: 'ascii' codec can't decode byte 0xc3 in position 205: ordinal not in range(128)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/em.py", line 3302, in <module>
if __name__ == '__main__': main()
File "/usr/lib/python2.7/site-packages/em.py", line 3300, in main
invoke(sys.argv[1:])
File "/usr/lib/python2.7/site-packages/em.py", line 3283, in invoke
interpreter.wrap(interpreter.file, (file, name))
File "/usr/lib/python2.7/site-packages/em.py", line 2295, in wrap
self.fail(e)
File "/usr/lib/python2.7/site-packages/em.py", line 2284, in wrap
callable(*args)
File "/usr/lib/python2.7/site-packages/em.py", line 2359, in file
self.safe(scanner, done, locals)
File "/usr/lib/python2.7/site-packages/em.py", line 2401, in safe
self.parse(scanner, locals)
File "/usr/lib/python2.7/site-packages/em.py", line 2421, in parse
token.run(self, locals)
File "/usr/lib/python2.7/site-packages/em.py", line 1425, in run
interpreter.execute(self.code, locals)
File "/usr/lib/python2.7/site-packages/em.py", line 2595, in execute
_exec(statements, self.globals, locals)
File "<string>", line 17, in <module>
File "/usr/lib/python2.7/site-packages/catkin_pkg/topological_order.py", line 111, in topological_order
for path, package in find_packages(space).items():
File "/usr/lib/python2.7/site-packages/catkin_pkg/packages.py", line 83, in find_packages
packages = find_packages_allowing_duplicates(basepath, exclude_paths=exclude_paths, exclude_subspaces=exclude_subspaces, warnings=warnings)
File "/usr/lib/python2.7/site-packages/catkin_pkg/packages.py", line 110, in find_packages_allowing_duplicates
packages[path] = parse_package(os.path.join(basepath, path), warnings=warnings)
File "/usr/lib/python2.7/site-packages/catkin_pkg/package.py", line 370, in parse_package
return parse_package_string(f.read(), filename, warnings=warnings)
File "/usr/lib/python3.5/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 205: ordinal not in range(128)
CMake Error at /opt/ros/indigo/share/catkin/cmake/safe_execute_process.cmake:11 (message):
execute_process(/home/johnny/ros/build/catkin_generated/env_cached.sh
"/usr/bin/python" "/usr/lib/python2.7/site-packages/em.py" "--raw-errors"
"-F" "/home/johnny/ros/build/catkin_generated/order_packages.py" "-o"
"/home/johnny/ros/build/catkin_generated/order_packages.cmake"
"/opt/ros/indigo/share/catkin/cmake/em/order_packages.cmake.em") returned
error code 1
Call Stack (most recent call first):
/opt/ros/indigo/share/catkin/cmake/em_expand.cmake:25 (safe_execute_process)
/opt/ros/indigo/share/catkin/cmake/catkin_workspace.cmake:35 (em_expand)
CMakeLists.txt:63 (catkin_workspace)
-- Configuring incomplete, errors occurred!
See also "/home/johnny/ros/build/CMakeFiles/CMakeOutput.log".
See also "/home/johnny/ros/build/CMakeFiles/CMakeError.log".
Invoking "cmake" failed'
Thank you very much!
It looks like a Unicode problem. In brief: you have passed something that is being interpreted as a string of bytes to something that needs to decode it into Unicode characters, but the default codec (ASCII) is failing.
More on this here.
There are a lot of solutions to this one. The one I suggest is doing this:
yassin@pc:/usr/local/lib/python2.7/site-packages# cat sitecustomize.py
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
And to check that it works, you need to do the following:
yassin@pc:~/home# python
Python 2.7.6 (default, Dec 6 2013, 14:49:02)
[GCC 4.4.5] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import sys
>>> reload(sys)
<module 'sys' (built-in)>
>>> sys.getdefaultencoding()
'utf8'
>>>
Hope it helps! Cheers.
Setting the system default locale to en_IN.utf8 should solve the problem.
For example, if you use bash, you can add below lines into your ~/.bashrc:
LANG=en_IN.utf8
export LANG
The catkin toolkit uses the system default locale to load message or service files.