I have a project that includes several JS packages and is organized with Yarn workspaces:
<root>
├── WORKSPACE
├── package.json
├── workspaces
│ ├── foo
│ │ ├── package.json
│ │ ├── BUILD.bazel
│ │ ├── src
│
├── bar
│ ├── package.json
│ ├── BUILD.bazel
│ ├── src
The FOO package depends on the BAR package, and this dependency is defined in FOO/package.json:
workspaces/foo/package.json
{
"name": "FOO",
"dependencies": {
"BAR": "link:../bar",
}
workspaces/bar/BUILD.bazel looks like this
load("#build_bazel_rules_nodejs//:index.bzl", "js_library")
js_library(
name = "bar",
package_name = "BAR",
srcs = [
"package.json",
"index.js",
],
visibility = ["//visibility:public"],
)
and here workspaces/foo/BUILD.bazel
load("#build_bazel_rules_nodejs//:index.bzl", "js_library")
js_library(
name = "foo",
package_name = "FOO",
srcs = [
"package.json",
"index.js",
],
deps = [
"//workspaces/bar:bar",
"#npm//:node_modules" # <-- this line causes an error because linked package couldn't be found in node_modules
],
visibility = ["//visibility:public"],
)
and WORKSPACE file includes:
yarn_install(
name = "npm_foo",
package_json = "//workspaces/foo:package.json",
yarn_lock = "//workspaces/foo:yarn.lock",
package_path = "workspaces/foo",
strict_visibility = False,
# links = {
# "bar": "//workspaces/bar",
# },
# generate_local_modules_build_files = True,
)
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
yarn_install(
name = "npm_bar",
package_json = "//workspaces/bar:package.json",
yarn_lock = "//workspaces/bar:yarn.lock",
package_path = "workspaces/bar",
)
with all of this setup, I run bazel build //workspaces/foo:foo and it fails.
Do I need to configure the links or generate_local_modules_build_files attributes in yarn_install? Or what changes are required to get these linked packages to work together?
Since yarn workspaces aren't supported by rules_nodejs, the following workaround worked for me:
here modified workspaces/foo/BUILD.bazel
load("#build_bazel_rules_nodejs//:index.bzl", "js_library")
# this list contains all dependencies from the top level `package.json` file
# do not include linked `bar` package!
DEPENDENCIES = [
"#npm//react",
"#npm//react-dom",
...
]
# node_modules filegroup includes all dependencies from workspaces/foo/node_modules
filegroup(
name = "node_modules",
srcs = glob(
include = [
"node_modules/**",
],
),
)
js_library(
name = "foo",
package_name = "FOO",
srcs = [
"package.json",
"index.js",
],
deps = [
"//workspaces/bar:bar",
":node_modules"
] + DEPENDENCIES,
visibility = ["//visibility:public"],
)
and the WORKSPACE file has only one yarn_install rule, for the top-level package.json, since nested workspaces are treated as self-managed:
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
Related
Looking to create dynamic Azure vwan vpn site connections. I am using this map to define the sites and links. I would like to be able to add and remove sites and links as my requirements change.
virtual_wan_vpn_sites = {
vwan_site_dc = {
name = "site-shr-infra-dc"
location_map_key = "primary"
resource_group_key = "rg_vwan"
vwan_key = "vwan"
device_vendor = "Fortigate"
device_model = "FGT60F"
links = {
link_1 = {
name = "link-shr-infra-dc-1"
bgp_asn = "64512"
public_ip_address = "1.1.1.1"
bgp_peering_ip = "10.10.100.1"
}
link_2 = {
name = "link-shr-infra-dc-2"
bgp_asn = "64513"
public_ip_address = "2.2.2.2"
bgp_peering_ip = "10.10.100.100"
}
}
}
}
and this code to create the sites, links, and connections.
# Create vpn site(s)
module "virtualWanVpnSites" {
source = "../../modules/networking/virtual_wan_vpn_site"
for_each = var.virtual_wan_vpn_sites
name = each.value.name
location = var.location_map[each.value.location_map_key]
resource_group_name = azurerm_resource_group.resource_group[each.value.resource_group_key].name
virtual_wan_id = module.virtualWan[each.value.vwan_key].virtual_wan_id
vwan_key = each.value.vwan_key
vwan_sites = each.value.links
device_vendor = each.value.device_vendor
device_model = each.value.device_model
vpn_gateways = values(module.virtualHubVpn)[*].virtual_hub_vpn_gateway_id
tags = merge(lookup(each.value, "tags", {}), local.tags)
../../modules/networking/virtual_wan_vpn_site
# Virtual Wan vpn site
resource "azurerm_vpn_site" "vwan_vpn_site" {
name = var.name
location = var.location
resource_group_name = var.resource_group_name
virtual_wan_id = var.virtual_wan_id
device_vendor = var.device_vendor
device_model = var.device_model
tags = local.tags
dynamic "link" {
for_each = try(var.vwan_sites, {})
content {
name = link.value.name
ip_address = link.value.public_ip_address
bgp {
asn = link.value.bgp_asn
peering_address = link.value.bgp_peering_ip
}
}
}
}
# vhub vpn gateway connection
resource "azurerm_vpn_gateway_connection" "vhub_vpn_gateway_connection" {
for_each = toset(var.vpn_gateways)
name = "example"
vpn_gateway_id = each.key
remote_vpn_site_id = azurerm_vpn_site.vwan_vpn_site.id
*dynamic "vpn_link" {
for_each = try(azurerm_vpn_site.vwan_vpn_site.link, [])
content {
name = vpn_link.value.name
vpn_site_link_id = vpn_link.value.id
bgp_enabled = true
}
}
}
The vwan site and links get created successfully, however, the link connections error out with this:
Error: Missing required argument
│
│ with module.virtualWanVpnSites["vwan_site_dc"].azurerm_vpn_gateway_connection.vhub_vpn_gateway_connection["/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/vpnGateways/vpn-shr-infra-usce"],
│ on ../../modules/networking/virtual_wan_vpn_site/module.tf line 25, in resource "azurerm_vpn_gateway_connection" "vhub_vpn_gateway_connection":
│ 25: resource "azurerm_vpn_gateway_connection" "vhub_vpn_gateway_connection" {
│
│ The argument "vpn_link.1.vpn_site_link_id" is required, but no definition
│ was found.
╵
╷
│ Error: Missing required argument
│
│ with module.virtualWanVpnSites["vwan_site_dc"].azurerm_vpn_gateway_connection.vhub_vpn_gateway_connection["/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/vpnGateways/vpn-shr-infra-use2"],
│ on ../../modules/networking/virtual_wan_vpn_site/module.tf line 25, in resource "azurerm_vpn_gateway_connection" "vhub_vpn_gateway_connection":
│ 25: resource "azurerm_vpn_gateway_connection" "vhub_vpn_gateway_connection" {
│
│ The argument "vpn_link.1.vpn_site_link_id" is required, but no definition
│ was found.
It appears that the resource azurerm_vpn_gateway_connection wants the vpn_site_link_id like this vpn_site_link_id = azurerm_vpn_site.example.link[0].id.
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/vpn_gateway_connection
I can see the links in the state file like this:
"module": "module.virtualWanVpnSites[\"vwan_site_dc\"]",
"mode": "managed",
"type": "azurerm_vpn_site",
"name": "vwan_vpn_site",
"provider": "provider[\"registry.terraform.io/hashicorp/azurerm\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"address_cidrs": [],
"device_model": "FGT60F",
"device_vendor": "Fortigate",
"id": "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/vpnSites/site-shr-infra-dc",
"link": [
{
"bgp": [
{
"asn": 64512,
"peering_address": "10.10.100.1"
}
],
"fqdn": "",
"id": "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/vpnSites/site-shr-infra-dc/vpnSiteLinks/link-shr-infra-dc-1",
"ip_address": "1.1.1.1",
"name": "link-shr-infra-dc-1",
"provider_name": "",
"speed_in_mbps": 0
},
{
"bgp": [
{
"asn": 64513,
"peering_address": "10.10.100.100"
}
],
"fqdn": "",
"id": "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Network/vpnSites/site-shr-infra-dc/vpnSiteLinks/link-shr-infra-dc-2",
"ip_address": "2.2.2.2",
"name": "link-shr-infra-dc-2",
"provider_name": "",
"speed_in_mbps": 0
}
],
Any ideas would be greatly appreciated. Thanks.
Please check this terraform-azurerm-virtual-wan · GitHub
Initially, check whether the version is the issue, and test with v2.78.0 of the Terraform Provider.
Try also setting address_cidrs to create an address space
I have used address_cidrs = ["10.0.0.0/16"] for azurerm_vpn_site
And address_prefix = "10.0.0.0/24" in azurerm_virtual_hub.
vpn_sites = [
{
name = "site1"
links = [
{
name = "site1-primary-endpoint"
ip_address = "20.20.20.20"
bgp = [
{
asn = 65530
peering_address = "169.254.21.2"
}
]
},
{
name = "site1-secondary-endpoint"
ip_address = "21.21.21.21"
bgp = [
{
asn = 65530
peering_address = "169.254.22.2"
}
]
}
]
}
]
As you said, vpn_link is expected in the format vpn_site_link_id = vpn_link.value[0].id
Please also check the Reference : Terraform for-each with list of objects - Stack Overflow
Is there a way to convert a tar target (created using rules_pkg) and turn it into a zip target in Bazel?
pkg_tar(
name = "bundle-tar",
srcs = [
":aws-lambda",
],
include_runfiles = True,
package_dir = "/",
)
pkg_zip(
name = "bundle-zip",
srcs = [
":bundle-tar", # ???
],
)
I imagine this could be accomplished using @bazel_tools//tools/zip:zipper?
This kind of works, but it is not deterministic:
genrule(
name = "bundle-zip",
srcs = [ ':bundle-tar' ],
outs = [ "bundle.zip" ],
cmd = " && ".join([
"tar xf $(location :bundle-tar)",
"zip $# $$(tar tf $(location :bundle-tar))",
]),
)
load("#bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "rules_pkg",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.7.0/rules_pkg-0.7.0.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.7.0/rules_pkg-0.7.0.tar.gz",
],
sha256 = "8a298e832762eda1830597d64fe7db58178aa84cd5926d76d5b744d6558941c2",
)
load("#rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
Please refer to this issue for a detailed explanation on how to convert tar to zip files. Thank you.
I'm trying to understand Bazel and do cross-compilation for a simple c++ file
Here is my example:
├── aarch64_compiler
│ ├── BUILD
│ ├── cc_toolchain_config.bzl
│ └── CROSSTOOL
├── aarch64_compiler.BUILD
├── helloworld
│ ├── BUILD.bazel
│ └── src
│ └── helloworld.cpp
└── WORKSPACE
and here is each file,
WORKSPACE
new_local_repository(
name = "aarch64_compiler",
path = "/",
build_file = "aarch64_compiler.BUILD",
)
aarch64_compiler.BUILD
package(default_visibility = ['//visibility:public'])
filegroup(
name = 'gcc',
srcs = [
'usr/bin/aarch64-linux-gnu-gcc',
],
)
filegroup(
name = 'ar',
srcs = [
'usr/bin/aarch64-linux-gnu-ar',
],
)
filegroup(
name = 'ld',
srcs = [
'usr/bin/aarch64-linux-gnu-ld',
],
)
filegroup(
name = 'nm',
srcs = [
'usr/bin/aarch64-linux-gnu-nm',
],
)
filegroup(
name = 'objcopy',
srcs = [
'usr/bin/aarch64-linux-gnu-objcopy',
],
)
filegroup(
name = 'objdump',
srcs = [
'usr/bin/aarch64-linux-gnu-objdump',
],
)
filegroup(
name = 'strip',
srcs = [
'usr/bin/aarch64-linux-gnu-strip',
],
)
filegroup(
name = 'as',
srcs = [
'usr/bin/aarch64-linux-gnu-as',
],
)
filegroup(
name = 'compiler_pieces',
srcs = glob([
'usr/lib/gcc-cross/aarch64-linux-gnu/7/**',
'usr/aarch64-linux-gnu/**',
]),
)
filegroup(
name = 'compiler_components',
srcs = [
':gcc',
':ar',
':ld',
':nm',
':objcopy',
':objdump',
':strip',
':as',
],
)
aarch64_compiler/BUILD
package(default_visibility = ["//visibility:public"])
load(":cc_toolchain_config.bzl", "cc_toolchain_config")
cc_toolchain_suite(
name = 'toolchain',
toolchains = {
'aarch64|compiler':':gcc-linux-aarch64',
"aarch64": ":gcc-linux-aarch64",
},
)
filegroup(
name = "empty",
srcs = [],
)
filegroup(
name = "arm_linux_all_files",
srcs = [
"#aarch64_compiler//:compiler_pieces",
],
)
cc_toolchain_config(name = "aarch64_toolchain_config")
cc_toolchain(
name = 'gcc-linux-aarch64',
toolchain_identifier = "aarch64-linux-gnu",
toolchain_config = ":aarch64_toolchain_config",
all_files = ':arm_linux_all_files',
compiler_files = ':arm_linux_all_files',
#cpu = 'aarch64',
dwp_files = ':empty',
#dynamic_runtime_libs = [':empty'],
linker_files = ':arm_linux_all_files',
objcopy_files = ':arm_linux_all_files',
#static_runtime_libs = [':empty'],
strip_files = ':arm_linux_all_files',
supports_param_files = 1,
)
aarch64_compiler/cc_toolchain_config.bzl
# toolchain/cc_toolchain_config.bzl:
load("#bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "tool_path")
def _impl(ctx):
tool_paths = [
tool_path(
name = "gcc",
path = "/usr/bin/aarch64-linux-gnu-gcc",
),
tool_path(
name = "ld",
path = "/usr/bin/aarch64-linux-gnu-ld",
),
tool_path(
name = "ar",
path = "/usr/bin/aarch64-linux-gnu-ar",
),
tool_path(
name = "cpp",
path = "/usr/bin/aarch64-linux-gnu-cpp",
),
tool_path(
name = "gcov",
path = "/usr/bin/aarch64-linux-gnu-gcov",
),
tool_path(
name = "nm",
path = "/usr/bin/aarch64-linux-gnu-nm",
),
tool_path(
name = "objdump",
path = "/usr/bin/aarch64-linux-gnu-objdump",
),
tool_path(
name = "strip",
path = "/usr/bin/aarch64-linux-gnu-strip",
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
toolchain_identifier = "aarch64-linux-gnu",
host_system_name = "x86_64-unknown-linux-gnu",
target_system_name = "aarch64-unknown-linux-gnu",
target_cpu = "aarch64",
target_libc = "unknown",
compiler = "aarch64",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
aarch64_compiler/CROSSTOOL
major_version: "local"
minor_version: ""
default_target_cpu: "aarch64"
default_toolchain {
cpu: "aarch64"
toolchain_identifier: "aarch64-linux-gnu"
}
toolchain {
abi_version: "aarch64"
abi_libc_version: "aarch64"
builtin_sysroot: ""
compiler: "compiler"
host_system_name: "aarch64"
needsPic: true
supports_gold_linker: true
supports_incremental_linker: false
supports_fission: false
supports_interface_shared_objects: false
supports_normalizing_ar: false
supports_start_end_lib: true
target_libc: "aarch64"
target_cpu: "aarch64"
target_system_name: "aarch64"
toolchain_identifier: "aarch64-linux-gnu"
cxx_flag: "-std=c++11"
linker_flag: "-lstdc++"
linker_flag: "-lm"
linker_flag: "-fuse-ld=gold"
linker_flag: "-Wl,-no-as-needed"
linker_flag: "-Wl,-z,relro,-z,now"
linker_flag: "-pass-exit-codes"
cxx_builtin_include_directory: "/usr/aarch64-linux-gnu/include/c++/7/"
cxx_builtin_include_directory: "/usr/aarch64-linux-gnu/include/c++/7/backward"
cxx_builtin_include_directory: "/usr/aarch64-linux-gnu/include/"
cxx_builtin_include_directory: "/usr/lib/gcc-cross/aarch64-linux-gnu/7/include"
cxx_builtin_include_directory: "/usr/lib/gcc-cross/aarch64-linux-gnu/7/include-fixed"
objcopy_embed_flag: "-I"
objcopy_embed_flag: "binary"
unfiltered_cxx_flag: "-fno-canonical-system-headers"
unfiltered_cxx_flag: "-Wno-builtin-macro-redefined"
unfiltered_cxx_flag: "-D__DATE__=\"redacted\""
unfiltered_cxx_flag: "-D__TIMESTAMP__=\"redacted\""
unfiltered_cxx_flag: "-D__TIME__=\"redacted\""
compiler_flag: "-U_FORTIFY_SOURCE"
compiler_flag: "-fstack-protector"
compiler_flag: "-Wall"
compiler_flag: "-Wunused-but-set-parameter"
compiler_flag: "-Wno-free-nonheap-object"
compiler_flag: "-fno-omit-frame-pointer"
tool_path { name: "ld" path: "/usr/bin/aarch64-linux-gnu-ld" }
tool_path { name: "cpp" path: "/usr/bin/aarch64-linux-gnu-cpp" }
tool_path { name: "dwp" path: "/usr/bin/aarch64-linux-gnu-dwp" }
tool_path { name: "gcov" path: "/usr/bin/aarch64-linux-gnu-gcov" }
tool_path { name: "nm" path: "/usr/bin/aarch64-linux-gnu-nm" }
tool_path { name: "objcopy" path: "/usr/bin/aarch64-linux-gnu-objcopy" }
tool_path { name: "objdump" path: "/usr/bin/aarch64-linux-gnu-objdump" }
tool_path { name: "strip" path: "/usr/bin/aarch64-linux-gnu-strip" }
tool_path { name: "gcc" path: "/usr/bin/aarch64-linux-gnu-gcc" }
tool_path { name: "ar" path: "/usr/bin/aarch64-linux-gnu-ar" }
compilation_mode_flags {
mode: DBG
# Enable debug symbols.
compiler_flag: "-g"
}
compilation_mode_flags {
mode: OPT
# No debug symbols.
# Maybe we should enable https://gcc.gnu.org/wiki/DebugFission for opt or
# even generally? However, that can't happen here, as it requires special
# handling in Bazel.
compiler_flag: "-g0"
# Conservative choice for -O
# -O3 can increase binary size and even slow down the resulting binaries.
# Profile first and / or use FDO if you need better performance than this.
compiler_flag: "-O2"
compiler_flag: "-D_FORTIFY_SOURCE=1"
# Disable assertions
compiler_flag: "-DNDEBUG"
# Removal of unused code and data at link time (can this increase binary size in some cases?).
compiler_flag: "-ffunction-sections"
compiler_flag: "-fdata-sections"
linker_flag: "-Wl,--gc-sections"
}
linking_mode_flags { mode: DYNAMIC }
feature {
name: 'coverage'
provides: 'profile'
flag_set {
action: 'preprocess-assemble'
action: 'c-compile'
action: 'c++-compile'
action: 'c++-header-parsing'
action: 'c++-header-preprocessing'
action: 'c++-module-compile'
flag_group {
flag: '-fprofile-arcs'
flag: '-ftest-coverage'
}
}
flag_set {
action: 'c++-link-interface-dynamic-library'
action: 'c++-link-dynamic-library'
action: 'c++-link-executable'
flag_group {
flag: '-lgcov'
}
}
}
}
helloworld/BUILD.bazel
cc_binary(
name = "helloworld",
srcs = [ "src/helloworld.cpp" ],
)
helloworld/src/helloworld.cpp
#include <iostream>
int main() {
std::cout << "Hello World from BAZEL" << std::endl;
}
The bazel command used: bazel build //helloworld:helloworld --cpu=aarch64 --crosstool_top=//aarch64_compiler:toolchain --host_crosstool_top=@bazel_tools//tools/cpp:toolchain --verbose_failures
and I got this error:
ERROR: /data/cross-compile/bazel_sample/helloworld/BUILD.bazel:1:1: undeclared inclusion(s) in rule '//helloworld:helloworld':
this rule is missing dependency declarations for the following files included by 'helloworld/src/helloworld.cpp':
'/usr/aarch64-linux-gnu/include/stdc-predef.h'
'/usr/aarch64-linux-gnu/include/c++/7/iostream'
'/usr/aarch64-linux-gnu/include/c++/7/aarch64-linux-gnu/bits/c++config.h'
'/usr/aarch64-linux-gnu/include/c++/7/aarch64-linux-gnu/bits/os_defines.h'
'/usr/aarch64-linux-gnu/include/features.h'
'/usr/aarch64-linux-gnu/include/sys/cdefs.h'
'/usr/aarch64-linux-gnu/include/bits/wordsize.h'
'/usr/aarch64-linux-gnu/include/bits/long-double.h'
'/usr/aarch64-linux-gnu/include/gnu/stubs.h'
.....
What is missing in my configuration?
There are 2 things that might help:
You need to include these search paths in the compilation command with compiler_flag.
You might also need to add -no-canonical-prefixes in compiler_flag, otherwise bazel might not be able to understand that the path used is the same as the one declared.
Say I have a custom rule, my_object. It looks like:
my_object(
name = "foo",
deps = [
"//services/image-A:push",
"//services/image-B:push",
]
)
Where the labels in deps are rules_docker's container_push rules.
I want to be able to bazel run //:foo and have it push the Docker images within the deps list. How do I do this?
This seems to be a specific case of just generally wanting to run the executables of other rules within the executable of a custom rule.
The thing to do here is to have my_object output an executable that executes the other executables.
Consider this example:
def _impl1(ctx):
ctx.actions.write(
output = ctx.outputs.executable,
is_executable = True,
content = "echo %s 123" % ctx.label.name)
return DefaultInfo(executable = ctx.outputs.executable)
exec_rule1 = rule(
implementation = _impl1,
executable = True,
)
def _impl2(ctx):
executable_paths = []
runfiles = ctx.runfiles()
for dep in ctx.attr.deps:
# the "./" is needed if the executable is in the current directory
# (i.e. in the workspace root)
executable_paths.append("./" + dep.files_to_run.executable.short_path)
# collect the runfiles of the other executables so their own runfiles
# will be available when the top-level executable runs
runfiles = runfiles.merge(dep.default_runfiles)
ctx.actions.write(
output = ctx.outputs.executable,
is_executable = True,
content = "\n".join(executable_paths))
return DefaultInfo(
executable = ctx.outputs.executable,
runfiles = runfiles)
exec_rule2 = rule(
implementation = _impl2,
executable = True,
attrs = {
"deps": attr.label_list(),
},
)
BUILD.bazel:
load(":defs.bzl", "exec_rule1", "exec_rule2")
exec_rule1(name = "foo")
exec_rule1(name = "bar")
exec_rule2(name = "baz", deps = [":foo", ":bar"])
and then running it:
$ bazel run //:baz
INFO: Analyzed target //:baz (4 packages loaded, 19 targets configured).
INFO: Found 1 target...
Target //:baz up-to-date:
bazel-bin/baz
INFO: Elapsed time: 0.211s, Critical Path: 0.01s
INFO: 0 processes.
INFO: Build completed successfully, 6 total actions
INFO: Build completed successfully, 6 total actions
foo 123
bar 123
I managed to achieve this by implementing DefaultInfo in rule.
def build_all_impl(ctx):
targets = ctx.attr.targets
run_files = []
for target in targets:
run_files = run_files + target.files.to_list()
DefaultInfo(
runfiles = ctx.runfiles(run_files),
)
build_all = rule(
implementation = build_all_impl,
attrs = {
"targets": attr.label_list(
doc = "target to build",
),
},
)
And then by running build_all rule
build_all(
name = "all",
targets = [
":target-1",
":target-2",
...
],
)
I have many BUILD files that require Jetty or other common Java libraries (SLF4J, Lucene, Guava, etc.). Each of these has a set of other JARs that it references. I would like to know the best practice for organizing these declarations and their dependencies in a large project.
For example, using generate_workspace via bazel run //src/tools/generate_workspace -- --artifact=org.eclipse.jetty:jetty-server:9.3.8.v20160314 I get the following BUILD
# The following dependencies were calculated from:
# org.eclipse.jetty:jetty-server:9.3.8.v20160314
java_library(
name = "org_eclipse_jetty_jetty_http",
visibility = ["//visibility:public"],
exports = [
"#org_eclipse_jetty_jetty_http//jar",
"#org_eclipse_jetty_jetty_util//jar",
],
)
java_library(
name = "org_eclipse_jetty_jetty_util",
visibility = ["//visibility:public"],
exports = [
"#org_eclipse_jetty_jetty_util//jar",
],
)
java_library(
name = "javax_servlet_javax_servlet_api",
visibility = ["//visibility:public"],
exports = [
"#javax_servlet_javax_servlet_api//jar",
],
)
java_library(
name = "org_eclipse_jetty_jetty_server",
visibility = ["//visibility:public"],
exports = [
"#org_eclipse_jetty_jetty_server//jar",
"#javax_servlet_javax_servlet_api//jar",
"#org_eclipse_jetty_jetty_http//jar",
"#org_eclipse_jetty_jetty_io//jar",
"#org_eclipse_jetty_jetty_util//jar",
],
)
java_library(
name = "org_eclipse_jetty_jetty_io",
visibility = ["//visibility:public"],
exports = [
"#org_eclipse_jetty_jetty_io//jar",
"#org_eclipse_jetty_jetty_util//jar",
],
)
and WORKSPACE
# The following dependencies were calculated from:
# org.eclipse.jetty:jetty-server:9.3.8.v20160314
# org.eclipse.jetty:jetty-server:jar:9.3.8.v20160314
maven_jar(
name = "org_eclipse_jetty_jetty_http",
artifact = "org.eclipse.jetty:jetty-http:9.3.8.v20160314",
)
# org.eclipse.jetty:jetty-http:jar:9.3.8.v20160314
# org.eclipse.jetty:jetty-io:jar:9.3.8.v20160314
maven_jar(
name = "org_eclipse_jetty_jetty_util",
artifact = "org.eclipse.jetty:jetty-util:9.3.8.v20160314",
)
# org.eclipse.jetty:jetty-server:jar:9.3.8.v20160314
maven_jar(
name = "javax_servlet_javax_servlet_api",
artifact = "javax.servlet:javax.servlet-api:3.1.0",
)
maven_jar(
name = "org_eclipse_jetty_jetty_server",
artifact = "org.eclipse.jetty:jetty-server:9.3.8.v20160314",
)
# org.eclipse.jetty:jetty-server:jar:9.3.8.v20160314
maven_jar(
name = "org_eclipse_jetty_jetty_io",
artifact = "org.eclipse.jetty:jetty-io:9.3.8.v20160314",
)
files.
I have a dependency on jetty-server and jetty-util in many projects. Is there a better practice than repeating this information in each BUILD file?
Generally you'd put the generate_workspace-generated BUILD file in the root of your workspace (next to your WORKSPACE file) and then, in other BUILD files, you'd reference whatever target they needed to depend on. For example, in src/main/java/com/your-project/subcomponent/BUILD, you might say:
java_library(
name = "my-servlet",
srcs = glob(["*.java"]),
deps = [
"//:javax_servlet_javax_servlet_api",
# other deps...
],
)