Syntax of Premake scripts (Lua)

Can somebody explain what the syntax of a Premake script means? A Premake script is a valid Lua script, so what are solution, configurations, and project in the code below? Variables? Keywords?
-- A solution contains projects, and defines the available configurations
solution "MyApplication"
   configurations { "Debug", "Release" }

   -- A project defines one build target
   project "MyApplication"
      kind "ConsoleApp"
      language "C++"
      files { "**.h", "**.cpp" }

      configuration "Debug"
         defines { "DEBUG" }
         flags { "Symbols" }

      configuration "Release"
         defines { "NDEBUG" }
         flags { "Optimize" }
Edit: They are function calls. So then how is this part

configuration "Debug"
   defines { "DEBUG" }
   flags { "Symbols" }

configuration "Release"
   defines { "NDEBUG" }
   flags { "Optimize" }

executed? Are the defines and flags calls applied according to the context set by the preceding configuration call?

Functions
If a function takes only one argument, and that argument is a string literal or a table constructor, the parentheses can be omitted. Refer to §3.4.10 – Function Calls in the Lua reference manual.
Additionally, in your example the indentation is arbitrary. You could write:
project("MyApplication")
kind("ConsoleApp")
language("C++")
files({"**.h", "**.cpp"})
And it would be as good as the original.
Regarding the second matter: most likely configuration and the related defines and flags operate on some hidden shared state. When you call configuration, it changes this state to refer to, e.g., the "Debug" configuration, and all following calls then apply to that configuration. As in:
do
   local state
   function set_state (name)
      state = name
   end
   function print_with_suffix (suffix)
      print(state, suffix)
   end
end
set_state("hello")
print_with_suffix("world") --> hello world

Related

Infinite recursion when referring to pkgs.system from Nix module options section

The following is a minimal reproducer for an infinite recursion error when building a nixos configuration:
(import <nixpkgs/nixos>) {
  configuration = { pkgs, ... }: {
    options = builtins.trace "Building a system with system ${pkgs.system}" {};
  };
  system = "x86_64-linux";
}
When evaluated it fails as follows, unless the reference to pkgs.system is removed:
$ nix-build
error: infinite recursion encountered

       at /Users/charles/.nix-defexpr/channels/nixpkgs/lib/modules.nix:496:28:

   495|         builtins.addErrorContext (context name)
   496|           (args.${name} or config._module.args.${name})
      |                            ^
   497|       ) (lib.functionArgs f);
If we look at the implementation of nixos/lib/eval-config.nix:33, we see that the value passed for the system argument is set as an overridable default in pkgs. Does this mean we can't access it until later in the evaluation process?
(In the real-world use case, I'm introspecting a flake -- investigating someFlake.packages.${pkgs.system} to find packages for which to generate configuration options.)
This has been cross-posted to NixOS Discourse; see https://discourse.nixos.org/t/accessing-target-system-when-building-options-for-a-module/
In order for the module system to construct the configuration, it needs to know which config and options items exist, at least to the degree necessary to produce the root attribute set of configuration.
The loop is as follows:

1. Evaluate the attribute names in config
2. Evaluate the attribute names of the options
3. Evaluate pkgs (your code)
4. Evaluate config._module.args.pkgs (definition of the module argument)
5. Evaluate the attribute names in config (loop)
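The same knot can be reproduced in a toy form in plain Nix, where an attribute set's names depend on one of its own values (illustrative only, not the module system itself):

nix-repl> let s = { a = 1; } // (if s ? a then { b = 2; } else { }); in s
error: infinite recursion encountered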
It can be broken by removing or reducing the dependency on pkgs.
For instance, you could define your "dynamic" options as type = attrsOf foo instead of enumerating each item from your flake as an individual option.
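A minimal sketch of that shape, with foo instantiated as types.package purely for illustration (the option name is hypothetical):

# Hypothetical sketch: one attrsOf option instead of per-package options,
# so the set of option *names* no longer depends on pkgs.
options.myPackages = lib.mkOption {
  type = lib.types.attrsOf lib.types.package;
  default = { };
  description = "Packages discovered from the flake, keyed by name.";
};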
Another potential solution is to move the option definitions into a submodule. A submodule without attrsOf as in attrsOf (submodule x) is generally quite useless, but it may create a necessary indirection that separates your dynamic pkgs-dependent options from the module fixpoint that has pkgs.
(import <nixpkgs/nixos>) {
  configuration = { pkgs, lib, ... }: {
    options.foo = lib.mkOption {
      type = lib.types.submodule {
        options = builtins.trace "Building a system with system ${pkgs.system}" { };
      };
      default = { };
    };
  };
  system = "x86_64-linux";
}
nix-repl> config.foo
trace: Building a system with system x86_64-linux
{ }
As an alternate approach for cases where avoiding recursion isn't feasible, one can use specialArgs in invoking nixos/lib/eval-config.nix to pass a final value not capable of being overridden through the module system:
let
  configuration = { pkgs, forcedSystem, ... }: {
    options = builtins.trace "Building a system with system ${forcedSystem}" {};
  };
in
(import <nixpkgs/nixos/lib/eval-config.nix>) {
  modules = [ configuration ];
  system = "x86_64-linux";
  specialArgs = { forcedSystem = "x86_64-linux"; };
}

Reactive choice parameter to read from workspace file

I am trying to implement an Active Choices reactive parameter.
In the reactive parameter I am basically hard-coding the different options based on the parameter it references.
Below is the sample code:
if (Target_Environment.equals("Dev01")) {
    return ["test_DEV"]
} else if (Target_Environment.equals("Dev02")) {
    return ["test3_DEV02", "test2_DEV02"]
} else if (Target_Environment.equals("Dev03")) {
    return ["test3_DEV03"]
} else if (Target_Environment.equals("Sit03")) {
    return ["test3_SIT03"]
} else if (Target_Environment.equals("PPTE")) {
    return ["test3_PPTE"]
} else {
    return ["Please Select Target Environment"]
}
Instead of hard-coding the choices, I want to read from a file in the Jenkins workspace and show its contents as the choices. What would be an ideal way to do that?
readFile is not working inside the script that returns the choices.
I am also trying the Extended Choice Parameter, where I pass a property file by filename, but how can I choose the property file with an if/else condition?
I'm not 100% sure this is accurate, but I would expect that you can't read from the job workspace in a parameter like that, because a workspace is only created once a build runs (see the error message below).
So if you create the job with no previous builds, there will be no workspace file to read from.
Whenever I have seen that parameter type used in the past, it usually read from a file on the server instead of the workspace (see the sketch after the error message).
Error message from Jenkins jobs that have no previous builds:
Error: no workspace
A project won't have any workspace until at least one build is performed.
Run a build to have Jenkins create a workspace.
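A minimal Groovy sketch of the server-side-file approach, assuming a fixed path on the Jenkins controller (the path and file layout are illustrative):

// Hypothetical sketch: read choices from a file on the Jenkins controller,
// not from the job workspace (which may not exist before the first build).
def choicesFile = new File("/var/lib/jenkins/choices/${Target_Environment}.txt")
if (choicesFile.exists()) {
    // one choice per line, blank lines skipped
    return choicesFile.readLines().findAll { it?.trim() }
} else {
    return ["Please Select Target Environment"]
}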

How do I load `config_setting()` into my `.bzl` file?

My motivation: our codebase is scattered across at least 20 git repos. I want to consolidate everything into a single git repo with a single build system. Currently we use SBT, but we think the build would take too long, so I am examining the possibility of using Bazel instead.
Most of our codebase uses Scala 2.12, some of our codebase uses Scala 2.11, and the rest needs to build under both Scala 2.11 and Scala 2.12.
I'm trying to use bazelbuild/rules_scala.
With the following call to scala_repositories in my WORKSPACE, I can build using Scala 2.12:
scala_repositories(("2.12.6", {
"scala_compiler": "3023b07cc02f2b0217b2c04f8e636b396130b3a8544a8dfad498a19c3e57a863",
"scala_library": "f81d7144f0ce1b8123335b72ba39003c4be2870767aca15dd0888ba3dab65e98",
"scala_reflect": "ffa70d522fc9f9deec14358aa674e6dd75c9dfa39d4668ef15bb52f002ce99fa"
}))
If I have the following call instead, I can build using Scala 2.11:
scala_repositories(("2.11.12", {
"scala_compiler": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
"scala_library": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
"scala_reflect": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04"
}))
However, it is not possible to specify in my BUILD files, at the package level, which version(s) of Scala to build with; I must specify this globally in my WORKSPACE.
To work around this, my plan is to set up configurable attributes, so I can pass --define scala=2.11 to build with Scala 2.11 and --define scala=2.12 to build with Scala 2.12.
First I tried putting this code in my WORKSPACE:
config_setting(
    name = "scala-2.11",
    define_values = {
        "scala": "2.11",
    },
)

config_setting(
    name = "scala-2.12",
    define_values = {
        "scala": "2.12",
    },
)

scala_repositories(
    select({
        "scala-2.11": "2.11.12",
        "scala-2.12": "2.12.6",
    }),
    select({
        "scala-2.11": {
            "scala_compiler": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
            "scala_library": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
            "scala_reflect": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04",
        },
        "scala-2.12": {
            "scala_compiler": "3023b07cc02f2b0217b2c04f8e636b396130b3a8544a8dfad498a19c3e57a863",
            "scala_library": "f81d7144f0ce1b8123335b72ba39003c4be2870767aca15dd0888ba3dab65e98",
            "scala_reflect": "ffa70d522fc9f9deec14358aa674e6dd75c9dfa39d4668ef15bb52f002ce99fa",
        },
    }),
)
But this gave me the error config_setting cannot be in the WORKSPACE file.
So then I tried moving code into a Starlark file.
In tools/build_rules/scala.bzl:
config_setting(
    name = "scala-2.11",
    define_values = {
        "scala": "2.11",
    },
)

config_setting(
    name = "scala-2.12",
    define_values = {
        "scala": "2.12",
    },
)

def scala_version():
    return select({
        "scala-2.11": "2.11.12",
        "scala-2.12": "2.12.6",
    })

def scala_machinery():
    return select({
        "scala-2.11": {
            "scala_compiler": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
            "scala_library": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
            "scala_reflect": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04",
        },
        "scala-2.12": {
            "scala_compiler": "3023b07cc02f2b0217b2c04f8e636b396130b3a8544a8dfad498a19c3e57a863",
            "scala_library": "f81d7144f0ce1b8123335b72ba39003c4be2870767aca15dd0888ba3dab65e98",
            "scala_reflect": "ffa70d522fc9f9deec14358aa674e6dd75c9dfa39d4668ef15bb52f002ce99fa",
        },
    })
And back in my WORKSPACE:
load("//tools/build_rules:scala.bzl", "scala_version", "scala_machinery")
scala_repositories(scala_version(), scala_machinery())
But now I get this error:
tools/build_rules/scala.bzl:1:1: name 'config_setting' is not defined
This confuses me, because I thought config_setting() was built in. I can't find where I should load it in from.
So, my questions:
How do I load config_setting() into my .bzl file?
Or, is there a better way of controlling from the command line which arguments get passed to scala_repositories()?
Or, is this just not possible?
$ bazel version
Build label: 0.17.2-homebrew
Build target: bazel-out/darwin-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar
Build time: Fri Sep 28 10:42:37 2018 (1538131357)
Build timestamp: 1538131357
Build timestamp as int: 1538131357
If you call native code from a bzl file, you must use the native. prefix, so in this case you would call native.config_setting.
However, this is going to lead to the same error: config_setting is a BUILD rule, not a WORKSPACE rule.
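So if you keep the macro route, the config_setting declarations have to be instantiated from a BUILD file rather than from WORKSPACE; a sketch with illustrative paths:

# tools/build_rules/scala.bzl -- sketch: native rules need the native. prefix,
# and this macro must be called from a BUILD file, not from WORKSPACE.
def declare_scala_settings():
    native.config_setting(
        name = "scala-2.11",
        define_values = {"scala": "2.11"},
    )
    native.config_setting(
        name = "scala-2.12",
        define_values = {"scala": "2.12"},
    )

and then, in e.g. tools/build_rules/BUILD:

load("//tools/build_rules:scala.bzl", "declare_scala_settings")

declare_scala_settings()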
If you want to change the build tool used for a particular target, you can change the toolchain, and this seems to be supported via scala_toolchain.
And I believe you can use a config to select the toolchain.
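Once those config_settings live in a BUILD file, they can at least drive select() in ordinary targets; a generic sketch with hypothetical labels (this is plain select() usage, not toolchain resolution):

# BUILD -- sketch: pick a per-Scala-version dependency via --define scala=...
scala_library(
    name = "mylib",
    srcs = glob(["src/main/scala/**/*.scala"]),
    deps = select({
        "//tools/build_rules:scala-2.11": ["//third_party:somelib_2_11"],
        "//tools/build_rules:scala-2.12": ["//third_party:somelib_2_12"],
    }),
)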
I'm unfamiliar with what scala_repositories does. I hope it defines the toolchain with a proper versioned name, so that you can reference the wanted toolchain correctly, and that it can be invoked twice in the same workspace; otherwise I think there is no solution.

Copying default external configuration on first run of Grails web app

In our Grails web applications, we'd like to use external configuration files so that we can change the configuration without releasing a new version. We'd also like these files to be outside of the application directory so that they stay unchanged during continuous integration.
The last thing we need to do is to make sure the external configuration files exist. If they don't, then we'd like to create them, fill them with predefined content (production environment defaults) and then use them as if they existed before. This allows any administrator to change settings of the application without detailed knowledge of the options actually available.
For this purpose, there's a couple of files within web-app/WEB-INF/conf ready to be copied to the external configuration location upon the first run of the application.
So far so good. But we need to do this before the application is initialized so that production-related modifications to data sources definitions are taken into account.
I can do the copy-and-load operation inside the Config.groovy file, but I don't know the absolute location of the WEB-INF/conf directory at the moment.
How can I get the location during this early phase of initialization? Is there any other solution to the problem?
There is a best practice for this.
In general, never write to the folder where the application is deployed. You have no control over it. The next rollout will remove everything you wrote there.
Instead, leverage the built-in configuration capabilities the real pros use (Spring and/or JPA).
JNDI is the norm for looking up resources like databases, files and URL's.
Operations will have to configure JNDI, but they appreciate the attention.
They will also need an initial set of configuration files, and they should be prepared to make changes at times, as required by the development team.
As always, all configuration files should be in your source code repo.
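For example, a Grails DataSource.groovy can point at a container-managed JNDI resource instead of embedding connection details (the JNDI name here is illustrative):

// DataSource.groovy -- sketch: let the servlet container own the database config.
environments {
    production {
        dataSource {
            jndiName = "java:comp/env/jdbc/myAppDS"   // hypothetical resource name
        }
    }
}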
I finally managed to solve this myself by using Java's ability to locate resources placed on the classpath.
I took the .groovy files that were later to be copied outside, placed them in the grails-app/conf directory (which is on the classpath), and appended a suffix to their names so that they wouldn't get compiled when packaging the application. So now I have *Config.groovy files containing configuration defaults (for all environments) and *Config.groovy.production files containing defaults for the production environment (overriding the precompiled defaults).
Now - Config.groovy starts like this:
grails.config.defaults.locations = [ EmailConfig, AccessConfig, LogConfig, SecurityConfig ]

environments {
    production {
        grails.config.locations = ConfigUtils.getExternalConfigFiles(
            '.production',
            "${userHome}${File.separator}.config${File.separator}${appName}",
            'AccessConfig.groovy',
            'Config.groovy',
            'DataSource.groovy',
            'EmailConfig.groovy',
            'LogConfig.groovy',
            'SecurityConfig.groovy'
        )
    }
}
Then the ConfigUtils class:
import java.util.logging.Logger

import org.apache.commons.io.FileUtils

public class ConfigUtils {

    // Log4j may not be initialized yet, so use the JDK logger
    private static final Logger LOG = Logger.getGlobal()

    public static def getExternalConfigFiles(final String defaultSuffix, final String externalConfigFilesLocation, final String... externalConfigFiles) {
        final def externalConfigFilesDir = new File(externalConfigFilesLocation)
        LOG.info "Loading configuration from ${externalConfigFilesDir}"
        if (!externalConfigFilesDir.exists()) {
            LOG.warning "${externalConfigFilesDir} not found. Creating..."
            try {
                externalConfigFilesDir.mkdirs()
            } catch (e) {
                LOG.severe "Failed to create external configuration storage. Default configuration will be used."
                e.printStackTrace()
                return []
            }
        }
        final def cl = ConfigUtils.class.getClassLoader()
        def result = []
        externalConfigFiles.each {
            final def file = new File(externalConfigFilesDir, it)
            if (file.exists()) {
                result << file.toURI().toURL()
                return
            }
            def error = false   // must not be final: it is reassigned below
            final def defaultFileURL = cl.getResource(it + defaultSuffix)
            def defaultFile
            if (defaultFileURL) {
                defaultFile = new File(defaultFileURL.toURI())
                error = !defaultFile.exists()
            } else {
                error = true
            }
            if (error) {
                LOG.severe "Neither ${file} nor ${defaultFile} exists. Skipping..."
                return
            }
            LOG.warning "${file} does not exist. Copying ${defaultFile} -> ${file}..."
            try {
                FileUtils.copyFile(defaultFile, file)
            } catch (e) {
                LOG.severe "Couldn't copy ${defaultFile} -> ${file}. Skipping..."
                e.printStackTrace()
                return
            }
            result << file.toURI().toURL()
        }
        return result
    }
}

How can I get a list of build targets *with dependencies* in Ant?

I found this question:
How can I get a list of build targets in Ant?
What I'd like to know: is there a way to get a list of targets together with their depends-on values? We have a large build.xml file, and the way it's currently written, the presence or absence of a description doesn't tell me much about whether a target is a main target or an "other" target.
We are running Ant 1.8.1. This is an initial bit of due diligence as I prepare to migrate to Gradle, so I need to figure out which targets are truly the "high-level" targets and which ones are "supporting" targets.
Note: I work in a locked-down environment, so downloading third-party software or Ant extensions is not an option.
Additional note: if this level of detail is not possible in Ant, that is a valid answer as well.
In Ant 1.8.2 and above, use the -d flag to print debug info:
ant -p -d <your main build file>
and you'll get details like this:
javadoc
    depends on: resolve
javadoc.distribute
latest-ivy
package
    depends on: -maybe-package-by-bom, -maybe-package-by-spec, -maybe-package-for-dc
The -d flag will also print the "other" targets (those without descriptions) that aren't printed by ant -p, along with their dependencies.
If you want a recursive tree listing, you can use this XQuery script with Saxon:
(:~
 : XQuery to display the dependencies of an Ant target.
 :
 : There are two modes of operation:
 :   1) Display all targets and immediate dependencies, specified by $project-file
 :   2) Show a tree of a single target's dependencies; this happens when $target-name is set as well.
 :
 : External parameters:
 :   $project-file  The initial Ant file to start parsing from (imports will be expanded)
 :   $target-name   If specified we examine only a single target and produce a tree of all dependencies (recursively)
 :   $show-file     Whether the file path of the dependency should be shown
 :
 : Example Usage: java -cp Saxon-HE-9.7.0-18.jar net.sf.saxon.Query -q:ant-show-deps.xqy \!indent=yes project-file=file:/Users/are/exist-git/build.xml target-name=installer show-file=true
 :
 : If you don't want to specify the $target-name you can pass ?target-name=\(\) to Saxon on the command line.
 :
 : @author Adam Retter
 :)
xquery version "1.0";

declare variable $project-file external;
declare variable $target-name as xs:string? external;
declare variable $show-file as xs:boolean external;

declare function local:expand-import-targets($file as xs:string) as element(target)* {
    local:expand-import-targets($file, ())
};

declare function local:expand-import-targets($file as xs:string, $visited as xs:string*) as element(target)* {
    let $path := local:resolve($file, $visited[1])
    return
        if(not($visited = $path))then
            let $imported-project := doc($path)/project
            return
                (
                    for $target in $imported-project/target
                    return
                        <target name="{$target/@name}" file="{$path}">
                        {
                            for $dependency in tokenize(replace($target/@depends, '\s+', ''), ',')
                            return
                                <dependency name="{$dependency}"/>
                        }
                        </target>
                    ,
                    for $import in $imported-project/import
                    return
                        local:expand-import-targets($import/@file, ($path, $visited))
                )
        else()
};

declare function local:resolve($file as xs:string, $prev-file as xs:string?) {
    if(not($prev-file))then
        $file
    else if(starts-with($file, "/") or starts-with($file, "file:/"))then
        $file
    else
        resolve-uri($file, $prev-file)
};

declare function local:target-tree($target-name as xs:string, $targets as element(target)*) as element(target)? {
    let $target := $targets[@name eq $target-name]
    return
        element target {
            $target/@name,
            if($show-file)then
                $target/@file
            else(),
            for $dependency in $target/dependency
            return
                local:expand-dependency($dependency/@name, $targets)
        }
};

declare function local:expand-dependency($dependency-name as xs:string, $targets as element(target)*) {
    for $expanded in $targets[@name eq $dependency-name]
    return
        element dependency {
            $expanded/@name,
            if($show-file)then
                $expanded/@file
            else(),
            for $sub-dependency in $expanded/dependency
            return
                local:expand-dependency($sub-dependency/@name, $targets)
        }
};

let $targets := local:expand-import-targets($project-file)
return
    if($target-name)then
        local:target-tree($target-name, $targets)
    else
        <targets>
        {
            for $target in $targets
            order by $target/@name
            return $target
        }
        </targets>
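As a quick sanity check, running the query against a minimal build.xml like this (illustrative) yields one <target> element per target, each with its <dependency> children:

<project name="demo" default="package">
    <target name="resolve"/>
    <target name="compile" depends="resolve"/>
    <target name="package" depends="compile" description="Build the artifact"/>
</project>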
