Passing parameters from Jenkinsfile to a shared library - jenkins

I have several components(code projects with their own Bitbucket repositories) and each of them has a Jenkinsfile as follows:
// Job properties: build parameters, 7-day/5-build log rotation, no concurrent builds.
properties([parameters([string(defaultValue: "", description: "List of components", name: 'componentsToUpdate'),
        string(defaultValue: "refs%2Fheads%2Fproject%2Fintegration", description: "BuildInfo CommitID", name: 'commitId'),
        string(defaultValue: "", description: "Tag to release, e.g. 1.1.0-integration", name: 'releaseTag'),
        string(defaultValue: "", description: "Forked buildInfo repo. Be aware right commit ID!!!", name: 'fork')]),
    [$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', artifactDaysToKeepStr: '', artifactNumToKeepStr: '', daysToKeepStr: '7', numToKeepStr: '5']],
    disableConcurrentBuilds()])
// FIX: the shared-library annotation is '@Library', not '#Library'.
@Library('jenkins-shared-stages')
import mergePipeline
import releasePipeline
import ripplePipeline
import componentPipeline
// The branch being built decides which shared-library pipeline to run.
def branchName = env.BRANCH_NAME
def rewriteDependencies = ""
def returnValue = null
def forkedRepo = params.fork
def buildInfoCommitId = params.commitId
def tagToRelease = params.releaseTag
println "buildInfoCommitId: " + buildInfoCommitId
if (params.componentsToUpdate) {
    rewriteDependencies = params.componentsToUpdate
}
if (branchName == "project/integration") {
    mergePipeline {
    }
} else if (branchName == 'master') {
    releasePipeline {
        releaseTag = tagToRelease
    }
} else {
    // Component branch: build the component, then ripple the new version to dependants.
    returnValue = componentPipeline {
        componentsToUpdate = rewriteDependencies
        commitId = buildInfoCommitId
        runOnForkedRepo = forkedRepo
    }
    rewriteDependencies = rewriteDependencies.isEmpty() ? returnValue : rewriteDependencies + "," + returnValue
    println "WHAT is rewriteDependencies? " + rewriteDependencies
    println "The return value: " + returnValue
    ripplePipeline {
        commitId = buildInfoCommitId
        componentName = returnValue
        runOnForkedRepo = forkedRepo
        componentsToUpdate = rewriteDependencies
    }
}
Need to use a 'wrapper' pipeline, say, wrapperPipeline.groovy:
import mergePipeline
import releasePipeline
import ripplePipeline
import componentPipeline
import org.slf4j.Logger
import org.slf4j.LoggerFactory
// Shared-library entry point ("wrapperPipeline"): dispatches to the merge,
// release, component or ripple pipeline depending on the branch being built.
//
// NOTE(review): 'body' is the configuration closure from the Jenkinsfile.
// Jenkins job parameters are NOT copied in here automatically - they live in
// the global 'params' object - so config.commitId etc. stay null unless the
// caller assigns them inside the closure (this is the asker's problem).
def call(body) {
final Logger logger = LoggerFactory.getLogger(wrapperPipeline)
// Evaluate the closure with DELEGATE_FIRST so bare assignments inside it
// (e.g. "commitId = ...") are collected into the 'config' map.
def config = [:]
body.resolveStrategy = Closure.DELEGATE_FIRST
body.delegate = config
body()
// Assuming we have a multibranch pipeline job or a branch name defined in the env
def branchName = env.BRANCH_NAME
// NOTE: Jenkins passes the string "null" as a Gradle build parameter instead of
// a NULL object when an empty parameter has been passed, hence the "" default.
def rewriteDependencies = ""
def returnValue = null
def forkedRepo = config.runOnForkedRepo
def buildInfoCommitId = config.commitId
def tagToRelease = config.releaseTag
// Project-local helpers - presumably resolve the build-agent label and send
// failure notifications; TODO confirm against the 'se' package sources.
def globalVars = new se.GlobalVars()
def notifyHandler = new se.NotifyHandler()
node(globalVars.getAgent('buildAgent')) {
def PIPELINE_NAME = "wrapperPipeline"
try {
logger.info("The buildInfoCommitId is {}", buildInfoCommitId)
logger.info("Branch name: {}", branchName)
println "buildInfoCommitId: "+buildInfoCommitId
println"Branch name: "+branchName
if (config.componentsToUpdate) {
rewriteDependencies = config.componentsToUpdate
}
// keep the same integration pipeline for the master branch for now
if (branchName == "project/integration") {
logger.info("Invoking mergePipeline")
println "Invoking mergePipeline"
mergePipeline {
}
} else if (branchName == 'master') {
logger.info("Invoking releasePipeline")
println "Invoking releasePipeline"
releasePipeline {
releaseTag = tagToRelease
}
} else {
// Component branch: build the component, then ripple the new version
// through its dependants.
logger.info("Invoking componentPipeline")
println "Invoking componentPipeline"
returnValue = componentPipeline {
componentsToUpdate = rewriteDependencies
commitId = buildInfoCommitId
runOnForkedRepo = forkedRepo
}
logger.info("Component pipeline has returned {}", returnValue)
println "Component pipeline has returned"+returnValue
// We need to provide the new version of the component to the Ripple builds
rewriteDependencies = rewriteDependencies.isEmpty() ? returnValue : rewriteDependencies + "," + returnValue
logger.info("rewriteDependencies: {}", rewriteDependencies)
println "The return value: " + returnValue
ripplePipeline {
commitId = buildInfoCommitId
componentName = returnValue
runOnForkedRepo = forkedRepo
componentsToUpdate = rewriteDependencies
}
}
}
catch (err) {
// Notify on failure, then re-throw so the build is still marked failed.
def build_status = "Exception ${err.message} in build ${env.BUILD_ID}"
logger.error(build_status,err)
notifyHandler.NotifyFail(build_status, PIPELINE_NAME)
throw err
}
}
}
The modified Jenkinsfile:
// Job properties are unchanged from the original Jenkinsfile.
properties([parameters([string(defaultValue: "", description: "List of components", name: 'componentsToUpdate'),
        string(defaultValue: "refs%2Fheads%2Fproject%2Fintegration", description: "BuildInfo CommitID", name: 'commitId'),
        string(defaultValue: "", description: "Tag to release, e.g. 1.1.0-integration", name: 'releaseTag'),
        string(defaultValue: "", description: "Forked buildInfo repo. Be aware right commit ID!!!", name: 'fork')]),
    [$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', artifactDaysToKeepStr: '', artifactNumToKeepStr: '', daysToKeepStr: '7', numToKeepStr: '5']],
    disableConcurrentBuilds()])
// FIX: '@Library', not '#Library'; the '@<version>' suffix selects the
// shared-library branch 'integration/CICD-959-wrapper-pipeline-for-the-jenkinsfile'.
@Library('jenkins-shared-stages@integration/CICD-959-wrapper-pipeline-for-the-jenkinsfile') _
import wrapperPipeline
wrapperPipeline{}
Now, I suspect that the params object (the parameters defined by properties() in the Jenkinsfile) is not populated correctly. For example:
def buildInfoCommitId = config.commitId
.
.
.
println "buildInfoCommitId: "+buildInfoCommitId
prints null.
How do I invoke the wrapperPipeline correctly?
Note: I am new to both Jenkins pipelines and Groovy :)

Because those are Jenkins Parameters, they are not in the config object.
You will access commitId as params.commitId
If you had set values within the closure when calling wrapperPipeline(), those would be in the config object, e.g.:
wrapperPipeline({
param="value"
})
then config.param would result in "value"
However, as a word of advice, I recommend avoiding using a closure when calling libs stored under vars/ in the shared library. See http://groovy-lang.org/closures.html for what closures are. The crux of it is, they are fairly complicated and can introduce some issues if you end up trying to pass in dynamic variables due to when the closure is instantiated. (They have their place but for simple things, I think avoiding is better)
I'd recommend instead, implementing a helper function that will allow you use maps OR closures for calling shared libs.
add a shared library called buildConfig under your src path:
package net.my.jenkins.workflow
import com.cloudbees.groovy.cps.NonCPS
class BuildConfig implements Serializable {
    /**
     * Normalise a shared-library call argument into a configuration Map.
     * Accepts either a Map (returned as-is) or a configuration Closure,
     * which is evaluated with DELEGATE_FIRST so bare assignments inside it
     * land in the returned map.
     *
     * @param body a Map, a Closure, or nothing (defaults to an empty map)
     * @return the resolved configuration Map
     * @throws Exception if body is neither a Map nor a Closure
     */
    static Map resolve(def body = [:]) {
        Map config = [:]
        // FIX: removed the unconditional 'config = body' that ran before the
        // type check - for a Closure argument it coerced the closure into the
        // Map-typed variable, so the delegate was never the plain map.
        if (body in Map) {
            config = body
        } else if (body in Closure) {
            body.resolveStrategy = Closure.DELEGATE_FIRST
            body.delegate = config
            body()
        } else {
            // FIX: report the type of the unsupported argument itself,
            // not of the local 'config' map.
            throw new Exception(sprintf("Unsupported build config type:%s", [body.getClass()]))
        }
        return config
    }
}
And then in your shared lib under vars/ start with
import net.my.jenkins.workflow.BuildConfig
def call(def body = [:]) {
// evaluate the body block, and collect configuration into the object
config = BuildConfig.resolve(body)
This then allows you to use Maps which removes the complexity, so you could for instance (not that you would since you would just use params.commitId) re-assign it.
wrapperPipeline ([
"commitId": params.commitId,
])
Which means again config.commitId now has the value of params.commitId
Let me know if you need more detail.
TL;DR - You should be using params object, because you have parameters defined.
If you did start passing in arguments via the shared lib call, I would use a map over a closure. (requires some minimal implementation)

Related

loop array of items in declarative pipeline using groovy script

I am new to Jenkins pipelines. Can you please produce the output below using a Groovy declarative pipeline, where -D is prepended to every key?
The input should be the string '''name= ram id = 123 role = IT''' and the output needed is -Dname=ram -Did=123 -Drole=IT
pipeline{
agent any
parameters {
text defaultValue: '''name = ram id = 123 role = IT]''', description: 'employee details', name : 'details'
}
environment {
emp_details = "${env.details}"
}
stage('build') {
steps{
script{
echo "dislay details, ${emp_details }"
for(e in emp_details){
print e + ":" + emp_details[e])
}
}
}
}
}
Final output: -Dname=ram -Did=123 -Drole=IT
error: No Such property: [ for class: java.lang.String
if passing input is not right, how can we pass it?
You are getting a String, not an array, so you first need to create an iterable list and then modify each key while iterating.
// Declarative pipeline: turn the 'details' text parameter into "-Dkey=value" pairs.
pipeline {
    agent any
    parameters {
        text defaultValue: '''[name = ram, id = 123, role = IT]''', description: 'employee details', name : 'details'
    }
    environment {
        emp_details = "${env.details}"
    }
    stages {
        stage('Hello') {
            steps {
                script {
                    // FIX: typo "dislay" -> "display" in the echoed message.
                    echo "display details, ${params.details}"
                    def appendedString = ""
                    // Since it's a string, first strip everything except
                    // alphanumerics, '=' and ',' (removes '[', ']' and spaces),
                    // then split on ',' to get one "key=value" token per entry.
                    def lst = params.details.replaceAll("[^0-9a-zA-Z=,]+","").split(',')
                    println lst
                    for (e in lst) {
                        print(e)
                        appendedString += "-D" + e + " ";
                    }
                    println "Final String: " + appendedString;
                }
            }
        }
    }
}
Update
Update the split logic to support the new string. '''name = ram id = 123 role = IT'''
def lst = params.details.replaceAll("\\s=\\s", "=").split("\\s")

Not able to solve Jenkins NotSerializableException error

import hudson.model.*;
import hudson.AbortException;
import jenkins.model.Jenkins;
// FIX: the annotation is '@NonCPS', not '#NonCPS'.
// NOTE(review): as written it annotates a local declaration, which has no
// effect - the non-serializable Node/Computer objects still leak into
// CPS-transformed code, causing the NotSerializableException discussed below.
@NonCPS
def allNodes = jenkins.model.Jenkins.instance.nodes
def triggerJob = false
for (String node: allNodes) {
    if (node.getComputer().isOnline() && node.nodeName == "ABC") {
        println node.nodeName + " " + node.getComputer().countBusy() + " " +
            node.getComputer().isAcceptingTasks()
        triggerJob = true
        break
    }
}
if (triggerJob) {
    println("triggering build as node is available")
    build job: 'jobName', parameters:
        [
        /** list of string Parameters **/
        ]
}
Above job triggers the build, but fails with:
an exception which occurred:
in field hudson.model.Slave.launcher
Caused: java.io.NotSerializableException: hudson.slaves.JNLPLauncher
You may only use classes which implement the Serializable interface in your code - which neither Node nor Computer does.
In order to use them you need to encapsulate the respective code in a #NonCPS method, e.g.:
import hudson.model.*;
import hudson.AbortException;
import jenkins.model.Jenkins;

// Encapsulate all Node/Computer access in a @NonCPS method so the
// non-serializable objects never escape into CPS-transformed code.
// FIX: annotation is '@NonCPS', not '#NonCPS'.
@NonCPS
def shallTrigger() {
    // FIX: start from false so "node not found" does not trigger the build.
    def triggerJob = false
    def allNodes = jenkins.model.Jenkins.instance.nodes
    for (String node: allNodes) {
        if (node.getComputer().isOnline() && node.nodeName == "ABC") {
            println node.nodeName + " " + node.getComputer().countBusy() + " " +
                node.getComputer().isAcceptingTasks()
            triggerJob = true
            break
        }
    }
    // FIX: the original never returned the flag, so the caller always got
    // the for-loop's null result.
    return triggerJob
}

// FIX: call the method - 'if (shallTrigger)' tested the method reference,
// not its result.
if (shallTrigger()) {
    println("triggering build as node is available")
    build job: 'jobName', parameters:
        [
        /** list of string Parameters **/
        ]
}

use groovy to add an additional parameter to a jenkins job

We've got a set of groovy scripts that our users invoke in their jenkinsfile that sets some common job properties. However, we haven't been able to figure out how to preserve their existing parameters when we do this update.
snippet of our groovy code:
// Build a parameters() job property from a list of parameter definitions.
def newParamsList = []
def newbool = booleanParam(defaultValue: false, description: "deploy", name: "deploy_flag")
newParamsList.add(newbool)
def newParams = parameters(newParamsList)
// NOTE(review): 'jobProperties' and 'addSchedule' are defined elsewhere in the
// calling script - not visible in this snippet.
// Calling properties() REPLACES the job's property set wholesale, which is why
// parameters the user declared earlier in their Jenkinsfile get wiped out.
properties([ //job property declaration
jobProperties,
disableConcurrentBuilds(),
newParams,
addSchedule,
])
However, this overwrites the parameter definitions, so if the user had specified a different parameter definition in their jenkins file before invoking our groovy, it's been wiped out.
I can get access to the existing parameters using currentBuild.rawBuild.getAction(ParametersAction), but if I understand correctly, I need the ParameterDefinition not the ParameterValue in order to set the property. I tried currentBuild.rawBuild.getAction(ParametersDefinitionProperty.class) thinking I could use that like ParametersAction, but it returns null.
Is it possible to get the parameter definitions inside the groovy being called from a Jenkinsfile? Or is there a different way that would let us add an additional parameter to the job without wiping out the existing ones currently defined in the jenkinsfile?
So the way we do this, is treat it all like a simple list, then join them together. So jenkinsfile's first get a list from the shared library, before adding their own to the list and then they set the params (not the shared library)
Repos jenkinsfiles do this:
#!groovy
// FIX: '@Library', not '#Library' (the '#!groovy' shebang above is a
// legitimate comment and stays as-is).
@Library('shared') _
// Call shared library for common params
def paramList = jobParams.listParams ([
    "var1": "value",
    "var2": "value2"
])
// Define repo-specific params
def addtionalParams = [
    booleanParam(defaultValue: false, name: 'SOMETHING', description: 'description?'),
    booleanParam(defaultValue: false, name: 'SOMETHING_ELSE', description: 'description?'),
]
// Set Jenkins job properties, combining both lists
properties([
    buildDiscarder(logRotator(numToKeepStr: '20')),
    parameters(paramList + addtionalParams)
])
// Do repo stuff
// Do repo stuff
Our shared library looks like this:
// Returns the list of common job parameter definitions; the calling
// Jenkinsfile appends its own repo-specific parameters and passes the
// combined list to properties().
// NOTE(review): relies on BuildConfig being imported at the top of this
// vars/ script (import not shown in this snippet).
List listParams(def body = [:]) {
//return list of parameters
config = BuildConfig.resolve(body)
// Always common params
def paramsList = [
choice(name: 'ENV', choices: ['dev', 'tst'].join('\n'), description: 'Environment'),
string(name: 'ENV_NO', defaultValue: "1", description: 'Environment number'),
]
// Sometimes common params, switch based on jenkinsfile input
def addtionalParams = []
switch (config.var1) {
// fall-through: 'something' and 'something2' both get the AWS region choice
case 'something':
case 'something2':
addtionalParams = [
choice(name: 'AWS_REGION', choices: ['us-west-2'].join('\n'), description: 'AWS Region to build/deploy'),
]
break
case 'something3':
addtionalParams = [
string(name: 'DEBUG', defaultValue: '*', description: 'Namespaces for debug logging'),
]
break
}
return paramsList + addtionalParams
}
We did the following groovy code to retrieve the parameters definitions and add new parameters to existing ones (we don't have any knowledge about what the user will put as parameters). If you have something more simple, I take it:
boolean isSupported = true
// nParams is the List of new parameters to add
// NOTE(review): 'initializeParamsMap', 'tempParams' and 'logError' are defined
// elsewhere in the author's library; in particular 'tempParams' must already
// be initialised (presumably with the new parameters) before this snippet runs.
Map initParamsMap = this.initializeParamsMap(nParams)
// Walk the job's property objects looking for the parameter definitions
// (currentBuild.rawBuild requires script-security approval).
currentBuild.rawBuild.getParent().getProperties().each { k, v ->
if (v instanceof hudson.model.ParametersDefinitionProperty) {
// get each parameter definition
v.parameterDefinitions.each { ParameterDefinition paramDef ->
String param_symbol_name = null
// get the symbol name (e.g. 'string', 'choice') from the nested DescriptorImpl class
paramDef.class.getDeclaredClasses().each {
if(it.name.contains('DescriptorImpl')){
param_symbol_name = it.getAnnotation(Symbol).value().first()
}
}
// ... processing... //
// Re-create only parameters that are not replaced by the new set.
if( !initParamsMap.containsKey(paramDef.name) ) {
//Valid parameter types are booleanParam, choice, file, text, password, run, or string.
if (param_symbol_name == 'choice') {
// choice params need their choices preserved and may lack a default
String defaultParamVal = paramDef.defaultParameterValue == null ? null : paramDef.defaultParameterValue.value
tempParams.add(
"$param_symbol_name"(name: paramDef.name,
defaultValue: defaultParamVal,
description: paramDef.description,
choices: paramDef.choices)
)
} else if (param_symbol_name == 'run') {
// RunParameterDefinition cannot be round-tripped this way; bail out
logError {"buildParametersArray does not support yet already existing RunParameterDefinition " +
"in current job parameters list, so the job parameters will not be modified"}
isSupported = false
} else {
tempParams.add(
"$param_symbol_name"(name: paramDef.name,
defaultValue: paramDef.defaultParameterValue.value,
description: paramDef.description)
)
}
}
}
}
}
if( isSupported) {
properties([parameters(tempParams)])
}
I think you can also do something like this:
// Get existing ParameterDefinitions from the job's current properties
// (currentBuild.rawBuild requires script-security approval - see note below)
existing = currentBuild.rawBuild.parent.properties
.findAll { it.value instanceof hudson.model.ParametersDefinitionProperty }
.collectMany { it.value.parameterDefinitions }
// Create new params and merge them with existing ones
jobParams = [
booleanParam(name: 'boolean_param', defaultValue: false)
/* other params */
] + existing
// Create properties, replacing the parameter set with the combined list
properties([
parameters(jobParams)
])
Note: But you should either run it in a non-sandboxed environment or use with #NonCPS
There is an example how to add additional string parameter NEW_PARAM into job with name test:
// Script-console example: append string parameter NEW_PARAM to job "test"
// while preserving its existing parameter definitions.
job = Jenkins.instance.getJob("test")
ParametersDefinitionProperty params = job.getProperty(ParametersDefinitionProperty.class);
List<ParameterDefinition> newParams = new ArrayList<>();
newParams.addAll(params.getParameterDefinitions());
newParams.add(new StringParameterDefinition("NEW_PARAM", "default_value"));
// A job holds a single ParametersDefinitionProperty, so replace it wholesale.
job.removeProperty(params);
job.addProperty(new ParametersDefinitionProperty(newParams));

Jenkins declarative pipeline: find out triggering job

We have a Jenkins job that uses a declarative pipeline.
This job can be triggered by different other builds.
In the declarative pipeline how can I find out which build has triggered the pipeline?
Code sample below
// Declarative pipeline that prints which upstream build (if any) triggered it.
pipeline {
agent any
stages {
stage('find upstream job') {
steps {
script {
// currentBuild.rawBuild requires script-security approval
def causes = currentBuild.rawBuild.getCauses()
for(cause in causes) {
// UpstreamCause means another job triggered this build
if (cause.class.toString().contains("UpstreamCause")) {
println "This job was caused by job " + cause.upstreamProject
} else {
println "Root cause : " + cause.toString()
}
}
}
}
}
}
}
You can check the job's REST API to get extra information like below
{
"_class" : "org.jenkinsci.plugins.workflow.job.WorkflowRun",
"actions" : [
{
"_class" : "hudson.model.ParametersAction",
"parameters" : [
]
},
{
"_class" : "hudson.model.CauseAction",
"causes" : [
{
"_class" : "hudson.model.Cause$UpstreamCause",
"shortDescription" : "Started by upstream project \"larrycai-sto-46908390\" build number 7",
"upstreamBuild" : 7,
"upstreamProject" : "larrycai-sto-46908390",
"upstreamUrl" : "job/larrycai-sto-46908390/"
}
]
},
Reference:
https://jenkins.io/doc/pipeline/examples/#get-build-cause
Get Jenkins upstream jobs
I realize that this is a couple years old, but the previous response required some additional security setup in my Jenkins instance. After a bit of research, I found that there was a new feature request completed in 11/2018 that addresses this need and exposes build causes in currentBuild. Here is a little lib I wrote that returns the cause with the string "JOB/" prepended if the build was triggered by another build:
// Returns the build cause as a String, prefixed with "JOB/" when this build
// was triggered by another job. An optional configuration closure may set
// DEBUG = true for extra logging.
def call(body) {
// default the config closure so 'myParams.DEBUG' is always resolvable
if (body == null) {body = {DEBUG = false}}
def myParams= [:]
body.resolveStrategy = Closure.DELEGATE_FIRST
body.delegate = myParams
body()
// currentBuild.getBuildCauses() is whitelisted (no rawBuild approval needed)
def causes = currentBuild.getBuildCauses()
if (myParams.DEBUG) {
echo "causes count: " + causes.size().toString()
echo "causes text : " + causes.toString()
}
// NOTE(review): returns on the FIRST cause only; later causes are ignored
for(cause in causes) {
// echo cause
if (cause._class.toString().contains("UpstreamCause")) {
return "JOB/" + cause.upstreamProject
} else {
return cause.toString()
}
}
}
To use this, I place it in a library in a file named "buildCause.groovy". Then I reference the library at the top of my Jenkinsfile:
// FIX: the library version separator is '@' (lib@master, not lib#master),
// and the credentials placeholder was missing its closing '>'.
library identifier: 'lib@master', retriever: modernSCM(
    [$class: 'GitSCMSource', remote: '<LIBRARY_REPO_URL>',
     credentialsId: '<LIBRARY_REPO_CRED_ID>', includes: '*'])
Then I can call it as needed within my pipeline:
// Usage: fetch the cause string and branch on whether another job triggered us.
def cause=buildCause()
echo cause
if (!cause.contains('JOB/')) {
echo "started by user"
} else {
echo "triggered by job"
}
Larry's answer didn't quite work for me.
But after I modified it slightly with the help of these docs, this version works:
// Uses the whitelisted currentBuild.getBuildCauses() (no rawBuild approval needed).
def causes = currentBuild.getBuildCauses()
for(cause in causes) {
// '_class' is the JSON field name of the cause type, hence the underscore
if (cause._class.toString().contains("UpstreamCause")) {
println "This job was caused by job " + cause.upstreamProject
} else {
println "Root cause : " + cause.toString()
}
}
P.S. Actually, Daniel's answer mentions this method, but there's too much clutter, I only noticed it after I wrote my solution.

How to get Jenkins list of jobs in folder?

I'm trying to get all the jobs in a folder
Jobs = input( id: 'userInput', message: 'Select the job', parameters: [ [$class: 'ChoiceParameterDefinition', choices: "????", description: 'Properties', name: 'jobs'] ])
what should i put on choices to get all the jobs in folder X ?
Unfortunately, the code prepared by ritusmart does not work, at least on our version of Jenkins (we are currently using 2.289.2).
And you can get jobs from specified folder in a simpler way
// Collect all jobs whose full name contains the given folder path.
// NOTE(review): contains() also matches similarly named sibling folders.
def folderName = "YOUR/FOLDER/NAME/"
def allJobs = hudson.model.Hudson.getInstance().getAllItems(Job.class).findAll { it.getFullName().contains(folderName) }
This is one way to do it, using script console:
import com.cloudbees.hudson.plugins.folder.Folder

// FIX: 'folderName' must go into the script binding (no 'def') so the helper
// methods declared below can see it - a 'def' local in the script body is not
// visible from methods in the same script and would throw
// MissingPropertyException inside processFolderByName().
folderName = "test"
def allJobs = hudson.model.Hudson.getInstance().getItems()
for (int i = 0; i < allJobs.size(); i++) {
    def job = allJobs[i]
    if (job instanceof hudson.model.Project && job.getLastBuild() != null) {
        processJob(job)
    } else if (job instanceof Folder) {
        processFolderByName(job)
    }
}

// Recurse into the folder only when its full name matches the target.
void processFolderByName(Item folder) {
    if (folder.getFullName().contains(folderName))
        processFolder(folder)
}

// Walk a folder, descending into nested folders and processing leaf jobs.
void processFolder(Item folder) {
    //println "Processing Folder -"+folder.getFullName()
    folder.getItems().each {
        if (it instanceof com.cloudbees.hudson.plugins.folder.AbstractFolder) {
            processFolder(it)
        } else {
            processJob(it)
        }
    }
}

void processJob(Item job) {
    println job.getFullName()
    // you can do operations like enable or disable
    // job.disable()
}
That did not work for me, so I created a new implementation:
// Collect the full names of all jobs under the given folder path.
def folderName = "folder_name_full_path"
def jobsList = []
Jenkins.instance.getAllItems(Job.class).each{
if(it.fullName.contains(folderName)) {
jobsList << it.fullName
}
}

Resources