import hudson.model.*;
import hudson.AbortException;
import jenkins.model.Jenkins;
@NonCPS
def allNodes = jenkins.model.Jenkins.instance.nodes
def triggerJob = false
for (def node : allNodes) {
    if (node.getComputer().isOnline() && node.nodeName == "ABC") {
        println node.nodeName + " " + node.getComputer().countBusy() + " " +
                node.getComputer().isAcceptingTasks()
        triggerJob = true
        break
    }
}
if (triggerJob) {
    println("triggering build as node is available")
    build job: 'jobName', parameters:
        [
            /** list of string parameters **/
        ]
}
The job above triggers the build, but fails with:
an exception which occurred:
in field hudson.model.Slave.launcher
Caused: java.io.NotSerializableException: hudson.slaves.JNLPLauncher
You may only use classes which implement the Serializable interface in your code - which neither Node nor Computer does.
In order to use them you need to encapsulate the respective code in a @NonCPS method, e.g.:
import hudson.model.*;
import hudson.AbortException;
import jenkins.model.Jenkins;
@NonCPS
def shallTrigger() {
    def triggerJob = false
    def allNodes = jenkins.model.Jenkins.instance.nodes
    for (def node : allNodes) {
        if (node.getComputer().isOnline() && node.nodeName == "ABC") {
            println node.nodeName + " " + node.getComputer().countBusy() + " " +
                    node.getComputer().isAcceptingTasks()
            triggerJob = true
            break
        }
    }
    return triggerJob
}

if (shallTrigger()) {
    println("triggering build as node is available")
    build job: 'jobName', parameters:
        [
            /** list of string parameters **/
        ]
}
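The same check can be written more compactly by looking the node up by name inside the @NonCPS method and returning only a boolean, so that no Node or Computer object ever reaches CPS-transformed code. A minimal sketch (the node name "ABC" and job name 'jobName' are the values from the question; isNodeAvailable is a hypothetical helper name):

import jenkins.model.Jenkins

@NonCPS
boolean isNodeAvailable(String nodeName) {
    // look the node up directly instead of looping over all nodes
    def node = Jenkins.instance.getNode(nodeName)
    // return a plain boolean so nothing non-serializable leaks out of the method
    return node?.toComputer()?.isOnline() ?: false
}

if (isNodeAvailable('ABC')) {
    println("triggering build as node is available")
    build job: 'jobName', parameters: [ /* list of string parameters */ ]
}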
I am using a declarative pipeline, and when I build it I get a java.io.NotSerializableException: org.jenkinsci.plugins.workflow.job.WorkflowJob error.
These are the two methods I am using:
@NonCPS
def getJob(name) {
    def hi = Hudson.instance
    return hi.getItemByFullName(name, Job)
}

@NonCPS
def getParam(WorkflowJob job, String paramName) {
    def prop = job.getProperty(ParametersDefinitionProperty.class)
    for (param in prop.getParameterDefinitions()) {
        if (param.name == paramName) {
            return param
        }
    }
    return null
}
And below is the part of my code where I am getting this error.
stages {
    stage("A") {
        steps {
            script {
                def job = getJob(JOB_NAME)
                def param = getParam(job, "AWS Ser")
                def service_name = ("${SERVICE_NAME}".replace('AWS Ser:', '')).toString().tokenize(',[]')
                if (service_name != 'All') {
                    def regions = "${REGIONS}".toString()
                    regions.split('\n').each() {
                        service_name.each() {
                            sh '''
                            echo "Welcome"
                            '''
                        }
                    }
                }
            }
        }
    }
}
As you can see, when I include the sh step I get this error, and when I remove it there is no error.
I tried to troubleshoot, and something seems to be wrong with the two methods mentioned above.
Don't return the WorkflowJob object to the pipeline steps. Refactor your functions as shown below.
@NonCPS
def getJob(name) {
    def hi = Hudson.instance
    return hi.getItemByFullName(name, Job)
}

@NonCPS
def getParam(String jobName, String paramName) {
    def job = getJob(jobName)
    def prop = job.getProperty(ParametersDefinitionProperty.class)
    for (param in prop.getParameterDefinitions()) {
        if (param.name == paramName) {
            return param
        }
    }
    return null
}
Then, in the pipeline stage, call getParam as:
def param = getParam(JOB_NAME, "AWS Ser")
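Note that the returned ParameterDefinition can itself drag non-serializable state into the pipeline. If all you need is a scalar, it is safer still to return plain data from the @NonCPS method; a minimal sketch, assuming you only want the parameter's default value (getParamDefault is a hypothetical helper):

@NonCPS
String getParamDefault(String jobName, String paramName) {
    def job = Hudson.instance.getItemByFullName(jobName, Job)
    def prop = job.getProperty(ParametersDefinitionProperty.class)
    def param = prop?.getParameterDefinitions()?.find { it.name == paramName }
    // return only a String so nothing non-serializable reaches the pipeline
    return param?.getDefaultParameterValue()?.getValue()?.toString()
}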
I am running a Groovy script which needs access to two modules, jenkins.model and hudson.model.
I tried importing these two via import statements in my Jenkinsfile, but the issue is still there.
Error:
groovy.lang.MissingPropertyException: No such property: build for class: groovy.lang.Binding
at groovy.lang.Binding.getVariable(Binding.java:63)
Is there any solution by which I can do this?
The script works fine when I use a freestyle job with an Execute system Groovy script build step.
import jenkins.model.*
import hudson.model.*
pipeline {
    agent any
    stages {
        stage('py version') {
            steps {
                bat 'python --version'
            }
        }
        stage('get jobs') {
            steps {
                script {
                    get_job()
                }
            }
        }
    }
}

def get_job() {
    def cutOfDate = System.currentTimeMillis() - 1000L * 60 * 60 * 24 * 2
    def filename = build.workspace.toString() + "/jobs_lists.txt"
    targetFile = new File(filename).write("")
    targetFile = new File(filename).append("<table><tr><th>Job Name</th><th>Last Build on</th><th>Keep</th><th>username</th></tr>")
    println "Cut-off date: " + cutOfDate
}
If you just want to access the workspace directory, you can use the $WORKSPACE environment variable.
Example:
def get_job() {
    def cutOfDate = System.currentTimeMillis() - 1000L * 60 * 60 * 24 * 2
    println("$WORKSPACE")
    def filename = "$WORKSPACE" + "/jobs_lists.txt"
    def targetFile = new File(filename)
    targetFile.write("")
    targetFile.append("<table><tr><th>Job Name</th><th>Last Build on</th><th>Keep</th><th>username</th></tr>")
    println "Cut-off date: " + cutOfDate
}
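One caveat: new File(...) executes in the JVM that runs the Groovy (for a pipeline that is typically the controller), not necessarily on the agent that owns the workspace. If the file must land in the workspace, the writeFile step is the safer option; a minimal sketch of the same function using it (it must run inside a node context):

def get_job() {
    def cutOffDate = System.currentTimeMillis() - 1000L * 60 * 60 * 24 * 2
    // writeFile is a pipeline step and always writes relative to the current workspace
    writeFile file: 'jobs_lists.txt',
              text: '<table><tr><th>Job Name</th><th>Last Build on</th><th>Keep</th><th>username</th></tr>'
    println "Cut-off date: " + cutOffDate
}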
If you want to access the current build context, use currentBuild, which returns a RunWrapper. Example below.
def changeset = currentBuild.changeSets
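Each element of changeSets is a ChangeLogSet whose entries carry the commit message and author. Those entries are not serializable, so it is safest to walk them inside a @NonCPS method; a sketch (getCommitMessages is a hypothetical helper):

@NonCPS
def getCommitMessages() {
    def messages = []
    currentBuild.changeSets.each { cs ->
        cs.items.each { entry ->
            // entry is a ChangeLogSet.Entry; msg and author are its standard accessors
            messages << "${entry.author}: ${entry.msg}".toString()
        }
    }
    return messages
}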
Update: Accessing jobs from the pipeline
pipeline {
    agent any
    stages {
        stage('Test') {
            steps {
                script {
                    getJobs()
                }
            }
        }
    }
}
def getJobs() {
    Jenkins.instance.getAllItems(Job.class).each { jobitem ->
        // getAllItems already returns the Job objects, so no extra getItem() lookup is needed
        def jobName = jobitem.name
        println(jobName)
    }
}
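Note that Job objects are not serializable, so when this runs from a pipeline (rather than the script console) it can hit the same NotSerializableException discussed in the earlier questions. A safer sketch keeps the iteration inside a @NonCPS method and returns only strings:

@NonCPS
def getJobNames() {
    // collect plain Strings so nothing non-serializable escapes the method
    return Jenkins.instance.getAllItems(Job.class).collect { it.fullName }
}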
I have several components (code projects with their own Bitbucket repositories), and each of them has a Jenkinsfile as follows:
properties([parameters([string(defaultValue: "", description: "List of components", name: 'componentsToUpdate'),
string(defaultValue: "refs%2Fheads%2Fproject%2Fintegration", description: "BuildInfo CommitID", name: 'commitId'),
string(defaultValue: "", description: "Tag to release, e.g. 1.1.0-integration", name: 'releaseTag'),
string(defaultValue: "", description: "Forked buildInfo repo. Be aware right commit ID!!!", name: 'fork')]),
[$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', artifactDaysToKeepStr: '', artifactNumToKeepStr: '', daysToKeepStr: '7', numToKeepStr: '5']],
disableConcurrentBuilds()])
@Library('jenkins-shared-stages')
import mergePipeline
import releasePipeline
import ripplePipeline
import componentPipeline
def branchName = env.BRANCH_NAME
def rewriteDependencies = ""
def returnValue = null
def forkedRepo = params.fork
def buildInfoCommitId = params.commitId
def tagToRelease = params.releaseTag
println "buildInfoCommitId: " + buildInfoCommitId
if (params.componentsToUpdate) {
    rewriteDependencies = params.componentsToUpdate
}
if (branchName == "project/integration") {
    mergePipeline {
    }
} else if (branchName == 'master') {
    releasePipeline {
        releaseTag = tagToRelease
    }
} else {
    returnValue = componentPipeline {
        componentsToUpdate = rewriteDependencies
        commitId = buildInfoCommitId
        runOnForkedRepo = forkedRepo
    }
    rewriteDependencies = rewriteDependencies.isEmpty() ? returnValue : rewriteDependencies + "," + returnValue
    println "WHAT is rewriteDependencies? " + rewriteDependencies
    println "The return value: " + returnValue
    ripplePipeline {
        commitId = buildInfoCommitId
        componentName = returnValue
        runOnForkedRepo = forkedRepo
        componentsToUpdate = rewriteDependencies
    }
}
I need to use a 'wrapper' pipeline, say wrapperPipeline.groovy:
import mergePipeline
import releasePipeline
import ripplePipeline
import componentPipeline
import org.slf4j.Logger
import org.slf4j.LoggerFactory
def call(body) {
    final Logger logger = LoggerFactory.getLogger(wrapperPipeline)
    def config = [:]
    body.resolveStrategy = Closure.DELEGATE_FIRST
    body.delegate = config
    body()

    // Assuming we have a multibranch pipeline job or a branch name defined in the env
    def branchName = env.BRANCH_NAME
    // There is a bug in Jenkins: it passes the string "null" as a Gradle build
    // parameter instead of a NULL object when an empty parameter has been passed!
    def rewriteDependencies = ""
    def returnValue = null
    def forkedRepo = config.runOnForkedRepo
    def buildInfoCommitId = config.commitId
    def tagToRelease = config.releaseTag
    def globalVars = new se.GlobalVars()
    def notifyHandler = new se.NotifyHandler()

    node(globalVars.getAgent('buildAgent')) {
        def PIPELINE_NAME = "wrapperPipeline"
        try {
            logger.info("The buildInfoCommitId is {}", buildInfoCommitId)
            logger.info("Branch name: {}", branchName)
            println "buildInfoCommitId: " + buildInfoCommitId
            println "Branch name: " + branchName
            if (config.componentsToUpdate) {
                rewriteDependencies = config.componentsToUpdate
            }
            // keep the same integration pipeline for the master branch for now
            if (branchName == "project/integration") {
                logger.info("Invoking mergePipeline")
                println "Invoking mergePipeline"
                mergePipeline {
                }
            } else if (branchName == 'master') {
                logger.info("Invoking releasePipeline")
                println "Invoking releasePipeline"
                releasePipeline {
                    releaseTag = tagToRelease
                }
            } else {
                logger.info("Invoking componentPipeline")
                println "Invoking componentPipeline"
                returnValue = componentPipeline {
                    componentsToUpdate = rewriteDependencies
                    commitId = buildInfoCommitId
                    runOnForkedRepo = forkedRepo
                }
                logger.info("Component pipeline has returned {}", returnValue)
                println "Component pipeline has returned " + returnValue
                // We need to provide the new version of the component to the ripple builds
                rewriteDependencies = rewriteDependencies.isEmpty() ? returnValue : rewriteDependencies + "," + returnValue
                logger.info("rewriteDependencies: {}", rewriteDependencies)
                println "The return value: " + returnValue
                ripplePipeline {
                    commitId = buildInfoCommitId
                    componentName = returnValue
                    runOnForkedRepo = forkedRepo
                    componentsToUpdate = rewriteDependencies
                }
            }
        }
        catch (err) {
            def build_status = "Exception ${err.message} in build ${env.BUILD_ID}"
            logger.error(build_status, err)
            notifyHandler.NotifyFail(build_status, PIPELINE_NAME)
            throw err
        }
    }
}
The modified Jenkinsfile:
properties([parameters([string(defaultValue: "", description: "List of components", name: 'componentsToUpdate'),
string(defaultValue: "refs%2Fheads%2Fproject%2Fintegration", description: "BuildInfo CommitID", name: 'commitId'),
string(defaultValue: "", description: "Tag to release, e.g. 1.1.0-integration", name: 'releaseTag'),
string(defaultValue: "", description: "Forked buildInfo repo. Be aware right commit ID!!!", name: 'fork')]),
[$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', artifactDaysToKeepStr: '', artifactNumToKeepStr: '', daysToKeepStr: '7', numToKeepStr: '5']],
disableConcurrentBuilds()])
@Library('jenkins-shared-stages@integration/CICD-959-wrapper-pipeline-for-the-jenkinsfile') _
import wrapperPipeline
wrapperPipeline{}
Now, I suspect that the params object (the properties from the Jenkinsfile) is not populated correctly. For example,
def buildInfoCommitId = config.commitId
.
.
.
println "buildInfoCommitId: "+buildInfoCommitId
prints null.
How do I invoke the wrapperPipeline correctly?
Note: I am new to both Jenkins pipelines and Groovy :)
Because those are Jenkins Parameters, they are not in the config object.
You will access commitId as params.commitId
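So inside vars/wrapperPipeline.groovy the parameters can be read straight from the params global. A minimal sketch, assuming the job defines the commitId and releaseTag parameters as in the Jenkinsfile above:

// vars/wrapperPipeline.groovy
def call(body) {
    def config = [:]
    body.resolveStrategy = Closure.DELEGATE_FIRST
    body.delegate = config
    body()
    // job parameters live in the params global, not in config
    def buildInfoCommitId = params.commitId
    def tagToRelease = params.releaseTag
    println "buildInfoCommitId: " + buildInfoCommitId
}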
If you had something within the closure when you call wrapperPipeline(), then those values would be in the config object, e.g.:
wrapperPipeline({
    param = "value"
})
then config.param would result in "value".
However, as a word of advice, I recommend avoiding closures when calling libs stored under vars/ in the shared library. See http://groovy-lang.org/closures.html for what closures are. The crux of it is that they are fairly complicated and can introduce issues if you try to pass in dynamic variables, because of when the closure is instantiated. (They have their place, but for simple things I think avoiding them is better.)
I'd recommend instead implementing a helper that allows you to use maps OR closures when calling shared libs.
Add a class called BuildConfig under your shared library's src path:
package net.my.jenkins.workflow

import com.cloudbees.groovy.cps.NonCPS

class BuildConfig implements Serializable {
    static Map resolve(def body = [:]) {
        Map config = [:]
        if (body in Map) {
            config = body
        } else if (body in Closure) {
            body.resolveStrategy = Closure.DELEGATE_FIRST
            body.delegate = config
            body()
        } else {
            throw new Exception(sprintf("Unsupported build config type: %s", [body.getClass()]))
        }
        return config
    }
}
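For illustration, both call styles then resolve to the same map; a quick sketch with a made-up value:

// map style
def a = BuildConfig.resolve([commitId: '12ab34'])
// closure style: DELEGATE_FIRST routes the assignment into the config map
def b = BuildConfig.resolve { commitId = '12ab34' }
assert a.commitId == b.commitId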
And then, in your shared lib under vars/, start with:
import net.my.jenkins.workflow.BuildConfig

def call(def body = [:]) {
    // evaluate the body block, and collect configuration into the object
    config = BuildConfig.resolve(body)
This then allows you to use maps, which removes the complexity; so you could, for instance (not that you would, since you would just use params.commitId), re-assign it:
wrapperPipeline([
    "commitId": params.commitId,
])
which means, again, that config.commitId now has the value of params.commitId.
Let me know if you need more detail.
TL;DR - You should be using the params object, because you have parameters defined.
If you do start passing in arguments via the shared lib call, I would use a map over a closure (it requires some minimal implementation).
I have a class that I use in my Jenkinsfile; here is a simplified version of it:
class TestBuild {
    def build(jenkins) {
        jenkins.script {
            jenkins.sh(returnStdout: true, script: "echo build")
        }
    }
}
And I supply this as the jenkins parameter when using it in the Jenkinsfile. What would be the best way to mock the jenkins object here so that it has script and sh?
Thanks for your help.
I had similar problems the other week, and I came up with this:
import org.jenkinsci.plugins.workflow.cps.CpsScript

def mockCpsScript() {
    return [
        'sh': { arg ->
            def script
            def returnStdout
            // depending on how sh is called, arg is either a map or a vector of string arguments
            if (arg.length == 1 && arg[0] instanceof Map) {
                script = arg[0]['script']
                returnStdout = arg[0]['returnStdout']
            } else {
                script = arg[0]
            }
            println "Calling sh with script: ${script}"
        },
        'script': { arg ->
            arg[0]()
        },
    ] as CpsScript
}
and used it together with your script (extended with a non-named sh call):
class TestBuild {
    def build(jenkins) {
        jenkins.script {
            jenkins.sh(returnStdout: true, script: "echo build")
            jenkins.sh("echo no named arguments")
        }
    }
}

def obj = new TestBuild()
obj.build(mockCpsScript())
it outputs:
[Pipeline] echo
Calling sh with script: echo build
[Pipeline] echo
Calling sh with script: echo no named arguments
Now, this in itself isn't very useful, but it is easy to add logic that defines the behaviour of the mock methods. For example, this version controls the contents returned by readFile depending on which directory and file is being read:
import org.jenkinsci.plugins.workflow.cps.CpsScript

def mockCpsScript(Map<String, String> readFileMap) {
    def currentDir = null
    return [
        'dir': { arg ->
            def dir = arg[0]
            def subClosure = arg[1]
            if (currentDir != null) {
                throw new IllegalStateException("Dir '${currentDir}' is already open, trying to open '${dir}'")
            }
            currentDir = dir
            try {
                subClosure()
            } finally {
                currentDir = null
            }
        },
        'echo': { arg ->
            println(arg[0])
        },
        'readFile': { arg ->
            def file = arg[0]
            if (currentDir != null) {
                file = currentDir + '/' + file
            }
            def contents = readFileMap[file]
            if (contents == null) {
                throw new IllegalStateException("There is no mapped file '${file}'!")
            }
            return contents
        },
        'script': { arg ->
            arg[0]()
        },
    ] as CpsScript
}
class TestBuild {
    def build(jenkins) {
        jenkins.script {
            jenkins.dir('a') {
                jenkins.echo(jenkins.readFile('some.file'))
            }
            jenkins.echo(jenkins.readFile('another.file'))
        }
    }
}

def obj = new TestBuild()
obj.build(mockCpsScript(['a/some.file': 'Contents of first file', 'another.file': 'Some other contents']))
This outputs:
[Pipeline] echo
Contents of first file
[Pipeline] echo
Some other contents
If you need to use currentBuild or similar properties, then you need to assign those after the closure coercion:
import org.jenkinsci.plugins.workflow.cps.CpsScript

def mockCpsScript() {
    def jenkins = [
        // same as above
    ] as CpsScript
    jenkins.currentBuild = [
        // Add attributes you need here. E.g. result:
        result: null,
    ]
    return jenkins
}
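The same post-coercion assignment should work for other pipeline globals your code under test touches, for example env; a sketch, assuming the code only reads simple keys:

def jenkins = mockCpsScript()
jenkins.env = [
    BRANCH_NAME: 'master',
    BUILD_ID   : '42',
]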
I'm trying to get all the jobs in a folder:
Jobs = input(id: 'userInput', message: 'Select the job', parameters: [[$class: 'ChoiceParameterDefinition', choices: "????", description: 'Properties', name: 'jobs']])
What should I put in choices to get all the jobs in folder X?
Unfortunately, the code prepared by ritusmart doesn't work, at least on our version of Jenkins (we are currently using 2.289.2).
You can get the jobs from a specified folder in a simpler way:
def folderName = "YOUR/FOLDER/NAME/"
def allJobs = hudson.model.Hudson.getInstance().getAllItems(Job.class).findAll { it.getFullName().contains(folderName) }
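To answer the original question about choices: wrap the lookup in a @NonCPS helper that returns only job names, and join them with newlines, which is the format ChoiceParameterDefinition expects. A sketch (jobNamesIn is a hypothetical helper and 'X/' a placeholder folder path):

@NonCPS
def jobNamesIn(String folderName) {
    // return plain Strings so nothing non-serializable leaks into the pipeline
    return Jenkins.instance.getAllItems(Job.class)
            .findAll { it.fullName.startsWith(folderName) }
            .collect { it.fullName }
}

def jobs = input(id: 'userInput', message: 'Select the job',
        parameters: [[$class: 'ChoiceParameterDefinition',
                      choices: jobNamesIn('X/').join('\n'),
                      description: 'Properties', name: 'jobs']])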
This is one way to do it, using the script console:
import com.cloudbees.hudson.plugins.folder.Folder

// no 'def' here, so that folderName lands in the script binding and is visible inside the methods below
folderName = "test"

def allJobs = hudson.model.Hudson.getInstance().getItems()
for (int i = 0; i < allJobs.size(); i++) {
    def job = allJobs[i]
    if (job instanceof hudson.model.Project && job.getLastBuild() != null) {
        processJob(job)
    } else if (job instanceof Folder) {
        processFolderByName(job)
    }
}

void processFolderByName(Item folder) {
    if (folder.getFullName().contains(folderName))
        processFolder(folder)
}

void processFolder(Item folder) {
    //println "Processing Folder - " + folder.getFullName()
    folder.getItems().each {
        if (it instanceof com.cloudbees.hudson.plugins.folder.AbstractFolder) {
            processFolder(it)
        } else {
            processJob(it)
        }
    }
}

void processJob(Item job) {
    println job.getFullName()
    // you can do operations here, like enable or disable
    // job.disable()
}
That didn't work for me, so I created a new implementation:
def folderName = "folder_name_full_path"
def jobsList = []
Jenkins.instance.getAllItems(Job.class).each {
    if (it.fullName.contains(folderName)) {
        jobsList << it.fullName
    }
}
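The collected jobsList can then be fed straight into the input step from the original question, for example:

def picked = input(id: 'userInput', message: 'Select the job',
        parameters: [[$class: 'ChoiceParameterDefinition',
                      choices: jobsList.join('\n'),
                      description: 'Properties', name: 'jobs']])
println "Selected job: " + picked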