I'd like to pass environment variables to the publish stage based on the branch that the multibranch pipeline job is processing.
While the example below works, I don't like that I end up with additional stages, one per branch. Because I'm using the withCredentials plugin, I need the MY_CREDENTIALS variable to be set before that block. Is there a more elegant way to solve this?
pipeline {
agent {
dockerfile true
}
stages {
stage("Config vars for staging") {
when {
branch 'staging'
}
environment {
MY_CREDENTIALS = 'creds-staging'
MY_ENVIRONMENT = 'staging'
}
steps {
sh "echo MY_ENVIRONMENT: $MY_ENVIRONMENT"
}
}
stage("Config vars for production") {
when {
branch 'master'
}
environment {
MY_CREDENTIALS = 'creds-prod'
MY_ENVIRONMENT = 'production'
}
steps {
sh "echo MY_ENVIRONMENT: $MY_ENVIRONMENT"
}
}
stage("Publish") {
steps {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.MY_CREDENTIALS, accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh 'make takecareofit'
}
}
}
}
}
Use a switch statement. In the example below I force a FAILURE if the branch is neither master nor staging:
pipeline {
agent {
dockerfile true
}
stages {
stage("Config vars") {
steps {
script {
switch(env.BRANCH_NAME) {
case 'staging':
env.MY_CREDENTIALS = 'creds-staging'
env.MY_ENVIRONMENT = 'staging'
break
case 'master':
env.MY_CREDENTIALS = 'creds-prod'
env.MY_ENVIRONMENT = 'production'
break
default:
println("Branch value error: " + env.BRANCH_NAME)
// note: aborting the build this way typically requires in-process script approval
currentBuild.getRawBuild().getExecutor().interrupt(Result.FAILURE)
}
}
}
}
stage("Publish") {
steps {
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.MY_CREDENTIALS, accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh 'make takecareofit'
}
}
}
}
}
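If you only need to distinguish a couple of branches, another option (a sketch of my own, not part of the answer above) is to compute the values once in a pipeline-level environment block with a Groovy ternary, which avoids both the per-branch stages and the script block:
pipeline {
    agent {
        dockerfile true
    }
    environment {
        // evaluated once for the whole run; any branch other than master falls back to the staging values
        MY_CREDENTIALS = "${env.BRANCH_NAME == 'master' ? 'creds-prod' : 'creds-staging'}"
        MY_ENVIRONMENT = "${env.BRANCH_NAME == 'master' ? 'production' : 'staging'}"
    }
    stages {
        stage("Publish") {
            steps {
                withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', credentialsId: env.MY_CREDENTIALS, accessKeyVariable: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
                    sh 'make takecareofit'
                }
            }
        }
    }
}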
I would like to run a pipeline with 2 stages. If any stage fails, the next stage should still be started (not skipped). Currently, if the 1st stage fails, the next stage is skipped.
Thank you for any help.
pipeline {
options { buildDiscarder(logRotator(numToKeepStr: '5', artifactNumToKeepStr: '5')) }
agent { label 'docker_gradle' }
triggers {
cron(env.BRANCH_NAME == 'develop' || env.BRANCH_NAME == 'master' ? '@daily' : '')
}
stages {
stage('Init') {
steps {
sh 'chmod +x gradlew'
}
}
stage('task1') {
when { anyOf { branch 'feature/*'; branch 'develop' }}
steps {
container(name: 'gradle') {
sh 'gradle clean task1'
}
}
}
stage('task2') {
when { anyOf { branch 'feature/*'; branch 'develop' }}
steps {
container(name: 'gradle') {
sh 'gradle clean task2'
}
}
}
}
post {
always {
script {
currentBuild.result = currentBuild.result ?: 'SUCCESS'
cucumber buildStatus: 'UNSTABLE',
failedFeaturesNumber: 1,
failedScenariosNumber: 1,
skippedStepsNumber: 1,
failedStepsNumber: 1,
reportTitle: 'Report',
fileIncludePattern: '**/cucumber.json',
sortingMethod: 'ALPHABETICAL',
trendsLimit: 100
}
}
}
}
1. You can change sh 'gradle clean task1' to
sh 'gradle clean task1 || true'
This will make the sh step return success even if the shell script fails.
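A related option (my sketch, not from the original answer) is the sh step's returnStatus flag, which hands you the exit code instead of failing the step, so you can decide how to react:
stage('task1') {
    steps {
        script {
            // returnStatus: true makes sh return the exit code instead of aborting the stage
            def rc = sh(script: 'gradle clean task1', returnStatus: true)
            if (rc != 0) {
                echo "task1 failed with exit code ${rc}, continuing with the next stage"
                currentBuild.result = 'UNSTABLE' // optional: record that something went wrong
            }
        }
    }
}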
2. You can also use try/catch.
Check this link: https://www.jenkins.io/doc/book/pipeline/syntax/#flow-control
for example:
stage("task1"){
steps {
script {
try {
sh 'gradle clean task1'
} catch (err) {
echo err.getMessage()
}
}
}
}
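There is also the built-in catchError step (again a sketch, not part of the original answer; the stageResult option needs a reasonably recent workflow-basic-steps plugin), which marks the stage as failed but lets the build move on:
stage('task1') {
    steps {
        // the stage is shown as failed, but the pipeline continues with task2
        catchError(buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
            sh 'gradle clean task1'
        }
    }
}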
GITLAB_VERSION: GitLab Enterprise Edition 13.9.3-ee
JENKINS_VERSION: 2.263.4
I have created a Jenkins pipeline which is triggered by a change in GitLab, but it's not updating the GitLab status.
pipeline {
agent any
stages {
stage('cloning from gitlab'){
steps{
git credentialsId: '7d13ef14-ee65-497b-8fba-7519f5012e81', url: 'git@git.MYDOMAIN.com:root/popoq.git'
}
}
stage('build') {
steps {
echo 'Notify GitLab'
updateGitlabCommitStatus name: 'Jenkins-build', state: 'pending'
echo 'build step goes here'
}
}
stage('echoing') {
steps{
echo "bla blaa bla"
}
}
stage('test') {
steps {
echo 'Notify GitLab'
echo 'test step goes here'
updateGitlabCommitStatus name: 'Jenkins-build', state: 'success'
}
}
}
}
It's not showing any pipeline in GitLab. Any suggestions?
I think you are missing a "gitlabBuilds" command in an "options" block declaring the steps you will have in your build.
options {
gitLabConnection('xxx-gitlab')
gitlabBuilds(builds: ['step1', 'step2', 'step3'])
}
Then you can reference those steps with "updateGitlabCommitStatus", but you'd be better off using the "gitlabCommitStatus" command, like this:
pipeline {
agent any
options {
gitLabConnection('xxx-gitlab')
gitlabBuilds(builds: ['step1', 'step2', 'step3'])
}
stages {
stage('step1'){
steps{
gitlabCommitStatus(name:'step1') {
git credentialsId: '7d13ef14-e', url: 'xxxxxxx'
}
} // end steps
} // end stage
stage('step2'){
steps{
gitlabCommitStatus(name:'step2') {
.......
}
} // end steps
} // end stage
} // end stages
} // end pipeline
pipeline {
agent {
label 'agent_gradle'
}
options {
gitLabConnection('Gitlab Jenkins integration API connection test')
gitlabBuilds(builds: ['step1', 'step2'])
}
stages {
stage('Build') {
steps {
gitlabCommitStatus(name: 'step1') {
container(name: 'gradle') {
echo 'Building the application...'
}
}
}
}
stage('Test') {
steps {
gitlabCommitStatus(name: 'step2') {
container(name: 'gradle') {
echo 'Testing the application...'
}
}
}
}
}
}
I am learning about Jenkins CI and using a pipeline to stage my jobs. I've run into a halt where my tests aren't running. Please take a look at my "Jenkins Image" link. As you can see, it's stuck in the --coverage table. Typically, if I were to run my tests on my local machine, I would have to enter a command to get Node to run all the tests; however, I don't think it would be the same in a Jenkins setting.
Jenkins Image
Jenkinsfile
def gv
pipeline {
agent any
tools {nodejs "node"}
parameters {
choice(name: 'VERSION', choices: ['1.1.0', '1.2.0', '1.3.0'], description: '')
booleanParam(name: 'executeTests', defaultValue: true, description: '')
}
stages {
stage("init") {
steps {
script {
gv = load "script.groovy"
CODE_CHANGES = gv.getGitChanges()
}
}
}
stage("build frontend") {
steps {
dir("client") {
sh 'npm install'
echo 'building client'
}
}
}
stage("build backend") {
steps {
dir("server") {
sh 'npm install'
echo 'building server...'
}
}
}
stage("test") {
when {
expression {
script {
env.BRANCH_NAME.toString().equals('feature-jenkins') && CODE_CHANGES == false
}
}
}
steps {
dir("client") {
sh 'npm test'
echo 'testing application'
}
}
}
stage("deploy") {
steps {
script {
gv.deployApp()
}
}
}
}
}
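Judging from the --coverage table, the tests appear to be Jest-based and are dropping into interactive watch mode, which is what a local key press normally drives. A minimal sketch of a non-interactive test stage, assuming Jest/react-scripts conventions (CI=true and --watchAll=false), would be:
stage("test") {
    steps {
        dir("client") {
            // CI=true / --watchAll=false keep Jest from waiting for keyboard input
            // after printing the coverage table, so the stage can finish on its own
            sh 'CI=true npm test -- --coverage --watchAll=false'
            echo 'testing application'
        }
    }
}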
I have several microservices which use the same pipeline from a shared library which is named jenkins-shared-pipelines . The Jenkinsfile for a microservice is as follows:
@Library(['jenkins-shared-pipelines']) _
gradleProjectPrPipeline([buildAgent: 'oc-docker-jdk11', disableIntegrationTestStage: true])
In jenkins-shared-pipelines/vars, the gradleProjectPrPipeline has the following stages:
/**
* gradleProjectPrPipeline is a generic pipeline
* @param pipelineProperties map used to pass parameters
* @return
*/
void call(Map pipelineProperties = [:]) {
.
.
.
pipeline {
agent {
node {
label "${pipelineProperties.buildAgent}"
}
}
options {
skipDefaultCheckout true
timeout(time: 1, unit: 'HOURS')
buildDiscarder(logRotator(
numToKeepStr: '5',
daysToKeepStr: '7',
artifactNumToKeepStr: '1',
artifactDaysToKeepStr: '7'
))
}
stages {
stage('Clone') {
steps {
//clone step
}
}
stage('Compile') {
steps {
script {
/*Some custom logic*/
}
runGradleTask([task: 'assemble',
rawArgs: defaultGradleArgs + " -Pcurrent_version=${releaseTag}"
])
}
}
stage('Tests') {
parallel {
stage('Unit tests') {
steps {
//Unit tests
}
}
stage('Integration tests') {
steps {
//Integration tests
}
}
}
}
stage('Sonar scan') {
steps {
//Sonar scanning
}
}
}
post {
unsuccessful {
script {
bitbucketHandler.notifyBuildFail([
displayName: pipelineName,
displayMessage: "Build ${env.BUILD_ID} failed at ${env.BUILD_TIMESTAMP}."
])
}
}
success {
script {
bitbucketHandler.notifyBuildSuccess([
displayName: pipelineName,
displayMessage: "Build ${env.BUILD_ID} completed at ${env.BUILD_TIMESTAMP}."
])
}
}
}
}
}
Now, in addition to the above pipeline, there will be several more pipelines in jenkins-shared-pipelines(under the same vars directory) e.g: awsPipeline, azurePipeline and so on which will also incorporate the deployment stages. These additional pipelines will require all the stages in the above gradleProjectBranchWrapper and will also add a few of their own stages. Currently, we simply copy-paste these stages in these additional pipelines,
void call(Map pipelineProperties = [:]) {
.
.
.
pipeline {
agent {
node {
label "${pipelineProperties.buildAgent}"
}
}
options {
skipDefaultCheckout true
timeout(time: 1, unit: 'HOURS')
buildDiscarder(logRotator(
numToKeepStr: '5',
daysToKeepStr: '7',
artifactNumToKeepStr: '1',
artifactDaysToKeepStr: '7'
))
}
stages {
stage('Clone') {
steps {
//clone step
}
}
stage('Compile') {
steps {
script {
/*Some custom logic*/
}
runGradleTask([task: 'assemble',
rawArgs: defaultGradleArgs + " -Pcurrent_version=${releaseTag}"
])
}
}
stage('Tests') {
parallel {
stage('Unit tests') {
steps {
//Unit tests
}
}
stage('Integration tests') {
steps {
//Integration tests
}
}
}
}
stage('Sonar scan') {
steps {
//Sonar scanning
}
}
stage('AWS') {
steps {
//AWS-specific deployment steps
}
}
}
post {
unsuccessful {
script {
bitbucketHandler.notifyBuildFail([
displayName: pipelineName,
displayMessage: "Build ${env.BUILD_ID} failed at ${env.BUILD_TIMESTAMP}."
])
}
}
success {
script {
bitbucketHandler.notifyBuildSuccess([
displayName: pipelineName,
displayMessage: "Build ${env.BUILD_ID} completed at ${env.BUILD_TIMESTAMP}."
])
}
}
}
}
}
Then we invoke these new pipelines from the microservices, for example:
@Library(['jenkins-shared-pipelines']) _
awsPipeline([buildAgent: 'oc-docker-jdk11', disableIntegrationTestStage: true])
Obviously there is code redundancy: the Clone through Sonar scan stages are common, but there is no 'base pipeline' or other way to include these common stages in all the pipelines. I was wondering if there is a way to 'include' the gradleProjectPrPipeline (which could serve as a 'base pipeline') in pipelines like awsPipeline, azurePipeline, and so on.
Note:
The workspace (where the Clone stage checks out the code and where the later stages operate) will be used by awsPipeline etc. In other words, the variables and results from the gradleProjectBranchWrapper should be accessible to awsPipeline etc.
There is a post block in the gradleProjectBranchWrapper; the other pipelines may have their own post blocks.
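One way to avoid the copy-paste (a sketch under my own assumptions, not something from the question) is to let the base step accept an optional closure of extra work and run it from a final stage; awsPipeline and friends then just forward their deployment steps. The extraStages parameter, the 'Deploy' stage name and the deploy script below are hypothetical:
// vars/gradleProjectPrPipeline.groovy (sketch)
void call(Map pipelineProperties = [:], Closure extraStages = null) {
    pipeline {
        agent { node { label "${pipelineProperties.buildAgent}" } }
        stages {
            // ...Clone, Compile, Tests and Sonar scan stages exactly as today...
            stage('Deploy') {
                when { expression { extraStages != null } }
                steps {
                    script {
                        // runs in the same workspace, so the clone/compile results are visible here
                        extraStages.call()
                    }
                }
            }
        }
        // the common post block stays here as well
    }
}

// vars/awsPipeline.groovy (sketch)
void call(Map pipelineProperties = [:]) {
    gradleProjectPrPipeline(pipelineProperties) {
        // AWS-specific deployment steps go here
        sh './deploy-to-aws.sh' // hypothetical script name
    }
}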
I have several Jenkins pipelines that, in my case, call "docker-compose up" at the end of the build to run the containers/application. I don't need the containers/application to be up all the time, and I would like a way, from the Jenkins pipeline page, to shut down the application (docker-compose stop) to free resources for other builds. Do you know a good way to do this?
You can declare a choice parameter (ex: stop_containers) with values YES and NO in your Jenkins job that is responsible for stopping the containers.
Then in the Build section select Execute shell and add the following script to it:
#!/bin/bash
if [ "$stop_containers" = "YES" ]
then
# command for stopping the containers, e.g. docker-compose stop
else
echo "Do Nothing."
fi
Now, whenever you run the job, it will ask whether you want to stop the containers; choose YES if you want to stop them.
If it is a Pipeline job, you can instead define a stage that performs the stop operation.
stage('stop containers') {
properties(
[parameters([choice(choices: ["YES", "NO"].join("\n"),
description: 'Some choice parameter',
name: 'STOP_CONTAINERS')])])
agent { label 'some-node' }
when {
expression {
return env.STOP_CONTAINERS == 'YES'
}
}
steps {
//command to stop containers
}
}
This is the way I have done it, based on
Converting Conditional Build Steps to Jenkins Pipeline.
pipeline {
agent any
parameters {
choice(
choices: 'full_deploy_and_run_docker_container\nrun_docker_containers\nstop_docker_containers',
description: '',
name: 'REQUESTED_ACTION')
}
tools {
maven 'Maven x.x.x'
jdk 'jdkYuWZ'
}
environment {
SVN_URL = 'http://svn.myexample.com/svn/myproject/trunk/'
DOCKER_PROJECT_DIR = '/home/myuser/docker/containers/myproject/trunk'
}
stages {
stage ('Initialize') {
steps {
sh '''
echo "Initialize..."
'''
}
}
stage ('Checkout') {
when {
expression { params.REQUESTED_ACTION == 'full_deploy_and_run_docker_container' }
}
steps {
checkout([$class: 'SubversionSCM', additionalCredentials: [], excludedCommitMessages: '', excludedRegions: '', excludedRevprop: '', excludedUsers: '', filterChangelog: false, ignoreDirPropChanges: false, includedRegions: '', locations: [[credentialsId: ' something here ', depthOption: 'infinity', ignoreExternalsOption: true, local: '.', remote: "$SVN_URL"]], workspaceUpdater: [$class: 'UpdateUpdater']])
}
}
stage ('Build') {
when {
expression { params.REQUESTED_ACTION == 'full_deploy_and_run_docker_container' }
}
steps {
sh 'mvn clean package'
}
}
stage ('Test') {
when {
expression { params.REQUESTED_ACTION == 'full_deploy_and_run_docker_container' }
}
steps {
sh 'echo "Other tests..."'
}
}
stage ('Deploy') {
when {
expression { params.REQUESTED_ACTION == 'full_deploy_and_run_docker_container' }
}
steps {
sh 'echo "The deploy here"'
}
}
stage ('Docker compose run') {
when {
expression { params.REQUESTED_ACTION == 'full_deploy_and_run_docker_container' ||
params.REQUESTED_ACTION == 'run_docker_containers'
}
}
steps {
sh '''
cd $DOCKER_PROJECT_DIR
docker-compose up -d
'''
}
}
stage ('Docker compose stop') {
when {
expression { params.REQUESTED_ACTION == 'stop_docker_containers' }
}
steps {
sh '''
cd $DOCKER_PROJECT_DIR
docker-compose stop
'''
}
}
stage ('Cleanup') {
steps {
cleanWs()
}
}
}
}
A simple way: you can use the sh step in a Jenkins pipeline for this.
node {
stage('stop') {
// run everything in a single ssh session; separate sh steps each start a new local shell,
// so the cd and docker-compose stop would not run on the remote host
sh "ssh root@host.com 'cd /diretorio_do_docker-compose/ && docker-compose stop'"
}
}