How do I set "Time in minutes to retain slave when idle" and "Max number of instances" when configuring a podTemplate in a pipeline?
I can see these two config options under System -> Cloud -> Kubernetes, but I use a pipeline and haven't figured out how to set them there.
My pipeline currently looks like this:
podTemplate(label: 'docker-go',
    containers: [
        containerTemplate(
            name: 'jnlp',
            image: 'docker.mydomain.com/library/jnlp-slave:2.62',
            command: '',
            args: '${computer.jnlpmac} ${computer.name}',
        ),
        containerTemplate(name: 'docker', image: 'docker.mydomain.com/library/docker:1.12.6', ttyEnabled: true, command: 'cat'),
        containerTemplate(name: 'golang', image: 'docker.mydomain.com/library/golang:1.8.3', ttyEnabled: true, command: '')
    ],
    volumes: [hostPathVolume(hostPath: '/var/run/docker.sock', mountPath: '/var/run/docker.sock')]
) {
    def image_tag = "docker.mydomain.com/deploy-demo/demo-go:v0.1"
    def workdir = "/go/src/demo-go"
    node('docker-go') {
        stage('setup') {
        }
        stage('clone') {
        }
        stage('compile') {
        }
        stage('build and push image') {
        }
    }
}
OK, I figured it out.
Add these two parameters:
idleMinutes: 10
instanceCap: 10
podTemplate(label: 'docker-go',
    containers: [
        containerTemplate(
            name: 'jnlp',
            image: 'docker.mydomain.com/library/jnlp-slave:2.62',
            command: '',
            args: '${computer.jnlpmac} ${computer.name}',
        ),
        containerTemplate(name: 'docker', image: 'docker.mydomain.com/library/docker:1.12.6', ttyEnabled: true, command: 'cat'),
        containerTemplate(name: 'golang', image: 'docker.mydomain.com/library/golang:1.8.3', ttyEnabled: true, command: '')
    ],
    volumes: [hostPathVolume(hostPath: '/var/run/docker.sock', mountPath: '/var/run/docker.sock')],
    idleMinutes: 10,
    instanceCap: 10
) {
    def image_tag = "docker.mydomain.com/deploy-demo/demo-go:v0.1"
    def workdir = "/go/src/demo-go"
    node('docker-go') {
        stage('setup') {
        }
        stage('clone') {
        }
        stage('compile') {
        }
        stage('build and push image') {
        }
    }
}
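A minimal sketch isolating just the two parameters (same values as above); the comments map them back to the option names from the cloud configuration screen:
// idleMinutes corresponds to "Time in minutes to retain slave when idle"
// instanceCap corresponds to "Max number of instances"
podTemplate(
    label: 'docker-go',
    idleMinutes: 10,    // keep the agent pod around for 10 minutes after it goes idle
    instanceCap: 10,    // run at most 10 concurrent pods from this template
    containers: [ /* containerTemplates as above */ ],
    volumes: [ /* volumes as above */ ]
) {
    node('docker-go') {
        // stages as above
    }
}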
Related
I am trying to run my integration tests with Testcontainers in a Docker-in-Docker (DinD) container on a Jenkins Kubernetes agent.
Testcontainers version: 1.15.3
However, Container.getMappedPort(X) always fails inside the DinD container.
It works absolutely fine on my local setup and manages to get the mapped port.
Has anyone encountered this issue before, or does anyone have a solution for it?
My Jenkinsfile:
#!groovy
def label = "debug-${UUID.randomUUID().toString()}"
podTemplate(label: label, slaveConnectTimeout: '10', containers: [
        containerTemplate(
            name: 'docker-in-docker',
            image: 'cfzen/dind:java11',
            privileged: true,
            workingDir: '/home/jenkins/agent',
            ttyEnabled: true,
            command: 'cat',
            envVars: [
                envVar(key: 'TESTCONTAINERS_HOST_OVERRIDE', value: 'tcp://localhost:2375'),
                envVar(key: 'TESTCONTAINERS_RYUK_DISABLED', value: 'true'),
            ]
        ),
        containerTemplate(
            name: 'helm-kubectl',
            image: 'dtzar/helm-kubectl',
            workingDir: '/home/jenkins/agent/',
            ttyEnabled: true,
            command: 'cat'
        )
    ],
    volumes: [hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock')],
    annotations: [
        podAnnotation(key: 'iam.amazonaws.com/role',
            value: 'arn:aws:iam::xxxxxxxxxxx')
    ],
)
{
    node(label) {
        deleteDir()
        stage('Checkout') {
            checkout scm
            def shortCommit = sh(returnStdout: true, script: "git log -n 1 --pretty=format:'%h'").trim()
            currentBuild.description = "${shortCommit}"
        }
        stage('Run Integration tests') {
            container('docker-in-docker') {
                withCredentials([
                    usernamePassword(credentialsId: 'jenkins-artifactory-credentials',
                        passwordVariable: 'ARTIFACTORY_SERVER_PASSWORD',
                        usernameVariable: 'ARTIFACTORY_SERVER_USERNAME')])
                {
                    echo 'Run Integration tests'
                    sh("mvn -B clean verify -q -s mvn/local-settings.xml")
                }
            }
        }
    }
}
TestRunner:
@RunWith(CucumberWithSerenity.class)
@CucumberOptions(features = "classpath:features")
public final class RunCucumberIT {

    @BeforeClass
    public static void init() {
        Containers.POSTGRES.start();
        System.out.println("Exposed port of db is" + Containers.POSTGRES.getExposedPorts());
        System.out.println("Assigned port of db is" + Containers.POSTGRES.getFirstMappedPort());
        Containers.WIREMOCK.start();
        Containers.S3.start();
    }

    private RunCucumberIT() {
    }
}
It fails at Containers.POSTGRES.getFirstMappedPort() with:
Requested port (X) is not mapped
I have this configuration in my pipeline job:
def k8sTestPodTemplate(docker_image) {
    return """
apiVersion: v1
kind: Pod
metadata:
  name: my-agent
  labels:
    name: my-agent
spec:
  serviceAccountName: jenkins
  containers:
  - name: python
    image: ${docker_image}
    command: ["/bin/bash", "-c", "cat"]
    tty: true
"""
}
pipeline {
    agent none
    stages {
        stage('Run tests') {
            parallel {
                stage('Tests Python 3.5') {
                    agent {
                        kubernetes {
                            defaultContainer 'jnlp'
                            yaml k8sTestPodTemplate('python:3.5')
                        }
                    }
                    steps {
                        container('python') {
                            sh "echo 'Hello from Python 3.5'"
                        }
                    }
                }
                stage('Tests Python 3.6') {
                    agent {
                        kubernetes {
                            defaultContainer 'jnlp'
                            yaml k8sTestPodTemplate('python:3.6')
                        }
                    }
                    steps {
                        container('python') {
                            sh "echo 'Hello from Python 3.6'"
                        }
                    }
                }
                stage('Tests Python 3.7') {
                    agent {
                        kubernetes {
                            defaultContainer 'jnlp'
                            yaml k8sTestPodTemplate('python:3.7')
                        }
                    }
                    steps {
                        container('python') {
                            sh "echo 'Hello from Python 3.7'"
                        }
                    }
                }
            }
        }
    }
}
But as you can see, I could easily refactor this into something like the following:
def k8sTestPodTemplate(docker_image) {
    return """
apiVersion: v1
kind: Pod
metadata:
  name: my-agent
  labels:
    name: my-agent
spec:
  serviceAccountName: jenkins
  containers:
  - name: python
    image: ${docker_image}
    command: ["/bin/bash", "-c", "cat"]
    tty: true
"""
}
def generateStage(docker_image) {
    return {
        stage("Tests ${docker_image}") {
            agent {
                kubernetes {
                    defaultContainer 'jnlp'
                    yaml k8sTestPodTemplate("${docker_image}")
                }
            }
            steps {
                container('python') {
                    sh "echo ${docker_image}"
                }
            }
        }
    }
}
pipeline {
    agent none
    stages {
        stage('Run tests') {
            parallel {
                generateStage("python:3.5")
                generateStage("python:3.6")
                generateStage("python:3.7")
            }
        }
    }
}
But I cannot get this to work. The problem is that Jenkins raises the error:
No such DSL method 'agent' found among steps
I am using the "agent" directive inside parallel stages that are generated dynamically, and that appears to be what Jenkins rejects.
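One possible workaround, sketched below (not part of the original question, and untested here): declarative "parallel" blocks have to be declared statically, so dynamically generated branches generally mean falling back to a scripted pipeline, where parallel takes a map of closures and each branch requests its own Kubernetes agent via podTemplate/node. The per-branch label is a hypothetical choice; any unique string works.
def generateBranch(docker_image) {
    return {
        // hypothetical unique label for this branch's pod
        def branchLabel = "py-${UUID.randomUUID().toString()}"
        podTemplate(label: branchLabel, yaml: k8sTestPodTemplate(docker_image)) {
            node(branchLabel) {
                stage("Tests ${docker_image}") {
                    container('python') {
                        sh "echo 'Hello from ${docker_image}'"
                    }
                }
            }
        }
    }
}

// scripted equivalent of the parallel stage above
parallel(
    'python:3.5': generateBranch('python:3.5'),
    'python:3.6': generateBranch('python:3.6'),
    'python:3.7': generateBranch('python:3.7')
)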
I'm working with microservices and I have a problem. I have two NestJS applications: the first one is a hybrid application and the second one is a plain microservice started with app.listen(). Everything is set up in docker-compose. I just want to send a simple @EventPattern event, and it fails: the second application doesn't receive any event. I followed this issue: https://github.com/nestjs/nest/issues/2532 but it didn't help at all. Here is some code I wrote:
docker-compose-dev.yml
# First Server
# --------------------------------------------------
first-server:
  container_name: first-server
  image: project/firstserver
  build:
    context: ../first-server/
    args:
      UUID: ${UUID}
      UGID: ${GUID}
  environment:
    NODE_ENV: ${NODE_ENV:-development}
  depends_on:
    - db-postgres
    - db-mongo
  user: "${UUID}:${GUID}"
  tty: true
  expose:
    - "3000"
    - "3010"
  ports:
    - "3000:3000"
    - "3010:3010"
  volumes:
    - ../first-server:/app
  networks:
    - servers-network

# Second Server
# --------------------------------------------------
second-server:
  container_name: second-server
  image: project/secondserver
  build:
    context: ../second-server/
    args:
      UUID: ${UUID}
      UGID: ${GUID}
  environment:
    NODE_ENV: ${NODE_ENV:-development}
  depends_on:
    - db-postgres
    - db-mongo
  user: "${UUID}:${GUID}"
  tty: true
  expose:
    - "3100"
    - "3020"
  ports:
    - "3100:3100"
    - "3020:3020"
  volumes:
    - ../second-server:/app
  networks:
    - servers-network

# Docker networks
# --------------------------------------------------
networks:
  servers-network:
    driver: bridge
    name: servers-network
first-server:
main.ts
async function bootstrap() {
  const app = await NestFactory.create<NestExpressApplication>(AppModule);
  const microservice = app.connectMicroservice({
    transport: Transport.TCP,
    options: {
      host: '0.0.0.0',
      port: 3010
    }
  });
  await app.startAllMicroservicesAsync();
  await app.listen(port);
}
app.module.ts
@Module({
  imports: [
    TypeOrmModule.forRootAsync({
      useClass: TypeOrmPostgresConfigProvider,
      name: TypeOrmPostgresConfigProvider.connectionName,
      imports: [
        ConfigModule
      ]
    }),
    TypeOrmModule.forRootAsync({
      useClass: TypeOrmMongoConfigProvider,
      name: TypeOrmMongoConfigProvider.connectionName,
      imports: [
        ConfigModule
      ]
    }),
    ServeStaticModule.forRoot({
      rootPath: join(__dirname, '../..', 'public/app')
    }),
    ClientsModule.register([
      {
        name: 'SERVICE_A',
        transport: Transport.TCP,
        options: {
          host: '0.0.0.0',
          port: 3010
        }
      }
    ])
  ],
  controllers: [
    AppController
  ],
  providers: []
})
app.controller.ts
export class AppController {
  constructor(
    @Inject('SERVICE_A') private readonly client: ClientProxy
  ) {
  }

  async onApplicationBootstrap() {
    await this.client.connect()
      .then(result => {
        console.log('result');
      })
      .catch(error => {
        console.log(error);
      });
  }

  @Get('hello')
  getHello() {
    this.client.emit<any>('message_printed', new Message('Hi there!'));
    return 'Hello World printed';
  }

  @EventPattern('message_printed')
  async handleMessagePrinted(data: Record<string, unknown>) {
    console.log('first-server event!');
  }
}
second-server:
main.ts
async function bootstrap() {
  const app = await NestFactory.createMicroservice<MicroserviceOptions>(
    AppModule,
    {
      transport: Transport.TCP,
      options: {
        host: '0.0.0.0',
        port: 3020
      }
    }
  );
  app.listen(() => console.log('Microservice is listening'));
}
app.controller.ts
export class AppController {
  constructor() {
  }

  @EventPattern('message_printed')
  async handleMessagePrinted(data: Record<string, unknown>) {
    console.log('second-server event!');
  }
}
Both servers run the latest version of NestJS. The Docker version is 19.03.5.
I configured a Jenkins pipeline to build a project pulled from GitHub, but I got an error at step 2 - Build image. I then tried adding the Jenkins admin user to the "docker" group; after that I can run the build command successfully when logged in as that user on the Kubernetes master VM, but the pipeline still fails in Jenkins. I used the Blue Ocean plugin to create the pipeline. Do you know how to fix this?
UPDATE: Please see my Jenkinsfile:
pipeline {
    environment {
        registry = "192.168.64.162:5000/justme/myweb"
        dockerImage = ""
    }
    agent any
    stages {
        stage('Checkout Source') {
            steps {
                git 'https://github.com/taibc/playjenkins.git'
            }
        }
        stage('Build image') {
            steps {
                script {
                    dockerImage = docker.build registry + ":$BUILD_NUMBER"
                }
            }
        }
        stage('Push Image') {
            steps {
                script {
                    docker.withRegistry("") {
                        dockerImage.push()
                    }
                }
            }
        }
        stage('Deploy App') {
            steps {
                script {
                    kubernetesDeploy(configs: "myweb.yaml", kubeconfigId: "mykubeconfig")
                }
            }
        }
    }
}
I resolved this problem by installing Jenkins on another server (one that does not belong to the Kubernetes cluster). But then I got another problem when deploying the app, as described in this link: https://github.com/jenkinsci/kubernetes-cd-plugin/issues/122
Here is my YAML file:
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: myweb
  name: myweb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - image: 192.168.94.162:5000/justme/myweb:1
        imagePullPolicy: Always
        name: myweb
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: myweb
  name: myweb
spec:
  ports:
  - nodePort: 32223
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: myweb
  type: NodePort
Here is my Jenkins script:
pipeline {
    environment {
        registry = "192.168.94.162:5000/justme/myweb"
        dockerImage = ""
    }
    agent any
    stages {
        stage('Checkout Source') {
            steps {
                git 'https://github.com/taibc/playjenkins.git'
            }
        }
        stage('Build image') {
            steps {
                script {
                    dockerImage = docker.build registry + ":$BUILD_NUMBER"
                }
            }
        }
        stage('Push Image') {
            steps {
                script {
                    docker.withRegistry("") {
                        dockerImage.push()
                    }
                }
            }
        }
        stage('Deploy App') {
            steps {
                script {
                    kubernetesDeploy(configs: "myweb.yaml", kubeconfigId: "mykubeconfig")
                }
            }
        }
    }
}
I have a multi-container job which runs on Kubernetes via the kubernetes-jenkins plugin. Everything works great, but I am unable to junit or archiveArtifacts anything. I suspect it's because the files exist only inside the container, but I'm not sure. The code is below:
def label = "foo-${UUID.randomUUID().toString()}"
podTemplate(
    label: label,
    containers: [
        containerTemplate(name: 'c1', image: 'c1'),
        containerTemplate(name: 'c2', image: 'c2'),
        containerTemplate(name: 'c3', image: 'c3'),
    ],
    volumes: [
        hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock'),
    ],
) {
    node(label) {
        stage('test') {
            container('c1') {
                sh """
                    cd /some-path
                    ./generate-junit-xml
                """
                archiveArtifacts allowEmptyArchive: true, artifacts: '/some-path/foo.xml'
                sh "cat /some-path/foo.xml"
            }
        }
    }
}
def label = "foo-${UUID.randomUUID().toString()}"
podTemplate(
    label: label,
    namespace: 'jenkins',
    imagePullSecrets: [ 'myreg' ],
    containers: [
        containerTemplate(name: 'c1', image: 'c1'),
        containerTemplate(name: 'c2', image: 'c2'),
        containerTemplate(name: 'c3', image: 'c3'),
    ],
    volumes: [
        hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock'),
    ],
) {
    node(label) {
        stage('test') {
            container('c1') {
                sh """
                    ./something-that-generates-junit-foo-xml
                """
                archiveArtifacts allowEmptyArchive: true, artifacts: '/abs/path/to/foo.xml'
                sh "cat /abs/path/to/foo.xml"
            }
        }
    }
}
The build log shows the following output:
[Pipeline] archiveArtifacts
Archiving artifacts
WARN: No artifacts found that match the file pattern "/some-path/foo.xml". Configuration error?
[Pipeline] sh
[test-pipeline] Running shell script
+ cat /some-path/unittest.xml
<?xml version="1.0" encoding="utf-8"?>...</xml>
I would appreciate your help!
Both junit and archiveArtifacts can only archive files that are inside the WORKSPACE; containers do not share any volumes with the host (where the Jenkins WORKSPACE is) unless you explicitly do so.
I solved this with:
- adding an additional volume where I save the files:
  hostPathVolume(hostPath: '/tmp', mountPath: '/tmp')
- copying the files from /tmp into the WORKSPACE with the File Operations Plugin:
  dir("/tmp/screenshots") {
      fileOperations([fileCopyOperation(excludes: '', flattenFiles: true, includes: '*.png', targetLocation: "${WORKSPACE}/screenshots")])
  }
- archiving the artifacts:
  archiveArtifacts allowEmptyArchive: true, artifacts: 'screenshots/**/*.png'
Copying the artifacts into the Jenkins workspace will solve it:
stage('test') {
    container('c1') {
        sh """
            ./something-that-generates-junit-foo-xml
        """
        sh 'cp /abs/path/to/foo.xml ${WORKSPACE}/abs/foo.xml'
        archiveArtifacts allowEmptyArchive: true, artifacts: 'abs/foo.xml'
        sh "cat /abs/path/to/foo.xml"
    }
}
You can copy the whole directory if you need all of its content:
sh 'cp -r /abs/path/to/ ${WORKSPACE}/abs/to'
archiveArtifacts allowEmptyArchive: true, artifacts: 'abs/to/*.xml'
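Along the same lines, a minimal sketch for publishing the same report with junit (using the same assumed paths as above): junit, like archiveArtifacts, resolves its pattern against the workspace, so copy the file in first.
// copy the report into the workspace, then point junit at a workspace-relative path
sh 'cp /abs/path/to/foo.xml ${WORKSPACE}/foo.xml'
// allowEmptyResults avoids failing the build when no report was produced
junit allowEmptyResults: true, testResults: 'foo.xml'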