I am able to mount the jenkins-home volume as a PersistentVolumeClaim.
I am unable to mount the tmp volume as a persistent volume from the values.yaml; it keeps appearing as an EmptyDir connected directly to the host.
I have tried both of the volume options and defined them as shown here:
https://github.com/helm/charts/blob/77c2f8c632b939af76b4487e0d8032c542568445/stable/jenkins/values.yaml#L478
It still appears as an EmptyDir connected to the host.
https://github.com/helm/charts/blob/master/stable/jenkins/values.yaml
values.yaml below:
clusterZone: "cluster.local"
nameOverride: ""
fullnameOverride: ""
namespaceOverride: test-project
master:
componentName: "jenkins-master"
image: "jenkins/jenkins"
tag: "lts"
imagePullPolicy: "Always"
imagePullSecretName:
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "echo Script from the postStart handler to install jq and aws > /usr/share/message && apt-get upgrade -y && apt-get update -y && apt-get install vim -y && apt-get install jq -y && apt-get install awscli -y && apt-get install -y -qq groff && apt-get install -y -qq less"]
numExecutors: 10
customJenkinsLabels: []
useSecurity: true
enableXmlConfig: true
securityRealm: |-
<securityRealm class="hudson.security.LegacySecurityRealm"/>
authorizationStrategy: |-
<authorizationStrategy class="hudson.security.FullControlOnceLoggedInAuthorizationStrategy">
<denyAnonymousReadAccess>true</denyAnonymousReadAccess>
</authorizationStrategy>
hostNetworking: false
# login user for Jenkins
adminUser: "ctjenkinsadmin"
rollingUpdate: {}
resources:
requests:
cpu: "50m"
memory: "512Mi"
limits:
cpu: "2000m"
memory: "4096Mi"
usePodSecurityContext: true
servicePort: 8080
targetPort: 8080
# Type NodePort for minikube
serviceAnnotations: {}
deploymentLabels: {}
serviceLabels: {}
podLabels: {}
# NodePort for Jenkins Service
healthProbes: true
healthProbesLivenessTimeout: 5
healthProbesReadinessTimeout: 5
healthProbeLivenessPeriodSeconds: 10
healthProbeReadinessPeriodSeconds: 10
healthProbeLivenessFailureThreshold: 5
healthProbeReadinessFailureThreshold: 3
healthProbeLivenessInitialDelay: 90
healthProbeReadinessInitialDelay: 60
slaveListenerPort: 50000
slaveHostPort:
disabledAgentProtocols:
- JNLP-connect
- JNLP2-connect
csrf:
defaultCrumbIssuer:
enabled: true
proxyCompatability: true
cli: false
slaveListenerServiceType: "ClusterIP"
slaveListenerServiceAnnotations: {}
slaveKubernetesNamespace:
loadBalancerSourceRanges:
- 0.0.0.0/0
extraPorts: []
installPlugins:
- configuration-as-code:latest
- kubernetes:latest
- workflow-aggregator:latest
- workflow-job:latest
- credentials-binding:latest
- git:latest
- git-client:latest
- git-server:latest
- greenballs:latest
- blueocean:latest
- strict-crumb-issuer:latest
- http_request:latest
- matrix-project:latest
- jquery:latest
- artifactory:latest
- jdk-tool:latest
- matrix-auth:latest
enableRawHtmlMarkupFormatter: false
scriptApproval: []
initScripts:
- |
#!groovy
import hudson.model.*;
import jenkins.model.*;
import jenkins.security.*;
import jenkins.security.apitoken.*;
// script parameters
def userName = 'user'
def tokenName = 'token'
def uploadscript =['/bin/sh', '/var/lib/jenkins/update_token.sh']
def user = User.get(userName, false)
def apiTokenProperty = user.getProperty(ApiTokenProperty.class)
def result = apiTokenProperty.tokenStore.generateNewToken(tokenName)
def file = new File("/tmp/token.txt")
file.delete()
file.write result.plainValue
uploadscript.execute()
uploadscript.waitForOrKill(100)
user.save()
return result.plainValue
value = result.plainValue
jobs:
Test-Job: |-
<?xml version='1.0' encoding='UTF-8'?>
<project>
<keepDependencies>false</keepDependencies>
<properties/>
<scm class="hudson.scm.NullSCM"/>
<canRoam>false</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers/>
<concurrentBuild>false</concurrentBuild>
<builders/>
<publishers/>
<buildWrappers/>
</project>
JCasC:
enabled: true
configScripts:
welcome-message: |
jenkins:
systemMessage: Welcome to Jenkins Server.
customInitContainers: []
sidecars:
configAutoReload:
enabled: false
image: kiwigrid/k8s-sidecar:0.1.20
imagePullPolicy: IfNotPresent
resources: {}
sshTcpPort: 1044
folder: "/var/jenkins_home/casc_configs"
other: []
nodeSelector: {}
tolerations: []
#- key: "node.kubernetes.io/disk-pressure"
# operator: "Equal"
# effect: "NoSchedule"
#- key: "node.kubernetes.io/memory-pressure"
# operator: "Equal"
# effect: "NoSchedule"
#- key: "node.kubernetes.io/pid-pressure"
# operator: "Equal"
# effect: "NoSchedule"
#- key: "node.kubernetes.io/not-ready"
# operator: "Equal"
# effect: "NoSchedule"
#- key: "node.kubernetes.io/unreachable"
# operator: "Equal"
# effect: "NoSchedule"
#- key: "node.kubernetes.io/unschedulable"
# operator: "Equal"
# effect: "NoSchedule"
podAnnotations: {}
customConfigMap: false
overwriteConfig: false
overwriteJobs: false
jenkinsUrlProtocol: "https"
# If you set this prefix and use ingress controller then you might want to set the ingress path below
#jenkinsUriPrefix: "/jenkins"
ingress:
enabled: true
apiVersion: "extensions/v1beta1"
labels: {}
annotations: {}
kubernetes.io/secure-backends: "true"
kubernetes.io/ingress.class: nginx
name: ""
#service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-west-2:454211873573:certificate/a3146344-5888-48d5-900c-80a9d1532781 #replace this value
#service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
#kubernetes.io/ingress.class: nginx
#kubernetes.io/tls-acme: "true"
#path: "/jenkins"
kubernetes.io/ssl-redirect: "true"
#nginx.ingress.kubernetes.io/ssl-redirect: "true"
hostName: ""
tls:
#- secretName: jenkins.cluster.local
# hosts:
# - jenkins.cluster.local
backendconfig:
enabled: false
apiVersion: "extensions/v1beta1"
name:
labels: {}
annotations: {}
spec: {}
route:
enabled: false
labels: {}
annotations: {}
additionalConfig: {}
hostAliases: []
prometheus:
enabled: false
serviceMonitorAdditionalLabels: {}
scrapeInterval: 60s
scrapeEndpoint: /prometheus
alertingRulesAdditionalLabels: {}
alertingrules: []
testEnabled: true
agent:
enabled: true
image: "jenkins/jnlp-slave"
tag: "latest"
customJenkinsLabels: []
imagePullSecretName:
componentName: "jenkins-slave"
privileged: false
resources:
requests:
cpu: "1"
memory: "1Gi"
limits:
cpu: "1"
memory: "4Gi"
alwaysPullImage: false
podRetention: "Never"
envVars: []
# mount docker in agent pod
volumes:
- type: HostPath
hostPath: /var/run/docker.sock
mountPath: /var/run/docker.sock
nodeSelector: {}
command:
args:
- echo installing jq;
apt-get update;
apt-get install jq -y;
apt-get install -y git;
apt-get install -y java-1.8.0-openjdk;
apt-get install awscli;
sideContainerName: "jnlp"
TTYEnabled: true
containerCap: 10
podName: "default"
idleMinutes: 0
yamlTemplate: ""
persistence:
enabled: true
existingClaim: test-project-pvc
storageClass: test-project-pv
annotations: {}
accessMode: "ReadWriteOnce"
size: "20Gi"
volumes:
mounts:
networkPolicy:
enabled: false
apiVersion: networking.k8s.io/v1
rbac:
create: true
readSecrets: false
serviceAccount:
create: true
name:
annotations: {}
Please create a PersistentVolumeClaim with the following YAML file in the Jenkins namespace (updating the namespace field as needed):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-tmp-pvc
  namespace: test-project
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "10Gi"
  storageClassName: gp2
Then add the persistent volume, mount, and javaOpts as follows in the Jenkins values.yaml file:
master:
  ...
  javaOpts: "-Djava.io.tmpdir=/var/jenkins_tmp"
  persistence:
    ...
    volumes:
      - name: jenkins-tmp
        persistentVolumeClaim:
          claimName: jenkins-tmp-pvc
    mounts:
      - mountPath: /var/jenkins_tmp
        name: jenkins-tmp
This will first create the persistent volume claim "jenkins-tmp-pvc" and its underlying persistent volume, and Jenkins will then use the claim's mount path "/var/jenkins_tmp" as its tmp directory. Also, make sure your "gp2" StorageClass is created with the "allowVolumeExpansion: true" attribute, so that "jenkins-tmp-pvc" can be expanded whenever you need more tmp disk space.
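For reference, a minimal sketch of such an expandable StorageClass, assuming the AWS EBS provisioner behind "gp2" (adjust the provisioner and parameters to your cluster); allowVolumeExpansion can also be patched onto an existing StorageClass:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gp2
provisioner: kubernetes.io/aws-ebs
parameters:
  # gp2 EBS volumes formatted as ext4
  type: gp2
  fsType: ext4
reclaimPolicy: Delete
# required so that PVCs such as jenkins-tmp-pvc can be resized later
allowVolumeExpansion: true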
I installed the GitLab Runner via a Helm chart on my Kubernetes cluster.
While installing via Helm I used the values.yaml config below.
But my runner gets stuck every time at the docker login command; without docker login it works fine.
I have no idea what is wrong :(
Any help appreciated!
Error: write tcp 10.244.0.44:50882->188.72.88.34:443: use of closed network connection
.gitlab-ci.yaml file
build docker image:
stage: build
image: docker:latest
services:
- name: docker:dind
entrypoint: ["env", "-u", "DOCKER_HOST"]
command: ["dockerd-entrypoint.sh"]
variables:
DOCKER_HOST: tcp://localhost:2375/
DOCKER_DRIVER: overlay2
DOCKER_TLS_CERTDIR: ""
before_script:
- mkdir -p $HOME/.docker
- echo passwd| docker login -u user https://registry.labs.com --password-stdin
script:
- docker images
- docker ps
- docker pull registry.labs.com/jappweek:a_zh
- docker build -t "$CI_REGISTRY"/"$CI_REGISTRY_IMAGE":1.8 .
- docker push "$CI_REGISTRY"/"$CI_REGISTRY_IMAGE":1.8
tags:
- k8s
values.yaml file
image:
registry: registry.gitlab.com
#image: gitlab/gitlab-runner:v13.0.0
image: gitlab-org/gitlab-runner
# tag: alpine-v11.6.0
imagePullPolicy: IfNotPresent
gitlabUrl: https://gitlab.somebars.com
runnerRegistrationToken: "GR1348941a7jJ4WF7999yxsya9Arsd929g"
terminationGracePeriodSeconds: 3600
#
concurrent: 10
checkInterval: 30
sessionServer:
enabled: false
## For RBAC support:
rbac:
create: true
rules:
- resources: ["configmaps", "pods", "pods/attach", "secrets", "services"]
verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["create", "patch", "delete"]
clusterWideAccess: false
podSecurityPolicy:
enabled: false
resourceNames:
- gitlab-runner
metrics:
enabled: false
portName: metrics
port: 9252
serviceMonitor:
enabled: false
service:
enabled: false
type: ClusterIP
runners:
config: |
[[runners]]
[runners.kubernetes]
namespace = "{{.Release.Namespace}}"
image = "ubuntu:16.04"
privileged: true
cache: {}
builds: {}
services: {}
helpers: {}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
runAsNonRoot: true
privileged: false
capabilities:
drop: ["ALL"]
podSecurityContext:
runAsUser: 100
# runAsGroup: 65533
fsGroup: 65533
resources: {}
affinity: {}
nodeSelector: {}
tolerations: []
hostAliases: []
podAnnotations: {}
podLabels: {}
priorityClassName: ""
secrets: []
configMaps: {}
volumeMounts: []
volumes: []
I bypassed docker login by importing the $HOME/.docker/config.json file, which stores the auth token from my host machine, into GitLab CI:
before_script:
- mkdir -p $HOME/.docker
- echo $DOCKER_AUTH_CONFIG > $HOME/.docker/config.json
$DOCKER_AUTH_CONFIG is the content of $HOME/.docker/config.json, stored as a CI/CD variable.
That's all; no docker login is required.
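For reference, $DOCKER_AUTH_CONFIG simply holds the JSON content of config.json as a (masked) CI/CD variable; a sketch of what it looks like, using the demo registry and user:password from the job above (the auth value is base64 of "user:passwd"):
{
  "auths": {
    "https://registry.labs.com": {
      "auth": "dXNlcjpwYXNzd2Q="
    }
  }
}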
I am trying to add more Kafka connectors to our Kafka cluster based on the instructions in the link below, but the deployment fails with an ErrImagePull error. Please find the details below and help me resolve this.
Reference link:
https://docs.confluent.io/home/connect/extending.html#create-a-docker-image-containing-c-hub-connectors
Custom Docker image I created:
FROM confluentinc/cp-server-connect-operator:6.0.0.0
USER root
RUN confluent-hub install --no-prompt confluentinc/kafka-connect-s3:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-tibco-source:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-azure-event-hubs:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-azure-event-hubs:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-datadog-metrics:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-ftps:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-gcp-pubsub:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-gcs-source:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-pagerduty:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-sftp:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-teradata:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-tibco-source:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-s3-source:latest \
&& confluent-hub install --no-prompt confluentinc/kafka-connect-gcs:latest
USER 1001
I pushed the image to a public registry:
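For reference, a minimal sketch of the build-and-push step, assuming the Docker Hub repository rdkarthikeyan27/hebdevkafkaconnectors and tag 1.0 referenced in my-values.yaml below:
# build the custom Connect image from the Dockerfile above and push it to Docker Hub
docker build -t rdkarthikeyan27/hebdevkafkaconnectors:1.0 .
docker login docker.io
docker push rdkarthikeyan27/hebdevkafkaconnectors:1.0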
I updated the image reference in my-values.yaml, but the deployment fails with an ErrImagePull error.
my-values.yaml:
## Overriding values for Chart's values.yaml for AWS
##
global:
provider:
name: aws
region: us-east-1
## Docker registry endpoint where Confluent Images are available.
##
kubernetes:
deployment:
zones:
- us-east-1a
- us-east-1b
- us-east-1c
registry:
fqdn: docker.io
credential:
required: false
sasl:
plain:
username: test
password: test123
authorization:
rbac:
enabled: false
simple:
enabled: false
superUsers: []
dependencies:
mds:
endpoint: ""
publicKey: ""
## Zookeeper cluster
##
zookeeper:
name: zookeeper
replicas: 3
oneReplicaPerNode: true
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-zookeeper
rule: requiredDuringSchedulingIgnoredDuringExecution
resources:
requests:
cpu: 200m
memory: 512Mi
## Kafka Cluster
##
kafka:
name: kafka
replicas: 3
oneReplicaPerNode: true
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-broker
rule: requiredDuringSchedulingIgnoredDuringExecution
resources:
requests:
cpu: 200m
memory: 1Gi
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
tls:
enabled: false
fullchain: |-
privkey: |-
cacerts: |-
metricReporter:
enabled: true
publishMs: 30000
replicationFactor: ""
tls:
enabled: false
internal: false
authentication:
type: ""
bootstrapEndpoint: ""
## Connect Cluster
##
connect:
name: connectors
image:
repository: rdkarthikeyan27/hebdevkafkaconnectors
tag: 1.0
oneReplicaPerNode: false
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-connector
rule: requiredDuringSchedulingIgnoredDuringExecution
replicas: 2
tls:
enabled: false
## "" for none, "tls" for mutual auth
authentication:
type: ""
fullchain: |-
privkey: |-
cacerts: |-
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
dependencies:
kafka:
bootstrapEndpoint: kafka:9071
brokerCount: 3
schemaRegistry:
enabled: true
url: http://schemaregistry:8081
## Replicator Connect Cluster
##
replicator:
name: replicator
oneReplicaPerNode: false
replicas: 0
tls:
enabled: false
authentication:
type: ""
fullchain: |-
privkey: |-
cacerts: |-
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
dependencies:
kafka:
brokerCount: 3
bootstrapEndpoint: kafka:9071
##
## Schema Registry
##
schemaregistry:
name: schemaregistry
oneReplicaPerNode: false
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-schema-reg
rule: requiredDuringSchedulingIgnoredDuringExecution
tls:
enabled: false
authentication:
type: ""
fullchain: |-
privkey: |-
cacerts: |-
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
dependencies:
kafka:
brokerCount: 3
bootstrapEndpoint: kafka:9071
##
## KSQL
##
ksql:
name: ksql
replicas: 2
oneReplicaPerNode: true
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-ksql
rule: requiredDuringSchedulingIgnoredDuringExecution
tls:
enabled: false
authentication:
type: ""
fullchain: |-
privkey: |-
cacerts: |-
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
dependencies:
kafka:
brokerCount: 3
bootstrapEndpoint: kafka:9071
brokerEndpoints: kafka-0.kafka:9071,kafka-1.kafka:9071,kafka-2.kafka:9071
schemaRegistry:
enabled: false
tls:
enabled: false
authentication:
type: ""
url: http://schemaregistry:8081
## Control Center (C3) Resource configuration
##
controlcenter:
name: controlcenter
license: ""
##
## C3 dependencies
##
dependencies:
c3KafkaCluster:
brokerCount: 3
bootstrapEndpoint: kafka:9071
zookeeper:
endpoint: zookeeper:2181
connectCluster:
enabled: true
url: http://connectors:8083
ksql:
enabled: true
url: http://ksql:9088
schemaRegistry:
enabled: true
url: http://schemaregistry:8081
oneReplicaPerNode: false
affinity:
nodeAffinity:
key: worker-type
values:
- node-group-control
rule: requiredDuringSchedulingIgnoredDuringExecution
##
## C3 External Access
##
loadBalancer:
enabled: true
type: internal
domain: conf-ka01.dsol.core
annotations:
service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
##
## TLS configuration
##
tls:
enabled: false
authentication:
type: ""
fullchain: |-
privkey: |-
cacerts: |-
##
## C3 authentication
##
auth:
basic:
enabled: true
##
## map with key as user and value as password and role
property:
admin: Developer1,Administrators
disallowed: no_access
The image from Docker Hub (https://hub.docker.com/r/confluentinc/cp-server-connect-operator) does not have the tag 1.0.0 available. Try tag 6.0.0.0,
that is, confluentinc/cp-server-connect-operator:6.0.0.0.
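Following that suggestion, the connect image section of my-values.yaml would point at a tag that actually exists in the registry, for example (a sketch; use your own repository if you push the custom image under a valid tag):
connect:
  name: connectors
  image:
    repository: confluentinc/cp-server-connect-operator
    tag: 6.0.0.0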
I have 2 Jenkins instances: one uses version 1.8 and the second uses version 1.18.
The older version is able to create both containers.
Agent specification [Kubernetes Pod Template] (mo-aio-build-supplier):
* [jnlp] mynexus.services.com/mo-base/jenkins-slave-mo-aio:1.8.2-ca(resourceRequestCpu: 0.25, resourceRequestMemory: 256Mi, resourceLimitCpu: 1, resourceLimitMemory: 1.5Gi)
* [postgres] mynexus.services.com:443/mo-base/mo-base-postgresql-95-openshift
The newer version is not able to create the postgres container:
Container postgres exited with error 1. Logs: mkdir: cannot create directory '/home/jenkins': Permission denied
Both use the same podTemplate:
podTemplate(
name: label,
label: label,
cloud: 'openshift',
serviceAccount: 'jenkins',
containers: [
containerTemplate(
name: 'jnlp',
image: 'mynexus.services.theosmo.com/jenkins-slave-mo-aio:v3.11.104-14_jdk8',
resourceRequestCpu: env.CPU_REQUEST,
resourceLimitCpu: env.CPU_LIMIT,
resourceRequestMemory: env.RAM_REQUEST,
resourceLimitMemory: env.RAM_LIMIT,
workingDir: '/tmp',
args: '${computer.jnlpmac} ${computer.name}',
command: ''
),
containerTemplate(
name: 'postgres',
image: 'mynexus.services.theosmo.com:443/mo-base/mo-base-postgresql-95-openshift',
envVars: [
envVar(key: "POSTGRESQL_USER", value: "admin"),
envVar(key: "POSTGRESQL_PASSWORD", value: "admin"),
envVar(key: "POSTGRESQL_DATABASE", value: "supplier_data"),
]
)
],
volumes: [emptyDirVolume(mountPath: '/dev/shm', memory: true)]
)
Also, I've noticed that the YAML created by the newer version is a bit weird:
apiVersion: "v1"
kind: "Pod"
metadata:
annotations:
buildUrl: "http://jenkins.svc:80/job/build-supplier/473/"
labels:
jenkins: "slave"
jenkins/mo-aio-build-supplier: "true"
name: "mo-aio-build-supplier-xfgmn-qmrdl"
spec:
containers:
- args:
- "********"
- "mo-aio-build-supplier-xfgmn-qmrdl"
env:
- name: "JENKINS_SECRET"
value: "********"
- name: "JENKINS_TUNNEL"
value: "jenkins-jnlp.svc:50000"
- name: "JENKINS_AGENT_NAME"
value: "mo-aio-build-supplier-xfgmn-qmrdl"
- name: "JENKINS_NAME"
value: "mo-aio-build-supplier-xfgmn-qmrdl"
- name: "JENKINS_AGENT_WORKDIR"
value: "/tmp"
- name: "JENKINS_URL"
value: "http://jenkins.svc:80/"
- name: "HOME"
value: "/home/jenkins"
image: "mynexus.services.com/mo-base/jenkins-slave-mo-aio:1.8.2-ca"
imagePullPolicy: "IfNotPresent"
name: "jnlp"
resources:
limits:
memory: "1.5Gi"
cpu: "1"
requests:
memory: "256Mi"
cpu: "0.25"
securityContext:
privileged: false
tty: false
volumeMounts:
- mountPath: "/dev/shm"
name: "volume-0"
readOnly: false
- mountPath: "/tmp"
name: "workspace-volume"
readOnly: false
workingDir: "/tmp"
- env:
- name: "POSTGRESQL_DATABASE"
value: "supplier_data"
- name: "POSTGRESQL_USER"
value: "admin"
- name: "HOME"
value: "/home/jenkins"
- name: "POSTGRESQL_PASSWORD"
value: "admin"
image: "mynexus.services.com:443/mo-base/mo-base-postgresql-95-openshift"
imagePullPolicy: "IfNotPresent"
name: "postgres"
resources:
limits: {}
requests: {}
securityContext:
privileged: false
tty: false
volumeMounts:
- mountPath: "/dev/shm"
name: "volume-0"
readOnly: false
- mountPath: "/home/jenkins/agent"
name: "workspace-volume"
readOnly: false
workingDir: "/home/jenkins/agent"
nodeSelector: {}
restartPolicy: "Never"
serviceAccount: "jenkins"
volumes:
- emptyDir:
medium: "Memory"
name: "volume-0"
- emptyDir: {}
name: "workspace-volume"
As you can see above, the postgres container entry appears under an env tree, with HOME forced to /home/jenkins.
Any suggestions? Thanks in advance.
As far as I checked, here is what I found:
The problem
Since Kubernetes Plugin version 1.18.0, the default working directory of the pod containers was changed from /home/jenkins to /home/jenkins/agent. But the default HOME environment variable enforcement is still pointing to /home/jenkins. The impact of this change is that if pod container images do not have a /home/jenkins directory with sufficient permissions for the running user, builds will fail to do anything directly under their HOME directory, /home/jenkins.
Resolution
There are different workarounds to this problem:
Change the default HOME variable
The simplest and preferred workaround is to add the system property -Dorg.csanchez.jenkins.plugins.kubernetes.PodTemplateBuilder.defaultHome=/home/jenkins/agent on Jenkins startup. This requires a restart.
This workaround restores the pre-1.18.0 behavior of the Kubernetes plugin, but with the new working directory /home/jenkins/agent.
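With the stable/jenkins Helm chart used above, one way to pass this property is through master.javaOpts (a sketch; the exact key depends on your chart version):
master:
  # system property picked up by the Kubernetes plugin on Jenkins startup
  javaOpts: "-Dorg.csanchez.jenkins.plugins.kubernetes.PodTemplateBuilder.defaultHome=/home/jenkins/agent"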
Use /home/jenkins as the working directory
A workaround is to change the working directory of pod containers back to /home/jenkins. This workaround is only possible when using YAML to define agent pod templates (see JENKINS-60977).
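A sketch of such a YAML-defined pod template fragment (passed via the Kubernetes plugin's yaml/yamlTemplate parameter); the postgres image is taken from the question, everything else is illustrative:
apiVersion: v1
kind: Pod
spec:
  containers:
    - name: postgres
      image: mynexus.services.theosmo.com:443/mo-base/mo-base-postgresql-95-openshift
      # move the working directory (and workspace volume mount) back to /home/jenkins
      workingDir: /home/jenkins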
Prepare images for Jenkins
A workaround could be to ensure that the images used in agent pods have a /home/jenkins directory that is owned by the root group and writable by the root group as mentioned in OpenShift Container Platform-specific guidelines.
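A sketch of how an agent image could be prepared along those lines (the base image is a placeholder):
FROM mynexus.services.theosmo.com:443/mo-base/mo-base-postgresql-95-openshift
USER root
# create /home/jenkins owned by the root group and group-writable,
# as recommended by the OpenShift image guidelines
RUN mkdir -p /home/jenkins \
 && chgrp -R 0 /home/jenkins \
 && chmod -R g=u /home/jenkins
USER 1001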
Additionally, there is an issue tracking this on the Jenkins issue tracker.
Hope this helps.
I am trying to set up Jenkins with the stable Helm chart, but the Jenkins pod always remains in Init status and gives no errors when I describe it. I am not able to debug it as it's stuck in Init status.
I have already created the PV & PVC and assigned the PVC in the values file.
Below is my configuration:
master:
componentName: "jenkins-master"
image: "jenkins/jenkins"
tag: "lts"
imagePullPolicy: "IfNotPresent"
lifecycle:
numExecutors: 0
customJenkinsLabels: []
useSecurity: true
enableXmlConfig: true
securityRealm: |-
<securityRealm class="hudson.security.LegacySecurityRealm"/>
authorizationStrategy: |-
<authorizationStrategy class="hudson.security.FullControlOnceLoggedInAuthorizationStrategy">
<denyAnonymousReadAccess>true</denyAnonymousReadAccess>
</authorizationStrategy>
hostNetworking: false
adminUser: "admin"
adminPassword: "admin"
rollingUpdate: {}
resources:
requests:
cpu: "50m"
memory: "256Mi"
limits:
cpu: "2000m"
memory: "2048Mi"
usePodSecurityContext: true
servicePort: 8080
targetPort: 8080
serviceType: NodePort
serviceAnnotations: {}
deploymentLabels: {}
serviceLabels: {}
podLabels: {}
nodePort: 32323
healthProbes: true
healthProbesLivenessTimeout: 5
healthProbesReadinessTimeout: 5
healthProbeLivenessPeriodSeconds: 10
healthProbeReadinessPeriodSeconds: 10
healthProbeLivenessFailureThreshold: 5
healthProbeReadinessFailureThreshold: 3
healthProbeLivenessInitialDelay: 90
healthProbeReadinessInitialDelay: 60
slaveListenerPort: 50000
slaveHostPort:
disabledAgentProtocols:
- JNLP-connect
- JNLP2-connect
csrf:
defaultCrumbIssuer:
enabled: true
proxyCompatability: true
cli: false
slaveListenerServiceType: "ClusterIP"
slaveListenerServiceAnnotations: {}
slaveKubernetesNamespace:
loadBalancerSourceRanges:
- 0.0.0.0/0
extraPorts:
installPlugins:
- kubernetes:1.18.1
- workflow-job:2.33
- workflow-aggregator:2.6
- credentials-binding:1.19
- git:3.11.0
- blueocean:1.18.1
- kubernetes-cd:2.0.0
enableRawHtmlMarkupFormatter: false
scriptApproval:
initScripts:
jobs: {}
JCasC:
enabled: false
pluginVersion: "1.27"
supportPluginVersion: "1.18"
configScripts:
welcome-message: |
jenkins:
systemMessage: Welcome to our CI\CD server. This Jenkins is configured and managed 'as code'.
customInitContainers: []
sidecars:
configAutoReload:
enabled: false
image: shadwell/k8s-sidecar:0.0.2
imagePullPolicy: IfNotPresent
resources: {}
sshTcpPort: 1044
folder: "/var/jenkins_home/casc_configs"
nodeSelector: {}
tolerations: []
podAnnotations: {}
customConfigMap: false
overwriteConfig: false
overwriteJobs: false
ingress:
enabled: false
apiVersion: "extensions/v1beta1"
labels: {}
annotations: {}
hostName:
tls:
backendconfig:
enabled: false
apiVersion: "extensions/v1beta1"
name:
labels: {}
annotations: {}
spec: {}
route:
enabled: false
labels: {}
annotations: {}
additionalConfig: {}
hostAliases: []
prometheus:
enabled: false
serviceMonitorAdditionalLabels: {}
scrapeInterval: 60s
scrapeEndpoint: /prometheus
alertingRulesAdditionalLabels: {}
alertingrules: []
agent:
enabled: true
image: "jenkins/jnlp-slave"
tag: "3.27-1"
customJenkinsLabels: []
imagePullSecretName:
componentName: "jenkins-slave"
privileged: false
resources:
requests:
cpu: "200m"
memory: "256Mi"
limits:
cpu: "200m"
memory: "256Mi"
alwaysPullImage: false
podRetention: "Never"
envVars:
volumes:
nodeSelector: {}
command:
args:
sideContainerName: "jnlp"
TTYEnabled: false
containerCap: 10
podName: "default"
idleMinutes: 0
yamlTemplate:
persistence:
enabled: true
existingClaim: jenkins-pvc
storageClass:
annotations: {}
accessMode: "ReadWriteOnce"
size: "2Gi"
volumes:
mounts:
networkPolicy:
enabled: false
apiVersion: networking.k8s.io/v1
rbac:
create: true
serviceAccount:
create: true
name:
annotations: {}
serviceAccountAgent:
create: false
name:
annotations: {}
backup:
enabled: false
componentName: "backup"
schedule: "0 2 * * *"
annotations:
iam.amazonaws.com/role: "jenkins"
image:
repository: "nuvo/kube-tasks"
tag: "0.1.2"
extraArgs: []
existingSecret: {}
env:
- name: "AWS_REGION"
value: "us-east-1"
resources:
requests:
memory: 1Gi
cpu: 1
limits:
memory: 1Gi
cpu: 1
destination: "s3://nuvo-jenkins-data/backup"
checkDeprecation: true
We recently had this issue while trying to run Jenkins using Helm. The issue was that the pod couldn't initialize because of an error that occurred while Jenkins was trying to configure itself and pull updates down from jenkins.io. You can find these log messages using a command similar to the following:
kubectl logs solemn-quoll-jenkins-abc78900-xxx -c copy-default-config
Replace solemn-quoll-jenkins-abc78900-xxx above with whatever name Helm assigns to your Jenkins pod. The issue was in the copy-default-config container, so the -c option lets you peek at the logs of this container within the Jenkins pod. In our case, it was an HTTP proxy issue: the copy-default-config container was failing because it could not connect to https://updates.jenkins.io/ to download plugin updates. You can test whether it is a plugin update issue by going into your values.yaml file and commenting out all the plugins under the installPlugins: heading.
For example:
installPlugins:
#- kubernetes:1.18.1
#- workflow-job:2.33
#- workflow-aggregator:2.6
#- credentials-binding:1.19
#- git:3.11.0
#- blueocean:1.18.1
#- kubernetes-cd:2.0.0
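If the root cause is indeed an HTTP proxy, and assuming your chart version exposes master.initContainerEnv (check its values.yaml), proxy settings can be passed to the copy-default-config init container, roughly like this:
master:
  initContainerEnv:
    # hypothetical proxy endpoint; replace with your own
    - name: http_proxy
      value: "http://proxy.example.com:3128"
    - name: https_proxy
      value: "http://proxy.example.com:3128"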
I am using Strapi within a DigitalOcean Kubernetes cluster. The public folder images are handled by a persistent volume claim (PVC). After a redeploy, the images are visible within Strapi and also from http://api.mywebsite.com/uploads/blabla.jpg. An Imaginary image processor located within the same cluster returns a 404 error when trying to get the same images from Strapi.
What might be the cause of this?
I have tried to build an initContainer as described here https://medium.com/faun/digitalocean-kubernetes-and-volume-permissions-820f46598965, but it did not help:
initContainers:
- name: data-permissions-fix
image: busybox
command: ["/bin/chmod","-R","777", "/backend/public/uploads"]
volumeMounts:
- name: backend-images
mountPath: /backend/public/uploads
The flow is like this: frontend -> ingress -> image-processor (Fastify server) -> imaginary -> backend
Backend:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.18.0 ()
creationTimestamp: null
labels:
io.kompose.service: backend
name: backend
spec:
replicas: 1
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: backend
spec:
containers:
image: backend
name: backend
ports:
- containerPort: 1337
resources: {}
volumeMounts:
- mountPath: /backend/public/uploads
name: backend-images
readOnly: false
initContainers:
- name: data-permissions-fix
image: busybox
command: ["/bin/chmod","-R","777", "/backend/public/uploads"]
volumeMounts:
- name: backend-images
mountPath: /backend/public/uploads
volumes:
- name: backend-images
persistentVolumeClaim:
claimName: backend-images
initContainers:
- name: init-db
image: busybox
command: ['sh', '-c', 'until nc -z db:5432; do echo waiting for db; sleep 2; done;']
restartPolicy: Always
status: {}
Backend PVC:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: backend-images
name: backend-images
spec:
accessModes:
- ReadWriteOnce
storageClassName: do-block-storage
# persistentVolumeReclaimPolicy: Recycle
resources:
requests:
storage: 1Gi
status: {}
Describe backend pod:
Name: backend-5f-vhx48
Namespace: default
Priority: 0
PriorityClassName: <none>
Node: pool-1-xveq/10.135.181.55
Start Time: Thu, 27 Jun 2019 19:07:31 +0200
Labels: io.kompose.service=backend
pod-template-hash=5f9fb4fbb6
Annotations: <none>
Status: Running
IP: 10.244.1.92
Controlled By: ReplicaSet/backend-5f9fbb6
Init Containers:
init-db:
Container ID: docker://e4728305d970fb2d76f1f203271d3ce902a5ef56
Image: busybox
Image ID: docker-pullable://busybox@sha256:7a4d4ed96e15da96906910d57fc4a13210160
Port: <none>
Host Port: <none>
Command:
sh
-c
until nc -z db:5432; do echo waiting for db; sleep 2; done;
State: Terminated
Reason: Completed
Exit Code: 0
Started: Thu, 27 Jun 2019 19:07:39 +0200
Finished: Thu, 27 Jun 2019 19:07:39 +0200
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-fl98h (ro)
Containers:
backend:
Container ID: docker://b42bea24655d3d40e59985f8fff96bce
Image: backend
Image ID: docker-pullable://backend@sha256:63765ef8841b45e4717f047b71446c1058d2
Port: 1337/TCP
Host Port: 0/TCP
State: Running
Started: Thu, 27 Jun 2019 19:07:41 +0200
Ready: True
Restart Count: 0
Environment:
Mounts:
/usr/src/backend/public/uploads from backend-images-teuberkohlhoff (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-fl98h (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
backend-images-teuberkohlhoff:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: backend-images
ReadOnly: false
default-token-fl98h:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-flh72
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events: <none>
Describe PVC:
Name: backend-images
Namespace: default
StorageClass: do-block-storage
Status: Bound
Volume: pvc-de757a78-8b8a-364b3aed3
Labels: io.kompose.service=backend-images
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"creationTimestamp":null,"labels":{"io.kompose.service":"ba...
pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.beta.kubernetes.io/storage-provisioner: dobs.csi.digitalocean.com
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 1Gi
Access Modes: RWO
VolumeMode: Filesystem
Events: <none>
Mounted By: backend-5f-vhx48
Image-processor:
const imaginary = require('imaginary');
const fastify = require('fastify')({ logger: true });
const imageServer = 'http://imaginary:9000/';
fastify.get('*', async (request, reply) => {
const {
filename, type: format, width: imageWidth, url: imageUrl,
} = request.query;
const imageStream = imaginary()
.server(imageServer)
.resize({ width: imageWidth, url: imageUrl, type: format })
.on('error', (err) => {
console.error('Cannot resize the image:', err);
});
reply
.header('Content-Disposition', `attachment; filename="${filename}.${format}"`)
.header('Content-Type', `image/${format}`)
.send(imageStream);
});
const start = async () => {
try {
await fastify.listen(9009, '0.0.0.0');
fastify.log.info(`server listening on ${fastify.server.address().port}`);
} catch (err) {
fastify.log.error('ERROR', err);
process.exit(1);
}
};
start();
The frontend image URL is:
http://imagehandling.domain.com/b2b36f31caa9d8f6/320/title.webp?type=webp&width=320&url=http://backend:1337/uploads/b2b36f31caa9d8f6.jpg&filename=title
I am sorry, it was my error: the Ingress controller was hitting the wrong URL. I will just leave the question up in case others are searching for how to set up image processing.
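For anyone wiring up a similar flow, a sketch of the Ingress rule that should route the image-handling hostname to the resizing service; the hostname comes from the URL above, while the service name image-processor and port 9009 (the Fastify listen port) are assumptions:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: imagehandling
spec:
  rules:
    - host: imagehandling.domain.com
      http:
        paths:
          - path: /
            backend:
              serviceName: image-processor
              servicePort: 9009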
#webdev asked for the Dockerfile:
FROM node:10-alpine
WORKDIR /usr/src/app/backend
RUN echo "unsafe-perm = true" >> ~/.npmrc
RUN apk add --no-cache \
autoconf \
automake \
gcc \
libc-dev \
libtool \
make \
nasm \
zlib-dev
RUN npm install -g strapi#beta
COPY . .
# COPY strapi.sh ./
RUN chmod +x ./strapi.sh
EXPOSE 1337
# COPY healthcheck.js ./
HEALTHCHECK --interval=15s --timeout=5s --start-period=30s \
CMD node /usr/src/api/healthcheck.js
CMD ["./strapi.sh"]
Strapi.sh:
#!/bin/sh
set -ea
_stopStrapi() {
echo "Stopping strapi"
kill -SIGINT "$strapiPID"
wait "$strapiPID"
}
trap _stopStrapi TERM INT
cd /usr/src/app/backend
APP_NAME=${APP_NAME:-strapi-app}
DATABASE_CLIENT=${DATABASE_CLIENT:-mongo}
DATABASE_HOST=${DATABASE_HOST:-localhost}
DATABASE_PORT=${DATABASE_PORT:-27017}
DATABASE_NAME=${DATABASE_NAME:-strapi}
DATABASE_SRV=${DATABASE_SRV:-false}
EXTRA_ARGS=${EXTRA_ARGS:-}
FRESH_BOOTSTRAP=false
if [ ! -f "$APP_NAME/package.json" ]
then
strapi new "${APP_NAME}" \
"--dbclient=$DATABASE_CLIENT" \
"--dbhost=$DATABASE_HOST" \
"--dbport=$DATABASE_PORT" \
"--dbsrv=$DATABASE_SRV" \
"--dbname=$DATABASE_NAME" \
"--dbusername=$DATABASE_USERNAME" \
"--dbpassword=$DATABASE_PASSWORD" \
"--dbssl=$DATABASE_SSL" \
"--dbauth=$DATABASE_AUTHENTICATION_DATABASE" \
$EXTRA_ARGS \
--dbforce
FRESH_BOOTSTRAP=true
elif [ ! -d "$APP_NAME/node_modules" ]
then
npm install --prefix "./$APP_NAME"
FRESH_BOOTSTRAP=true
fi
cd $APP_NAME
if [ "$NODE_ENV" = "production" ]
then
strapi start &
else
strapi develop &
fi
strapiPID=$!
wait "$strapiPID"