image: atlassian/default-image:3
pipelines:
  tags:
    ecr-release-*:
      - step:
          services:
            - docker
          script:
            - apt update -y
            - apt install python3-pip -y
            - pip3 --version
            - pip3 install awscli
            - aws configure set aws_access_key_id "AKIA6J47DSdaUIAZH46DKDDID6UH"
            - aws configure set aws_secret_access_key "2dWgDxx5i7Jre0aZJ+tQ3oDve5biYk0ZMDKKASA7554QoJSJSJS"
            - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
            - chmod +x ./kubectl
            - mv ./kubectl /usr/local/bin/kubectl
            - aws eks update-kubeconfig --name build_web --region us-west-2
            - kubectl apply -f eks/aws-auth.yaml
            - kubectl apply -f eks/deployment.yaml
            - kubectl apply -f eks/service.yaml
definitions:
  services:
    docker:
      memory: 3072
Here is my bitbucket-pipelines.yml.
When I run the Bitbucket pipeline I get the error shown in the screenshot below.
I think I have already added the AWS access credentials.
Please take a look.
You need to create a service account and grant it permissions, and you also need a certificate to connect to the Kubernetes API server.
Here is a nice explanation with all the details, which might be helpful for you: https://medium.com/codeops/continuous-deployment-with-bitbucket-pipelines-ecr-and-aws-eks-791a30b7c84b
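As a rough, hypothetical sketch of what that could look like (the account name, namespace and the built-in edit role below are illustrative, not taken from the pipeline above):

# Hypothetical sketch: a service account the pipeline could deploy as
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bitbucket-deployer
  namespace: default
---
# Allow it to manage workloads in the namespace via the built-in "edit" ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: bitbucket-deployer-edit
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: edit
subjects:
  - kind: ServiceAccount
    name: bitbucket-deployer
    namespace: default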
The problem was resolved by changing the kubeconfig file. You need to specify the profile you want to use. By default, update-kubeconfig creates the authentication credentials and puts something like this inside the file:
- name: arn:aws:eks:{region}:{account-id}:cluster/{cluster-name}
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
        - --region
        - {region}
        - eks
        - get-token
        - --cluster-name
        - {cluster-name}
      command: aws
      env:
        - name: AWS_PROFILE
          value: {profile}
      interactiveMode: IfAvailable
      provideClusterInfo: false
For some reason the AWS CLI is not picking up the AWS_PROFILE environment variable value, so in this case I solved it by manually updating the kubeconfig and specifying --profile in the aws command arguments:
- name: arn:aws:eks:{region}:{account-id}:cluster/{cluster-name}
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
        - --region
        - {region}
        - eks
        - get-token
        - --profile
        - {profile}
        - --cluster-name
        - {cluster-name}
      command: aws
      #env:
      #  - name: AWS_PROFILE
      #    value: {profile}
      interactiveMode: IfAvailable
      provideClusterInfo: false
We're using GitLab for CI/CD. I'll include the script from the GitLab CI/CD file we're using:
services:
  - docker:19.03.11-dind
workflow:
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH == "developer" || $CI_COMMIT_BRANCH == "stage" || ($CI_COMMIT_BRANCH =~ (/^([A-Z]([0-9][-_])?)?SPRINT(([-_][A-Z][0-9])?)+/i))
      when: always
    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH || $CI_COMMIT_BRANCH != "developer" || $CI_COMMIT_BRANCH != "stage" || ($CI_COMMIT_BRANCH !~ (/^([A-Z]([0-9][-_])?)?SPRINT(([-_][A-Z][0-9])?)+/i))
      when: never
stages:
  - build
  - Publish
  - deploy
cache:
  paths:
    - .m2/repository
    - target
build_jar:
  image: maven:3.8.3-jdk-11
  stage: build
  script:
    - mvn clean install package -DskipTests=true
  artifacts:
    paths:
      - target/*.jar
docker_build_dev:
  stage: Publish
  image: docker:19.03.11
  services:
    - docker:19.03.11-dind
  variables:
    IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker build -t $IMAGE_TAG .
    - docker push $IMAGE_TAG
  only:
    - /^([A-Z]([0-9][-_])?)?SPRINT(([-_][A-Z][0-9])?)+/i
    - developer
docker_build_stage:
  stage: Publish
  image: docker:19.03.11
  services:
    - docker:19.03.11-dind
  variables:
    IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - docker build -t $IMAGE_TAG .
    - docker push $IMAGE_TAG
  only:
    - stage
deploy_dev:
  stage: deploy
  image: stellacenter/aws-helm-kubectl
  variables:
    ENV_VAR_NAME: development
  before_script:
    - apt update
    - apt-get install gettext-base
    - aws configure set aws_access_key_id ${DEV_AWS_ACCESS_KEY_ID}
    - aws configure set aws_secret_access_key ${DEV_AWS_SECRET_ACCESS_KEY}
    - aws configure set region ${DEV_AWS_DEFAULT_REGION}
  script:
    - sed -i "s/<VERSION>/${CI_COMMIT_SHORT_SHA}/g" patient-service.yml
    - mkdir -p $HOME/.kube
    - cp $KUBE_CONFIG_DEV $HOME/.kube/config
    - chown $(id -u):$(id -g) $HOME/.kube/config
    - export KUBECONFIG=$HOME/.kube/config
    - cat patient-service.yml | envsubst | kubectl apply -f patient-service.yml -n ${KUBE_NAMESPACE_DEV}
  only:
    - /^([A-Z]([0-9][-_])?)?SPRINT(([-_][A-Z][0-9])?)+/i
    - developer
deploy_stage:
  stage: deploy
  image: stellacenter/aws-helm-kubectl
  variables:
    ENV_VAR_NAME: stage
  before_script:
    - apt update
    - apt-get install gettext-base
    - aws configure set aws_access_key_id ${DEV_AWS_ACCESS_KEY_ID}
    - aws configure set aws_secret_access_key ${DEV_AWS_SECRET_ACCESS_KEY}
    - aws configure set region ${DEV_AWS_DEFAULT_REGION}
  script:
    - sed -i "s/<VERSION>/${CI_COMMIT_SHORT_SHA}/g" patient-service.yml
    - mkdir -p $HOME/.kube
    - cp $KUBE_CONFIG_STAGE $HOME/.kube/config
    - chown $(id -u):$(id -g) $HOME/.kube/config
    - export KUBECONFIG=$HOME/.kube/config
    - cat patient-service.yml | envsubst | kubectl apply -f patient-service.yml -n ${KUBE_NAMESPACE_STAGE}
  only:
    - stage
We merged the script so that the stage and development environments don't clash with each other during deployment. Previously we had a separate Dockerfile for each environment (stage and developer). Now I want to merge the Dockerfile and the Kubernetes YAML file as well. I merged them, but the Dockerfile is not picking up the environment, and Kubernetes shows the warning "Back-off restarting failed container" after the pipeline succeeds. I don't know how to clear this warning in Kubernetes. I'll enclose the merged Dockerfile and YAML file for your reference.
Kubernetes YAML file
apiVersion: apps/v1
kind: Deployment
metadata:
  name: patient-app
  labels:
    app: patient-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: patient-app
  template:
    metadata:
      labels:
        app: patient-app
    spec:
      containers:
        - name: patient-app
          image: registry.gitlab.com/stella-center/backend-services/patient-service:<VERSION>
          imagePullPolicy: Always
          ports:
            - containerPort: 8094
          env:
            - name: ENV_VAR_NAME
              value: "${ENV_VAR_NAME}"
      imagePullSecrets:
        - name: gitlab-registry-token-auth
---
apiVersion: v1
kind: Service
metadata:
  name: patient-service
spec:
  type: NodePort
  selector:
    app: patient-app
  ports:
    - port: 8094
      targetPort: 8094
Dockerfile
FROM maven:3.8.3-jdk-11 AS MAVEN_BUILD
COPY pom.xml /build/
COPY src /build/src/
WORKDIR /build/
RUN mvn clean install package -DskipTests=true
FROM openjdk:11
WORKDIR /app
COPY --from=MAVEN_BUILD /build/target/patient-service-*.jar /app/patient-service.jar
ENV PORT 8094
EXPOSE $PORT
ENTRYPOINT ["java","-Dspring.profiles.active=$ENV_VAR_NAME","-jar","/app/patient-service.jar"]
In the Dockerfile, instead of the last line above, we previously used:
ENTRYPOINT ["java","-Dspring.profiles.active=development","-jar","/app/patient-service.jar"] - for the developer Dockerfile
ENTRYPOINT ["java","-Dspring.profiles.active=stage","-jar","/app/patient-service.jar"] - for the stage Dockerfile
At that time it was working fine and I wasn't facing any issue on Kubernetes. I only added the environment variable so the image picks up whether it is development or stage. I don't know why the warning is happening. Please help me sort this out. Thanks in advance.
kubectl describe pods
Name:         patient-app-6cd8c88d6-s7ldt
Namespace:    stellacenter-dev
Priority:     0
Node:         ip-192-168-49-35.us-east-2.compute.internal/192.168.49.35
Start Time:   Wed, 25 May 2022 20:09:23 +0530
Labels:       app=patient-app
              pod-template-hash=6cd8c88d6
Annotations:  kubernetes.io/psp: eks.privileged
Status:       Running
IP:           192.168.50.146
IPs:
  IP:         192.168.50.146
Controlled By:  ReplicaSet/patient-app-6cd8c88d6
Containers:
  patient-app:
    Container ID:   docker://2d3431a015a40f551e51285fa23e1d39ad5b257bfd6ba75c3972f422b94b12be
    Image:          registry.gitlab.com/stella-center/backend-services/patient-service:96e21d80
    Image ID:       docker-pullable://registry.gitlab.com/stella-center/backend-services/patient-service@sha256:3f9774efe205c081de4df5b6ee22cba9940f974311b0942a8473ee02b9310b43
    Port:           8094/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 25 May 2022 20:09:24 +0530
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-sxbzc (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  kube-api-access-sxbzc:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:                      <none>
Your Dockerfile uses exec form ENTRYPOINT syntax. This form doesn't expand environment variables; Spring is literally getting the string $ENV_VAR_NAME as the profile name, and failing on this.
Spring knows how to set properties from environment variables, though. Rather than building that setting into the Dockerfile, you can use an environment variable to set the profile name at deploy time.
# Dockerfile: do not set `-Dspring.profiles.active`
ENTRYPOINT ["java", "-jar", "/app/patient-service.jar"]
# Deployment YAML: do set `$SPRING_PROFILES_ACTIVE`
env:
  - name: SPRING_PROFILES_ACTIVE
    value: "${ENV_VAR_NAME}" # Helm: {{ quote .Values.environment }}
However, with this approach, you still need to set deployment-specific settings in your src/main/resources/application-*.yml file, then rebuild the jar file, then rebuild the Docker image, then redeploy. This doesn't make sense for most settings, particularly since you can set them as environment variables. If one of these values needs to change you can just change the Kubernetes configuration and redeploy, without recompiling anything.
# Deployment YAML: don't use Spring profiles; directly set variables instead
env:
  - name: SPRING_DATASOURCE_URL
    value: "jdbc:postgresql://postgres-dev/database"
Run the following command to see why your pod crashes:
kubectl describe pod -n <your-namespace> <your-pod>
Additionally, the output of kubectl get pod -o yaml -n <your-namespace> <your-pod> has a status section that holds the reason for restarts. You might have to look up the exit code; e.g. 137 stands for OOM.
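For example (a sketch; the namespace and pod name are placeholders), the last termination reason and exit code can be pulled out directly with jsonpath:

# Print why the container last terminated (e.g. OOMKilled / Error) and its exit code
kubectl get pod <your-pod> -n <your-namespace> \
  -o jsonpath='{.status.containerStatuses[0].lastState.terminated.reason}{" "}{.status.containerStatuses[0].lastState.terminated.exitCode}{"\n"}'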
I have built a Jenkins Docker container using the bash script below, which runs OK. Then I committed it to an image and uploaded it to a Docker registry. Now I am trying to deploy it in Kubernetes, but the pod is failing and I can't figure out how to solve it. Below are the bash script and the YAML file with the deployment and service definitions.
DOCKER BASH SCRIPT
echo "Jenkins"
echo "###############################################################################################################"
mkdir -p /home/ubuntu/Jenkins
cd /home/ubuntu/Jenkins
cat << EOF > Dockerfile
# Dockerfile
FROM jenkins4eval/jenkins:latest
# copy the list of plugins we want to install
COPY plugins.txt /usr/share/jenkins/plugins.txt
# run the install-plugins script to install the plugins
RUN /usr/local/bin/install-plugins.sh < /usr/share/jenkins/plugins.txt
# disable the setup wizard as we will set up jenkins as code :)
ENV JAVA_OPTS -Djenkins.install.runSetupWizard=false
# copy the config-as-code yaml file into the image
COPY jenkins-casc.yaml /usr/local/jenkins-casc.yaml
# tell the jenkins config-as-code plugin where to find the yaml file
ENV CASC_JENKINS_CONFIG /usr/local/jenkins-casc.yaml
EOF
cat << EOF > docker-compose.yml
version: "3.1"
services:
  Jenkins:
    container_name: Jenkins
    image: jenkins-casc:0.1
    ports:
      - 8080:8080
    volumes:
      - ./plugins.txt:/usr/share/jenkins/plugins.txt
      - ./jenkins-casc.yaml:/usr/local/jenkins-casc.yaml
    environment:
      - JAVA_OPTS=-Djenkins.install.runSetupWizard=false
      - CASC_JENKINS_CONFIG=/usr/local/jenkins-casc.yaml
EOF
cat << EOF > plugins.txt
configuration-as-code
saml
matrix-auth
cloudbees-folder
build-timeout
timestamper
ws-cleanup
github-api
github
ssh-slaves
warnings-ng
plot
sonar
EOF
# Download yaml file
CASC_CONFIG_FILE=/home/ubuntu/Jenkins/jenkins-casc.yaml
CURL_TOKEN="856d36c380982e13f8d84e1b4dab13009b8ebdd4"
CURL_OWNER="slotone"
CURL_REPO="s1g_install"
CURL_FILEPATH="s1_ami_jenkins_master_config.yaml"
CURL_URL="https://api.github.com/repos/${CURL_OWNER}/${CURL_REPO}/contents/${CURL_FILEPATH}"
/usr/bin/curl --header "Authorization: token ${CURL_TOKEN}" --header "Accept: application/vnd.github.v3.raw" \
--location ${CURL_URL} -o ${CASC_CONFIG_FILE}
# Build and run Jenkins server
docker build -t jenkins-casc:0.1 .
KUBERNETES JENKINS YAML
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
spec:
  selector:
    matchLabels:
      app: jenkins
  replicas: 1
  template: # template for the pods
    metadata:
      labels:
        app: jenkins
    spec:
      containers:
        - name: jenkins
          image: tpargmdiaz/jenkins-k8s:1.0
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
spec:
  # This defines which pods are going to be represented by this Service.
  # The service becomes a network endpoint for either other services
  # or maybe external users to connect to (eg browser)
  selector:
    app: jenkins
  ports:
    - name: http
      port: 80
  type: ClusterIP
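Since no error output from the failing pod is included, a first debugging step (a generic sketch, not specific to this setup; the pod name is a placeholder) would be to check why the pod is failing:

# Inspect the failing pod: events usually show image-pull or crash reasons
kubectl get pods -l app=jenkins
kubectl describe pod <jenkins-pod-name>       # check the Events section at the bottom
kubectl logs <jenkins-pod-name> --previous    # logs from the crashed container, if it restarted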
I am trying to use GitHub Actions to fire up a Postgres container for my tests. I have a script called build.sh that gets called when npm run build is called via GitHub actions. This script calls upon restore-schema.sh (shown below).
The issue here is that when restore-schema.sh gets run, I keep getting Error: no such container: postgres. GitHub Actions is naming the container some arbitrary string. Is there a way I can run docker exec on an image, or somehow name the postgres container that GitHub Actions is creating? I've looked through both documentations to no avail.
How should I go about this? I noticed that in the Docker run ps screenshot, it shows command docker-entrypoint.sh. Should I use this instead? Do I specify the Dockerfile inside .github/workflows/?
I tried to include as much relevant information as possible - comment if you need any other information please.
Screenshots from GitHub Actions
Initialize containers
Docker run ps <- docker ps showing name postgres
Run npm run build --if-present <- WHERE THE ISSUE IS OCCURRING
build.sh
#!/bin/sh
# Import core db schema
./.deploy/postgres/restore-schema.sh
.deploy/postgres/restore-schema.sh
#!/bin/sh
docker exec -it postgres psql \
--username postgres \
--password dev \
coredb < .deploy/postgres/db-schema.sql
.github/workflows/test-api-gateway.yml
name: API Gateway CI
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master, develop ]
jobs:
  build:
    runs-on: ubuntu-latest
    services: # Service containers to run with `container-job`
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_USER: postgres
          POSTGRES_DB: coredb
          POSTGRES_PASSWORD: dev
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
    strategy:
      matrix:
        node-version: [14.x]
    steps:
      - uses: actions/checkout@v2
      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}
      - run: docker ps
      - run: chmod +x build.sh .deploy/postgres/restore-schema.sh
      - run: npm ci
      - run: npm run build --if-present
      - run: npm test
Try the --name option
options: >-
  --health-cmd pg_isready
  --health-interval 10s
  --health-timeout 5s
  --health-retries 5
  --name postgres
https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idservices
jobs.<job_id>.services.options: Additional Docker container resource options. For a list of options, see "docker create options."
Another solution I've seen is using the last created container:
docker exec -it $(docker ps --latest --quiet) bash
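With the --name postgres option in place, the existing docker exec postgres call resolves the container by name again. A small sketch of an adjusted restore script (note: psql's --password flag only forces a prompt, so the password is passed via PGPASSWORD here, and -t is dropped because CI runners have no TTY):

# .deploy/postgres/restore-schema.sh (sketch, assuming the service container is named "postgres")
docker exec -i -e PGPASSWORD=dev postgres \
  psql --username postgres coredb < .deploy/postgres/db-schema.sql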
I don't know how to update my backend workload on my Kubernetes cluster. My Gitlab Pipeline is running without errors. My active revision is still on my first push, so how can I update the revision to call the rolling update action? Can I integrate an automatic rollout into the Gitlab Ci?
.gitlab-ci
image: docker:latest
services:
  - docker:dind
variables:
  DOCKER_DRIVER: overlay
  SPRING_PROFILES_ACTIVE: gitlab-ci
stages:
  - build
  - package
  - deploy
maven-build:
  image: maven:3-jdk-8
  stage: build
  script: "mvn package -B"
  artifacts:
    paths:
      - target/*.jar
docker-build:
  stage: package
  script:
    - docker build -t registry.gitlab.com/projectX/ft-backend .
    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.gitlab.com
    - docker push registry.gitlab.com/projectX/ft-backend
k8s-deploy:
  image: google/cloud-sdk
  stage: deploy
  script:
    - echo "$GOOGLE_KEY" > key.json
    - gcloud auth activate-service-account --key-file key.json
    - gcloud config set compute/zone europe-west3-a
    - gcloud config set project projectX
    - gcloud config unset container/use_client_certificate
    - gcloud container clusters get-credentials development --zone europe-west3-a --project projectX
    - kubectl delete secret registry.gitlab.com
    - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=MYNAME --docker-password=$REGISTRY_PASSWD --docker-email=MYMAIL
    - kubectl apply -f deployment.yml
deployment.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ft-backend
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  minReadySeconds: 5
  template:
    metadata:
      labels:
        app: ft-backend
    spec:
      containers:
        - name: ft-backend
          image: registry.gitlab.com/projectX/ft-backend
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
      imagePullSecrets:
        - name: registry.gitlab.com
Google Cloud Workload
As discussed in comments, you have to update your Deployment .spec.template to trigger a rollout. An easy way for you to do it is to tag your image upon release.
In your .gitlab-ci.yml file you can use the CI_COMMIT_SHA variable:
# in your docker-build job, update build and push:
- docker build -t registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA} .
- docker push registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA}
# in your k8s-deploy job add this:
- kubectl set image deployment/ft-backend ft-backend=registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA}
That would both version your image on your GitLab project registry, and trigger a rollout.
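If you also want the job to wait for and verify the rollout, something like this could be appended to the k8s-deploy script (just a sketch):

- kubectl rollout status deployment/ft-backend --timeout=120s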
Like Clorichel mentioned in the comments, you'd need to modify your deployment to trigger a rollout. You could use something like Gitflow and Semantic Versioning (if you're not already) to tag your container image. For example, in the .gitlab-ci you could add the Git tag to your container image:
script:
  - docker build -t registry.gitlab.com/projectX/ft-backend:$CI_COMMIT_TAG .
  - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.gitlab.com
  - docker push registry.gitlab.com/projectX/ft-backend:$CI_COMMIT_TAG
In the deployment.yml you would reference the new version:
spec:
  containers:
    - name: ft-backend
      image: registry.gitlab.com/projectX/ft-backend:YOUR_NEW_GIT_TAG
      imagePullPolicy: Always
      ports:
        - containerPort: 8080
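To avoid editing deployment.yml by hand for every release, the placeholder could be substituted in the deploy job right before applying the manifest; a rough sketch (the placeholder name matches the snippet above):

# in the k8s-deploy script, before `kubectl apply -f deployment.yml`
- sed -i "s|ft-backend:YOUR_NEW_GIT_TAG|ft-backend:${CI_COMMIT_TAG}|" deployment.yml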
I get the following error message in my Gitlab CI pipeline and I can't do anything with it. Yesterday the pipeline still worked, but I didn't change anything in the yml and I don't know where I made the mistake. I also reset my code to the last working commit, but the error still occurs.
$ kubectl set image deployment/ft-backend ft-backend=registry.gitlab.com/projectX/ft-backend
Error from server (NotFound): deployments.extensions "ft-backend" not found
.gitlab-ci.yml
image: docker:latest
services:
  - docker:dind
variables:
  DOCKER_DRIVER: overlay
  SPRING_PROFILES_ACTIVE: gitlab-ci
stages:
  - build
  - package
  - deploy
maven-build:
  image: maven:3-jdk-8
  stage: build
  script: "mvn package -B"
  artifacts:
    paths:
      - target/*.jar
docker-build:
  stage: package
  script:
    - docker build -t registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA} .
    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.gitlab.com
    - docker push registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA}
k8s-deploy:
  image: google/cloud-sdk
  stage: deploy
  script:
    - echo "$GOOGLE_KEY" > key.json
    - gcloud auth activate-service-account --key-file key.json
    - gcloud config set compute/zone europe-west3-a
    - gcloud config set project projectX
    - gcloud config unset container/use_client_certificate
    - gcloud container clusters get-credentials development --zone europe-west3-a --project projectX
    - kubectl delete secret registry.gitlab.com
    - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=MY_NAME --docker-password=$REGISTRY_PASSWD --docker-email=MY_MAIL
    - kubectl set image deployment/ft-backend ft-backend=registry.gitlab.com/projectX/ft-backend:${CI_COMMIT_SHA}
    - kubectl apply -f deployment.yml
I suppose that when you are invoking the command:
kubectl set image deployment/ft-backend ft-backend=registry.gitlab.com/projectX/ft-backend
deployment ft-backend does not exist in your cluster. Does the command: kubectl get deployment ft-backend return the same result?
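A quick way to check is to list deployments across all namespaces, in case it simply lives in a namespace other than your kubeconfig's default (just a sketch):

kubectl get deployments --all-namespaces | grep ft-backend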
If the deployment doesn't exist yet, create it first. The old way of creating deployments is not supported in newer versions; this is the current syntax:
$ kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4
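Adapted to this pipeline it would be something along these lines (a sketch; the tag placeholder is illustrative, and applying your existing deployment.yml before kubectl set image would equally create the Deployment):

kubectl create deployment ft-backend --image=registry.gitlab.com/projectX/ft-backend:<tag>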