I've got a Spring Boot project built with Jib where the most recently built Docker image automatically gets the "latest" tag. Now I've created a Dockerfile and a pipeline definition for my Angular project, but the latest tag is not being created. How do I make the newest image automatically become latest, as is the case with my Spring Boot Jib project?
My Dockerfile:
FROM nginx:latest
COPY dist/test-docker-angular-app/ /usr/share/nginx/html
COPY nginx.conf /etc/nginx/nginx.conf
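For reference, a minimal nginx.conf for serving a single-page Angular app might look like the sketch below; this is an assumption for illustration, since the actual file isn't shown (the try_files line routes unknown paths back to index.html so Angular's router can handle them):
# hypothetical minimal nginx.conf for an Angular SPA (not the asker's actual file)
events {}
http {
  include /etc/nginx/mime.types;
  server {
    listen 80;
    root /usr/share/nginx/html;
    location / {
      # fall back to index.html so client-side routing works
      try_files $uri $uri/ /index.html;
    }
  }
}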
My gitlab-ci.yml:
stages:
  - build
  - package

variables:
  VERSION: ${CI_COMMIT_SHORT_SHA}-${CI_COMMIT_REF_SLUG}

build-prod-app:
  stage: build
  image: node:latest
  script:
    - npm install -g @angular/cli@9.1.0
    - npm install
    - ng build --prod
  artifacts:
    paths:
      - dist/
    expire_in: 2 hours
  cache:
    paths:
      - node_modules/

docker-build:
  image: docker:stable
  stage: package
  services:
    - docker:dind
  before_script:
    - echo $CI_BUILD_TOKEN | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
  script:
    - IMAGE_NAME="$CI_REGISTRY_IMAGE:$VERSION"
    - docker build --pull -t "$IMAGE_NAME" -f Dockerfile .
    - docker push "$IMAGE_NAME"
This has worked for me:
docker-build-master:
  image: docker:stable
  stage: package
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE" .
    - docker push "$CI_REGISTRY_IMAGE"
  only:
    - master

docker-build:
  image: docker:stable
  stage: package
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - IMAGE_NAME="$CI_REGISTRY_IMAGE:$VERSION"
    - docker build --pull -t "$IMAGE_NAME" .
    - docker push "$IMAGE_NAME"
  except:
    - master
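If you would rather keep a single job for all branches, a minimal sketch (assuming the same VERSION variable as above) is to tag the same image twice and only push latest on master:
docker-build:
  image: docker:stable
  stage: package
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - IMAGE_NAME="$CI_REGISTRY_IMAGE:$VERSION"
    - docker build --pull -t "$IMAGE_NAME" .
    - docker push "$IMAGE_NAME"
    # on master, also tag the freshly built image as latest and push it
    - |
      if [ "$CI_COMMIT_REF_NAME" = "master" ]; then
        docker tag "$IMAGE_NAME" "$CI_REGISTRY_IMAGE:latest"
        docker push "$CI_REGISTRY_IMAGE:latest"
      fi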
I use a GitLab runner on an EC2 instance to build, test and deploy Docker images on ECS.
I start my CI workflow using a "push/pull" logic: I build all my Docker images during the first stage and push them to my GitLab registry, then I pull them during the test stage.
I thought that I could drastically improve the workflow time by keeping the images built during the build stage available between the build and test stages.
My gitlab-ci.yml looks like this:
stages:
  - build
  - test
  - deploy

build_backend:
  stage: build
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
  script:
    - docker build -t backend:$CI_COMMIT_BRANCH ./backend
  only:
    refs:
      - develop
      - master

build_generator:
  stage: build
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
  script:
    - docker build -t generator:$CI_COMMIT_BRANCH ./generator
  only:
    refs:
      - develop
      - master

build_frontend:
  stage: build
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
  script:
    - docker build -t frontend:$CI_COMMIT_BRANCH ./frontend
  only:
    refs:
      - develop
      - master

build_scraping:
  stage: build
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
  script:
    - docker build -t scraping:$CI_COMMIT_BRANCH ./scraping
  only:
    refs:
      - develop
      - master

test_backend:
  stage: test
  needs: ["build_backend"]
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
    - DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
    - mkdir -p $DOCKER_CONFIG/cli-plugins
    - apk add curl
    - curl -SL https://github.com/docker/compose/releases/download/v2.3.2/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
    - chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
  script:
    - docker compose -f docker-compose-ci.yml up -d backend
    - docker exec backend pip3 install --no-cache-dir --upgrade -r requirements-test.txt
    - docker exec db sh mongo_init.sh
    - docker exec backend pytest test --junitxml=report.xml -p no:cacheprovider
  artifacts:
    when: always
    reports:
      junit: backend/report.xml
  only:
    refs:
      - develop
      - master

test_generator:
  stage: test
  needs: ["build_generator"]
  image: docker
  services:
    - docker:dind
  before_script:
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER $CI_REGISTRY --password-stdin
    - DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
    - mkdir -p $DOCKER_CONFIG/cli-plugins
    - apk add curl
    - curl -SL https://github.com/docker/compose/releases/download/v2.3.2/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
    - chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
  script:
    - docker compose -f docker-compose-ci.yml up -d generator
    - docker exec generator pip3 install --no-cache-dir --upgrade -r requirements-test.txt
    - docker exec generator pip3 install --no-cache-dir --upgrade -r requirements.txt
    - docker exec db sh mongo_init.sh
    - docker exec generator pytest test --junitxml=report.xml -p no:cacheprovider
  artifacts:
    when: always
    reports:
      junit: generator/report.xml
  only:
    refs:
      - develop
      - master
[...]
gitlab-runner/config.toml:
concurrent = 5
check_interval = 0

[session_server]
  session_timeout = 1800

[[runners]]
  name = "Docker Runner"
  url = "https://gitlab.com/"
  token = ""
  executor = "docker"
  [runners.custom_build_dir]
  [runners.cache]
    [runners.cache.s3]
    [runners.cache.gcs]
    [runners.cache.azure]
  [runners.docker]
    tls_verify = false
    image = "docker:19.03.12"
    privileged = true
    disable_entrypoint_overwrite = false
    oom_kill_disable = false
    disable_cache = false
    volumes = ["/certs/client", "/cache"]
    shm_size = 0
docker-compose-ci.yml:
services:
  backend:
    container_name: backend
    image: backend:$CI_COMMIT_BRANCH
    build:
      context: backend
    volumes:
      - ./backend:/app
    networks:
      default:
    ports:
      - 8000:8000
      - 587:587
      - 443:443
    environment:
      - ENVIRONMENT=development
    depends_on:
      - db

  generator:
    container_name: generator
    image: generator:$CI_COMMIT_BRANCH
    build:
      context: generator
    volumes:
      - ./generator:/var/task
    networks:
      default:
    ports:
      - 9000:8080
    environment:
      - ENVIRONMENT=development
    depends_on:
      - db

  db:
    container_name: db
    image: mongo
    volumes:
      - ./mongo_init.sh:/mongo_init.sh:ro
    networks:
      default:
    environment:
      MONGO_INITDB_DATABASE: DB
      MONGO_INITDB_ROOT_USERNAME: admin
      MONGO_INITDB_ROOT_PASSWORD: admin
    ports:
      - 27017:27017

  frontend:
    container_name: frontend
    image: frontend:$CI_COMMIT_BRANCH
    build:
      context: frontend
    volumes:
      - ./frontend:/app
    networks:
      default:
    ports:
      - 8080:8080
    depends_on:
      - backend

networks:
  default:
    driver: bridge
When I comment out context: in my docker-compose-ci.yml, Docker can't find my image, and indeed it is not kept between jobs.
What is the best Docker approach during CI to build -> test -> deploy?
Should I save my Docker images and share them between stages using artifacts? It doesn't seem to be the most efficient way to do this.
I'm a bit lost about which approach I should use to perform such a common workflow in GitLab CI using Docker.
The best way to do this is to push the image to the registry and pull it in other stages where it is needed. You appear to be missing the push/pull logic.
You also want to make sure you're leveraging Docker caching in your builds. You'll probably want to specify the cache_from: key in your compose file.
For example:
build:
  stage: build
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    # pull the latest image to leverage cached layers
    - docker pull $CI_REGISTRY_IMAGE:latest || true
    # build and push the image to be used in subsequent stages
    - docker build --cache-from $CI_REGISTRY_IMAGE:latest --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .
    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA # push the image

test:
  stage: test
  needs: [build]
  script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    # pull the image that was built in the previous stage
    - docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
    - docker-compose up # or docker run or whatever
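For the compose-based builds from the question, the cache_from: key goes under build:. A sketch for one service, with illustrative image names (it assumes you push per-service images to sub-paths of the project registry; older compose file formats may need version "3.2" or later for cache_from):
services:
  backend:
    image: $CI_REGISTRY_IMAGE/backend:$CI_COMMIT_SHA
    build:
      context: backend
      # reuse layers from the last image pushed to the registry, if any
      cache_from:
        - $CI_REGISTRY_IMAGE/backend:latest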
Try mounting the "Docker Root Dir" as a persistent/nfs volume that is shared by the fleet of runners.
Docker images are stored in "Docker Root Dir" path. You can find out your docker root by running:
# docker info
...
Storage Driver: overlay2
Docker Root Dir: /var/lib/docker
...
Generally the default paths based on the OS are
Ubuntu: /var/lib/docker/
Fedora: /var/lib/docker/
Debian: /var/lib/docker/
Windows: C:\ProgramData\DockerDesktop
MacOS: ~/Library/Containers/com.docker.docker/Data/vms/0/
Once properly mounted to all agents, you will be able to access all local docker images.
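With the docker executor above, one way to wire this up is through the runner's volumes setting, which is also mounted into the dind service container that owns /var/lib/docker. A sketch, assuming a hypothetical NFS mount at /mnt/nfs/docker; note that two Docker daemons writing to the same root directory at the same time will corrupt it, so this only works if concurrent jobs don't share the path simultaneously:
[runners.docker]
  privileged = true
  # /mnt/nfs/docker is an illustrative shared path; adjust to your mount
  volumes = ["/certs/client", "/cache", "/mnt/nfs/docker:/var/lib/docker"]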
References:
https://docs.gitlab.com/runner
https://blog.nestybox.com/2020/10/21/gitlab-dind.html
I have a Node.js application that I need to deploy to an existing Kubernetes cluster.
The cluster is set up using kops on AWS.
I have created a .gitlab-ci.yml file for building Docker images.
So, whenever a change is pushed to either the master or develop branch, it builds the Docker image.
I have already followed the steps defined here to add an existing cluster.
Now, I have to roll out an update to the existing Kubernetes cluster.
# This file is a template, and might need editing before it works on your project.
docker-build-master:
  # Official docker image.
  image: docker:latest
  stage: build
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE:prod" .
    - docker push "$CI_REGISTRY_IMAGE:prod"
  only:
    - master

docker-build-dev:
  # Official docker image.
  image: docker:latest
  stage: build
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE:dev" .
    - docker push "$CI_REGISTRY_IMAGE:dev"
  only:
    - develop
For now, I am using a shared runner.
How can I integrate a Kubernetes deployment with GitLab CI/CD after the image is built, to deploy on AWS (the cluster is created with kops)?
For the registry, I am using GitLab's Container Registry, not Docker Hub.
Update
I changed the configuration to the following:
stages:
  - docker-build
  - deploy

docker-build-master:
  image: docker:latest
  stage: docker-build
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE:prod" .
    - docker push "$CI_REGISTRY_IMAGE:prod"
  only:
    - master

deploy-prod:
  stage: deploy
  image: roffe/kubectl
  script:
    - kubectl apply -f scheduler-deployment.yaml
  only:
    - master

docker-build-dev:
  image: docker:latest
  stage: docker-build
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE:dev" .
    - docker push "$CI_REGISTRY_IMAGE:dev"
  only:
    - develop
But now I am getting the error below:
roffe/kubectl with digest roffe/kubectl@sha256:ba13f8ffc55c83a7ca98a6e1337689fad8a5df418cb160fa1a741c80f42979bf ...
$ kubectl apply -f scheduler-deployment.yaml
error: the path "scheduler-deployment.yaml" does not exist
Cleaning up file based variables
00:01
ERROR: Job failed: exit code 1
The file scheduler-deployment.yaml does exist in the root directory of the repository.
I'm trying to run a mounted shell-script inside a docker container by following these steps:
build stage: build the docker image.
test stage: mount a directory into the container at runtime with a shell-script file inside.
test stage: run the shell-script file from inside the Docker container.
could someone please explain how this should be done?
see line: #- ?? HERE I SHOULD RUN THE TEST: /test/check.sh ??
services:
  - docker:dind

stages:
  - build
  - test

before_script:
  - docker info

# Build the docker image
build:
  image: docker:latest
  services:
    - docker:dind
  before_script:
    - docker login docker.example.com -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD
  only:
    - master
  script:
    - docker build -t our-docker .
    - docker save our-docker > our-docker.tar
  artifacts:
    paths:
      - our-docker.tar
    expire_in: 1 week
  stage: build

test:
  image: docker:latest
  only:
    - master
  script:
    - docker load < our-docker.tar
    - docker run --volume source="$(pwd)/test",target="/test" our-docker
    #- ?? HERE I SHOULD RUN THE TEST: /test/check.sh ??
  stage: test
First, there was an issue with the docker run command itself:
docker run --volume source="$(pwd)/test",target="/test" our-docker # buggy
as the syntax to setup a bind-mount is:
either docker run -v "$PWD/test":"/test" our-docker
(-v being the short form of --volume)
or docker run --mount type=bind,source="$PWD/test",target="/test" our-docker
(Note: I replaced above "$(pwd)" with the special shell variable "$PWD" which avoids spinning yet another process.)
Next, you cannot just append the line /test/check.sh after the docker run line, because you precisely need to run that command within the context of docker run. To this end, you may want to use the pattern I proposed in this other SO thread: How do I set docker-credential-ecr-login in my PATH before anything else in GitLab CI (which contains more details/remarks about set -e, quotes and shell escaping in the context of that pattern).
Wrap-up
More precisely, could you try the following adaptation of your .gitlab-ci.yml? (I've added some ls commands that should help debugging your configuration):
services:
  - docker:dind

stages:
  - build
  - test

before_script:
  - docker info

# Build the docker image
build:
  image: docker:latest
  services:
    - docker:dind
  before_script:
    - docker login docker.example.com -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
  only:
    - master
  script:
    - docker build -t our-docker .
    - docker save our-docker > our-docker.tar
  artifacts:
    paths:
      - our-docker.tar
    expire_in: 1 week
  stage: build

test:
  image: docker:latest
  # note: use /bin/sh below as this image doesn't provide /bin/bash
  only:
    - master
  script:
    - docker load < our-docker.tar
    - echo "$PWD"
    - ls
    - ls -Rhal test
    - |
      docker run --rm -v "$PWD/test":"/test" our-docker /bin/sh -c "
        set -ex
        ls -Rhal /test
        /test/check.sh
      "
  stage: test
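For this to work, test/check.sh must be committed with execute permission (or be invoked via sh /test/check.sh) and start with a shebang the image can resolve. A minimal hypothetical check.sh, just to validate the wiring:
#!/bin/sh
# hypothetical smoke test; replace with your real checks
set -e
echo "running checks from $0"
test -d /test   # confirm the bind mount is in place
echo "all checks passed"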
I have a Python-based repository and I am trying to set up GitLab CI to build a Docker image from a Dockerfile and push it to GitLab's Container Registry.
Before building and deploying the Docker image to the registry, I want to run my unit tests using Python. Here is my current gitlab-ci.yml file, which only does the testing:
image: python:3.7-slim

before_script:
  - pip3 install -r requirements.txt

test:
  variables:
    DJANGO_SECRET_KEY: some-key-here
  script:
    - python manage.py test

build:
  DO NOT KNOW HOW TO DO IT
I checked some templates on GitLab's website and found one for Docker:
# This file is a template, and might need editing before it works on your project.
# Official docker image.
image: docker:latest

services:
  - docker:dind

before_script:
  - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY

build-master:
  stage: build
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE" .
    - docker push "$CI_REGISTRY_IMAGE"
  only:
    - master

build:
  stage: build
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG" .
    - docker push "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG"
  except:
    - master
However, neither of these works for me on its own, because I need Python for testing and Docker for building the image. Is there a way to do it with GitLab CI without creating a custom Docker image that has both Python and Docker installed?
I found out that I can create multiple jobs, each with their own images:
stages:
  - test
  - build

test:
  stage: test
  image: python:3.7-slim
  variables:
    DJANGO_SECRET_KEY: key
  before_script:
    - pip3 install -r requirements.txt
  script:
    - python manage.py test
  only:
    - master

build:
  stage: build
  image: docker:latest
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
  script:
    - docker build --pull -t "$CI_REGISTRY_IMAGE" .
    - docker push "$CI_REGISTRY_IMAGE"
  only:
    - master
I use GitLab.com for my CI, using their shared Docker runners. I have a project which requires PHP and Composer installed, while it also needs Docker to build an image of the project.
I've tried for hours to build a Docker image which has PHP, Composer and Docker installed, but I can't seem to figure it out.
For reference, my gitlab-ci.yml file looks like this;
image: docker:latest

variables:
  DOCKER_DRIVER: overlay2

services:
  - docker:dind

composer:install:
  stage: build
  artifacts:
    paths:
      - /
    expire_in: 1 week
  script:
    - docker run --rm --interactive --tty --volume $PWD:/app composer install

build:image:
  stage: build
  dependencies:
    - composer:install
  script:
    - docker login registry.gitlab.com -u $REGISTRY_USERNAME -p $REGISTRY_PASSWORD
    - docker build -t registry.gitlab.com/accountname/projectname/develop .
    - docker push registry.gitlab.com/accountname/projectname/develop
Using the sample build script provided by Stefan below, I put together the following build file, which appears to work perfectly. It builds the project using the project's Dockerfile and pushes the resulting image to my GitLab repository.
image: docker:latest

variables:
  DOCKER_DRIVER: overlay2

services:
  - docker:dind

before_script:
  - docker run --rm --volume $PWD:/app composer install

build:image:
  stage: build
  script:
    - docker login registry.gitlab.com -u $REGISTRY_USERNAME -p $REGISTRY_PASSWORD
    - docker build -t registry.gitlab.com/accountname/projectname/develop .
    - docker push registry.gitlab.com/accountname/projectname/develop
You should use a two-job CI pipeline and use the artifact feature provided by GitLab to pass the composer install result between jobs.
Your first job (composer:install) could use something like https://hub.docker.com/r/library/composer/ in its script section to install all Composer packages, then pass the result to the build:image job that builds your Docker image.
This would roughly look like this:
image: docker:latest

variables:
  DOCKER_DRIVER: overlay2

services:
  - docker:dind

composer:install:
  stage: composer
  artifacts:
    paths:
      - /
    expire_in: 1 week
  script:
    - docker run --rm --interactive --tty --volume $PWD:/app composer install

build:image:
  stage: build
  dependencies:
    - composer:install
  script:
    - docker build -t myimage:latest .
Where your Dockerfile could be something like this (based on this):
FROM php:7.0-cli
COPY . /usr/src/myapp
WORKDIR /usr/src/myapp
CMD [ "php", "./your-script.php" ]