I have two Docker containers that share a frontendbuild named volume:
services:
  nginx:
    container_name: nginx
    build:
      context: .
      dockerfile: ./compose/production/nginx_ssltls/Dockerfile
    #restart: unless-stopped
    volumes:
      - ./compose/production/nginx_live:/etc/nginx/conf.d
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
      - staticfiles_harcelement:/app/static
      - mediafiles_harcelement:/app/media
      - frontendbuild:/usr/share/nginx/html/build
    ports:
      - "80:80"
      - "443:443"
    command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
    networks:
      - network_app
  react:
    build:
      context: .
      dockerfile: ./compose/production/frontend/Dockerfile
    #restart: always
    volumes:
      - frontendbuild:/app/frontend/build
    networks:
      - network_app
Each time I re-run my build, the volume is not updated with the new /app/frontend/build folder from the rebuilt react container.
I have found how to update a volume from a folder on my host machine, but this time the build is produced inside the Dockerfile, so the files I need to push into the volume live inside the container...
How can I automate this in code?
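For context on what I understand so far: Docker only seeds a named volume from the image the first time the volume is created; after that, rebuilding the image never touches the volume. One workaround I'm considering (a sketch only; the build_staging path and the entrypoint are hypothetical, not part of my current setup) is to have the react container refresh the volume on every start:
# react Dockerfile (sketch): build into a path the volume mount won't shadow
RUN npm run build && mv build /app/frontend/build_staging
Then an entrypoint copies the fresh build into the mounted volume at startup:
#!/bin/sh
# entrypoint.sh (sketch): /app/frontend/build is the volume mount point
rm -rf /app/frontend/build/*
cp -a /app/frontend/build_staging/. /app/frontend/build/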
Here is the result of docker volume inspect frontendbuild:
[
    {
        "CreatedAt": "2022-07-07T19:37:23+02:00",
        "Driver": "local",
        "Labels": {
            "com.docker.compose.project": "app-harcelement",
            "com.docker.compose.version": "1.25.0",
            "com.docker.compose.volume": "frontendbuild"
        },
        "Mountpoint": "/var/lib/docker/volumes/frontendbuild/_data",
        "Name": "frontendbuild",
        "Options": null,
        "Scope": "local"
    }
]
Thank you
Related
I have 3 services sharing 1 common package.
I've set up a docker-compose.yaml file to run these services together (each service has its own container), and I configured hot reload for each service using bind mounts.
Now, I also have 1 shared package, common, that is used by each service.
What I want is for a change to the common package to trigger a reload of every service: if I change some code in the common package, all 3 other services should reload.
This is my docker-compose.yaml file:
version: '3.8'
services:
  mongo_launcher:
    container_name: mongo_launcher
    image: mongo:6.0.2
    restart: on-failure
    networks:
      - dashboard_network
    volumes:
      - ./docker/scripts/mongo-setup.sh:/scripts/mongo-setup.sh
    entrypoint: ['sh', '/scripts/mongo-setup.sh']
  mongo_replica_1:
    container_name: mongo_replica_1
    image: mongo:6.0.2
    ports:
      - 27017:27017
    restart: always
    entrypoint:
      [
        '/usr/bin/mongod',
        '--bind_ip_all',
        '--replSet',
        'dbrs',
        '--dbpath',
        '/data/db',
        '--port',
        '27017',
      ]
    volumes:
      - ./.volumes/mongo/replica1:/data/db
      - ./.volumes/mongo/replica1/configdb:/data/configdb
    networks:
      - dashboard_network
  mongo_replica_2:
    container_name: mongo_replica_2
    image: mongo:6.0.2
    ports:
      - 27018:27018
    restart: always
    entrypoint:
      [
        '/usr/bin/mongod',
        '--bind_ip_all',
        '--replSet',
        'dbrs',
        '--dbpath',
        '/data/db',
        '--port',
        '27018',
      ]
    volumes:
      - ./.volumes/mongo/replica2:/data/db
      - ./.volumes/mongo/replica2/configdb:/data/configdb
    networks:
      - dashboard_network
  mongo_replica_3:
    container_name: mongo_replica_3
    image: mongo:6.0.2
    ports:
      - 27019:27019
    restart: always
    entrypoint:
      [
        '/usr/bin/mongod',
        '--bind_ip_all',
        '--replSet',
        'dbrs',
        '--dbpath',
        '/data/db',
        '--port',
        '27019',
      ]
    volumes:
      - ./.volumes/mongo/replica3:/data/db
      - ./.volumes/mongo/replica3/configdb:/data/configdb
    networks:
      - dashboard_network
  backend:
    container_name: backend
    build:
      context: .
      dockerfile: ./docker/Dockerfile.backend-dev
    env_file:
      - ./apps/backend/envs/.env.development
      - ./docker/envs/.env.development
    ports:
      - 3000:3000
    restart: always
    depends_on:
      - mongo_replica_1
      - mongo_replica_2
      - mongo_replica_3
    networks:
      - dashboard_network
    volumes:
      - type: bind
        source: ./apps/backend/src
        target: /dashboard/apps/backend/src
  cli-backend:
    container_name: cli-backend
    build:
      context: .
      dockerfile: ./docker/Dockerfile.cli-backend-dev
    env_file:
      - ./apps/cli-backend/envs/.env.development
      - ./docker/envs/.env.development
    ports:
      - 4000:4000
    restart: always
    depends_on:
      - mongo_replica_1
      - mongo_replica_2
      - mongo_replica_3
    networks:
      - dashboard_network
    volumes:
      - type: bind
        source: ./apps/cli-backend/src
        target: /dashboard/apps/cli-backend/src
  frontend:
    container_name: frontend
    build:
      context: .
      dockerfile: ./docker/Dockerfile.frontend-dev
    env_file:
      - ./apps/frontend/.env.development
    ports:
      - 8080:8080
    restart: always
    depends_on:
      - backend
      - cli-backend
    networks:
      - dashboard_network
    volumes:
      - type: bind
        source: ./apps/frontend/src
        target: /dashboard/apps/frontend/src
networks:
  dashboard_network:
    driver: bridge
This is a typical Dockerfile for a service (it may differ slightly per service, but the idea is the same):
FROM node:18
RUN curl -f https://get.pnpm.io/v6.16.js | node - add --global pnpm
WORKDIR /dashboard
COPY ./package.json ./pnpm-workspace.yaml ./.npmrc ./
COPY ./apps/backend/package.json ./apps/backend/
COPY ./packages/common/package.json ./packages/common/
COPY ./prisma/schema.prisma ./prisma/
RUN pnpm i -w
RUN pnpm --filter backend --filter common i
COPY ./tsconfig.base.json ./nx.json ./
COPY ./apps/backend/ ./apps/backend/
COPY ./packages/common/ ./packages/common/
CMD ["pnpm", "exec", "nx", "start:dev:docker", "backend"]
My pnpm-workspace.yaml file:
packages:
- 'apps/*'
- 'packages/*'
I also use the nx package; my nx.json file is:
{
  "workspaceLayout": {
    "appsDir": "apps",
    "libsDir": "packages"
  },
  "tasksRunnerOptions": {
    "default": {
      "runner": "nx/tasks-runners/default",
      "options": {
        "cacheableOperations": ["build", "lint", "type-check", "depcheck", "stylelint"]
      }
    }
  },
  "namedInputs": {
    "source": ["{projectRoot}/src/**/*"],
    "jsSource": ["{projectRoot}/src/**/*.{ts,js,cjs}"],
    "reactTsSource": ["{projectRoot}/src/**/*.{ts,tsx}"],
    "scssSource": ["{projectRoot}/src/**/*.scss"]
  },
  "targetDefaults": {
    "build": {
      "inputs": ["source", "^source"],
      "dependsOn": ["^build"]
    },
    "lint": {
      "inputs": ["jsSource", "{projectRoot}/.eslintrc.cjs", "{projectRoot}/.eslintignore"],
      "outputs": []
    },
    "type-check": {
      "inputs": [
        "reactTsSource",
        "{projectRoot}/tsconfig.json",
        "{projectRoot}/tsconfig.base.json",
        "{workspaceRoot}/tsconfig.base.json"
      ],
      "dependsOn": ["^build"],
      "outputs": []
    },
    "depcheck": {
      "inputs": ["{projectRoot}/.depcheckrc.json", "{projectRoot}/package.json"],
      "outputs": []
    },
    "stylelint": {
      "inputs": ["scssSource", "{projectRoot}/stylelint.config.cjs"]
    },
    "start:dev": {
      "dependsOn": ["^build"]
    },
    "start:dev:docker": {
      "dependsOn": ["^build"]
    }
  }
}
For each service, I installed the shared package in package.json#devDependencies:
"common": "workspace:1.0.0",
As you can see, my start:dev:docker script depends on the build script of the shared package, so the common package gets built inside the containers. These are the scripts of common/package.json:
"build": "rimraf ./dist && tsc --project ./tsconfig.build.json",
"start:dev": "tsc --project ./tsconfig.build.json --watch",
So I need to use start:dev somehow, but surely not via NX's dependsOn.
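One direction that might work, sketched under assumptions (the common-watcher service, the common_dist volume, and the extra mounts are additions of mine, not part of my current files): run the common package's start:dev watcher as its own compose service and share its compiled dist with the app containers, so each service's own reloader picks up the recompiled files:
  common-watcher:
    build:
      context: .
      dockerfile: ./docker/Dockerfile.backend-dev # any image that has the workspace installed
    command: ['pnpm', '--filter', 'common', 'start:dev'] # tsc --watch from common/package.json
    volumes:
      - type: bind
        source: ./packages/common/src
        target: /dashboard/packages/common/src
      - common_dist:/dashboard/packages/common/dist
  backend: # the same two extra lines would go on cli-backend and frontend
    volumes:
      - common_dist:/dashboard/packages/common/dist
volumes:
  common_dist: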
I have created a simple app connected to PostgreSQL and pgAdmin, plus a web server, as Docker images running in containers.
My question is how I can make it reload on changes, like with nodemon on a local server, without having to delete the container every time.
I have tried different solutions and methods I have seen around, but I haven't been able to make any of them work.
I have already tried adding command: ["npm", "run", "start:dev"] to the docker-compose file as well...
My files are:
Dockerfile
FROM node:latest
WORKDIR /
COPY package*.json ./
COPY . .
COPY database.json .
COPY .env .
EXPOSE 3000
CMD [ "npm", "run", "watch ]
docker-compose.yml
version: '3.7'
services:
  postgres:
    image: postgres:latest
    environment:
      - POSTGRES_USER=test
      - POSTGRES_PASSWORD=tes
      - POSTGRES_DB=test
    ports:
      - 5432:5432
    logging:
      options:
        max-size: 10m
        max-file: "3"
  pgadmin:
    image: dpage/pgadmin4
    environment:
      - PGADMIN_DEFAULT_EMAIL=test@gmail.com
      - PGADMIN_DEFAULT_PASSWORD=pasword123test
    ports:
      - "5050:80"
  web:
    build: .
    # command: ["npm", "run", "start:dev"]
    links:
      - postgres
    image: prueba
    depends_on:
      - postgres
    ports:
      - '3000:3000'
    env_file:
      - .env
Nodemon.json file:
{
  "watch": ["dist"],
  "ext": ".ts,.js",
  "ignore": [],
  "exec": "ts-node ./dist/server.js"
}
Package.json file:
"scripts": {
"start:dev": "nodemon",
"build": "rimraf ./dist && tsc",
"start": "npm run build && node dist/server.js",
"watch": "tsc-watch --esModuleInterop src/server.ts --outDir ./dist --onSuccess \"node ./dist/server.js\"",
"jasmine": "jasmine",
"test": "npm run build && npm run jasmine",
"db-test": "set ENV=test&& db-migrate -e test up && npm run test && db-migrate -e test reset",
"lint": "eslint . --ext .ts",
"prettier": "prettier --config .prettierrc src/**/*.ts --write",
"prettierLint": "prettier --config .prettierrc src/**/*.ts --write && eslint . --ext .ts --fix"
},
Thanks
The COPY . . command only runs when the image is built, which only happens the first time you run docker compose up (or when you explicitly rebuild). For the container to see changes afterwards, the code on your host machine needs to be synchronized with the code inside the container even after the build is complete.
Below I've added a volume mount to the web service in your docker-compose file and uncommented the command that should support hot-reloading. I assumed that the source code you want to change lives in a src directory; feel free to adjust this to reflect how your source code is organized.
version: '3.7'
services:
  postgres:
    image: postgres:latest
    environment:
      - POSTGRES_USER=test
      - POSTGRES_PASSWORD=tes
      - POSTGRES_DB=test
    ports:
      - 5432:5432
    logging:
      options:
        max-size: 10m
        max-file: "3"
  pgadmin:
    image: dpage/pgadmin4
    environment:
      - PGADMIN_DEFAULT_EMAIL=test@gmail.com
      - PGADMIN_DEFAULT_PASSWORD=pasword123test
    ports:
      - "5050:80"
  web:
    build: .
    command: ["npm", "run", "start:dev"]
    links:
      - postgres
    image: prueba
    depends_on:
      - postgres
    ports:
      - '3000:3000'
    env_file:
      - .env
    volumes:
      # <host-path>:<container-path>
      - ./src:/src/
If that isn't clear, here's an article that might help:
https://www.freecodecamp.org/news/how-to-enable-live-reload-on-docker-based-applications/
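One caveat worth adding: on some Docker setups (Docker Desktop on Windows/macOS in particular), filesystem events from the host don't always reach the container, so watchers never fire even though the bind mount is correct. nodemon's polling mode is the usual workaround; here is the nodemon.json from the question with that one flag added (the flag is my addition, not something the asker had):
{
  "watch": ["dist"],
  "ext": ".ts,.js",
  "legacyWatch": true,
  "exec": "ts-node ./dist/server.js"
}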
I need to find a volume easily by label or name, not by a Docker-assigned id, e.g.:
docker volume ls --filter label=key=value
but if I add a container_name or labels to docker-compose.yaml, I don't see any label or name assigned to the volume when I inspect it. Here is the output:
>>> docker volume inspect <volume_id>
[
    {
        "CreatedAt": "2020-10-28T11:41:51+01:00",
        "Driver": "local",
        "Labels": null,
        "Mountpoint": "/var/lib/docker/volumes/4dce13df34f4630b34fbf1f853f7b59dbee2e3150a5122fa38d02024c155ec7d/_data",
        "Name": "4dce13df34f4630b34fbf1f853f7b59dbee2e3150a5122fa38d02024c155ec7d",
        "Options": null,
        "Scope": "local"
    }
]
I believe I should be able to filter volumes by label and name.
Here is the relevant part of the docker-compose.yml config for the mongo service:
version: '3.4'
services:
  mongodb:
    container_name: some_name
    image: mongo
    labels:
      com.docker.compose.project: app-name
    restart: always
    ports:
      - 27017:27017
    volumes:
      - ./mongo:/data/db
I'm not exactly sure what you're trying to achieve here, but I hope something in my response is helpful.
You can define a named volume within your docker-compose.yml
version: '3.4'
services:
  mongodb:
    container_name: some_name
    image: mongo
    labels:
      com.docker.compose.project: app-name
    restart: always
    ports:
      - 27017:27017
    volumes:
      - mongo_db:/data/db
volumes:
  mongo_db:
You could then use the docker volume inspect command to see some details about this volume.
docker volume inspect mongo_db
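Compose can also attach labels to the volume itself, which then works with the --filter query from your question. A sketch (the label key com.example.project is only an example, use whatever key you like):
volumes:
  mongo_db:
    labels:
      com.example.project: app-name
And then:
docker volume ls --filter label=com.example.project=app-name
docker volume ls --filter name=mongo_db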
This is the docker-compose command and the results:
$ docker-compose -f docker-compose-base.yml -f docker-compose-test.yml run api sh -c 'pwd && ls'
Starting test-db ... done
/usr/src/api
node_modules
I then inspected the most recent container id:
$ docker inspect --format='{{json .Mounts}}' e150beeef85c
[
    {
        "Type": "bind",
        "Source": "/home/circleci/project",
        "Destination": "/usr/src/api",
        "Mode": "rw",
        "RW": true,
        "Propagation": "rprivate"
    },
    {
        "Type": "volume",
        "Name": "4f86174ca322af6d15489da91f745861815a02f5b4e9e879ef5375663b9defff",
        "Source": "/var/lib/docker/volumes/4f86174ca322af6d15489da91f745861815a02f5b4e9e879ef5375663b9defff/_data",
        "Destination": "/usr/src/api/node_modules",
        "Driver": "local",
        "Mode": "",
        "RW": true,
        "Propagation": ""
    }
]
Which means these files are not appearing inside the container:
$ ls /home/circleci/project
Dockerfile docker-compose-base.yml docker-compose-prod.yml migrations nodemon-debug.json package-lock.json src test-db.env tsconfig.build.json tslint.json
README.md docker-compose-dev.yml docker-compose-test.yml nest-cli.json nodemon.json package.json test test.env tsconfig.json
Why could this be?
Update: I should mention that all of this works fine in my local dev environment; the above fails only on CircleCI.
When I inspect the differences between the environments, the only major one I see is that my dev environment runs Docker 19 with the overlay2 storage driver, while the failing environment runs Docker 17 with the aufs storage driver.
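For reference, the storage driver and engine version I compared can be read directly from the CLI:
docker info --format '{{.Driver}}'            # e.g. overlay2 or aufs
docker version --format '{{.Server.Version}}' # e.g. 19.03.x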
Update 2: Actual docker-compose files:
# docker-compose-base.yml
version: '3'
services:
  api:
    build: .
    restart: on-failure
    container_name: api

# docker-compose-test.yml
version: '3'
networks:
  default:
    external:
      name: lb_lbnet
services:
  test-db:
    image: postgres:11
    container_name: test-db
    env_file:
      - ./test-db.env # uses POSTGRES_DB and POSTGRES_PASSWORD to create a fresh db with a password when first run
  api:
    restart: 'no'
    env_file:
      - test.env
    volumes:
      - ./:/usr/src/api
      - /usr/src/api/node_modules
    depends_on:
      - test-db
    ports:
      - 9229:9229
      - 3000:3000
    command: npm run start:debug
And finally Dockerfile:
FROM node:11
WORKDIR /usr/src/api
COPY package*.json ./
RUN npm install
COPY . .
# not using an execution list here so we get shell variable substitution
CMD npm run start:$NODE_ENV
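One detail of docker-compose-test.yml above worth spelling out, since it explains the second entry in the Mounts output earlier: the bare path under volumes is the common trick for keeping the image's installed node_modules visible underneath the source bind mount (same lines as above, just annotated):
volumes:
  - ./:/usr/src/api            # bind mount: the host project shadows the image's WORKDIR
  - /usr/src/api/node_modules  # anonymous volume: keeps node_modules from being shadowed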
As @allisongranemann pointed out, CircleCI states:
It is not possible to mount a volume from your job space into a container in Remote Docker (and vice versa).
The original reason I wanted to mount the project directory into the container was that, in the development environment, I could change code quickly and run tests without rebuilding the container.
Given this limitation, the solution I went with was to remove the volume mounts from docker-compose-test.yml, as follows:
version: '3'
services:
  test-db:
    image: postgres:11
    container_name: test-db
    env_file:
      - ./test-db.env # uses POSTGRES_DB and POSTGRES_PASSWORD to create a fresh db with a password when first run
  api:
    restart: 'no'
    env_file:
      - test.env
    depends_on:
      - test-db
    ports:
      - 9229:9229
      - 3000:3000
    command: npm run start:debug
I also added a docker-compose-test-dev.yml that adds the volumes back for the dev environment:
version: '3'
services:
  api:
    volumes:
      - ./:/usr/src/api
Finally, to run tests on the dev environment, I run:
docker-compose -f docker-compose-base.yml -f docker-compose-test.yml -f docker-compose-test-dev.yml run api npm run test:e2e
docker-compose.yml
services:
  idprovider-app:
    container_name: idprovider-app
    build:
      dockerfile: Dockerfile
      context: .
    environment:
      KEYCLOAK_USER: admin
      KEYCLOAK_PASSWORD: admin
    volumes:
      - keycloak-data-volume:/var/lib/keycloak/data
    ports:
      - "8090:8090"
      - "8443:8443"
volumes:
  keycloak-data-volume:
    external: true
Dockerfile
FROM jboss/keycloak:7.0.1
EXPOSE 8080
EXPOSE 8443
docker inspect <container>
"Mounts": [
{
"Type": "volume",
"Name": "keycloak-data-volume",
"Source": "/mnt/sda1/var/lib/docker/volumes/keycloak-data-volume/_data",
"Destination": "/var/lib/keycloak/data",
"Driver": "local",
"Mode": "rw",
"RW": true,
"Propagation": ""
}
],
docker volume inspect keycloak-data-volume
[
    {
        "CreatedAt": "2019-12-10T19:31:55Z",
        "Driver": "local",
        "Labels": {},
        "Mountpoint": "/mnt/sda1/var/lib/docker/volumes/keycloak-data-volume/_data",
        "Name": "keycloak-data-volume",
        "Options": {},
        "Scope": "local"
    }
]
There are no errors, but it doesn't save state. I have no idea what's wrong. I'm running it on Windows 10.
If you are using the default database location, you may try this option with docker-compose:
keycloak:
  image: quay.io/keycloak/keycloak:14.0.0
  container_name: keycloak
  environment:
    KEYCLOAK_USER: admin
    KEYCLOAK_PASSWORD: admin
  ports:
    - "8082:8080"
  restart: always
  volumes:
    - .local/keycloak/:/opt/jboss/keycloak/standalone/data/
I found a similar answer for plain docker: https://stackoverflow.com/a/60554189/6916890
docker run --volume /root/keycloak/data/:/opt/jboss/keycloak/standalone/data/
In case you are using the Docker setup described in https://www.keycloak.org/getting-started/getting-started-docker and are looking for a way to persist data even when the container is killed, you can use a Docker volume and mount the /opt/keycloak/data/ folder from the container to a directory on your local machine.
The only change you need to make to the docker command from the getting-started doc is to add a volume mount via the -v option:
-v /<path-in-your-local-machine>/keycloak-data/:/opt/keycloak/data/
So the final docker run command, with an example local directory, would look like:
docker run -p 8080:8080 -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=admin \
-v /Users/amit/workspace/keycloak/keycloak-data/:/opt/keycloak/data/ \
quay.io/keycloak/keycloak:19.0.3 start-dev
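If a host path is awkward (permissions on the mounted directory are a common problem on Windows and macOS), a named volume should work the same way; a sketch, where keycloak-data is a hypothetical volume name:
docker volume create keycloak-data
docker run -p 8080:8080 -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=admin \
    -v keycloak-data:/opt/keycloak/data/ \
    quay.io/keycloak/keycloak:19.0.3 start-dev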
Which database are you using with it? I think you need to mount the database volume as well to save the state.
For example, for Postgres:
services:
  postgres:
    image: postgres
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: keycloak
      POSTGRES_USER: keycloak
      POSTGRES_PASSWORD: password
or for MySQL:
services:
  mysql:
    image: mysql:5.7
    volumes:
      - mysql_data:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: keycloak
      MYSQL_USER: keycloak
      MYSQL_PASSWORD: password
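Note that both snippets reference named volumes, which Compose also expects to be declared at the top level of the file, e.g.:
volumes:
  postgres_data: # declare whichever of the two you actually use
  mysql_data: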
You must specify the database connection details in the environment variables.
If you run the PostgreSQL instance as a separate service, you must set the DB_ADDR environment variable in your Keycloak service:
services:
  idprovider-app:
    container_name: idprovider-app
    build:
      dockerfile: Dockerfile
      context: .
    environment:
      DB_VENDOR: POSTGRES
      # Specify hostname of the database (eg: hostname or hostname:port)
      DB_ADDR: hostname:5432
      DB_DATABASE: keycloak
      DB_USER: keycloak
      DB_SCHEMA: public
      DB_PASSWORD: password
      KEYCLOAK_USER: admin
      KEYCLOAK_PASSWORD: admin
    volumes:
      - keycloak-data-volume:/var/lib/keycloak/data
    ports:
      - "8090:8090"
      - "8443:8443"
volumes:
  keycloak-data-volume:
    external: true
My 2 cents: this worked for me with the persistent volume pointing to /opt/keycloak/data/h2, on Keycloak Docker image version 19.0.1:
-v /<path-in-your-local-machine>/keycloak-data/:/opt/keycloak/data/h2
Update for version >= 17.0
To complement lazylead's answer: for Keycloak version >= 17.0.0 you need to use /opt/keycloak/data/ instead of /opt/jboss/keycloak/standalone/data/.
https://stackoverflow.com/a/60554189/5424025