I'm fairly new to docker, but I recently discovered something that I just can't wrap my head around. I started a docker machine:
docker-machine create -d virtualbox machine_name
Created a docker-compose file for my application:
version: '3.3'
services:
  client:
    container_name: client
    build:
      context: ./services/client
      dockerfile: Dockerfile
    volumes:
      - './services/client:/usr/src/app'
    ports:
      - '3007:3000'
    environment:
      - NODE_ENV=development
    depends_on:
      - project
    links:
      - project
  db:
    container_name: db
    build:
      context: ./services/db
      dockerfile: Dockerfile
    ports:
      - 5435:5432
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
  project:
    container_name: project
    build: ./services/project
    volumes:
      - './services/project:/usr/src/app'
      - './services/project/package.json:/usr/src/app/package.json'
    ports:
      - 3000:3000
    environment:
      - DATABASE_URL=postgres://postgres:postgres@db:5432/esports_manager_dev
      - DATABASE_TEST_URL=postgres://postgres:postgres@db:5432/esports_manager_test
      - NODE_ENV=${NODE_ENV}
      - TOKEN_SECRET=tempsectre
    depends_on:
      - db
    links:
      - db
and then I ssh'd into the docker machine, only to find my entire filesystem there. Is this intended behaviour? I can't seem to find anything in the docs that talks about it.
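For reference, a minimal sketch of the inspection step described above (using the machine name from the create command; the exact commands are assumed, not quoted from my terminal):
docker-machine ssh machine_name   # open a shell inside the VirtualBox VM
ls /                              # inspect the root of the machine's filesystem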
Related
I have a docker-compose.yml file which contains frontend, backend, testing, postgres and pgadmin containers. All the containers except testing are able to communicate with each other, but the testing container fails to communicate with the backend and frontend containers in docker-compose.
version: '3.7'
services:
  frontend:
    container_name: test-frontend
    build:
      context: ./frontend
      dockerfile: Dockerfile.local
    ports:
      - '3000:3000'
    networks:
      - test-network
    environment:
      # For the frontend these can only be applied during the build!
      # (they are applied when TS is compiled)
      # You have to rebuild manually without cache if one of these changes, at least for prod mode.
      - REACT_APP_BACKEND_API=http://localhost:8000/api/v1
      - REACT_APP_GOOGLE_CLIENT_ID=1234567dfghjjnfd
      - CI=true
      - CHOKIDAR_USEPOLLING=true
  postgres:
    image: postgres
    environment:
      POSTGRES_USER: dev
      POSTGRES_PASSWORD: dev
      PGDATA: /data/postgres
    volumes:
      - postgres:/data/postgres
    ports:
      - "5432:5432"
    networks:
      - test-network
    restart: unless-stopped
  pgadmin:
    image: dpage/pgadmin4
    environment:
      PGADMIN_DEFAULT_EMAIL: "dev@dev.com"
      PGADMIN_DEFAULT_PASSWORD: dev
    volumes:
      - pgadmin:/root/.pgadmin
      - ./pgadmin-config/servers.json:/pgadmin4/servers.json
    ports:
      - "5050:80"
    networks:
      - test-network
    restart: unless-stopped
  backend:
    container_name: test-backend
    build:
      context: ./backend
      dockerfile: Dockerfile.local
    ports:
      - '8000:80'
    volumes:
      - ./backend:/app
    command: >
      bash -c "alembic upgrade head
      && exec /start-reload.sh"
    networks:
      - test-network
    depends_on:
      - postgres
    environment:
      - GOOGLE_APPLICATION_CREDENTIALS=/app/.secret/secret.json
      - APP_DB_CONNECTION_STRING=postgresql+psycopg2://dev:dev@postgres:5432/postgres
      - LOG_LEVEL=debug
      - SQLALCHEMY_ECHO=True
      - AUTH_ENABLED=True
      - CORS=*
      - GCP_ALLOWED_DOMAINS=*
  testing:
    container_name: test-testing
    build:
      context: ./testing
      dockerfile: Dockerfile
    volumes:
      - ./testing:/isp-app
    command: >
      bash -c "/wait
      && robot ."
    networks:
      - test-network
    depends_on:
      - backend
      - frontend
    environment:
      - WAIT_HOSTS= frontend:3000, backend:8000
      - WAIT_TIMEOUT= 3000
      - WAIT_SLEEP_INTERVAL=300
      - WAIT_HOST_CONNECT_TIMEOUT=300
volumes:
  postgres:
  pgadmin:
networks:
  test-network:
    driver: bridge
All the containers are attached to test-network. When the testing container tries to connect to frontend:3000 or backend:8000, it throws "Host [ backend:8000] not yet available".
How can I fix this?
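For illustration, one hedged way to check name resolution and reachability from inside the testing container (assuming bash, getent and curl are available in that image):
docker-compose exec testing bash
getent hosts backend frontend                                    # both names should resolve on test-network
curl -s -o /dev/null -w '%{http_code}\n' http://frontend:3000/   # frontend listens on 3000 inside the network
curl -s -o /dev/null -w '%{http_code}\n' http://backend:80/      # inside the network the backend listens on 80; '8000:80' only publishes 8000 on the host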
I know that this question has a lot of answers on Stack Overflow, but I didn't find a solution for my case.
I am moving a Laravel app into containers.
I can connect to the MariaDB instance from outside the Docker network, but not from inside it.
(I can connect via MySQL Workbench, locally via docker exec, and I can restore the dump from the container console and access the DB data from outside.)
What's wrong?
Why does the app not work (PHP has no access to MariaDB via the internal app_network) when at the same time I can access the DB from outside and from inside the container itself?
OS: CentOS 7.9.2009
Docker: 20.10.12 (e91ed57)
Docker-compose: 1.29.2 (5becea4c)
The same configs work fine on Windows 10.
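For context, a rough sketch of the kind of checks described above; the exact commands are assumptions, not taken from the original setup (the published port 3317 and the client credentials come from the .env files below):
mysql -h 127.0.0.1 -P 3317 -u client -pclient client                                      # from the host, via the published port (works)
docker-compose exec mariadb mysql -u client -pclient client                               # inside the mariadb container itself (works)
docker-compose exec php getent hosts mariadb                                              # does the alias resolve on app_network?
docker-compose exec php bash -c 'timeout 3 bash -c "</dev/tcp/mariadb/3306" && echo open || echo closed'   # is 3306 reachable from the php container?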
DOCKER COMPOSE CONFIG:
version: '3.9'
networks:
  app_network:
    driver: bridge
    name: ${NETWORK_NAME}
volumes:
  app:
    name: ${APP_VOLUME_NAME}
  mysql_database:
    name: ${MYSQL_DATABASE_VOLUME_NAME}
  mysql_dumps:
    name: ${MYSQL_DATABASE_DUMPS_VOLUME_NAME}
services:
  mariadb:
    image: mariadb
    env_file:
      - ./.env
    command: --default-authentication-plugin=mysql_native_password
    ports:
      - ${MYSQL_EXTERNAL_PORT}:3306
    volumes:
      - mysql_database:/var/lib/mysql
      - mysql_dumps:/var/mysqldump
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    networks:
      app_network:
        aliases:
          - mariadb
    profiles:
      - dev
      - prod
  php:
    restart: always
    env_file:
      - ./.env
    build:
      context: ../../
      dockerfile: ./.environment/cs/php/Dockerfile
      args:
        - USER_ID=${PHP_USER_ID}
        - GROUP_ID=${PHP_GROUP_ID}
        - DEFAULT_CONFIG_FILE=${PHP_DEFAULT_CONFIG_FILE}
        - CUSTOM_CONFIG_FILE=${PHP_CUSTOM_CONFIG_FILE}
        - PROJECT_FOLDER=${PHP_PROJECT_FOLDER}
    volumes:
      - ./php/logs:/var/log
      - ../../:${PHP_PROJECT_FOLDER}
    networks:
      app_network:
        aliases:
          - php
    depends_on:
      - memcached
      - mariadb
    profiles:
      - dev
      - prod
  nginx:
    restart: always
    env_file:
      - ./.env
    build:
      context: ../../
      dockerfile: ./.environment/cs/nginx/Dockerfile
      args:
        - CONFIG_FILE=${WEB_CONFIG_FILE}
        - PROJECT_FOLDER=${WEB_PROJECT_FOLDER}
    ports:
      - ${WEB_EXTERNAL_PORT}:80
    volumes:
      - ./nginx/logs:/var/log/nginx
      - ../../public:${WEB_PROJECT_FOLDER}:cached
    networks:
      app_network:
        aliases:
          - nginx
    depends_on:
      - php
    profiles:
      - dev
      - prod
Docker .ENV
NETWORK_NAME=CS
APP_VOLUME_NAME=CS_APP_STORAGE
MYSQL_DATABASE_VOLUME_NAME=CS_DATABASE
MYSQL_DATABASE_DUMPS_VOLUME_NAME=CS_DATABASE_DUMPS
MYSQL_EXTERNAL_PORT=3317
MYSQL_ROOT_PASSWORD=root
MYSQL_USER=client
MYSQL_PASSWORD=client
PHP_USER_ID=1000
PHP_GROUP_ID=1000
PHP_DEFAULT_CONFIG_FILE=php.ini-production
PHP_CUSTOM_CONFIG_FILE=./.environment/cs/php/custom.prod.ini
PHP_PROJECT_FOLDER=/var/www/app
WEB_EXTERNAL_PORT=127.0.0.1:8091
WEB_CONFIG_FILE=./.environment/cs/nginx/nginx.dev.conf
WEB_PROJECT_FOLDER=/var/www/app/public
Laravel .ENV
DB_CONNECTION=mysql
DB_HOST=mariadb
DB_PORT=3306
DB_DATABASE=client
DB_USERNAME=client
DB_PASSWORD=client
Try adding the expose config key to the mariadb service:
mariadb:
  # ...
  expose:
    - 3306
This turned out to be an issue with the MariaDB container itself. I connected to MariaDB via MySQL Workbench, completely removed the user, created a new one, and granted it schema privileges.
After that, everything works fine.
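For illustration, the same user recreation could also be done from the command line; this is only a sketch, using the root password and database name from the .env files above, and '%' as the host so other containers on app_network may connect:
docker-compose exec mariadb mysql -uroot -proot -e "
  DROP USER IF EXISTS 'client'@'%';
  CREATE USER 'client'@'%' IDENTIFIED BY 'client';
  GRANT ALL PRIVILEGES ON client.* TO 'client'@'%';
  FLUSH PRIVILEGES;"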
Note: Thanks @Ferran Buireu for the suggestion.
I'm quite sure I will get downvotes because I'm very new to Docker and am switching from the networking world to systems and programming.
After deploying GatsbyJS, I found the socket.io error "net::ERR_CONNECTION_REFUSED".
Even though every page works properly when I browse to it, I think it is not running correctly.
How can I solve this error? (below is the error capture)
I implemented and deployed these services on Ubuntu 20.04.2 with Docker 20.10.6; please see the docker-compose.yml below.
version: "3"
services:
frontendapp01:
working_dir: /frontendapp01
build:
context: ./frontendapp01
dockerfile: Dockerfile
depends_on:
- backendsrv01
- mongoserver
volumes:
- ./sentric01:/srv/front
ports:
- "8001:8000"
environment:
GATSBY_WEBPACK_PUBLICPATH: /
STRAPI_URL: backendsrv01:1337
networks:
- vpsnetwork
frontendapp02:
working_dir: /frontendapp02
build:
context: ./frontendapp02
dockerfile: Dockerfile
depends_on:
- backendsrv02
- mongoserver
volumes:
- ./sentric02:/srv/front
ports:
- "8002:8000"
environment:
GATSBY_WEBPACK_PUBLICPATH: /
STRAPI_URL: backendsrv02:1338
networks:
- vpsnetwork
frontendapp03:
working_dir: /frontendapp03
build:
context: ./frontendapp03
dockerfile: Dockerfile
depends_on:
- backendsrv02
- mongoserver
volumes:
- ./sentric03:/srv/front
ports:
- "8003:8000"
environment:
GATSBY_WEBPACK_PUBLICPATH: /
STRAPI_URL: backendsrv02:1338
networks:
- vpsnetwork
backendsrv01:
image: strapi/strapi
container_name: backendsrv01
restart: unless-stopped
environment:
DATABASE_CLIENT: mongo
DATABASE_NAME: essential
DATABASE_HOST: mongoserver
DATABASE_PORT: 27017
networks:
- vpsnetwork
volumes:
- ./app01:/srv/app
ports:
- "1337:1337"
backendsrv02:
image: strapi/strapi
container_name: backendsrv02
restart: unless-stopped
environment:
DATABASE_CLIENT: mongo
DATABASE_NAME: solven
DATABASE_HOST: mongoserver
DATABASE_PORT: 27017
networks:
- vpsnetwork
volumes:
- ./app02:/srv/app
ports:
- "1338:1337"
mongoserver:
image: mongo
container_name: mongoserver
restart: unless-stopped
networks:
- vpsnetwork
volumes:
- vpsappdata:/data/db
ports:
- "27017:27017"
networks:
vpsnetwork:
driver: bridge
volumes:
vpsappdata:
The socket connection only appears during the development stage (gatsby develop); it is there to refresh and update the browser on each save via hot-reloading, without losing component state. This feature is known as fast-refresh.
As I said, and for obvious reasons, this only applies to gatsby develop. Under gatsby build there is no connection socket. If your Docker development environment shares ports 8000 and 8001 (according to your docker-compose.yml setup), then once the site is built this can break the socket, because the scope of the project has changed.
To answer the question: you don't have to worry about it. Your project seems to build properly; the error is logged only because of the port shared between environments.
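For illustration, a rough sketch of the difference between the two modes (the port numbers here are just examples, not taken from your compose file):
gatsby develop -H 0.0.0.0 -p 8000   # dev server, opens the hot-reload/fast-refresh websocket
gatsby build                        # production build, no socket at all
gatsby serve -H 0.0.0.0 -p 9000     # serves the static files from ./public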
Further readings:
https://www.gatsbyjs.com/docs/conceptual/overview-of-the-gatsby-build-process/
https://www.gatsbyjs.com/docs/reference/local-development/fast-refresh/
TIBCO provides a Dockerfile and supporting scripts for running TIBCO JasperReports® Server in a Docker container. What do I need to change in these files to support the Community Edition?
https://github.com/TIBCOSoftware/js-docker/
Thanks.
Alternatively, you can use the JasperReports Server Docker image from Bitnami, which uses the CE version of the server:
https://github.com/bitnami/bitnami-docker-jasperreports
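For example, a possible quick start with the Bitnami image (a sketch only; check the repository README for the current compose file, variable names and published ports):
curl -sSL https://raw.githubusercontent.com/bitnami/bitnami-docker-jasperreports/master/docker-compose.yml -o docker-compose.yml
docker-compose up -d
docker-compose ps   # shows which host ports the server was published on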
You need to make changes to the js-docker Dockerfiles and shell scripts, and roll your own Docker Compose file, for example:
version: '3.7'
services:
  postgres:
    container_name: postgres
    build:
      context: ./services/postgres
      dockerfile: Dockerfile
    ports:
      - "5432:5432"
    volumes:
      - .:/var/lib/postgresql/data
    env_file: ./services/postgres/postgres.env
  pgadmin:
    container_name: pgadmin
    build:
      context: ./services/pgadmin
      dockerfile: Dockerfile
    environment:
      PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-admin@serendipity.org.au}
      PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-secret}
    ports:
      - "${PGADMIN_PORT:-5050}:80"
    volumes:
      - .:/root/.pgadmin
  jasperreports-server:
    container_name: jasperreports-server
    build:
      context: ./services/jasperreports-server
      dockerfile: Dockerfile
    ports:
      - "11001:8080"
      - "11443:8443"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./license:/usr/local/share/jasperserver/license
      - ./keystore:/usr/local/share/jasperserver/keystore
    env_file: ./services/jasperreports-server/jasperreports-server.env
    environment:
      - DB_HOST=postgres
    depends_on:
      - jasperreports-server-cmdline
    command: ["/wait-for-container-to-exit.sh", "jasperreports-server-cmdline", "-t", "30", "--", "/entrypoint-ce.sh", "run"]
  jasperreports-server-cmdline:
    container_name: jasperreports-server-cmdline
    build:
      context: ./services/jasperreports-server
      dockerfile: Dockerfile-cmdline
    volumes:
      - ./license:/usr/local/share/jasperserver/license
      - ./keystore:/usr/local/share/jasperserver/keystore
    env_file: ./services/jasperreports-server/jasperreports-server.env
    environment:
      - DB_HOST=postgres
      - JRS_LOAD_SAMPLES=true
    depends_on:
      - postgres
    command: ["/wait-for-it.sh", "postgres:5432", "-t", "30", "--", "/entrypoint-cmdline-ce.sh", "init"]
Take a look at this example GitHub repo: https://github.com/Robinyo/serendipity-api/tree/master/projects/spring-boot/server/services/jasperreports-server
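With a compose file like the one above, a possible way to bring the stack up (a sketch; service names and host ports are the ones defined above):
docker-compose build
docker-compose up -d   # the cmdline container initialises the repository DB first, then the server starts
# The CE web app should then be reachable on the published port, e.g. http://localhost:11001/jasperserver/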
I have followed the api-platform tutorial and successfully built and started the application using Docker on my localhost machine.
I have a production server running Ubuntu 16.04.5 LTS, and a newly installed Docker version 18.06.1-ce.
How would I build this code on my local machine and run it on the Docker server?
I have also looked at the Deploying API Platform Applications documentation but I am not sure how to use this.
I am struggling to understand how to build api-platform on my localhost and get it onto the server.
This is the docker-compose.yml file. Try running docker-compose up -d:
version: '3.4'
services:
  php:
    image: ${CONTAINER_REGISTRY_BASE}/php
    build:
      context: ./api
      target: api_platform_php
      cache_from:
        - ${CONTAINER_REGISTRY_BASE}/php
        - ${CONTAINER_REGISTRY_BASE}/nginx
        - ${CONTAINER_REGISTRY_BASE}/varnish
    depends_on:
      - db
    # Comment out these volumes in production
    volumes:
      - ./api:/srv/api:rw,cached
      # If you develop on Linux, uncomment the following line to use a bind-mounted host directory instead
      # - ./api/var:/srv/api/var:rw
  api:
    image: ${CONTAINER_REGISTRY_BASE}/nginx
    build:
      context: ./api
      target: api_platform_nginx
      cache_from:
        - ${CONTAINER_REGISTRY_BASE}/php
        - ${CONTAINER_REGISTRY_BASE}/nginx
        - ${CONTAINER_REGISTRY_BASE}/varnish
    depends_on:
      - php
    # Comment out this volume in production
    volumes:
      - ./api/public:/srv/api/public:ro
    ports:
      - "8080:80"
  cache-proxy:
    image: ${CONTAINER_REGISTRY_BASE}/varnish
    build:
      context: ./api
      target: api_platform_varnish
      cache_from:
        - ${CONTAINER_REGISTRY_BASE}/php
        - ${CONTAINER_REGISTRY_BASE}/nginx
        - ${CONTAINER_REGISTRY_BASE}/varnish
    depends_on:
      - api
    volumes:
      - ./api/docker/varnish/conf:/usr/local/etc/varnish:ro
    tmpfs:
      - /usr/local/var/varnish:exec
    ports:
      - "8081:80"
  db:
    # In production, you may want to use a managed database service
    image: postgres:10-alpine
    environment:
      - POSTGRES_DB=api
      - POSTGRES_USER=api-platform
      # You should definitely change the password in production
      - POSTGRES_PASSWORD=!ChangeMe!
    volumes:
      - db-data:/var/lib/postgresql/data:rw
      # You may use a bind-mounted host directory instead, so that it is harder to accidentally remove the volume and lose all your data!
      # - ./docker/db/data:/var/lib/postgresql/data:rw
    ports:
      - "5432:5432"
  client:
    # Use a static website hosting service in production
    # See https://github.com/facebookincubator/create-react-app/blob/master/packages/react-scripts/template/README.md#deployment
    image: ${CONTAINER_REGISTRY_BASE}/client
    build:
      context: ./client
      cache_from:
        - ${CONTAINER_REGISTRY_BASE}/client
    env_file:
      - ./client/.env
    volumes:
      - ./client:/usr/src/client:rw,cached
      - /usr/src/client/node_modules
    ports:
      - "80:3000"
  admin:
    # Use a static website hosting service in production
    # See https://github.com/facebookincubator/create-react-app/blob/master/packages/react-scripts/template/README.md#deployment
    image: ${CONTAINER_REGISTRY_BASE}/admin
    build:
      context: ./admin
      cache_from:
        - ${CONTAINER_REGISTRY_BASE}/admin
    volumes:
      - ./admin:/usr/src/admin:rw,cached
      - /usr/src/admin/node_modules
    ports:
      - "81:3000"
  h2-proxy:
    # Don't use this proxy in prod
    build:
      context: ./h2-proxy
    depends_on:
      - client
      - admin
      - api
      - cache-proxy
    ports:
      - "443:443"
      - "444:444"
      - "8443:8443"
      - "8444:8444"
volumes:
  db-data: {}
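Regarding building locally and running on the server: one hedged approach, assuming you have a container registry you can push to and that CONTAINER_REGISTRY_BASE points at it (the registry name below is hypothetical):
# on the local machine: build and push the images referenced by the compose file
export CONTAINER_REGISTRY_BASE=registry.example.com/myproject
docker-compose build
docker-compose push
# on the server: copy docker-compose.yml and the .env files over, then
docker-compose pull
docker-compose up -d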