I'm trying to configure Docker Compose for my PHP project. On deploy I want to update the source code, update Composer dependencies, and run database migrations.
So I have a docker-compose.yml file:
version: '3.0'
services:
  php:
    build:
      context: .
      dockerfile: php/Dockerfile
    depends_on:
      - postgres
  postgres:
    image: "postgres:13-alpine"
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB_NAME}
The PHP container is built from the following Dockerfile:
# Install dependencies
RUN apt-get update \
    && apt-get install -y git libicu-dev postgresql-server-dev-all zip libzip-dev postgresql-client \
    && docker-php-ext-install intl pdo pdo_pgsql zip
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
# Copy source files
COPY ./app /var/www/my-site
# Update project files
WORKDIR /var/www/my-site
RUN composer install
RUN php ./yii migrate --interactive=0 # This command needs to connect to the database and fails
CMD [ "php-fpm"]
When I run docker-compose build, I get this error: could not translate host name "postgres" to address: Name or service not known.
How can I access the database container while another container is being built?
Both php and postgres need to be on the same network, and php can then reach postgres using its container_name, which is postgres. depends_on will make sure postgres starts before php.
version: '3.0'
services:
  php:
    build:
      context: .
      dockerfile: php/Dockerfile
    restart: on-failure
    depends_on:
      - postgres
    networks:
      - test-network
  postgres:
    container_name: 'postgres'
    image: "postgres:13-alpine"
    restart: always
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB_NAME}
    networks:
      - test-network
networks:
  test-network:
    driver: bridge
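Note that RUN steps in php/Dockerfile execute at image build time, before the Compose network and the postgres service exist, which is why the migration cannot reach the database during docker-compose build. A minimal sketch of applying it once the containers are up instead, reusing the question's own yii command and the service names from the compose file above:
# Rebuild the images and start both services
docker-compose up -d --build
# Run the migration inside the running php container, where the hostname "postgres" resolves
docker-compose exec php php ./yii migrate --interactive=0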
Related
Trying to set up my first Docker container for Laravel.
I'm doing everything exactly like the tutorial, but on my PC I get:
The "--host-0.0.0.0" option does not exist.
Dockerfile
FROM php:7.4-fpm-alpine
RUN docker-php-ext-install pdo pdo_mysql
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
WORKDIR /app
COPY . .
RUN composer install
CMD php artisan serve --host=0.0.0.0
docker-compose.yml
version: '3.8'
services:
  backend:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 8000:8000
    volumes:
      - .:/app
    depends_on:
      - db
  db:
    image: library/mysql:5.7.22
    environment:
      MYSQL_DATABASE: admin
      MYSQL_USER: admin
      MYSQL_PASSWORD: admin
      MYSQL_ROOT_PASSWORD: admin
    volumes:
      - ./storage/dbdata:/var/lib/mysql
    ports:
      - 33066:3306
Thanks to @Tolis Gerodimos
https://stackoverflow.com/a/72349936/2240163
docker-compose up --build
docker-compose.yml
version: "3.7"
services:
  courseshine_redis:
    container_name: courseshine_redis
    image: redis:latest
    command: redis-server --requirepass ${POSTGRES_PASSWORD}
    restart: always
    env_file: .env
    stdin_open: true
    ports:
      - ${REDIS_PORT}:${REDIS_PORT}
    volumes:
      - courseshine_redis_data:/data
    networks:
      - internal
  courseshine_db:
    container_name: courseshine_db
    build:
      context: ../..
      dockerfile: courseshine_docker/development/courseshine_db/Dockerfile
    restart: always
    env_file: .env
    environment:
      - POSTGRES_MULTIPLE_DATABASES=${POSTGRES_DEV_DB},${POSTGRES_TEST_DB}
    ports:
      - ${COURSESHINE_DB_PORT}:${COURSESHINE_DB_PORT}
    volumes:
      - courseshine_postgres_data:/var/lib/postgresql/data
      - ./courseshine_db:/dockerfile-entrypoint-initdb.d
    networks:
      - internal
  courseshine_pgadmin:
    container_name: courseshine_pgadmin
    image: dpage/pgadmin4:4.21
    restart: unless-stopped
    env_file: .env
    environment:
      - PGADMIN_DEFAULT_EMAIL=${POSTGRES_USER}
      - PGADMIN_DEFAULT_PASSWORD=${POSTGRES_PASSWORD}
    volumes:
      - pgadmin:/var/lib/pgadmin
      - courseshine_postgres_data:/var/lib/postgresql/data
    depends_on:
      - courseshine_db
    networks:
      - internal
  courseshine_api: &api_base
    container_name: courseshine_api
    build:
      context: ../..
      dockerfile: courseshine_docker/development/courseshine_api/Dockerfile
    env_file: .env
    stdin_open: true
    volumes:
      - ../../courseshine_api:/var/www/courseshine/courseshine_api
      - /var/run/docker.sock:/var/run/docker.sock
      - bundle_cache:/usr/local/bundle
    depends_on:
      - courseshine_redis
      - courseshine_db
    networks:
      - internal
  courseshine_ui:
    container_name: courseshine_ui
    build:
      context: ../../
      dockerfile: courseshine_docker/development/courseshine_ui/Dockerfile
    env_file: .env
    stdin_open: true
    volumes:
      - ../../courseshine_ui:/var/www/courseshine_ui
    depends_on:
      - courseshine_api
    networks:
      - internal
networks:
  internal:
volumes:
  courseshine_redis_data:
  courseshine_postgres_data:
  pgadmin:
  bundle_cache:
My Dockerfile for the courseshine_api service:
FROM ruby:2.7.1-slim-buster
RUN apt-get update -qq && apt-get install -y build-essential nodejs libpq-dev postgresql-client && rm -rf /var/lib/apt/lists/*
ENV APP_HOME /var/www/courseshine/courseshine_api
RUN mkdir -p $APP_HOME
WORKDIR $APP_HOME
COPY ./courseshine_api/Gemfile $APP_HOME/Gemfile
COPY ./courseshine_api/Gemfile.lock $APP_HOME/Gemfile.lock
RUN bundle install --path vendor/cache
# Copy the main application.
COPY ./courseshine_api $APP_HOME
# Add a script to be executed every time the container starts.
COPY ./courseshine_docker/development/courseshine_api/entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/entrypoint.sh
ENTRYPOINT ["entrypoint.sh"]
# Expose port 3000 to the Docker host, so we can access it
# from the outside.
EXPOSE 3000
# The main command to run when the container starts. Also
# tell the Rails dev server to bind to all interfaces by
# default.
CMD ["rails","server","-b","0.0.0.0"]
entrypoint.sh
#!/bin/sh
set -e
# Remove a stale server.pid left over from a previous run, then hand off to the container CMD
rm -f $APP_HOME/tmp/pids/server.pid
exec "$@"
When I run docker-compose up, the courseshine_api service does not stay up and throws Could not find rake-13.0.3 in any of the sources (Bundler::GemNotFound). Why does this problem occur and how can I fix it?
I'm trying to create a Symfony 5 project using Docker, with containers for MySQL, phpMyAdmin, Symfony, and MailDev.
Here is my configuration in docker-compose.yml:
version: '3.7'
services:
  db:
    image: mysql:latest
    container_name: ruakh_db
    restart: always
    volumes:
      - db-data:/var/lib/mysql
    environment:
      MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
    networks:
      - dev
  phpmyadmin:
    image: phpmyadmin:latest
    container_name: ruakh_phpmyadmin
    restart: always
    depends_on:
      - db
    ports:
      - 8080:80
    environment:
      PMA_HOST: db
    networks:
      - dev
  maildev:
    image: maildev/maildev
    container_name: ruakh_mail_dev
    restart: always
    command: bin/maildev --web 80 --smtp 25 --hide-extensions STARTTLS
    ports:
      - 8081:80
    networks:
      - dev
  apache:
    build: php
    container_name: ruakh_www
    ports:
      - 80:80
    volumes:
      - ./php/vhosts:/etc/apache2/sites-enabled
      - ./:/var/www
    restart: always
    networks:
      - dev
networks:
  dev:
volumes:
  db-data:
The Dockerfile used to build the server:
FROM php:8.0-apache
RUN echo "ServerName localhost" >> /etc/apache2/apache2.conf
RUN apt-get update \
&& apt-get install -y --no-install-recommends locales apt-utils git libicu-dev g++ libpng-dev libxml2-dev libzip-dev libonig-dev libxslt-dev;
RUN echo "en_US.UTF8 UTF8" > /etc/locale.gen && \
echo "fr_FR.UTF-8 UTF-8" >> /etc/locale.gen && \
locale-gen
RUN curl -sSk https://getcomposer.org/installer | php -- --disable-tls && \
mv composer.phar /usr/local/bin/composer
RUN docker-php-ext-configure intl
RUN docker-php-ext-install pdo pdo_mysql gd opcache intl zip calendar dom mbstring xsl
RUN pecl install apcu && docker-php-ext-enable apcu
WORKDIR /var/www/
The issue I'm struggling with is that whenever I run php bin/console make:migration it throws this error:
In AbstractMySQLDriver.php line 128: An exception occurred in driver: could not find driver
I assume it has something to do with my .env, and that my server can't connect to the database.
Here is the .env:
MAILER_DSN=smtp://ruakh_mail_dev:25?verify_peer=0
DATABASE_URL="mysql://root:@ruakh_db/ruakh?serverVersion=5.7"
How can I resolve this error?
I can run queries to the database and fetch data from a controller.
But I can't run php bin/console make:migration, although php bin/console make:entity works.
Here is the config/packages/doctrine.yaml:
doctrine:
  dbal:
    url: '%env(resolve:DATABASE_URL)%'
    # IMPORTANT: You MUST configure your server version,
    # either here or in the DATABASE_URL env var (see .env file)
    #server_version: '13'
  orm:
    auto_generate_proxy_classes: true
    naming_strategy: doctrine.orm.naming_strategy.underscore_number_aware
    auto_mapping: true
    mappings:
      App:
        is_bundle: false
        type: annotation
        dir: '%kernel.project_dir%/src/Entity'
        prefix: 'App\Entity'
        alias: App
EDIT
Today I opened it and tried again, and it seems the error has changed. Here is the error I get now:
In AbstractMySQLDriver.php line 112:
An exception occurred in driver: SQLSTATE[HY000] [2002]
php_network_getaddresses: getaddrinfo failed: Temporary failure in name resolution
I found a workaround for my problem: whenever a command involves the database, I run it from inside the Docker container, even though that's not quite what I was looking for. So I'm keeping this post open in case someone has an answer.
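For example, the migration command can be run through the apache service defined in the compose file above:
docker-compose exec apache php bin/console make:migration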
A better understanding of Docker will help you see why it works inside the container and not from your machine.
When you declare services in docker-compose.yml, each service gets a DNS name (here the container_name), so from inside one of the containers ruakh_db is reachable; that's why your controllers can access the database.
But outside the containers ruakh_db has no meaning, because your machine cannot resolve that DNS name. That's why your command line won't work.
One solution is to configure your OS to make ruakh_db point at your localhost.
Doing so depends on the OS you are using, but generally it consists of adding this line to your hosts file:
127.0.0.1 ruakh_db
Follow this link for more information on how to change your hosts file depending on your OS: https://www.howtogeek.com/howto/27350/beginner-geek-how-to-edit-your-hosts-file/
You should then configure your MySQL container to expose an external port, so it's reachable from outside:
db:
  image: mysql:latest
  container_name: ruakh_db
  restart: always
  volumes:
    - db-data:/var/lib/mysql
  environment:
    MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
  networks:
    - dev
  ports:
    - 3306:3306
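With the hosts entry and the published port in place, you can check reachability from the host. A quick sketch, assuming a MySQL client is installed locally and using the ruakh database from the DATABASE_URL above:
# ruakh_db now resolves to 127.0.0.1 via the hosts file, and port 3306 is published
mysql -h ruakh_db -P 3306 -u root ruakh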
You also need the MySQL extension installed for your machine's php-cli so it can talk to your MySQL database.
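A quick way to check for it and, if missing, install it (a sketch; the package name assumes a Debian/Ubuntu host):
# Is the pdo_mysql driver available to the PHP CLI on the host?
php -m | grep -i pdo_mysql
# On Debian/Ubuntu the driver is provided by the php-mysql package
sudo apt-get install php-mysql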
Coming from an environment where I manually SSHed into the remote server, did a git pull, and created my .env (since it is gitignored), how do I separate a development .env from a production .env? I used docker-machine to create an AWS EC2 instance. I created a production.yml and ran docker-compose -f production.yml up -d. The container on the EC2 machine picked up my development .env, which is not what I want.
Dockerfile
FROM python:3.6-alpine
ENV PYTHONUNBUFFERED 1
RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev git jpeg-dev zlib-dev libmagic
RUN python -m pip install --upgrade pip
RUN mkdir /writer-api
COPY requirements.txt /writer-api/
RUN pip install --no-cache-dir -r /writer-api/requirements.txt
COPY . /writer-api/
WORKDIR /writer-api
production.yml
version: "3"
services:
  postgres:
    restart: always
    image: postgres
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data/
  web:
    restart: always
    build: .
    command: gunicorn writer.wsgi:application -w 2 -b :8000
    environment:
      DEBUG: ${DEBUG}
      SECRET_KEY: ${SECRET_KEY}
      DB_HOST: ${DB_HOST}
      DB_NAME: ${DB_NAME}
      DB_USER: ${DB_USER}
      DB_PORT: ${DB_PORT}
      DB_PASSWORD: ${DB_PASSWORD}
      SENDGRID_API_KEY: ${SENDGRID_API_KEY}
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      AWS_STORAGE_BUCKET_NAME: ${AWS_STORAGE_BUCKET_NAME}
    depends_on:
      - postgres
      - redis
    expose:
      - "8000"
  redis:
    restart: always
    image: "redis:alpine"
  celery:
    restart: always
    build: .
    command: celery -A writer worker -l info
    volumes:
      - .:/writer-api
    depends_on:
      - postgres
      - redis
  celery-beat:
    restart: always
    build: .
    command: celery -A writer beat -l info
    volumes:
      - .:/writer-api
    depends_on:
      - postgres
      - redis
  nginx:
    restart: always
    build: ./nginx/
    ports:
      - "80:80"
    depends_on:
      - web
volumes:
  pgdata:
I guess you can export an environment shell variable and then use the matching .env file per environment. Create a dev.env and a prod.env file in the workspace.
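For instance, the two files might look something like this (variable names taken from the production.yml above; the values are placeholders):
# dev.env
DEBUG=1
DB_HOST=postgres
DB_NAME=writer_dev
# prod.env
DEBUG=0
DB_HOST=postgres
DB_NAME=writer_prod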
Sample compose -
version: '3'
services:
  nginx:
    image: nginx
    ports:
      - '80'
    env_file:
      - ${ENVIRON}.env
Build for DEV -
export ENVIRON=dev
docker-compose up -d
Build for PROD -
export ENVIRON=prod
docker-compose up -d
This way you will be able to leverage the same compose file for DEV & PROD environments.
Set up the compose files for production and dev in separate folders and put a .env file in each of those folders, as sketched below.
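A possible layout (folder names are only an example); docker-compose picks up the .env file from the directory it is run in:
# dev/docker-compose.yml    dev/.env
# prod/docker-compose.yml   prod/.env
# Deploy production with its own .env
cd prod && docker-compose up -d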
I am trying to create a service consisting of a web server and a database, each in a Docker container. Currently I am trying to use a single environment file for both of them that contains the database credentials. Unfortunately, when I try to build the database with it, the variables turn out to be empty. How can I set up the project with a single environment file for both components? Here is my docker-compose.yml:
version: '2'
services:
  db:
    build:
      context: .
      dockerfile: Dockerfile-db
    ports:
      - '5433:5432'
    env_file:
      - env
  web:
    build: .
    ports:
      - '8000:8000'
    command: venv/bin/python manage.py runserver 0.0.0.0:8000
    depends_on:
      - db
    env_file:
      - env
Here is the part of my Dockerfile-db responsible for creating the database:
FROM ubuntu:16.04
RUN apt-get update && apt-get install -y postgresql-9.5-postgis-2.2
USER postgres
ARG DB_PASSWORD
ARG DB_NAME
RUN echo $DB_PASSWORD
RUN /etc/init.d/postgresql start && \
psql --command "ALTER USER postgres WITH PASSWORD '$DB_PASSWORD';" && \
psql --command "CREATE DATABASE $DB_NAME;" && \
psql --command "\\c $DB_NAME" && \
psql --command "CREATE EXTENSION postgis;" && \
psql --command "CREATE EXTENSION postgis_topology;"
And my env file has the following structure:
DB_NAME=some_name
DB_USER=postgres
DB_PASSWORD=some_password
DB_HOST=db
DB_PORT=5432
The environment file is not part of the build process; it is used when running the container.
You need to use build args. In docker-compose you can specify build args in the file:
build:
  context: .
  dockerfile: Dockerfile-db
  args:
    DB_NAME: some_name
    DB_USER: postgres
    ...
This might not be a good idea if you want to publish the compose file, as you are storing credentials in it. You can instead build explicitly and pass --build-arg:
docker-compose build --build-arg DB_NAME=some_name ...
And when running, skip the build with docker-compose run --no-build.
Update:
As suggested by @gonczor, a shorter and cleaner syntax to pass the env file as build args is:
docker-compose build --build-arg $(cat envfile)
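Putting it together, a possible flow might be (values are placeholders and must match the ARG names in Dockerfile-db):
# Build the db image, passing the credentials as build arguments
docker-compose build --build-arg DB_NAME=some_name --build-arg DB_PASSWORD=some_password db
# Start the stack without rebuilding, so the env file still supplies the runtime variables
docker-compose up --no-build -d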
Your configuration is correct; it seems like you may not be passing the actual path or name of the .env file. Have you tried the following (assuming your .env file is in the same directory)?
version: '2'
services:
  db:
    build:
      context: .
      dockerfile: Dockerfile-db
    ports:
      - '5433:5432'
    env_file:
      - ./.env
  web:
    build: .
    ports:
      - '8000:8000'
    command: venv/bin/python manage.py runserver 0.0.0.0:8000
    depends_on:
      - db
    env_file:
      - ./.env