Docker compose file with yarn monorepo

I'm pretty new to Docker, but I'm trying to get docker-compose to handle my local dev environment. Essentially, when I run docker-compose up it should have my API running on port 3000 (mapped as 3000:3000).
This is my current docker-compose.yml file:
version: "3"
services:
api:
image: node:9
ports:
- 127.0.0.1:3000:3000
working_dir: /api
volumes:
- ./:/api
command: bash -c 'yarn && cd api && yarn dev'
mongo:
image: mongo:3.4
ports:
- 127.0.0.1:27017:27017
volumes:
- ./db:/data/db
minio:
image: minio/minio
ports:
- 9000:9000
environment:
- MINIO_ACCESS_KEY=miniokey
- MINIO_SECRET_KEY=miniosecret
volumes:
- ./minio:/data
command: ["server", "/data"]
createbuckets:
image: minio/mc
depends_on:
- minio
entrypoint: >
/bin/sh -c "
while ! /usr/bin/nc minio 9000; do sleep 2s; done;
/usr/bin/mc config host add myminio http://minio:9000 miniokey miniosecret;
/usr/bin/mc mb myminio/vividaura;
/usr/bin/mc policy download myminio/vividaura;
/usr/bin/mc mb myminio/vividaura-test;
/usr/bin/mc policy download myminio/vividaura-test;
exit 0;
"
nats:
image: nats:1.1.0-linux
ports:
- 127.0.0.1:4222:4222
- 127.0.0.1:8222:8222
The thing is, I'm using Yarn's workspaces feature, so I need to run yarn in the root directory and then run yarn inside /api (see the root package.json sketch after the folder structure).
This is my folder structure:
api/
image-compose/
src/
docker-compose.yml
package.json
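
For Yarn workspaces to resolve, the root package.json has to declare the workspace folders. A minimal sketch, assuming api and src are the two workspaces (field values illustrative):

{
  "private": true,
  "workspaces": ["api", "src"]
}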

I got it working; the problem was with the ports. This is my updated docker-compose.yml file:
version: "3"
services:
web:
image: node:9
ports:
- 3000:3000
working_dir: /app
volumes:
- ./:/app
command: bash -c 'yarn && cd src && yarn dev'
depends_on:
- api
api:
image: node:9
ports:
- 3001:3001
working_dir: /api
volumes:
- ./:/api
command: bash -c 'yarn && cd api && yarn dev'
depends_on:
- mongo
- nats
mongo:
image: mongo:3.4
ports:
- 127.0.0.1:27017:27017
volumes:
- ./db:/data/db
minio:
image: minio/minio
ports:
- 9000:9000
environment:
- MINIO_ACCESS_KEY=miniokey
- MINIO_SECRET_KEY=miniosecret
volumes:
- ./minio:/data
command: ["server", "/data"]
createbuckets:
image: minio/mc
depends_on:
- minio
entrypoint: >
/bin/sh -c "
while ! /usr/bin/nc minio 9000; do sleep 2s; done;
/usr/bin/mc config host add myminio http://minio:9000 miniokey miniosecret;
/usr/bin/mc mb myminio/vividaura;
/usr/bin/mc policy download myminio/vividaura;
/usr/bin/mc mb myminio/vividaura-test;
/usr/bin/mc policy download myminio/vividaura-test;
exit 0;
"
nats:
image: nats:1.1.0-linux
ports:
- 127.0.0.1:4222:4222
- 127.0.0.1:8222:8222
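
One caveat: depends_on only controls start order; it does not wait for mongo or nats to actually accept connections. If the api container races its dependencies, the nc wait loop already used by the createbuckets service can be reused in its command. A sketch, assuming nc is installed in the node:9 image:

command: bash -c 'while ! nc -z mongo 27017; do sleep 2; done; yarn && cd api && yarn dev'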

Related

When creating a container with docker-compose on a server, logs show 'exec /usr/local/bin/docker-entrypoint.sh: exec format error'

I'm using a MacBook M1 (maybe that's the cause of the problem). While deploying my project to the server I got this error.
$ cat docker-compose.yml
version: '3.8'
services:
  db:
    image: postgres:13.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - ./.env
    restart: always
  frontend:
    image: annarulunat/foodgram_frontend:latest
    volumes:
      - ../frontend/:/app/result_build/
  backend:
    image: annarulunat/foodgram:latest
    restart: always
    volumes:
      - static_value:/app/static_backend/
      - media_value:/app/media/
    depends_on:
      - db
    env_file:
      - ./.env
    command: >
      sh -c "python manage.py collectstatic --noinput &&
      python manage.py migrate &&
      gunicorn foodgram.wsgi:application --bind 0:8000"
  nginx:
    image: nginx:1.21.3-alpine
    ports:
      - "80:80"
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf
      - ../frontend/build:/usr/share/nginx/html/
      - ../docs/:/usr/share/nginx/html/api/docs/
      - static_value:/var/html/static_backend/
      - media_value:/var/html/media/
    restart: always
volumes:
  static_value:
  media_value:
  postgres_data:
$ cat Dockerfile
# build env
FROM node:13.12.0-alpine as build
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . ./
RUN npm run build
CMD cp -r build result_build
Thank you!
I tried adding FROM --platform=linux/amd64 <image>:<version> to the Dockerfile and rebuilding.
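
Another way to produce an amd64 image from an M1 (arm64) machine is docker buildx with an explicit target platform, for example (tag illustrative, reusing the image name above):

docker buildx build --platform linux/amd64 -t annarulunat/foodgram_frontend:latest --push .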

'Could not find rake-13.0.3 in any of the sources (Bundler::GemNotFound)' while creating my api service

docker-compose.yml
version: "3.7"
services:
courseshine_redis:
container_name: courseshine_redis
image: redis:latest
command: redis-server --requirepass ${POSTGRES_PASSWORD}
restart: always
env_file: .env
stdin_open: true
ports:
- ${REDIS_PORT}:${REDIS_PORT}
volumes:
- courseshine_redis_data:/data
networks:
- internal
courseshine_db:
container_name: courseshine_db
build:
context: ../..
dockerfile: courseshine_docker/development/courseshine_db/Dockerfile
restart: always
env_file: .env
environment:
- POSTGRES_MULTIPLE_DATABASES=${POSTGRES_DEV_DB},${POSTGRES_TEST_DB}
ports:
- ${COURSESHINE_DB_PORT}:${COURSESHINE_DB_PORT}
volumes:
- courseshine_postgres_data:/var/lib/postgresql/data
- ./courseshine_db:/dockerfile-entrypoint-initdb.d
networks:
- internal
courseshine_pgadmin:
container_name: courseshine_pgadmin
image: dpage/pgadmin4:4.21
restart: unless-stopped
env_file: .env
environment:
- PGADMIN_DEFAULT_EMAIL=${POSTGRES_USER}
- PGADMIN_DEFAULT_PASSWORD=${POSTGRES_PASSWORD}
volumes:
- pgadmin:/var/lib/pgadmin
- courseshine_postgres_data:/var/lib/postgresql/data
depends_on:
- courseshine_db
networks:
- internal
courseshine_api: &api_base
container_name: courseshine_api
build:
context: ../..
dockerfile: courseshine_docker/development/courseshine_api/Dockerfile
env_file: .env
stdin_open: true
volumes:
- ../../courseshine_api:/var/www/courseshine/courseshine_api
- /var/run/docker.sock:/var/run/docker.sock
- bundle_cache:/usr/local/bundle
depends_on:
- courseshine_redis
- courseshine_db
networks:
- internal
courseshine_ui:
container_name: courseshine_ui
build:
context: ../../
dockerfile: courseshine_docker/development/courseshine_ui/Dockerfile
env_file: .env
stdin_open: true
volumes:
- ../../courseshine_ui:/var/www/courseshine_ui
depends_on:
- courseshine_api
networks:
- internal
networks:
internal:
volumes:
courseshine_redis_data:
courseshine_postgres_data:
pgadmin:
bundle_cache:
My Dockerfile for the courseshine_api service:
FROM ruby:2.7.1-slim-buster
RUN apt-get update -qq && apt-get install -y build-essential nodejs libpq-dev postgresql-client && rm -rf /var/lib/apt/lists/*
ENV APP_HOME /var/www/courseshine/courseshine_api
RUN mkdir -p $APP_HOME
WORKDIR $APP_HOME
COPY ./courseshine_api/Gemfile $APP_HOME/Gemfile
COPY ./courseshine_api/Gemfile.lock $APP_HOME/Gemfile.lock
RUN bundle install --path vendor/cache
# Copy the main application.
COPY ./courseshine_api $APP_HOME
# Add a script to be executed every time the container starts.
COPY ./courseshine_docker/development/courseshine_api/entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/entrypoint.sh
ENTRYPOINT ["entrypoint.sh"]
# Expose port 3000 to the Docker host, so we can access it
# from the outside.
EXPOSE 3000
# The main command to run when the container starts. Also
# tell the Rails dev server to bind to all interfaces by
# default.
CMD ["rails","server","-b","0.0.0.0"]
entrypoint.sh
#!/bin/bash
set -e
# Remove a stale server.pid left behind by a previous run.
rm -f $APP_HOME/tmp/pids/server.pid
# Hand control to the container's main process; "$@" (not "$#") forwards the CMD arguments.
exec "$@"
When I run docker-compose up, the courseshine_api service does not stay up and throws Could not find rake-13.0.3 in any of the sources (Bundler::GemNotFound). Why does this problem occur and how can I fix it?

Rails image not running from docker-compose.yml

I have a Rails application that runs on Docker. My source code has the following files:
Dockerfile
FROM ruby:2.6.0
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
RUN mkdir /myapp
WORKDIR /myapp
COPY Gemfile /myapp/Gemfile
COPY Gemfile.lock /myapp/Gemfile.lock
RUN bundle install
COPY . /myapp
CMD bash -c "rm -f tmp/pids/server.pid && rails s -p 3000 -b '0.0.0.0'"
docker-compose.yml
version: '3'
services:
  db:
    image: postgres
    volumes:
      - ./tmp/db:/var/lib/postgresql/data
    ports:
      - "5432:5432"
  redis:
    image: redis
    command: redis-server
    ports:
      - "6379:6379"
  sidekiq:
    build: .
    command: bundle exec sidekiq
    depends_on:
      - redis
    volumes:
      - .:/myapp
  web:
    build: .
    command: bash -c "rm -f tmp/pids/server.pid && rails s -p 3000 -b '0.0.0.0'"
    volumes:
      - .:/myapp
    ports:
      - "3000:3000"
    depends_on:
      - db
      - redis
      - sidekiq
It runs normally using docker-compose up since I'm running it alongside the source code.
Now when I build this app and push it to Dockerhub
docker build -t myusername/rails-app .
docker push myusername/rails-app
I'm expecting that I can run the rails app from an independent docker-compose.yml separately from the source code.
version: '3'
services:
  db:
    image: postgres
    volumes:
      - ./tmp/db:/var/lib/postgresql/data
    ports:
      - "5432:5432"
  redis:
    image: redis
    command: redis-server
    ports:
      - "6379:6379"
  sidekiq:
    build: .
    command: bundle exec sidekiq
    depends_on:
      - redis
    volumes:
      - .:/myapp
  web:
    image: myusername/rails-app:latest # <= Running the app now from the image
    command: bash -c "rm -f tmp/pids/server.pid && rails s -p 3000 -b '0.0.0.0'"
    volumes:
      - .:/myapp
    ports:
      - "3000:3000"
    depends_on:
      - db
      - redis
      - sidekiq
The only containers running are redis and db. The web container fails with:
Could not locate Gemfile or .bundle/ directory
In the second docker-compose.yml file, the one that should work somewhere else without the source code, you're still mounting the local folder into the container:
volumes:
  - .:/myapp
Remove that from the sidekiq and web services and it should work.
You've also kept build: . for the sidekiq service, which is useful only on the development box. Replace it with the image attribute, pointing to your image.
To summarise, your docker-compose.yml file:
version: '3'
services:
  db:
    image: postgres
    volumes:
      - ./tmp/db:/var/lib/postgresql/data
    ports:
      - "5432:5432"
  redis:
    image: redis
    command: redis-server
    ports:
      - "6379:6379"
  sidekiq:
    image: myusername/rails-app:latest
    command: bundle exec sidekiq
    depends_on:
      - redis
  web:
    image: myusername/rails-app:latest # <= Running the app now from the image
    command: bash -c "rm -f tmp/pids/server.pid && rails s -p 3000 -b '0.0.0.0'"
    ports:
      - "3000:3000"
    depends_on:
      - db
      - redis
      - sidekiq
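On the target machine, the stack can then be started without any source checkout by pulling the published image first:

docker-compose pull
docker-compose up -d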

How to set up separate .env for development and production using Docker

Coming from an environment where I would manually ssh into the remote server, do a git pull, and create my .env (since it is gitignored), how do I separate a development .env from a production .env? I used docker-machine to create an AWS EC2 instance. I created a production.yml and ran docker-compose -f production.yml up -d. The container on the EC2 machine picked up my development .env, which is not what I want.
Dockerfile
FROM python:3.6-alpine
ENV PYTHONUNBUFFERED 1
RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev git jpeg-dev zlib-dev libmagic
RUN python -m pip install --upgrade pip
RUN mkdir /writer-api
COPY requirements.txt /writer-api/
RUN pip install --no-cache-dir -r /writer-api/requirements.txt
COPY . /writer-api/
WORKDIR /writer-api
production.yml
version: "3"
services:
postgres:
restart: always
image: postgres
ports:
- "5432:5432"
volumes:
- pgdata:/var/lib/postgresql/data/
web:
restart: always
build: .
command: gunicorn writer.wsgi:application -w 2 -b :8000
environment:
DEBUG: ${DEBUG}
SECRET_KEY: ${SECRET_KEY}
DB_HOST: ${DB_HOST}
DB_NAME: ${DB_NAME}
DB_USER: ${DB_USER}
DB_PORT: ${DB_PORT}
DB_PASSWORD: ${DB_PASSWORD}
SENDGRID_API_KEY: ${SENDGRID_API_KEY}
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
AWS_STORAGE_BUCKET_NAME: ${AWS_STORAGE_BUCKET_NAME}
depends_on:
- postgres
- redis
expose:
- "8000"
redis:
restart: always
image: "redis:alpine"
celery:
restart: always
build: .
command: celery -A writer worker -l info
volumes:
- .:/writer-api
depends_on:
- postgres
- redis
celery-beat:
restart: always
build: .
command: celery -A writer beat -l info
volumes:
- .:/writer-api
depends_on:
- postgres
- redis
nginx:
restart: always
build: ./nginx/
ports:
- "80:80"
depends_on:
- web
volumes:
pgdata:
You can export a shell environment variable and then pick the .env file per environment. Create a dev.env and a prod.env file in the workspace.
Sample compose:
version: '3'
services:
  nginx:
    image: nginx
    ports:
      - '80'
    env_file:
      - ${ENVIRON}.env
Build for DEV -
export ENVIRON=dev
docker-compose up -d
Build for PROD -
export ENVIRON=prod
docker-compose up -d
This way you will be able to leverage same compose file for DEV & PROD environments.
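For illustration, the two files could hold environment-specific values like this (all values hypothetical):

# dev.env
DEBUG=1
DB_HOST=localhost

# prod.env
DEBUG=0
DB_HOST=postgres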
Alternatively, set up the compose files for production and dev in separate folders and put a .env file in each folder.
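With separate folders you then point Compose at the right file explicitly, for example (paths illustrative):

docker-compose -f dev/docker-compose.yml up -d
docker-compose -f prod/docker-compose.yml up -d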

Container hostname is not resolved to IP

I am trying to set the host for the connection to Neo4j in the application.conf file using an environment variable, which is set in the Dockerfile. Neo4j is used as an image from the docker-compose.yml file.
When I run an image of the application with the hostname of the Neo4j container I get the error: ServiceUnavailableException: Unable to connect to neo4jdb:7687, ensure the database is running and that there is a working network connection to it
So the name of the container is not resolved to an IP address. How can I fix it?
Configuration file:
neo4j {
  url = "bolt://localhost:7687"
  url = ${?HOSTNAME}
  user = "user"
  password = "password"
}
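For reference, the ${?HOSTNAME} line is HOCON's optional override: when the HOSTNAME environment variable is set it replaces the default url. A minimal sketch of reading these values with Typesafe Config (object name illustrative):

import com.typesafe.config.ConfigFactory

object Neo4jConfig {
  // load() parses application.conf and substitutes ${?HOSTNAME} from the environment
  private val config = ConfigFactory.load()
  val url: String = config.getString("neo4j.url")
  val user: String = config.getString("neo4j.user")
  val password: String = config.getString("neo4j.password")
}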
Application Dockerfile:
FROM java:8
ARG ARG_CLASS
ENV MAIN_CLASS $ARG_CLASS
ENV SCALA_VERSION 2.11.8
ENV SBT_VERSION 1.1.1
ENV SPARK_VERSION 2.2.0
ENV SPARK_DIST spark-$SPARK_VERSION-bin-hadoop2.6
ENV SPARK_ARCH $SPARK_DIST.tgz
ENV HOSTNAME bolt://neo4jdb:7687
VOLUME /workdir
WORKDIR /opt
# Install Scala
RUN \
cd /root && \
curl -o scala-$SCALA_VERSION.tgz http://downloads.typesafe.com/scala/$SCALA_VERSION/scala-$SCALA_VERSION.tgz && \
tar -xf scala-$SCALA_VERSION.tgz && \
rm scala-$SCALA_VERSION.tgz && \
echo >> /root/.bashrc && \
echo 'export PATH=~/scala-$SCALA_VERSION/bin:$PATH' >> /root/.bashrc
# Install SBT
RUN \
curl -L -o sbt-$SBT_VERSION.deb https://dl.bintray.com/sbt/debian/sbt-$SBT_VERSION.deb && \
dpkg -i sbt-$SBT_VERSION.deb && \
rm sbt-$SBT_VERSION.deb
# Install Spark
RUN \
cd /opt && \
curl -o $SPARK_ARCH http://d3kbcqa49mib13.cloudfront.net/$SPARK_ARCH && \
tar xvfz $SPARK_ARCH && \
rm $SPARK_ARCH && \
echo 'export PATH=$SPARK_DIST/bin:$PATH' >> /root/.bashrc
EXPOSE 9851 9852 4040 9092 9200 9300 5601 7474 7687 7473
CMD /workdir/runDemo.sh "$MAIN_CLASS"
I build and run my application separately with these commands:
docker build -t container-name --build-arg ARG_CLASS=producer .
docker run -v ${PWD}/:/workdir -w /workdir container-name
Docker-compose file looks like this:
version: '3.3'
services:
  kafka:
    image: spotify/kafka
    ports:
      - "9092:9092"
    networks:
      - docker_elk
    environment:
      - ADVERTISED_HOST=localhost
  neo4jdb:
    image: neo4j:latest
    ports:
      - "7474:7474"
      - "7473:7473"
      - "7687:7687"
    networks:
      - docker_elk
    volumes:
      - /var/lib/neo4j/import:/var/lib/neo4j/import
      - /var/lib/neo4j/data:/data
      - /var/lib/neo4j/conf:/conf
    environment:
      - NEO4J_dbms_active__database=graphImport.db
  elasticsearch:
    image: elasticsearch:latest
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - docker_elk
    volumes:
      - esdata1:/usr/share/elasticsearch/data
  kibana:
    image: kibana:latest
    ports:
      - "5601:5601"
    networks:
      - docker_elk
volumes:
  esdata1:
    driver: local
networks:
  docker_elk:
    driver: bridge
Defining all the application containers in one docker-compose file worked; the hostnames of all the containers are then resolved.
version: '3.3'
services:
  kafka:
    image: spotify/kafka
    ports:
      - "9092:9092"
    environment:
      - ADVERTISED_HOST=localhost
  neo4jdb:
    image: neo4j:latest
    hostname: neo4jdb
    ports:
      - "7474:7474"
      - "7473:7473"
      - "7687:7687"
    volumes:
      - /var/lib/neo4j/import:/var/lib/neo4j/import
      - /var/lib/neo4j/data:/var/lib/neo4j/data
      - /var/lib/neo4j/conf:/var/lib/neo4j/conf
    environment:
      - NEO4J_dbms_active__database=graphImport.db
  elasticsearch:
    image: elasticsearch:latest
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - docker_elk
    volumes:
      - esdata1:/usr/share/elasticsearch/data
  kibana:
    image: kibana:latest
    ports:
      - "5601:5601"
    networks:
      - docker_elk
  consumer-demo:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - HOST=neo4jdb
    volumes:
      - ./:/workdir
    working_dir: /workdir
    depends_on:
      - neo4jdb
      - kafka
      - elasticsearch
      - kibana
    links:
      - neo4jdb
      - kafka
      - elasticsearch
      - kibana
  producer-demo:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - HOST=neo4jdb
    volumes:
      - ./:/workdir
    working_dir: /workdir
    depends_on:
      - neo4jdb
      - kafka
      - elasticsearch
      - kibana
    links:
      - neo4jdb
      - kafka
      - elasticsearch
      - kibana
      - consumer-demo
volumes:
  esdata1:
    driver: local
networks:
  docker_elk:
    driver: bridge
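
One detail worth noting: the compose file passes HOST=neo4jdb as a build arg, while the application config reads the HOSTNAME environment variable. A hedged sketch of the Dockerfile lines that would bridge the two, following the bolt URL format used above:

# Map the compose build arg into the env var the config expects
ARG HOST
ENV HOSTNAME bolt://$HOST:7687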
