Dockerfile:
FROM node:lts-alpine as development
WORKDIR /usr/src/app
COPY package.json yarn.lock ./
RUN yarn --frozen-lockfile --production=false
COPY . .
RUN yarn prisma generate
RUN yarn build
FROM node:lts-alpine as production
ARG NODE_ENV=production
ENV NODE_ENV=${NODE_ENV}
WORKDIR /usr/src/app
COPY package.json yarn.lock ./
RUN yarn --frozen-lockfile --production=true
COPY --from=development /usr/src/app/dist ./dist
COPY --from=development /usr/src/app/prisma ./prisma
EXPOSE 3000
CMD ["yarn", "start:prod"]
docker-compose.yml:
version: '3.7'
services:
  postgres_db:
    image: postgres:12.2
    container_name: postgres_db
    restart: always
    environment:
      POSTGRES_PASSWORD: password
      POSTGRES_USER: user
      POSTGRES_DB: db
    ports:
      - 5432:5432
    volumes:
      - ./db:/var/lib/postgresql/data
  nestjs_server:
    build:
      context: ./nestjs_server
      dockerfile: Dockerfile
      target: development
    container_name: nestjs_server
    volumes:
      - ./nestjs_server:/usr/src/app
      - /usr/src/app/node_modules
    restart: always
    environment:
      DATABASE_URL: "postgresql://user:password@postgres_db:5432/mydb?schema=public"
    ports:
      - 3000:3000
    depends_on:
      - postgres_db
    command: yarn start:debug
volumes:
  postgres_db:
    name: postgres_db
Structure:
It works perfectly fine, but when I change something in the files it won't recompile.
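If the watcher inside the container never sees the change events coming through the bind mount (a common situation on Docker Desktop for Mac/Windows), switching it to polling usually helps. A minimal sketch, assuming start:debug runs the default nest start --debug --watch; both variables are assumptions about which watcher is actually in use:

  nestjs_server:
    environment:
      DATABASE_URL: "postgresql://user:password@postgres_db:5432/mydb?schema=public"
      CHOKIDAR_USEPOLLING: "true"            # only honoured when Nest watches via webpack/chokidar
      TSC_WATCHFILE: DynamicPriorityPolling  # only honoured by a plain tsc --watch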
Related
I am building my first custom Docker setup with Docker Compose, and I feel I am very close to finishing it, but I am having an issue with what seems to be the entrypoint.
FYI, I am trying to deploy a Django app with Postgres, Nginx, Certbot, and Let's Encrypt.
This is what I am seeing:
certbot_1 | /data/entrypoint.sh: exec: line 14: certbot: not found
nginx_1 | /data/entrypoint.sh: exec: line 14: run: not found
EDIT: I was able to make them run with the edited code, but Nginx exits with code 0 and won't run, and I don't know why.
I have tried changing the path with no luck.
I am not sure what I am doing wrong.
Any advice you can provide would be great!
docker compose file:
version: '3.8'
services:
  web:
    build: .
    command: gunicorn FleetOptimal.wsgi:application --bind 0.0.0.0:8000
    environment:
      - TZ=America/Toronto
    volumes:
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/FleetOptimal/:/manage/
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/:/data/
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/FleetOptimal/staticfiles/:/static_volume/
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/FleetOptimal/images/:/media_volume/
    expose:
      - 8000
    env_file:
      - ./.env.dev
    depends_on:
      - db
  db:
    image: postgres:13.0-alpine
    volumes:
      - /home/littlejiver/docker/postgres/postgres_data:/postgres_data/
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Canada/Toronto
      - POSTGRES_USER=someusername
      - POSTGRES_PASSWORD=#somepassword
      - POSTGRES_DB=somedb
  nginx-proxy:
    tty: true
    image: nginx:latest
    container_name: nginx-proxy
    build: .
    command: nginx -g "daemon off"
    restart: always
    environment:
      - NGINX_DOCKER_GEN_CONTAINER=nginx-proxy-letsencrypt
    ports:
      - 443:443
      - 80:80
    volumes:
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/:/data/
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/FleetOptimal/staticfiles/:/static_volume/
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/FleetOptimal/images/:/media_volume/
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - web
  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - ./.env.dev
    environment:
      - NGINX_DOCKER_GEN_CONTAINER=nginx-proxy-letsencrypt
    volumes:
      - /home/littlejiver/src/FleetOptimal/FleetOptimal/:/data/
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - acme:/etc/acme.sh
    depends_on:
      - nginx-proxy
volumes:
  postgres_data:
  static_volume:
  media_volume:
  certs:
  html:
  vhost:
  acme:
Dockerfile for Nginx:
FROM nginx:latest
COPY vhost.d/default /etc/nginx/vhost.d/default
COPY custom.conf /etc/nginx/conf.d/custom.conf
Dockerfile for Web:
###########
# BUILDER #
###########
# pull official base image
FROM python:3.9.6-alpine as builder
# set work directory
WORKDIR /manage
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# install psycopg2 dependencies
RUN apk update \
&& apk add postgresql-dev gcc python3-dev musl-dev
# lint
RUN apk add zlib-dev jpeg-dev gcc musl-dev
RUN pip install --upgrade pip
# RUN pip install flake8==3.9.2
COPY . .
# RUN flake8 --ignore=E501,F401 /manage
# install dependencies
COPY ./requirements.txt .
RUN pip wheel --no-cache-dir --no-deps --wheel-dir /usr/src/app/wheels -r requirements.txt
#########
# FINAL #
#########
# pull official base image
FROM python:3.9.6-alpine
# create directory for the app user
# create the app user
RUN addgroup -S littlejiver && adduser -S littlejiver -G littlejiver
# create the appropriate directories
ENV HOME=/manage
ENV APP_HOME=/manage
WORKDIR $APP_HOME
# install dependencies
RUN apk add zlib-dev jpeg-dev gcc musl-dev
COPY --from=builder /usr/src/app/wheels /wheels
COPY --from=builder /manage/requirements.txt .
RUN pip install --no-cache /wheels/*
# copy entrypoint.prod.sh
COPY ./entrypoint.sh .
RUN sed -i 's/\r$//g' $APP_HOME/entrypoint.sh
RUN chmod +x $APP_HOME/entrypoint.sh
# copy project
COPY . $APP_HOME
# chown all the files to the app user
RUN chown -R littlejiver:littlejiver $APP_HOME
# change to the app user
USER littlejiver
WORKDIR /manage
# run entrypoint.prod.sh
ENTRYPOINT ["sh", "/data/entrypoint.sh"]
entrypoint.sh
#!/bin/sh
if [ "$DATABASE" = "postgres" ]
then
    echo "Waiting for postgres..."
    while ! nc -z $SQL_HOST $SQL_PORT; do
        sleep 0.1
    done
    echo "PostgreSQL started"
fi

exec "$@"
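Two details in the files above look suspicious, sketched below as guesses rather than a confirmed fix: nginx-proxy uses build: ., so it is built from the Django web Dockerfile and inherits its ENTRYPOINT pointing at /data/entrypoint.sh instead of behaving like a stock Nginx image, and the daemon off directive normally needs a trailing semicolon. A possible compose fragment (Dockerfile.nginx is a hypothetical name for the Nginx Dockerfile shown above):

  nginx-proxy:
    build:
      context: .
      dockerfile: Dockerfile.nginx     # hypothetical filename; point it at the Nginx Dockerfile
    command: nginx -g "daemon off;"    # note the trailing semicolon inside the quotes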
Thanks
-littlejiver
roughly based off this guide:
I'm running docker-compose to run my application, which listens for REST API calls.
For some reason it is not accessible from outside.
I don't understand what I am doing wrong.
Here is the configuration:
version: '3.4'
services:
  rabbitmq:
    image: rabbitmq:3-management
    ports:
      - 5672:5672
      - 15672:15672
  my-server:
    image: my-server
    build:
      context: .
      dockerfile: ./apiserver/Dockerfile
    ports:
      - 5000:5000
    restart: on-failure
    depends_on:
      - rabbitmq
and my Dockerfile is:
FROM ubuntu:16.04
RUN apt-get update -y && \
apt-get install -y python-pip python-dev
# We copy just the requirements.txt first to leverage Docker cache
COPY ./requirements.txt /app/requirements.txt
WORKDIR /app
RUN pip install -r requirements.txt
COPY . /app
EXPOSE 5000
ENTRYPOINT [ "python" ]
CMD [ "app.py" ]
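A frequent cause of this symptom is the server binding to 127.0.0.1 inside the container, which the published port cannot reach; it needs to listen on 0.0.0.0. A sketch of the last lines of the Dockerfile, assuming app.py is a Flask app and Flask is in requirements.txt (switching to the flask CLI is my assumption, not part of the original setup):

ENV FLASK_APP=app.py
ENTRYPOINT ["flask"]
CMD ["run", "--host=0.0.0.0", "--port=5000"]

Equivalently, keeping ENTRYPOINT ["python"], app.py itself can call app.run(host="0.0.0.0", port=5000).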
I'm trying to dockerize my Symfony project.
In this project, I have a folder under: public/fichiersflux/
"fichiersflux" is a folder with persistent data (img, pdf...)
Here is the docker-compose.yml:
version: '3.7'
services:
  mariadb:
    image: mariadb:10.4
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: zfezZEFfz4e1589fze
      MYSQL_DATABASE: 1c1t
      MYSQL_USER: 1c1t
      MYSQL_PASSWORD: fez45FZE1fez0fzefF!
    ports:
      - 3306:3306
  php:
    image: php:7.4
    build:
      context: .
      dockerfile: docker/php/Dockerfile
    restart: on-failure
    user: 1000:1000
  nginx:
    image: nginx:1.17-alpine
    restart: on-failure
    volumes:
      - './app/public/:/usr/src/app'
      - './docker/nginx/default.conf:/etc/nginx/conf.d/default.conf:ro'
    ports:
      - 8080:80
    depends_on:
      - php
And my Dockerfile:
# ./docker/php/Dockerfile
FROM php:7.4-fpm
RUN docker-php-ext-install pdo_mysql
RUN pecl install apcu
RUN apt-get update && \
apt-get install -y \
zlib1g-dev
RUN apt-get install -y \
libzip-dev \
libicu-dev \
zip \
&& docker-php-ext-install zip
RUN docker-php-ext-enable apcu \
&& docker-php-ext-install intl
# Install Composer
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
WORKDIR /usr/src/app
COPY app/ /usr/src/app
RUN chown -R 1000:1000 /usr/src/app
RUN PATH=$PATH:/usr/src/apps/vendor/bin:bin
The problem is, when I rebuild my Docker containers, the folder /usr/src/app/ is apparently re-created, and I lose all the data inside public/fichiersflux.
How can I persist the public/fichiersflux folder?
Best regards :)
You could just add a volume to the php service to mount your target folder.
version: '3.7'
services:
  mariadb:
    image: mariadb:10.4
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: zfezZEFfz4e1589fze
      MYSQL_DATABASE: 1c1t
      MYSQL_USER: 1c1t
      MYSQL_PASSWORD: fez45FZE1fez0fzefF!
    ports:
      - 3306:3306
  php:
    image: php:7.4
    build:
      context: .
      dockerfile: docker/php/Dockerfile
    volumes:
      - '../app/public/fichiersflux:/usr/src/app/fichiersflux'
    restart: on-failure
    user: 1000:1000
  nginx:
    image: nginx:1.17-alpine
    restart: on-failure
    volumes:
      - './app/public/:/usr/src/app'
      - './docker/nginx/default.conf:/etc/nginx/conf.d/default.conf:ro'
    ports:
      - 8080:80
    depends_on:
      - php
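As an alternative sketch, a named volume also survives rebuilds and avoids depending on a relative host path; the volume name fichiersflux_data is made up, and the container path assumes the app ends up under /usr/src/app as in the Dockerfile above:

services:
  php:
    volumes:
      - fichiersflux_data:/usr/src/app/public/fichiersflux
volumes:
  fichiersflux_data: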
I've got this Dockerfile:
FROM python:3.6.9-alpine
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY ./requirements.txt /usr/src/app/requirements.txt
RUN pip install -r requirements.txt
COPY . /usr/src/app
CMD ["python", "manage.py", "run -h 0.0.0.0"] # <--
On the last line I'm getting python: can't open file 'manage.py': [Errno 2] No such file or directory
docker-compose.yml:
version: '3.7'
services:
  users:
    build:
      context: ./users
      dockerfile: Dockerfile
    volumes:
      - './services/users:/usr/src/app'
    ports:
      - 5001:5000
    environment:
      - FLASK_APP=project/__init__.py
      - FLASK_ENV=development
I've got manage.py in the same directory as the Dockerfile (located in the users dir). Where does it look for it?
Issue solved by editing the volumes entry from:
volumes:
  - './services/users:/usr/src/app'
to:
volumes:
  - './users:/usr/src/app'
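When a bind mount doesn't contain what you expect, listing the mounted directory from a throwaway container shows exactly what the service sees (users is the service name from the compose file above; this is just a debugging sketch):

docker-compose run --rm users ls -la /usr/src/app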
Getting this error when inserting values into a Model through the Rails console:
"Mongo::Error::NoServerAvailable: No server is available matching preference: # using server_selection_timeout=30 and local_threshold=0.015"
Both containers are running fine, but Rails is not able to connect to MongoDB.
I have only one Dockerfile.
My docker-compose.yml file contents are:
version: '2'
services:
  mongo:
    image: mongo:3.0
    command: mongod --smallfiles --quiet
    environment:
      - RAILS_ENV=production
      - RACK_ENV=production
    ports:
      - "27017:27017"
  app:
    depends_on:
      - 'mongo'
      # - 'redis'
    build: .
    ports:
      - '3000:3000'
    volumes:
      - '.:/app'
    command: rails s -b '0.0.0.0'
    env_file:
      - '.env'
volumes:
  mongo:
My Dockerfile:
FROM ruby:2.3.0
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
ENV APP_HOME /app
RUN mkdir $APP_HOME
WORKDIR $APP_HOME
ADD Gemfile* $APP_HOME/
RUN bundle install
ADD . $APP_HOME
Did you use mongo (the same as the service name in docker-compose.yml) as your host in mongoid.yml?
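If not, a minimal mongoid.yml sketch along these lines points the Rails container at the mongo service (this assumes a Mongoid 5+ style config; the database name some_db is a placeholder):

production:
  clients:
    default:
      database: some_db
      hosts:
        - mongo:27017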