I found it very strange: I had set up a Rails app, a Postgres DB, and an Nginx server for production only, but nginx is only able to start if I type
docker-compose -f docker-compose.yml -f production.yml up --build
but not the pre-built one:
docker-compose -f docker-compose.yml -f production.yml build
then
docker-compose up
the rails app and db start just fine; it's just that nginx is not started and the port reverts back to 3000 instead of 80, which I find very strange — aren't they doing the same thing?
nginx.conf
# This is a template. Referenced variables (e.g. $INSTALL_PATH) need
# to be rewritten with real values in order for this file to work.
upstream rails_app {
    # Puma is expected to bind to this unix socket.
    # NOTE(review): this path (/webapp/...) differs from the app root used
    # below (/providre_api) — confirm both point at the same checkout.
    server unix:///webapp/tmp/sockets/puma.sock;
}

server {
    listen 80;

    # define your domain
    server_name 127.0.0.1 localhost www.example.com;

    # define the public application root
    root /providre_api/public;

    # define where Nginx should write its logs
    access_log /providre_api/log/nginx.access.log;
    error_log /providre_api/log/nginx.error.log;

    # deny requests for files that should never be accessed
    location ~ /\. {
        deny all;
    }

    # (fixed: removed a stray trailing backslash after the opening brace)
    location ~* ^.+\.(rb|log)$ {
        deny all;
    }

    # serve static (compiled) assets directly if they exist (for rails production)
    location ~ ^/(assets|images|javascripts|stylesheets|swfs|system)/ {
        # (fixed: named locations use "@"; "#rails" starts a comment in
        # nginx config, which left try_files without its fallback)
        try_files $uri @rails;
        access_log off;
        gzip_static on; # to serve pre-gzipped version
        expires max;
        add_header Cache-Control public;

        # Some browsers still send conditional-GET requests if there's a
        # Last-Modified header or an ETag header even if they haven't
        # reached the expiry date sent in the Expires header.
        add_header Last-Modified "";
        add_header ETag "";
        break;
    }

    # send non-static file requests to the app server
    location / {
        try_files $uri @rails;
    }

    location @rails {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_redirect off;
        proxy_pass http://rails_app;
    }
}
web.Dockerfile
# Base image:
FROM nginx
# Install dependencies
RUN apt-get update -qq && apt-get -y install apache2-utils
# establish where Nginx should look for files
ENV INSTALL_PATH /providre_api
# Set our working directory inside the image
WORKDIR $INSTALL_PATH
# create log directory
RUN mkdir log
# copy over static assets
COPY public public/
# Copy Nginx config template
COPY docker/web/nginx.conf /tmp/docker.nginx
# substitute variable references in the Nginx config template for real values from the environment
# put the final config in its place
RUN envsubst '$INSTALL_PATH' < /tmp/docker.nginx > /etc/nginx/conf.d/default.conf
EXPOSE 80
# Use the "exec" form of CMD so Nginx shuts down gracefully on SIGTERM (i.e. `docker stop`)
CMD [ "nginx", "-g", "daemon off;" ]
docker-compose.yml
version: '3'
services:
db:
image: postgres
volumes:
- ./tmp/db:/var/lib/postgresql/data
restart: always
ports:
- "5433:5432"
environment:
POSTGRES_USER: 'postgres'
POSTGRES_PASSWORD: ''
app:
command: bundle exec puma -C config/puma.rb
ports:
- "3000"
depends_on:
- db
docker-compose.override.yml
version: '3'
services:
app:
build:
context: .
dockerfile: ./docker/app/Dockerfile
volumes:
- .:/providre_api
ports:
- "3000:3000"
production.yml
version: '3'
services:
app:
build:
context: .
dockerfile: ./docker/app/prod.Dockerfile
volumes:
- .:/providre_api
ports:
- "3000"
nginx:
container_name: web
build:
context: .
dockerfile: ./docker/web/web.Dockerfile
depends_on:
- app
volumes:
- ./docker/web/nginx.conf:/etc/nginx/conf.d/default.conf
ports:
- 80:80
Sorry, my bad — I didn't test it fully. A plain docker-compose up will use the normal docker-compose.yml (plus the override), so I have to use docker-compose -f docker-compose.yml -f production.yml up instead.
Related
My app structure is like this:
.
├── src
│ └── some go files
├── templates
├── static
|── images
|── js
└── styles
And here is my Dockerfile:
FROM golang:1.18
WORKDIR /usr/src/app
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
CMD ["go", "run", "src/cmd/main.go"]
And here is my docker-compose.yml:
version: "3.8"
services:
pgsql:
image: postgres
ports:
- "5432:5432"
volumes:
- todo_pg_db:/var/lib/postgresql/data
environment:
- POSTGRES_DB=todo
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
app:
build: .
ports:
- "8080"
restart: always
depends_on:
- pgsql
nginx:
image: nginx
restart: always
ports:
- 801:801
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
volumes:
todo_pg_db:
And here is the nginx.conf:
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    access_log /var/log/nginx/access.log;
    sendfile on;

    server {
        listen 801;
        server_name 127.0.0.1;
        charset utf-8;
        keepalive_timeout 5;

        location / {
            # checks for static file, if not found proxy to app
            # (fixed: named locations use "@"; "#backend" starts a comment
            # in nginx config, breaking the try_files fallback)
            try_files $uri @backend;
        }

        location @backend {
            # client_max_body_size 10m;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Host $http_host;
            proxy_redirect off;
            # "app" is the docker-compose service name, resolved by Docker's
            # embedded DNS on the compose network
            proxy_pass http://app:8080;
        }
    }
}
My problem is that nginx can't find static files.
Here is some example logs:
open() "/usr/src/app/static/styles/bootstrap.min.css" failed (2: No such file or directory)
But there is such directory.
when I exec into my docker container using this command: sudo docker exec -it todo_app_1 bash.
Then I cat contents of the file, and it works fine!!!
cat /usr/src/app/static/styles/bootstrap.min.css
# output: file content...
I don't know what is wrong in here.
What am I missing?
I have fixed that using volumes:
nginx:
image: nginx
restart: always
ports:
- 801:801
volumes:
- ./static:/var/www
- ./nginx.conf:/etc/nginx/nginx.conf
and in nginx.conf:
location /static {
alias /var/www;
}
I'm trying to run a platform building on Django, Docker, Nginx and Gunicorn from my Ubuntu server.
Before to ask you, i'm reading about my problem and i did on my nginx.conf:
location / {
proxy_read_timeout 300s;
proxy_connect_timeout 75s;
...
}
Then, on my Gunicorn settings:
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "-t 90", "config.wsgi:application"]
The problem persists and server always returns: 502 Bad Gateway. When i try to access to:
http://34.69.240.210:8000/admin/
From browser, the server redirect to
http://34.69.240.210:8000/admin/login/?next=/admin/
But show the error:
My Dockerfile:
FROM python:3.8
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN mkdir /code
WORKDIR /code
COPY . /code/
RUN pip install -r requirements.txt
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "-t 90", "config.wsgi:application"]
My docker-compose.yml:
version: "3.8"
services:
  django_app:
    build: .
    volumes:
      # named volume shared with nginx for collected static files
      - static:/code/static
      # bind mount of the project source (does not need a top-level entry)
      - .:/code
  nginx:
    image: nginx:1.15
    ports:
      # quoted so YAML never misreads the port mapping
      - "8000:8000"
    volumes:
      - ./config/nginx/conf.d:/etc/nginx/conf.d
      - static:/code/static
    depends_on:
      - django_app

# Only named volumes are declared here.
# (fixed: removed the invalid ".:" entry — "." is a bind-mount path,
#  not a named volume, and must not appear under top-level "volumes")
volumes:
  static:
My Nginx file:
upstream django_server {
server django_app:8000 fail_timeout=0;
}
server {
listen 8000;
server_name 34.69.240.210;
keepalive_timeout 5;
client_max_body_size 4G;
location / {
proxy_read_timeout 300s;
proxy_connect_timeout 75s;
proxy_pass http://django_server;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
}
}
Any idea what can i do to fix it?
Thank you.
Well, for the record.
My problem was the database connection. My Docker container couldn't connect to the local Postgres database.
So, i added this line to postgresql.conf:
listen_addresses = '*'
Then, i added this line to pg_hba.conf
host all all 0.0.0.0/0 md5
Restart Postgres:
sudo service postgresql restart
My host postgres ip:
172.17.0.1
Test from inside docker:
psql -U myuser -d databasename -h 172.17.0.1 -W
Done! :)
I am trying to deploy a simple Django Rest Framework app to the production server using Docker. My aim is to install Nginx with a proxy and Certbot for a regular Let'sEncrypt SSL at the same time. I manage my dependencies in DockerFiles and docker-compose.
So the folder structure has the following view:
app
DockerFile
nginx
DockerFile
init-letsencrypt.sh
nginx.conf
docker-compose.yml
My idea is to hold all the configs in app/docker-compose.yml and start many different instances from the same source. But I do not have any nginx or certbot config in app/DockerFile - that's only for Django Rest Framework and that works well. But in docker-compose.yml I have the following code:
version: '3'
'services':
app:
container_name: djangoserver
command: gunicorn prototyp.wsgi:application --env DJANGO_SETTINGS_MODULE=prototyp.prod_settings --bind 0.0.0.0:8000 --workers=2 --threads=4 --worker-class=gthread
build:
context: ./api
dockerfile: Dockerfile
restart: always
ports:
- "8000:8000"
depends_on:
- otherserver
otherserver:
container_name: otherserver
build:
context: ./otherserver
dockerfile: Dockerfile
restart: always
nginx:
build: ./nginx
ports:
- 80:80
depends_on:
- app
command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
certbot:
image: certbot/certbot
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
This makes me to build "app", "otherserver", "nginx" and "certbot".
The most important parts are in "nginx" folder.
I used this manual and cloned file "init-letsencrypt.sh" from the source just the way it was described. Then I tried to bash it:
nginx/DockerFile:
FROM nginx:1.19.0-alpine
RUN rm /etc/nginx/conf.d/default.conf
COPY nginx.conf /etc/nginx/conf.d
RUN mkdir -p /usr/src/app
COPY init-letsencrypt.sh /usr/src/app
WORKDIR /usr/src/app
RUN chmod +x init-letsencrypt.sh
ENTRYPOINT ["/usr/src/app/init-letsencrypt.sh"]
In nginx/nginx.conf I have the following code:
upstream django {
server app:8000;
}
server {
listen 80;
server_name app.com www.app.com;
location / {
return 301 https://$host$request_uri;
}
}
server {
listen 443 ssl;
server_name app.com www.app.com;
access_log /var/log/nginx-access.log;
error_log /var/log/nginx-error.log;
ssl_certificate /etc/letsencrypt/live/app.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/app.com/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
location ^/static/rest_framework/((img/|css/|js/|fonts).*)$ {
autoindex on;
access_log off;
alias /usr/src/app/static/rest_framework/$1;
}
location / {
proxy_pass http://django;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_body_buffer_size 256k;
proxy_connect_timeout 120;
proxy_send_timeout 120;
proxy_read_timeout 120;
proxy_buffer_size 64k;
proxy_buffers 4 64k;
proxy_busy_buffers_size 64k;
proxy_temp_file_write_size 64k;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
client_max_body_size 100M;
}
}
So, with this configuration, when I do "docker-compose build", the build works without any errors and everything is successfully built. But as soon as I do "docker-compose up" I have the problem that certbot and nginx are not connected, and the app only works when I use http://app.com:8000 instead of https://app.com.
In console I do not have any errors.
What do I do wrong? What have I missed? Any help will be appreciated.
I see in your setup you try to run let's encrypt from within the nginx container. But I believe there are two better way that I describe in details here and here.
The idea behind the first method is to have a docker-compose file to initiate the letsencrypt certificate, and another docker-compose file to run the system and renew the certificate.
So without further ado, here is the file structure and content that is working really well for me (you still need to adapt the files to suit your needs)
./setup.sh
./docker-compose-initiate.yaml
./docker-compose.yaml
./etc/nginx/templates/default.conf.template
./etc/nginx/templates-initiate/default.conf.template
The setup in 2 phases:
In the first phase "the initiation phase" we will run an nginx container, and a certbot container just to obtain the ssl certificate for the first time and store it on the host ./etc/letsencrypt folder
In the second phase "the operation phase" we run all necessary services for the app, including nginx, which will this time use the letsencrypt folder to serve https on port 443; a certbot container will also run (on demand) to renew the certificate. We can add a cron job for that. The setup.sh script is a simple convenience script that runs the commands one after another:
#!/bin/bash
# the script expects two arguments:
# - the domain name for which we are obtaining the ssl certificate
# - the Email address associated with the ssl certificate
echo DOMAIN=$1 >> .env
echo EMAIL=$2 >> .env

# Phase 1 "Initiation"
# (fixed: the phase-1 compose file shown above is named
#  docker-compose-initiate.yaml, not docker-compose-first.yaml)
docker-compose -f ./docker-compose-initiate.yaml up -d nginx
docker-compose -f ./docker-compose-initiate.yaml up certbot
docker-compose -f ./docker-compose-initiate.yaml down

# Phase 2 "Operation"
crontab ./etc/crontab
docker-compose -f ./docker-compose.yaml up -d
Phase 1: The ssl certificate initiation phase:
./docker-compose-initiate.yaml
version: "3"
services:
nginx:
container_name: nginx
image: nginx:latest
environment:
- DOMAIN
ports:
- 80:80
volumes:
- ./etc/nginx/templates-initiate:/etc/nginx/templates:ro
- ./etc/letsencrypt:/etc/letsencrypt:ro
- ./certbot/data:/var/www/certbot
certbot:
container_name: certbot
image: certbot/certbot:latest
depends_on:
- nginx
command: >-
certonly --reinstall --webroot --webroot-path=/var/www/certbot
--email ${EMAIL} --agree-tos --no-eff-email
-d ${DOMAIN}
volumes:
- ./etc/letsencrypt:/etc/letsencrypt
- ./certbot/data:/var/www/certbot
./etc/nginx/templates-initiate/default.conf.template
server {
listen [::]:80;
listen 80;
server_name $DOMAIN;
location ~/.well-known/acme-challenge {
allow all;
root /var/www/certbot;
}
}
Phase 2: The operation phase
./docker-compose.yaml
services:
app:
{{your_configurations_here}}
{{other_services...}}:
{{other_services_configuraitons}}
nginx:
container_name: nginx
image: nginx:latest
restart: always
environment:
- DOMAIN
depends_on:
- app
ports:
- 80:80
- 443:443
volumes:
- ./etc/nginx/templates:/etc/nginx/templates:ro
- ./etc/letsencrypt:/etc/letsencrypt
- ./certbot/data:/var/www/certbot
- /var/log/nginx:/var/log/nginx
certbot:
container_name: certbot
image: certbot/certbot:latest
depends_on:
- nginx
command: >-
certonly --reinstall --webroot --webroot-path=/var/www/certbot
--email ${EMAIL} --agree-tos --no-eff-email
-d ${DOMAIN}
volumes:
- ./etc/letsencrypt:/etc/letsencrypt
- ./certbot/data:/var/www/certbot
./etc/nginx/templates/default.conf.template
server {
listen [::]:80;
listen 80;
server_name $DOMAIN;
return 301 https://$host$request_uri;
}
server {
listen [::]:443 ssl http2;
listen 443 ssl http2;
server_name $DOMAIN;
ssl_certificate /etc/letsencrypt/live/$DOMAIN/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/$DOMAIN/privkey.pem;
location ~ /.well-known/acme-challenge {
allow all;
root /var/www/html;
}
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Proto https;
proxy_pass http://app:80;
}
}
The second method uses two docker images: http-proxy and http-proxy-acme-companion that were developed specifically for this reason. I suggest looking at the blog post for further details.
As I see it, you have not exposed port 443 for the nginx container:
nginx:
build: ./nginx
ports:
- 80:80
- 443:443
depends_on:
Add port 443 as well.
I am deploying my Next.js / Nginx docker image from Container Registry to Compute Engine
Once deployed the application is running as expected, but its running on port 3000 instead of 80 - i.e. I want to access it at <ip_address> but I can only access it on <ip_address>:3000. I have setup a reverse proxy in Nginx to forward port 3000 to 80 but it does not seem to be working.
When I run docker-compose up the app is accessible on localhost (rather than localhost:3000)
Dockerfile
FROM node:alpine as react-build
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY package.json /usr/src/app
RUN npm install
RUN npm install --global pm2
COPY . /usr/src/app
RUN npm run build
# EXPOSE 3000
EXPOSE 80
CMD [ "pm2-runtime", "start", "npm", "--", "start" ]
docker-compose.yml
version: '3'
services:
nextjs:
build: ./
nginx:
build: ./nginx
ports:
- 80:80
./nginx/Dockerfile
FROM nginx:alpine
# Remove any existing config files
RUN rm /etc/nginx/conf.d/*
# Copy config files
# *.conf files in "conf.d/" dir get included in main config
COPY ./default.conf /etc/nginx/conf.d/
./nginx/default.conf
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=STATIC:10m inactive=7d use_temp_path=off;

server {
    listen 80;

    gzip on;
    gzip_proxied any;
    gzip_comp_level 4;
    gzip_types text/css application/javascript image/svg+xml;

    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
    proxy_set_header Host $host;
    proxy_cache_bypass $http_upgrade;

    # All proxy_pass targets use the compose service name "nextjs".
    # (fixed: two targets were misspelled "locahost", and "localhost"
    #  inside the nginx container is the nginx container itself — on a
    #  compose network the upstream must be addressed by service name)

    # BUILT ASSETS (E.G. JS BUNDLES)
    # Browser cache - max cache headers from Next.js as build id in url
    # Server cache - valid forever (cleared after cache "inactive" period)
    location /_next/static {
        #proxy_cache STATIC;
        proxy_pass http://nextjs:3000;
    }

    # STATIC ASSETS (E.G. IMAGES)
    # Browser cache - "no-cache" headers from Next.js as no build id in url
    # Server cache - refresh regularly in case of changes
    location /static {
        #proxy_cache STATIC;
        proxy_ignore_headers Cache-Control;
        proxy_cache_valid 60m;
        proxy_pass http://nextjs:3000;
    }

    # DYNAMIC ASSETS - NO CACHE
    location / {
        proxy_pass http://nextjs:3000;
    }
}
I'm very new to Docker and Nginx and this might be a stupid question but how can I point my nginx box to look at the files in Rails public? Basically, I have an nginx box, and an application box. I would like to know where I can put those files so that the nginx box can read them.
version: "3"
services:
api:
build: "./api"
env_file:
- .env-dev
ports:
- "3000:3000"
depends_on:
- db
volumes:
- .:/app/api
command: rails server -b "0.0.0.0"
nginx:
build: ./nginx
env_file: .env-dev
volumes:
- .:/app/nginx
depends_on:
- api
links:
- api
ports:
- "80:80"
...
Api dockerfile:
FROM ruby:2.4.1-slim
RUN apt-get update && apt-get install -qq -y \
build-essential \
libmysqlclient-dev \
nodejs \
--fix-missing \
--no-install-recommends
ENV INSTALL_PATH /api
RUN mkdir -p $INSTALL_PATH
WORKDIR $INSTALL_PATH
COPY Gemfile $INSTALL_PATH
RUN bundle install
COPY . .
EXPOSE 3000
Nginx Dockerfile:
FROM nginx
ENV INSTALL_PATH /nginx
RUN mkdir -p $INSTALL_PATH
COPY nginx.conf /etc/nginx/nginx.conf
# COPY ?
EXPOSE 80
nginx config (this is correctly being copied over)
daemon off;

# (fixed: nginx directives take no colon — "worker_processes: 1;" and
#  "worker_connections: 1024;" are syntax errors)
worker_processes 1;

events { worker_connections 1024; }

http {
    sendfile on;

    gzip on;
    gzip_http_version 1.0;
    gzip_proxied any;
    gzip_min_length 500;
    gzip_disable "MSIE [1-6]\.";
    gzip_types text/plain text/xml text/css
               text/comma-separated-values
               text/javascript
               application/x-javascript
               application/atom+xml;

    # Rails Api
    # (fixed: an upstream "server" entry is host[:port] — it may not
    #  contain a scheme or path. "api" is the compose service name and
    #  rails listens on 3000 per the compose file.)
    upstream api {
        server api:3000;
    }

    # Configuration for the server
    server {
        # Running port
        listen 80;

        # Proxying the connections
        location /api {
            proxy_pass http://api;
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }

        # (fixed: added the missing semicolons on the error_page and
        #  root directives)
        error_page 500 502 503 504 /public/50x.html;
        error_page 404 /public/404.html;

        location = /50x.html {
            root /api/public;
        }

        location = /404.html {
            root /api/public;
        }
    }
}
Now, when I go to localhost:80 it show the generic nginx folder. However, I'm unsure how to link the public dir of rails api/public/ to the nginx container.
Can I just COPY path/to/rails/public path/nginx. Where is nginx expecting to find those files?
Edit
I believe I should be putting them in /var/www/app_name, correct?
I think what you want to achieve should be done by mounting a volume of container 'api' to container 'nginx', something like this:
version: "3"
services:
api:
image: apiimg
volumes:
- apivol:/path/to/statics
nginx:
image: nginximg
volumes:
- apivol:/var/www/statics
volumes:
apivol: {}
So there's a shared volume declared for all containers, apivol, which is mapped to /path/to/statics on your Rails container, and to /var/www/statics in your nginx container. This way you don't need to copy anything manually into the nginx container.
The default location for static content on nginx is /etc/nginx/html, but you could put it in var/www/app_name as long as you remember to add
root /var/www/app_name
in the corresponding location block for your static content.