I've been trying for several days to run my frontend and backend application with Docker, but without success; I also need it to be served via nginx. Here's my current configuration. If anyone could tell me what's wrong with it, that would be great.
Project structure
/frontend
/backend
/nginx
docker-compose.yml
I can build and run the frontend on its own with cd frontend && docker build -t frontend . && docker run -p 2000:80 frontend, but when I run docker-compose up from the project root, frontend and backend exit with code 0 and nginx keeps crashing.
docker-compose.yml
version: '3.6'
services:
db:
container_name: postgres
image: postgres:11
volumes:
- ./tmp/db:/var/lib/postgresql/data
environment:
POSTGRES_HOST_AUTH_METHOD: trust
backend:
build: ./backend
volumes:
- './backend:/myproject/backend'
depends_on:
- db
ports:
- 3000:3000
environment:
- RAILS_ENV=production
- MASTER_KEY=XXXXXXXXXX
frontend:
build: ./frontend
environment:
- NODE_ENV=production
depends_on:
- backend
ports:
- 4000:80
nginx:
build: ./nginx
restart: always
ports:
- 80:80
depends_on:
- backend
- frontend
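One caveat in this file: depends_on only controls start order. It doesn't wait for backend or frontend to actually be ready, and nginx resolves its upstream hostnames once at startup, so if frontend is down nginx dies (see the output below). With docker compose v2 (the Compose Specification) you can gate startup on a healthcheck; a minimal sketch of the relevant parts, assuming the busybox wget that ships in alpine-based images:
services:
  frontend:
    healthcheck:
      # busybox wget is available in nginx:alpine
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:80/"]
      interval: 5s
      timeout: 3s
      retries: 5
  nginx:
    depends_on:
      frontend:
        condition: service_healthy  # wait until the healthcheck passes
      backend:
        condition: service_started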
backend/Dockerfile
FROM ruby:2.7.1
RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
RUN mkdir -p /myproject/backend
WORKDIR /myproject/backend
COPY Gemfile /myproject/backend/Gemfile
COPY Gemfile.lock /myproject/backend/Gemfile.lock
RUN bundle install
COPY . /myproject/backend
RUN bundle exec rails db:create
RUN bundle exec rails db:migrate
EXPOSE 3000
CMD ["bundle", "exec", "rails", "s", "-p", "3000", "-b", "'0.0.0.0'"]
frontend/Dockerfile
FROM node:12.18.3-alpine as builder
WORKDIR /myproject/frontend
COPY package.json /myproject/frontend/package.json
RUN yarn global add webpack
RUN yarn install
COPY . .
RUN yarn build
FROM nginx:1.15.2-alpine
COPY --from=builder /myproject/frontend/dist /var/www
COPY nginx.conf /etc/nginx/nginx.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
frontend/nginx.conf
# auto detects a good number of processes to run
worker_processes auto;
#Provides the configuration file context in which the directives that affect connection processing are specified.
events {
# Sets the maximum number of simultaneous connections that can be opened by a worker process.
worker_connections 8000;
# Tells the worker to accept multiple connections at a time
multi_accept on;
}
http {
# what types to include
include /etc/nginx/mime.types;
# what is the default one
default_type application/octet-stream;
# Sets the path, format, and configuration for a buffered log write
log_format compression '$remote_addr - $remote_user [$time_local] '
'"$request" $status $upstream_addr '
'"$http_referer" "$http_user_agent"';
server {
# listen on port 80
listen 80;
# save logs here
access_log /var/log/nginx/access.log compression;
# where the root is
root /var/www;
# what file to serve as index
index index.html index.htm;
location / {
# First attempt to serve request as file, then
# as directory, then fall back to redirecting to index.html
try_files $uri $uri/ /index.html;
}
# Media: images, icons, video, audio, HTC
location ~* \.(?:jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm|htc)$ {
expires 1M;
access_log off;
add_header Cache-Control "public";
}
# Javascript and CSS files
location ~* \.(?:css|js)$ {
try_files $uri =404;
expires 1y;
access_log off;
add_header Cache-Control "public";
}
# Any route containing a file extension (e.g. /devicesfile.js)
location ~ ^.+\..+$ {
try_files $uri =404;
}
}
}
nginx/Dockerfile
FROM nginx:alpine
RUN rm /etc/nginx/conf.d/default.conf
COPY /default.conf /etc/nginx/conf.d
nginx/default.conf
server {
listen 80;
location / {
proxy_pass http://frontend:80;
proxy_redirect default;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
location /api {
proxy_pass http://backend:3000;
proxy_redirect default;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
}
docker-compose up command output
docker-compose up
Creating network "myproject_default" with the default driver
WARNING: Found orphan containers (yaichi) for this project. If you removed or renamed this service in your compose file, you can run this command with the --remove-orphans flag to clean it up.
Creating postgres ... done
Creating myproject_backend_1 ... done
Creating myproject_frontend_1 ... done
Creating myproject_nginx_1 ... done
Attaching to postgres, myproject_backend_1, myproject_frontend_1, myproject_nginx_1
nginx_1 | /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
nginx_1 | /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
nginx_1 | /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
backend_1 | ruby 2.7.1p83 (2020-03-31 revision a0c7c23c9c) [x86_64-linux]
backend_1 | The dependency tzinfo-data (>= 0) will be unused by any of the platforms Bundler is installing for. Bundler is installing for ruby but the dependency is only for x86-mingw32, x86-mswin32, x64-mingw32, java. To add those platforms to the bundle, run `bundle lock --add-platform x86-mingw32 x86-mswin32 x64-mingw32 java`.
nginx_1 | 10-listen-on-ipv6-by-default.sh: error: /etc/nginx/conf.d/default.conf is not a file or does not exist
nginx_1 | /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
backend_1 | The Gemfile's dependencies are satisfied
nginx_1 | /docker-entrypoint.sh: Configuration complete; ready for start up
myproject_frontend_1 exited with code 0
myproject_backend_1 exited with code 0
nginx_1 | 2020/08/21 07:41:44 [emerg] 1#1: host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
nginx_1 | nginx: [emerg] host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
postgres |
postgres | PostgreSQL Database directory appears to contain a database; Skipping initialization
postgres |
postgres | 2020-08-21 07:41:44.480 UTC [1] LOG: listening on IPv4 address "0.0.0.0", port 5432
postgres | 2020-08-21 07:41:44.480 UTC [1] LOG: listening on IPv6 address "::", port 5432
postgres | 2020-08-21 07:41:44.504 UTC [1] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
postgres | 2020-08-21 07:41:44.765 UTC [28] LOG: database system was shut down at 2020-08-21 07:40:56 UTC
postgres | 2020-08-21 07:41:44.867 UTC [1] LOG: database system is ready to accept connections
myproject_nginx_1 exited with code 1
myproject_nginx_1 exited with code 1
nginx_1 | 2020/08/21 07:41:47 [emerg] 1#1: host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
nginx_1 | nginx: [emerg] host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
myproject_nginx_1 exited with code 1
myproject_nginx_1 exited with code 1
nginx_1 | 2020/08/21 07:41:51 [emerg] 1#1: host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
nginx_1 | nginx: [emerg] host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
myproject_nginx_1 exited with code 1
nginx_1 | 2020/08/21 07:41:55 [emerg] 1#1: host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
nginx_1 | nginx: [emerg] host not found in upstream "frontend" in /etc/nginx/conf.d/nginx.conf:6
myproject_nginx_1 exited with code 1
^CGracefully stopping... (press Ctrl+C again to force)
Stopping myproject_nginx_1 ... done
Stopping postgres ... done
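The loop above is restart: always doing its job: nginx aborts because it can't resolve frontend (which has already exited), and Docker keeps restarting it. To find out why frontend and backend exit with code 0 in the first place, inspecting them individually usually helps; a few standard commands:
docker-compose ps                               # exit codes per service
docker-compose logs frontend backend            # full logs of the exited services
docker-compose run --rm --no-deps frontend sh   # open a shell in the frontend image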
docker-compose up --build output
docker-compose up --build
WARNING: Found orphan containers (yaichi) for this project. If you removed or renamed this service in your compose file, you can run this command with the --remove-orphans flag to clean it up.
Building backend
Step 1/12 : FROM ruby:2.7.1
---> 958d3491c09a
Step 2/12 : RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
---> Using cache
---> b8ae75fa12b3
Step 3/12 : RUN mkdir -p /m3/backend
---> Using cache
---> cb3b556f8ce3
Step 4/12 : WORKDIR /m3/backend
---> Using cache
---> 764b5236b91f
Step 5/12 : COPY Gemfile /m3/backend/Gemfile
---> Using cache
---> d40100111945
Step 6/12 : COPY Gemfile.lock /m3/backend/Gemfile.lock
---> Using cache
---> 0682d5bf02f0
Step 7/12 : RUN bundle install
---> Using cache
---> f9615e684ebd
Step 8/12 : COPY . /m3/backend
---> 12ea56e4eeff
Step 9/12 : RUN bundle exec rails db:create
---> Running in 4e394b5ba38f
could not connect to server: No such file or directory
Is the server running locally and accepting
connections on Unix domain socket "/var/run/postgresql/.s.PGSQL.5432"?
Couldn't create 'm3_development' database. Please check your configuration.
rails aborted!
ActiveRecord::NoDatabaseError: could not connect to server: No such file or directory
Is the server running locally and accepting
connections on Unix domain socket "/var/run/postgresql/.s.PGSQL.5432"?
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/postgresql_adapter.rb:50:in `rescue in postgresql_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/postgresql_adapter.rb:33:in `postgresql_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:887:in `new_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:931:in `checkout_new_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:910:in `try_to_checkout_new_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:871:in `acquire_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:593:in `checkout'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:437:in `connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_adapters/abstract/connection_pool.rb:1119:in `retrieve_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_handling.rb:221:in `retrieve_connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/connection_handling.rb:189:in `connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/postgresql_database_tasks.rb:12:in `connection'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/postgresql_database_tasks.rb:21:in `create'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:126:in `create'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:185:in `block in create_current'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:479:in `block (2 levels) in each_current_configuration'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:476:in `each'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:476:in `block in each_current_configuration'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:475:in `each'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:475:in `each_current_configuration'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/tasks/database_tasks.rb:184:in `create_current'
/usr/local/bundle/gems/activerecord-6.0.3.1/lib/active_record/railties/databases.rake:39:in `block (2 levels) in <main>'
/usr/local/bundle/gems/railties-6.0.3.1/lib/rails/commands/rake/rake_command.rb:23:in `block in perform'
/usr/local/bundle/gems/railties-6.0.3.1/lib/rails/commands/rake/rake_command.rb:20:in `perform'
/usr/local/bundle/gems/railties-6.0.3.1/lib/rails/command.rb:48:in `invoke'
/usr/local/bundle/gems/railties-6.0.3.1/lib/rails/commands.rb:18:in `<main>'
/usr/local/bundle/gems/bootsnap-1.4.6/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb:23:in `require'
/usr/local/bundle/gems/bootsnap-1.4.6/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb:23:in `block in require_with_bootsnap_lfi'
/usr/local/bundle/gems/bootsnap-1.4.6/lib/bootsnap/load_path_cache/loaded_features_index.rb:92:in `register'
/usr/local/bundle/gems/bootsnap-1.4.6/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb:22:in `require_with_bootsnap_lfi'
/usr/local/bundle/gems/bootsnap-1.4.6/lib/bootsnap/load_path_cache/core_ext/kernel_require.rb:31:in `require'
/usr/local/bundle/gems/activesupport-6.0.3.1/lib/active_support/dependencies.rb:324:in `block in require'
/usr/local/bundle/gems/activesupport-6.0.3.1/lib/active_support/dependencies.rb:291:in `load_dependency'
/usr/local/bundle/gems/activesupport-6.0.3.1/lib/active_support/dependencies.rb:324:in `require'
bin/rails:4:in `<main>'
Related
I have a React app which I've deployed into a Docker container, from the image nginx:alpine.
When I access the URL, the browser says "Unable to connect", and docker logs -f nginx shows nothing for some reason (basically, I think nginx doesn't catch any requests).
This is my first deployment, so I might be doing something wrong :)
Here's the nginx entry from docker-compose
nginx:
container_name: best-nginx
build:
context: .
restart: always
image: nginx:alpine
volumes:
- ./nginx/default.conf:/etc/nginx/default.conf
- ./certs:/etc/nginx/certs
ports:
- "443:443"
default.conf
server {
root /usr/share/nginx/html;
index index.html index.htm index.nginx-debian.html;
server_name myservername.com;
location / {
try_files $uri $uri/ =404;
}
location /keycloak {
proxy_pass http://localhost:28080/;
}
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/nginx/certs/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/nginx/certs/privkey.pem; # managed by Certbot
}
Dockerfile
# develop stage
FROM node:18-alpine as develop-stage
WORKDIR /app
COPY package*.json ./
COPY tsconfig.json ./
RUN npm install
COPY ./public ./public
COPY ./src ./src
# build stage
FROM develop-stage as build-stage
RUN npm run build
# production stage
FROM nginx:1.23.1-alpine as production-stage
COPY --from=build-stage /app/build /usr/share/nginx/html
CMD ["nginx", "-g", "daemon off;"]
docker logs -f nginx
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
2022/10/07 09:56:33 [notice] 1#1: using the "epoll" event method
2022/10/07 09:56:33 [notice] 1#1: nginx/1.23.1
2022/10/07 09:56:33 [notice] 1#1: built by gcc 11.2.1 20220219 (Alpine 11.2.1_git20220219)
2022/10/07 09:56:33 [notice] 1#1: OS: Linux 5.15.0-48-generic
2022/10/07 09:56:33 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
2022/10/07 09:56:33 [notice] 1#1: start worker processes
2022/10/07 09:56:33 [notice] 1#1: start worker process 32
Why does my nginx not catch any requests, and how can I fix it?
Thanks.
The config is mounted to the wrong path; it should be under /etc/nginx/conf.d/:
- ./nginx/default.conf:/etc/nginx/conf.d/default.conf
I am trying to deploy an Angular application on Heroku with Nginx as a reverse proxy (two containers/dynos).
The web container (Nginx) crashes as it can’t find the worker container (Node/Express with SSR):
BTW, I'm also using Dockerfiles and GitLab CI/CD for the pipeline.
I'm getting the following error:
2021-12-17T18:23:30.617790+00:00 heroku[web.1]: Process exited with status 1
2021-12-17T18:23:30.696851+00:00 heroku[web.1]: State changed from starting to crashed
2021-12-17T18:23:30.704189+00:00 heroku[web.1]: State changed from crashed to starting
2021-12-17T18:23:30.486129+00:00 app[web.1]: 2021/12/17 18:23:30 [emerg] 3#3: host not found in upstream "worker" in /etc/nginx/nginx.conf:9
2021-12-17T18:23:30.486148+00:00 app[web.1]: nginx: [emerg] host not found in upstream "worker" in /etc/nginx/nginx.conf:9
My configuration is:
gitlab-ci.yml
build:
image: trion/ng-cli:11.2.12
stage: build
script:
- npm install
- npm audit fix
- npm run-script build:ssr
artifacts:
paths:
- dist
only:
refs:
- tags
- master
deploy:
image: docker:20.10.7
stage: deploy
services:
- docker:20.10.7-dind
before_script:
# Install Heroku CLI
- apk update
- apk add bash
- apk add curl
- apk add --update nodejs npm
- curl https://cli-assets.heroku.com/install.sh | sh
- export HEROKU_API_KEY=$HEROKU_API_KEY
script:
- heroku container:login
- heroku container:push --recursive --app=$HEROKU_APP_PRODUCTION
- heroku container:release web worker --app=$HEROKU_APP_PRODUCTION
dependencies:
- build
only:
refs:
- tags
- master
Dockerfile.worker
FROM node:14-alpine3.12
WORKDIR /usr/src/app/
RUN mkdir -p /usr/src/app/dist/usermetrics/browser
COPY dist/usermetrics/browser/index.html /usr/src/app/dist/usermetrics/browser/index.html
COPY dist/usermetrics/server /usr/src/app
EXPOSE $PORT
CMD node main.js
Dockerfile.web
FROM nginx:1.17.8-alpine
COPY nginx.conf /etc/nginx/nginx.conf
WORKDIR /usr/share/nginx/html/
COPY dist/usermetrics/browser .
nginx.conf
worker_processes auto;
events {
worker_connections 8192;
}
http {
upstream nodeserver_upstream {
server worker; <---- here!
}
server {
listen ${PORT};
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
# compression
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
# # angular index.html location
location / {
try_files $uri $uri/ /index.html;
}
# # potential reverse proxy for sending api calls
location / {
proxy_set_header Host $host;
proxy_set_header Content-Type application/json;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass http://nodeserver_upstream/;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
}
The pipeline finishes without any errors and I can see both dynos, web and worker, up, but with that error in the log.
Am I missing something? How do I properly set up nginx.conf to redirect the traffic to the Express server in the other dyno?
By default, dynos are isolated and cannot reach each other. Additionally, worker dynos are not intended to be reachable directly by other dynos. Only web dynos can receive traffic from routers.
To route between your dynos like this, you would need to use private spaces and separate apps.
See Networking documentation and Runtime documentation for more information on networking between your dynos.
If you want to support an interface which is similar to docker-compose or Docker Swarm, where each container is automatically networked together, see this answer which explains a workaround.
I am trying to get nginx to proxy a subdomain to another docker container or return an error if the container is not available.
My issue is that nginx seems to check if the server is available and refuses to start if it is not.
I tried using error_page as described here, but it didn't change anything about the issue.
The following configuration works if both containers are up and running:
server {
server_name localhost;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
return 501;
}
}
server {
server_name *.ref.localhost ref.localhost;
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_intercept_errors on;
proxy_pass http://reference:5000;
}
}
When I kill the reference container after nginx is running, I get a 502 error, but when I try to start the nginx container without the reference container running, nginx crashes with the following error message:
Use 'docker scan' to run Snyk tests against images to find vulnerabilities and learn how to fix them
Recreating nginx ... done
Attaching to nginx
nginx | /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
nginx | /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
nginx | /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
nginx | 10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
nginx | 10-listen-on-ipv6-by-default.sh: info: /etc/nginx/conf.d/default.conf differs from the packaged version
nginx | /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
nginx | /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
nginx | /docker-entrypoint.sh: Configuration complete; ready for start up
nginx | 2021/05/15 12:55:51 [emerg] 1#1: host not found in upstream "reference" in /etc/nginx/conf.d/default.conf:25
nginx | nginx: [emerg] host not found in upstream "reference" in /etc/nginx/conf.d/default.conf:25
nginx | 2021/05/15 12:55:53 [emerg] 1#1: host not found in upstream "reference" in /etc/nginx/conf.d/default.conf:25
nginx | nginx: [emerg] host not found in upstream "reference" in /etc/nginx/conf.d/default.conf:25
nginx exited with code 1
Both nginx and reference are running with docker-compose:
version: "3.8"
services:
nginx:
container_name: nginx
restart: always
build:
context: ./nginx
ports:
- "80:80"
reference:
container_name: reference
build:
context: ./app
nginx/Dockerfile
# pull the Node.js Docker image
FROM node:alpine
# create the directory inside the container
WORKDIR /usr/src/app
# copy the package.json files from local machine to the workdir in container
COPY package*.json ./
# run npm install in our local machine
RUN npm install
# copy the generated modules and all other files to the container
COPY . .
# our app is running on port 5000 within the container, so need to expose it
EXPOSE 5000
# the command that starts our app
CMD ["node", "index.js"]
reference/Dockerfile:
# pull the Node.js Docker image
FROM node:alpine
# create the directory inside the container
WORKDIR /usr/src/app
# copy the package.json files from local machine to the workdir in container
COPY package*.json ./
# run npm install in our local machine
RUN npm install
# copy the generated modules and all other files to the container
COPY . .
# our app is running on port 5000 within the container, so need to expose it
EXPOSE 5000
# the command that starts our app
CMD ["node", "index.js"]
I'm trying to run a platform built on Django, Docker, Nginx and Gunicorn from my Ubuntu server.
Before asking here, I read about my problem and did this in my nginx.conf:
location / {
proxy_read_timeout 300s;
proxy_connect_timeout 75s;
...
}
Then, in my Gunicorn settings:
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "-t 90", "config.wsgi:application"]
The problem persists and the server always returns 502 Bad Gateway. When I try to access
http://34.69.240.210:8000/admin/
from the browser, the server redirects to
http://34.69.240.210:8000/admin/login/?next=/admin/
but shows the error.
My Dockerfile:
FROM python:3.8
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
RUN mkdir /code
WORKDIR /code
COPY . /code/
RUN pip install -r requirements.txt
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "-t 90", "config.wsgi:application"]
My docker-compose.yml:
version: "3.8"
services:
django_app:
build: .
volumes:
- static:/code/static
- .:/code
nginx:
image: nginx:1.15
ports:
- 8000:8000
volumes:
- ./config/nginx/conf.d:/etc/nginx/conf.d
- static:/code/static
depends_on:
- django_app
volumes:
.:
static:
My Nginx file:
upstream django_server {
server django_app:8000 fail_timeout=0;
}
server {
listen 8000;
server_name 34.69.240.210;
keepalive_timeout 5;
client_max_body_size 4G;
location / {
proxy_read_timeout 300s;
proxy_connect_timeout 75s;
proxy_pass http://django_server;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
}
}
Any idea what I can do to fix it?
Thank you.
Well, for the record: my problem was the database connection. My Docker container couldn't connect to the local Postgres database.
So I added this line to postgresql.conf:
listen_addresses = '*'
Then I added this line to pg_hba.conf:
host all all 0.0.0.0/0 md5
Restart Postgres:
sudo service postgresql restart
My host's Postgres IP:
172.17.0.1
Test from inside Docker:
psql -U myuser -d databasename -h 172.17.0.1 -W
Done! :)
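As a side note, 172.17.0.1 is the gateway of Docker's default docker0 bridge, which is why it reaches the host. On Docker 20.10+ the hardcoded IP can be avoided with the host-gateway alias; a sketch for the compose file above:
services:
  django_app:
    extra_hosts:
      # resolves host.docker.internal to the host's gateway IP (Linux, Docker 20.10+)
      - "host.docker.internal:host-gateway"
The Django settings can then point at host.docker.internal instead of 172.17.0.1.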
I found it very strange: I had set up a Rails app, a Postgres db, and an nginx server for production only, but nginx is only able to start if I type
docker-compose -f docker-compose.yml -f production.yml up --build
but not if I pre-build with
docker-compose -f docker-compose.yml -f production.yml build
and then run
docker-compose up
The Rails app and db start just fine; it's just that nginx doesn't start and the port reverts back to 3000 instead of 80, which I find very strange. Aren't they doing the same thing?
nginx.conf
# This is a template. Referenced variables (e.g. $INSTALL_PATH) need
# to be rewritten with real values in order for this file to work.
upstream rails_app {
server unix:///webapp/tmp/sockets/puma.sock;
}
server {
listen 80;
# define your domain
server_name 127.0.0.1 localhost www.example.com;
# define the public application root
root /providre_api/public;
# define where Nginx should write its logs
access_log /providre_api/log/nginx.access.log;
error_log /providre_api/log/nginx.error.log;
# deny requests for files that should never be accessed
location ~ /\. {
deny all;
}
location ~* ^.+\.(rb|log)$ {
deny all;
}
# serve static (compiled) assets directly if they exist (for rails production)
location ~ ^/(assets|images|javascripts|stylesheets|swfs|system)/ {
try_files $uri #rails;
access_log off;
gzip_static on; # to serve pre-gzipped version
expires max;
add_header Cache-Control public;
# Some browsers still send conditional-GET requests if there's a
# Last-Modified header or an ETag header even if they haven't
# reached the expiry date sent in the Expires header.
add_header Last-Modified "";
add_header ETag "";
break;
}
# send non-static file requests to the app server
location / {
try_files $uri #rails;
}
location #rails {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://rails_app;
}
}
web.Dockerfile
# Base image:
FROM nginx
# Install dependencies
RUN apt-get update -qq && apt-get -y install apache2-utils
# establish where Nginx should look for files
ENV INSTALL_PATH /providre_api
# Set our working directory inside the image
WORKDIR $INSTALL_PATH
# create log directory
RUN mkdir log
# copy over static assets
COPY public public/
# Copy Nginx config template
COPY docker/web/nginx.conf /tmp/docker.nginx
# substitute variable references in the Nginx config template for real values from the environment
# put the final config in its place
RUN envsubst '$INSTALL_PATH' < /tmp/docker.nginx > /etc/nginx/conf.d/default.conf
EXPOSE 80
# Use the "exec" form of CMD so Nginx shuts down gracefully on SIGTERM (i.e. `docker stop`)
CMD [ "nginx", "-g", "daemon off;" ]
docker-compose.yml
version: '3'
services:
db:
image: postgres
volumes:
- ./tmp/db:/var/lib/postgresql/data
restart: always
ports:
- "5433:5432"
environment:
POSTGRES_USER: 'postgres'
POSTGRES_PASSWORD: ''
app:
command: bundle exec puma -C config/puma.rb
ports:
- "3000"
depends_on:
- db
docker-compose.override.yml
version: '3'
services:
app:
build:
context: .
dockerfile: ./docker/app/Dockerfile
volumes:
- .:/providre_api
ports:
- "3000:3000"
production.yml
version: '3'
services:
app:
build:
context: .
dockerfile: ./docker/app/prod.Dockerfile
volumes:
- .:/providre_api
ports:
- "3000"
nginx:
container_name: web
build:
context: .
dockerfile: ./docker/web/web.Dockerfile
depends_on:
- app
volumes:
- ./docker/web/nginx.conf:/etc/nginx/conf.d/default.conf
ports:
- 80:80
Sorry, my bad, I didn't test it fully. A plain docker-compose up only reads the default docker-compose.yml (plus docker-compose.override.yml), so I have to repeat the flags and use docker-compose -f docker-compose.yml -f production.yml up instead.
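If repeating the -f flags gets tedious, docker-compose also reads the file list from the COMPOSE_FILE environment variable (colon-separated on Linux/macOS), which can also live in a .env file next to the project:
# set once per shell (or put COMPOSE_FILE=... in .env);
# plain build/up then use both files
export COMPOSE_FILE=docker-compose.yml:production.yml
docker-compose build
docker-compose up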