Docker Compose Host Network Access and Reverse Proxy

I have created a microservice that can change the network settings of the Cat5 Ethernet ports on the running server. To do this it needs to run with network_mode: host. The microservice exposes an HTTP REST API that I would like to put behind my nginx reverse proxy, but since nginx sits on a bridge network, I cannot seem to reach the network_utilities service (see the docker-compose file below). Any suggestions on how to make this work?
Here is my condensed docker-compose file:
version: '3.3'
services:
  nginx:
    image: nginx:stable
    container_name: production_nginx
    restart: unless-stopped
    ports:
      - 80:80
    depends_on:
      - smart-mobile-server
      - network_utilities
    volumes:
      - ./config/front-end-gui:/var/www/html
      - ./config/nginx:/etc/nginx
    networks:
      - smart-mobile-loopback
  smart-mobile-server:
    container_name: smart-mobile-rest-api
    image: smartmobile_server:latest
    build:
      context: .
      dockerfile: main.Dockerfile
    environment:
      NODE_ENV: production
    command: sh -c "pm2 start --env production && pm2 logs all"
    depends_on:
      - 'postgres'
    restart: unless-stopped
    networks:
      - smart-mobile-loopback
    volumes:
      - ~/server:/usr/app/dist/express-server/uploads
      - ~/server/logs:/usr/app/logs
  network_utilities:
    image: smartgear/network-utilities-service:latest
    network_mode: host
    environment:
      NODE_ENV: production
      REST_API_PORT: '64000'
    privileged: true
networks:
  smart-mobile-loopback:
    driver: bridge
nginx.conf
worker_processes 2;

events {
    # Connections per worker process
    worker_connections 1024;
    # Turning epolling on is a handy tuning mechanism to use more efficient connection handling models.
    use epoll;
    # We turn off accept_mutex for speed, because we don't mind the wasted resources at low connection request counts.
    accept_mutex off;
}

http {
    upstream main_server {
        # least_conn specifies that a group should use a load balancing method where a request is
        # passed to the server with the least number of active connections,
        # taking into account weights of servers. If there are several such
        # servers, they are tried in turn using a weighted round-robin balancing method.
        ip_hash;
        # These are references to our backend containers, facilitated by
        # Compose, as defined in docker-compose.yml
        server smart-mobile-server:10010;
    }

    upstream network_utilities {
        least_conn;
        server 127.0.0.1:64000;
    }

    server {
        # GZIP SETTINGS FOR LARGE FILES
        gzip on;
        gzip_http_version 1.0;
        gzip_comp_level 6;
        gzip_min_length 0;
        gzip_buffers 16 8k;
        gzip_proxied any;
        gzip_types text/plain text/css text/xml text/javascript application/xml application/xml+rss application/javascript application/json;
        gzip_disable "MSIE [1-6]\.";
        gzip_vary on;

        include /etc/nginx/mime.types;

        ## SECURITY SETTINGS
        # don't send the nginx version number in error pages and Server header
        server_tokens off;
        # when serving user-supplied content, include a X-Content-Type-Options: nosniff header along with the Content-Type: header,
        # to disable content-type sniffing on some browsers.
        add_header X-Content-Type-Options nosniff;

        listen 80;

        location / {
            # This would be the directory where your React app's static files are stored at
            root /var/www/html/;
            index index.html;
            try_files $uri /index.html;
        }

        location /api/documentation/network-utilities {
            proxy_pass http://network_utilities/api/documentation/network-utilities;
            proxy_set_header Host $host;
        }

        location /api/v1/network-utilities/ {
            proxy_pass http://network_utilities/;
            proxy_set_header Host $host;
        }

        location /api/ {
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $host;
            proxy_pass http://main_server/api/;
        }
    }
}
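One approach that might work here (not from the original post; a sketch assuming Docker Engine 20.10 or newer, where the host-gateway alias is available): because network_utilities runs with network_mode: host, its REST API listens on the host itself on port 64000, so the 127.0.0.1:64000 upstream above points at the nginx container's own loopback rather than the host. Giving the nginx container a route to the host and updating the upstream could look roughly like this:

# docker-compose.yml -- nginx service only (illustrative)
  nginx:
    image: nginx:stable
    ports:
      - 80:80
    networks:
      - smart-mobile-loopback
    extra_hosts:
      # host-gateway resolves to the Docker host's IP (Docker 20.10+)
      - "host.docker.internal:host-gateway"

# nginx.conf -- point the upstream at the host instead of the container's loopback
upstream network_utilities {
    least_conn;
    server host.docker.internal:64000;
}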

Related

Add Additional Docker Containers Behind NGINX Reverse Proxy

I have a Docker compose file running an application that uses NGINX as a reverse proxy. The proxy is running on HTTPS for STIG Manager and Keycloak, but the additional container I wish to add runs on a different port and is non-HTTPS.
#1 I want to add additional docker containers behind the proxy.
#2 I want to call the app using a DNS name.
Environment: (The server hosting docker)
gsil-docker1.gsil.mil
Compose File:
version: '3.7'
services:
  nginx:
    # image: nginx:1.23.1
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/nginx/nginx:1.23.1
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./certs/localhost/localhost.crt:/etc/nginx/cert.pem
      - ./certs/localhost/localhost.key:/etc/nginx/privkey.pem
      - ./certs/dod/Certificates_PKCS7_v5.9_DoD.pem.pem:/etc/nginx/dod-certs.pem
      - ./nginx/index.html:/usr/share/nginx/html/index.html
    ports:
      - "443:443"
  keycloak:
    # image: quay.io/keycloak/keycloak:19.0.2
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/keycloak/keycloak:19.0.2
    environment:
      - KEYCLOAK_ADMIN=admin
      - KEYCLOAK_ADMIN_PASSWORD=Pa55w0rd
      - KC_PROXY=edge
      - KC_HOSTNAME_URL=https://localhost/kc/
      - KC_HOSTNAME_ADMIN_URL=https://localhost/kc/
      - KC_SPI_X509CERT_LOOKUP_PROVIDER=nginx
      - KC_SPI_X509CERT_LOOKUP_NGINX_SSL_CLIENT_CERT=SSL-CLIENT-CERT
      - KC_SPI_TRUSTSTORE_FILE_FILE=/tmp/truststore.p12
      - KC_SPI_TRUSTSTORE_FILE_PASSWORD=password
    command: start --import-realm
    volumes:
      - ./certs/dod/Certificates_PKCS7_v5.9_DoD.pem.p12:/tmp/truststore.p12
      - ./kc/stigman_realm.json:/opt/keycloak/data/import/stigman_realm.json
      - ./kc/create-x509-user.jar:/opt/keycloak/providers/create-x509-user.jar
      # uncomment below to persist Keycloak data
      # - ./kc/h2:/opt/keycloak/data/h2
  stigman:
    # image: nuwcdivnpt/stig-manager:1.2.20
    # alternative image based on Ironbank Node.js
    image: nuwcdivnpt/stig-manager:latest-ironbank
    environment:
      - STIGMAN_OIDC_PROVIDER=http://keycloak:8080/realms/stigman
      - STIGMAN_CLIENT_OIDC_PROVIDER=https://localhost/kc/realms/stigman
      - STIGMAN_CLASSIFICATION=U
      - STIGMAN_DB_HOST=mysql
      - STIGMAN_DB_USER=stigman
      - STIGMAN_DB_PASSWORD=stigmanpw
      # uncomment below to fetch current STIG library from DISA and import it
      # - STIGMAN_INIT_IMPORT_STIGS=true
    init: true
  mysql:
    # image: mysql:8.0.21
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/mysql/mysql8:8.0.31
    environment:
      - MYSQL_ROOT_PASSWORD=rootpw
      - MYSQL_USER=stigman
      - MYSQL_DATABASE=stigman
      - MYSQL_PASSWORD=stigmanpw
    # uncomment below to persist MySQL data
    volumes:
      - ./mysql-data:/var/lib/mysql
Nginx Config:
events {
    worker_connections 4096; ## Default: 1024
}

pid /var/cache/nginx/nginx.pid;

http {
    server {
        listen 443 ssl;
        server_name localhost;
        root /usr/share/nginx/html;
        client_max_body_size 100M;

        ssl_certificate /etc/nginx/cert.pem;
        ssl_certificate_key /etc/nginx/privkey.pem;
        ssl_prefer_server_ciphers on;
        ssl_client_certificate /etc/nginx/dod-certs.pem;
        ssl_verify_client optional;
        ssl_verify_depth 4;

        error_log /var/log/nginx/error.log debug;

        if ($return_unauthorized) { return 496; }

        location / {
            autoindex on;
            ssi on;
        }

        location /stigman/ {
            proxy_pass http://stigman:54000/;
        }

        location /kc/ {
            proxy_pass http://keycloak:8080/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Server $host;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header ssl-client-cert $ssl_client_escaped_cert;
            proxy_buffer_size 128k;
            proxy_buffers 4 256k;
            proxy_busy_buffers_size 256k;
        }
    }

    # define which endpoints require mTLS
    map_hash_bucket_size 128;
    map $uri $secured_url {
        default false;
        "/kc/realms/stigman/protocol/openid-connect/auth" true;
    }
    map "$secured_url:$ssl_client_verify" $return_unauthorized {
        default 0;
        "true:FAILED" 1;
        "true:NONE" 1;
        "true:" 1;
    }
}
I tried adding settings to my docker-compose and nginx files, but I was unable to make it work.
docker-compose addition:
networks:
  default:
    name: grafana_default
    external: true
nginx addition:
server {
    listen 80;
    server_name grafana.gsil.mil;
    location / {
        proxy_pass http://grafana.gsil.smil:3000/;
    }
}
Additionally, I have created a CNAME DNS entry for grafana.gsil.mil and pointed it to gsil-docker1.gsil.mil
The container apps are all running and I can reach each of them by going to:
gsil-docker1.gsil.mil/stigman
gsil-docker1.gsil.mil/kc
gsil-docker1.gsil.mil:3000
The docker-compose file for grafana:
version: '3.0'
volumes:
  grafana-data:
services:
  grafana:
    container_name: grafana
    image: registry1.dso.mil/ironbank/opensource/grafana/grafana:9.3.2
    environment:
      - grafana.config
    restart: always
    volumes:
      - grafana-data:/var/lib/grafana
    ports:
      - 3000:3000/tcp
I have done a lot of searching, but the examples I found tended to show HTTP on nginx with HTTP backend apps, and I struggled to find something that would pull this all together. Can you have an HTTPS proxy in front of an HTTP backend app, or do I need to create certs and make all my backend apps run HTTPS?
The issue was simple to fix. I needed to publish port 80 for the nginx service in my docker-compose file: NGINX cannot proxy HTTP traffic when it is only listening on HTTPS, so the HTTP port had to be added as well.
version: '3.7'
services:
  nginx:
    ports:
      - "443:443"
      - "80:80"
My presumptions about these specific items were all correct:
- making Docker aware of external networks (when the container you want to add/proxy is not part of the same network):
networks:
  default:
    name: grafana_default
    external: true
- adding DNS CNAME entries was correct; I created a CNAME DNS entry for grafana.gsil.mil and pointed it to gsil-docker1.gsil.mil.
- the appropriate lines had to be added to nginx.conf for each additional container you need to add:
server {
    listen 80;
    server_name grafana.gsil.mil;
    location / {
        proxy_pass http://grafana.gsil.smil:3000/;
    }
}

Issue with Docker-compose and nginx

Please assist.
I'm trying to run both React.js and Nest.js on http://localhost:3000 with docker-compose and nginx; however, my React.js app isn't binding correctly. When I visit the link, I only see the nginx welcome page.
This is my docker-compose.yaml
version: "3.6"
services:
database:
image: postgres:13.1-alpine
env_file:
- ./database/.env
volumes:
- "db-data:/var/lib/postgresql/data"
networks:
- challenge
ports:
- "5432:5432"
backend:
build:
context: $PWD/../../backend
dockerfile: $PWD/backend/Dockerfile
volumes:
- ./backend/.env:/app/.env
- ../../backend/src:/app/src
- storage:/app/storage
ports:
- 3000
networks:
- challenge
depends_on:
- database
env_file:
- ./backend/.env
environment:
- FORCE_COLOR=1
frontend:
build:
context: $PWD/../../web
dockerfile: $PWD/frontend/Dockerfile
ports:
- 3001
networks:
- challenge
depends_on:
- backend
env_file:
- ./frontend/.env
volumes:
- ../../web/src:/app/frontend/src
nginx:
image: nginx
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
ports:
- 3000:80
depends_on:
- backend
- frontend
networks:
- challenge
volumes:
db-data:
storage:
networks:
challenge:
And this is my Dockerfile for the React app:
FROM node:14.18.1-alpine3.14 as build
WORKDIR /app/frontend
COPY package.json /app/frontend/
COPY yarn.lock /app/frontend/
RUN yarn install --frozen-lockfile
COPY . /app/frontend
RUN yarn run build
FROM nginx:1.21.3-alpine
COPY --from=build /app/frontend/build /usr/share/nginx/html
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
Also, this is my nginx.conf
events {
    worker_connections 1024;
}

http {
    client_max_body_size 1000M;

    server {
        gzip on;
        gzip_disable "msie6";
        # gzip_vary on;
        # gzip_proxied any;
        # gzip_comp_level 6;
        # gzip_buffers 16 8k;
        # gzip_http_version 1.1;
        gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

        listen 80;
        server_name localhost;

        location /api/v1 {
            proxy_pass http://backend:3000;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_cache_bypass $http_upgrade;
        }

        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
Please assist; I've tried changing the ports and exposing different ports, but that doesn't seem to work.
You have two nginx servers: one inside the frontend service and another inside the nginx service. Is that intended?
I suppose you need to access the nginx server provided by frontend, so in that case you need to go to:
http://localhost:3001
and map 3001:80, because 80 is the port exposed by that image.
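For illustration only (this snippet is not from the answer), that mapping in the frontend service of the docker-compose.yaml above would look roughly like this:

  frontend:
    build:
      context: $PWD/../../web
      dockerfile: $PWD/frontend/Dockerfile
    ports:
      # host port 3001 -> container port 80, where the image's nginx listens
      - 3001:80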

Nginx Reverse Proxy on Docker - How to connect to other application?

I have Joplin running in a Docker container on my NAS using docker compose. Now I want to set up a reverse proxy in order to make it accessible via my personal domain.
The joplin/docker-compose.yml file looks as follows:
version: '3'
services:
  db:
    image: postgres:13.1
    volumes:
      - /local/joplin:/var/lib/postgresql/data
    restart: unless-stopped
    environment:
      - APP_PORT=22300
      - POSTGRES_PASSWORD=********
      - POSTGRES_USER=user
      - POSTGRES_DB=database
  app:
    image: joplin/server:2.2.10
    depends_on:
      - db
    ports:
      - "22300:22300"
    restart: unless-stopped
    environment:
      - APP_PORT=22300
      - APP_BASE_URL=http://192.168.1.2:22300/
      - DB_CLIENT=pg
      - POSTGRES_PASSWORD=********
      - POSTGRES_DATABASE=database
      - POSTGRES_USER=user
      - POSTGRES_PORT=5432
      - POSTGRES_HOST=db
The nginx/docker-compose.yml file looks like this:
version: '3'
services:
  nginx:
    image: nginx:latest
    ports:
      - 8080:80
    volumes:
      - /local/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /local/nginx/sites-enabled:/etc/nginx/sites-enabled
I used the default for my /local/nginx/nginx.conf. It is as follows:
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;

    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
Furthermore, inside the /local/nginx/sites-enabled/ folder I created the following files:
/local/nginx/sites-enabled/example.org,
/local/nginx/sites-enabled/my.example.org.
The content of /local/nginx/sites-enabled/example.org is:
##
# example.org -- Configuration
server {
    listen 80;
    listen [::]:80;
    root /var/www/html;
    # Add index.php to the list if you are using PHP
    index index.html index.htm index.nginx-debian.html;
    server_name example.org;
}
The content of /local/nginx/sites-enabled/my.example.org is:
##
# my.example.org -- Configuration
server {
    listen 80;
    listen [::]:80;
    server_name my.example.org;
    location / {
        proxy_pass http://192.168.1.2:22300/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
    }
}
I set up port forwarding on my router to the nginx container and it works (I see the nginx 404 page when I go to http://example.org). However, I am struggling to set up the reverse proxy for the Joplin container. When I try to access http://my.example.org, I get a 510 error message. What am I doing wrong?
The weird thing is, when I replace http://192.168.1.2:22300/ with the IP of my personal PC running a test webpage, I can access it via http://my.example.org. Even when I set up Joplin on my PC it works. Something seems to be wrong with either my nginx or Docker setup.
After lots of debugging and googling around I finally found the solution. What one has to do is the following:
Set up nginx with a network inside nginx/docker-compose.yml:
version: '3'
services:
  nginx:
    image: nginx:latest
    ports:
      - 8080:80
    volumes:
      - /local/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /local/nginx/sites-enabled:/etc/nginx/sites-enabled
    #----------------------------------------
    # Setup network my_net
    #----------------------------------------
    networks:
      - my_net
networks:
  my_net:
    driver: bridge
Make Joplin use the network defined for nginx, and add an extra one for communication with the database. (Plus, set up a name for the Joplin container.)
version: '3'
services:
  db:
    image: postgres:13.1
    #----------------------------------------
    # Setup communication to Joplin server
    #----------------------------------------
    container_name: database
    networks:
      - joplin_net
    #----------------------------------------
    volumes:
      - /local/joplin:/var/lib/postgresql/data
    restart: unless-stopped
    environment:
      - APP_PORT=22300
      - POSTGRES_PASSWORD=********
      - POSTGRES_USER=user
      - POSTGRES_DB=database
  app:
    image: joplin/server:2.2.10
    #----------------------------------------
    # Setup communication to Postgres server
    # and nginx
    #----------------------------------------
    container_name: joplin # This will be the name used by nginx.
    networks:
      - joplin_net
      - nginx_my_net
    #----------------------------------------
    depends_on:
      - db
    ports:
      - "22300:22300"
    restart: unless-stopped
    environment:
      - APP_PORT=22300
      - APP_BASE_URL=http://192.168.1.2:22300/
      - DB_CLIENT=pg
      - POSTGRES_PASSWORD=********
      - POSTGRES_DATABASE=database
      - POSTGRES_USER=user
      - POSTGRES_PORT=5432
      - POSTGRES_HOST=db
#----------------------------------------
# You can replace joplin_net with any
# name you like. However, the name for
# nginx_my_net has to be:
# app folder + '_' + network name
# The nginx application is in the nginx
# folder, therefore the prefix has to be
# 'nginx_'. The network name is 'my_net',
# so this has to be the suffix.
#----------------------------------------
networks:
  joplin_net:
    driver: bridge
  nginx_my_net:
    external: true
The /local/nginx/sites-enabled/my.example.org file has to be amended:
##
# my.example.org -- Configuration
server {
    listen 80;
    listen [::]:80;
    server_name my.example.org;
    # The next line makes nginx use the Docker DNS
    # to find the Joplin container by its name
    # ('joplin').
    resolver 127.0.0.11 valid=30;
    location / {
        # The server name used here has to be the
        # one defined using 'container_name' in the
        # docker-compose.yml for the application we
        # want to proxy to.
        proxy_pass http://joplin:22300/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Protocol $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
    }
}
Hope this saves someone a couple of days of head-scratching.
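One extra sanity check, not part of the original answer: before debugging the nginx side, you can confirm that the joplin container actually joined the shared network (named nginx_my_net per the folder-prefix convention explained in the comments above):

docker network inspect nginx_my_net
# the "Containers" section of the output should list both the nginx and joplin containers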

jwilder/nginx-proxy: no access to virtual host

I have a NAS behind a router. On this NAS I want to run Nextcloud and Seafile together for testing. Everything should be set up with Docker. The jwilder/nginx-proxy container does not work as expected and I cannot find helpful information. I feel I am missing something very basic.
What is working:
I have a noip.com DynDNS that points to my router's IP: blabla.ddns.net
The router forwards ports 22, 80 and 443 to my NAS at 192.168.1.11
A plain nginx server running on the NAS can be accessed via blabla.ddns.net; its docker-compose.yml is this:
version: '2'
services:
  nginxnextcloud:
    container_name: nginxnextcloud
    image: nginx
    restart: always
    ports:
      - "80:80"
    networks:
      - web
networks:
  web:
    external: true
What is not working:
The same nginx server as above, but behind the nginx-proxy. I cannot access this server. Calling blabla.ddns.net gives a 503 error, and calling nextcloud.blabla.ddns.net gives "page not found". Viewing the nginx-proxy logs via docker logs -f nginxproxy shows every test with blabla.ddns.net and its 503 answer, but when I try to access nextcloud.blabla.ddns.net, not even a log entry appears.
This is the docker-compose.yml for one nginx behind a nginx-proxy:
version: '2'
services:
  nginxnextcloud:
    container_name: nginxnextcloud
    image: nginx
    restart: always
    expose:
      - 80
    networks:
      - web
    environment:
      - VIRTUAL_HOST=nextcloud.blabla.ddns.net
  nginx-proxy:
    image: jwilder/nginx-proxy
    container_name: nginxproxy
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock
    networks:
      - web
networks:
  web:
    external: true
The generated configuration file for nginx-proxy /etc/nginx/conf.d/default.conf contains entries for my test server:
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
    default $http_x_forwarded_proto;
    '' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
    default $http_x_forwarded_port;
    '' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
    default upgrade;
    '' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
    default off;
    https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
    '"$request" $status $body_bytes_sent '
    '"$http_referer" "$http_user_agent"';
access_log off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
resolver 127.0.0.11;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
    server_name _; # This is just an invalid value which will never trigger on a real hostname.
    listen 80;
    access_log /var/log/nginx/access.log vhost;
    return 503;
}
# nextcloud.blabla.ddns.net
upstream nextcloud.blabla.ddns.net {
    ## Can be connected with "web" network
    # nginxnextcloud
    server 172.22.0.2:80;
}
server {
    server_name nextcloud.blabla.ddns.net;
    listen 80;
    access_log /var/log/nginx/access.log vhost;
    location / {
        proxy_pass http://nextcloud.blabla.ddns.net;
    }
}
Why is this minimal example not working?

Docker Nginx Reverse Proxy for Protection of Docker Container

I have two Docker services (an Angular web app and a Tomcat backend) which I want to protect with a third Docker service: an nginx configured as a reverse proxy. My proxy configuration is working, but I'm struggling with the basic authentication that the reverse proxy should also handle. When I protect my Angular frontend service with basic auth via the reverse-proxy config, everything works fine, but my backend is still exposed to everyone. When I also add basic auth to the backend service, I have the problem that the basic auth header from my frontend is not forwarded/added to the backend REST requests. Is it possible to configure the nginx reverse proxy to add the Authorization header to each request sent by the frontend? Or maybe I'm thinking about this wrong and there is a better solution?
Here is my docker and nginx configuration:
reverse-proxy config:
worker_processes 1;

events { worker_connections 1024; }

http {
    sendfile on;

    upstream docker-nginx {
        server frontend-nginx:80;
    }

    upstream docker-tomcat {
        server backend-tomcat:8080;
    }

    map $upstream_http_docker_distribution_api_version $docker_distribution_api_version {
        '' 'registry/2.0';
    }

    server {
        listen 80;

        location / {
            auth_basic "Protected area";
            auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
            add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;
            proxy_pass http://docker-nginx;
            proxy_redirect off;
        }
    }

    server {
        listen 8080;

        location / {
            auth_basic "Protected area";
            auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
            add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;
            proxy_pass http://docker-tomcat;
            proxy_redirect off;
        }
    }
}
docker-compose (setting up all containers):
version: '2.4'
services:
  reverse-proxy:
    container_name: reverse-proxy
    image: nginx:alpine
    volumes:
      - ./auth:/etc/nginx/conf.d
      - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "80:80"
      - "8080:8080"
    restart: always
    links:
      - registry:registry
  frontend-nginx:
    container_name: frontend
    build: './frontend'
    volumes:
      - /dockerdev/frontend/dist/:/usr/share/nginx/html
    depends_on:
      - reverse-proxy
      - bentley-tomcat
    restart: always
  backend-tomcat:
    container_name: backend
    build: './backend'
    volumes:
      - /data:/data
    depends_on:
      - reverse-proxy
    restart: always
  registry:
    image: registry:2
    ports:
      - 127.0.0.1:5000:5000
    volumes:
      - ./data:/var/lib/registry
frontend Dockerfile:
FROM nginx
COPY ./dist/ /usr/share/nginx/html
COPY ./fast-nginx-default.conf /etc/nginx/conf.d/default.conf
frontend config:
server {
    listen 80;

    sendfile on;
    default_type application/octet-stream;

    gzip on;
    gzip_http_version 1.1;
    gzip_disable "MSIE [1-6]\.";
    gzip_min_length 256;
    gzip_vary on;
    gzip_proxied expired no-cache no-store private auth;
    gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_comp_level 9;

    root /usr/share/nginx/html;

    location / {
        try_files $uri $uri/ /index.html =404;
    }
}
backend Dockerfile:
FROM openjdk:11
RUN mkdir -p /usr/local/bin/tomcat
COPY ./backend-0.0.1-SNAPSHOT.jar /usr/local/bin/tomcat/backend-0.0.1-SNAPSHOT.jar
WORKDIR /usr/local/bin/tomcat
CMD ["java", "-jar", "backend-0.0.1-SNAPSHOT.jar"]
Try adding these directives to your location block:
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
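For illustration (a sketch based on the reverse-proxy config above, not taken from the answer itself), the directives would sit inside the location block that proxies to the Tomcat backend:

server {
    listen 8080;
    location / {
        auth_basic "Protected area";
        auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
        # forward the client's Authorization header to the backend
        proxy_set_header Authorization $http_authorization;
        proxy_pass_header Authorization;
        proxy_pass http://docker-tomcat;
        proxy_redirect off;
    }
}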
I've solved my issue by listening on port 80 for requests starting with /api and redirecting them to Tomcat on port 8080. For that I also had to adjust my frontend and backend requests; now all my backend requests begin with /api. With this solution I'm able to implement basic auth on port 80, protecting both the frontend and the backend.
worker_processes 1;

events { worker_connections 1024; }

http {
    sendfile on;
    client_max_body_size 25M;

    upstream docker-nginx {
        server frontend-nginx:80;
    }

    upstream docker-tomcat {
        server backend-tomcat:8080;
    }

    server {
        listen 80;

        location /api {
            proxy_pass http://docker-tomcat;
        }

        location / {
            auth_basic "Protected area";
            auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;
            proxy_pass http://docker-nginx;
            proxy_redirect off;
        }
    }
}
