I'm having a problem using jwilder/nginx-proxy with Cloudflare SSL (origin certificate, Full SSL mode).
Everything works fine over plain HTTP until I activate Cloudflare's DNS proxy, at which point the server returns 521 (Web Server Down).
Here's my docker-compose.yaml
version: "2"
services:
nginx-proxy:
image: jwilder/nginx-proxy:alpine
ports:
- 80:80
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
- ./certs:/etc/nginx/certs
network_mode: bridge
saraswati-global:
image: asia.gcr.io/ordent-production/ordent/saraswati-global
ports:
- 3000:3000
environment:
- VIRTUAL_HOST=beta.saraswati.global
- VIRTUAL_PORT=3000
- VIRTUAL_PROTO=https
network_mode: bridge
api-healed-id:
image: asia.gcr.io/ordent-production/ordent/api.healed.id
ports:
- 4001:4001
environment:
- VIRTUAL_HOST=dev.healed.id
- VIRTUAL_PORT=4001
- VIRTUAL_PROTO=https
network_mode: bridge
Maybe you could help me with the configuration.
Here's the nginx configuration generated from the above compose file:
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
  default $http_x_forwarded_proto;
  '' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
  default $http_x_forwarded_port;
  '' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
  default upgrade;
  '' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
  default off;
  https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
                 '"$request" $status $body_bytes_sent '
                 '"$http_referer" "$http_user_agent"';
access_log off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
resolver 172.26.0.2;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
  server_name _; # This is just an invalid value which will never trigger on a real hostname.
  listen 80;
  access_log /var/log/nginx/access.log vhost;
  return 503;
}
# beta.saraswati.global
upstream beta.saraswati.global {
  ## Can be connected with "bridge" network
  # ordent-production-host_saraswati-global_1
  server 172.17.0.3:3000;
}
server {
  server_name beta.saraswati.global;
  listen 80;
  access_log /var/log/nginx/access.log vhost;
  location / {
    proxy_pass https://beta.saraswati.global;
  }
}
# dev.healed.id
upstream dev.healed.id {
  ## Can be connected with "bridge" network
  # ordent-production-host_api-healed-id_1
  server 172.17.0.4:4001;
}
server {
  server_name dev.healed.id;
  listen 80;
  access_log /var/log/nginx/access.log vhost;
  location / {
    proxy_pass https://dev.healed.id;
  }
}
The issue is caused by this part of your nginx-proxy service definition:
ports:
  - 80:80
As you enabled SSL on Cloudflare, traffic will now arrive on port 443, not 80, so nginx-proxy needs to listen on port 443 as well. The correct mapping is:
ports:
  - 443:443
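To make that concrete, here is a sketch of the nginx-proxy service with both ports published. It assumes the Cloudflare origin certificate and key sit in ./certs and are named after each VIRTUAL_HOST (e.g. beta.saraswati.global.crt / beta.saraswati.global.key), which is the naming convention nginx-proxy uses to enable HTTPS for a host:
  nginx-proxy:
    image: jwilder/nginx-proxy:alpine
    ports:
      - 80:80
      - 443:443   # Cloudflare "Full" SSL connects to the origin over HTTPS
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      # expects beta.saraswati.global.crt/.key, dev.healed.id.crt/.key, etc.
      - ./certs:/etc/nginx/certs:ro
    network_mode: bridge
Also note that VIRTUAL_PROTO=https tells the proxy to speak HTTPS to the backend container itself, so it only belongs there if the app terminates TLS on its own port.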
Related
I am attempting to forward requests this way:
https://xxx.domain1.com -> http://localhost:3000
https://yyy.domain2.com -> http://localhost:3001
To make it easier to get nginx up and running, I'm using Docker. Here is my docker-compose.yml:
version: '3.7'
services:
  proxy:
    image: nginx:alpine
    container_name: proxy
    ports:
      - '443:443'
      - '80:80'
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./.cert/cert.pem:/etc/nginx/.cert/cert.pem
      - ./.cert/key.pem:/etc/nginx/.cert/key.pem
    restart: 'unless-stopped'
    networks:
      - backend
networks:
  backend:
    driver: bridge
And here is my nginx.conf:
http {
  include /etc/nginx/mime.types;
  default_type application/octet-stream;
  server {
    listen 80;
    server_name yyy.domain2.com;
    chunked_transfer_encoding on;
    location / {
      proxy_pass http://localhost:3001/;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
    }
  }
  server {
    listen 80;
    server_name xxx.domain1.com;
    chunked_transfer_encoding on;
    location / {
      proxy_pass http://localhost:3000/;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto $scheme;
    }
  }
}
stream {
  map $ssl_preread_server_name $name {
    xxx.domain1.com backend;
    yyy.domain2.com frontend;
  }
  upstream backend {
    server localhost:3000;
  }
  upstream frontend {
    server localhost:3001;
  }
  server {
    listen 443;
    listen [::]:443;
    proxy_pass $name;
    ssl_preread on;
    ssl_certificate ./.cert/cert.pem;
    ssl_certificate_key ./.cert/key.pem;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS";
  }
}
I can access my services locally if I just open http://localhost:3000/test and http://localhost:3001/test, no problem.
But if I attempt to access with https://xxx.domain1.com/test, it spins for a while and then fails with ERR_CONNECTION_TIMED_OUT.
What am I missing?
UPDATE: I tried setting up the nginx service with a host network, but got the same result so far. I tried:
services:
  proxy:
    image: nginx:alpine
    # ports:
    #   - '443:443'
    #   - '80:80'
    ...
    extra_hosts:
      - "host.docker.internal:host-gateway"
and
services:
  proxy:
    image: nginx:alpine
    ports:
      - '443:443'
      - '80:80'
    ...
    network_mode: "host"
But no luck...
I think I'm missing the part about how to tell nginx to forward the request to the host, instead of to localhost inside its own container.
But how to fix that?
Thanks,
Eduardo
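One way this is commonly solved, sketched here under the assumption that the backend services really do listen on the Docker host: keep the extra_hosts entry with host-gateway (Docker 20.10+) and replace localhost in nginx.conf with host.docker.internal, because localhost inside the proxy container is the container itself, where nothing listens on ports 3000/3001.
# hypothetical fragment of nginx.conf; host.docker.internal resolves to
# the Docker host thanks to the host-gateway entry in extra_hosts
location / {
    proxy_pass http://host.docker.internal:3001/;
}
# and likewise in the stream upstreams:
upstream backend {
    server host.docker.internal:3000;
}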
I have 2 domains pointing to one virtual private server (Ubuntu 21). The first domain (running on port 3000) works as expected; the second domain (running on port 4000 in its container and 5000 on the host) does not, and returns nginx 502 Bad Gateway. I have added a mapping of host port 4000 to port 80 on the nginx container.
I have configured it as below:
docker-compose.yml:
version: '3'
services:
  nginx:
    image: nginx:stable-alpine
    ports:
      - "3000:80" # nginx listens on 80
      - "4000:80"
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf:ro
  pwm-node:
    build: .
    image: my_acc/pwm-node
    environment:
      - PORT=3000
    depends_on:
      - mongo
  mongo:
    image: mongo
    volumes:
      - mongo-db:/data/db
  redis:
    image: redis
volumes:
  mongo-db:
nginx conf:
server {
  listen 80;
  server_name first_domain.com www.first_domain.com;
  # Redirect http to https
  location / {
    return 301 https://first_domain.com$request_uri;
  }
}
server {
  listen 80;
  server_name second_domain.com www.second_domain.com;
  location / {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_pass http://localhost:4000;
    proxy_redirect off;
  }
}
server {
  listen 443 ssl http2;
  server_name first_domain.com www.first_domain.com;
  ssl on;
  server_tokens off;
  ssl_certificate /etc/nginx/ssl/live/first_domain.com/fullchain.pem;
  ssl_certificate_key /etc/nginx/ssl/live/first_domain.com/privkey.pem;
  ssl_dhparam /etc/nginx/dhparam/dhparam-2048.pem;
  ssl_buffer_size 8k;
  ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
  ssl_prefer_server_ciphers on;
  ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5;
  location / {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_pass http://pwm-node:3000;
    proxy_redirect off;
  }
}
It looks like nginx does not accept http://localhost:4000;. I may have to add the second app to docker-compose.yml as a service (say node-app-4000) and replace localhost with that service name.
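That is the right idea: inside the nginx container, localhost is the nginx container itself, while compose services are reachable by service name. A minimal sketch, using node-app-4000 as a hypothetical service name and build context:
# docker-compose.yml (fragment; names are placeholders)
  node-app-4000:
    build: ./second-app
    environment:
      - PORT=4000
# nginx default.conf (fragment)
    proxy_pass http://node-app-4000:4000;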
I have a NAS behind a router. On this NAS I want to run Nextcloud and Seafile together for testing. Everything should be set up with Docker. The jwilder/nginx-proxy container does not work as expected, and I cannot find helpful information. I feel I am missing something very basic.
What is working:
I have a noip.com DynDNS that points to my router's IP: blabla.ddns.net
The router forwards ports 22, 80 and 443 to my NAS at 192.168.1.11
A plain nginx server running on the NAS can be accessed via blabla.ddns.net; its docker-compose.yml is this:
version: '2'
services:
  nginxnextcloud:
    container_name: nginxnextcloud
    image: nginx
    restart: always
    ports:
      - "80:80"
    networks:
      - web
networks:
  web:
    external: true
What is not working:
The same nginx server as above, but behind the nginx-proxy. I cannot access this server. Calling blabla.ddns.net gives a 503 error; calling nextcloud.blabla.ddns.net gives "page not found". Viewing the logs of the nginx-proxy via docker logs -f nginxproxy shows every test with blabla.ddns.net and its 503 answer, but when I try to access nextcloud.blabla.ddns.net, not even a log entry occurs.
This is the docker-compose.yml for one nginx behind a nginx-proxy:
version: '2'
services:
  nginxnextcloud:
    container_name: nginxnextcloud
    image: nginx
    restart: always
    expose:
      - 80
    networks:
      - web
    environment:
      - VIRTUAL_HOST=nextcloud.blabla.ddns.net
  nginx-proxy:
    image: jwilder/nginx-proxy
    container_name: nginxproxy
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock
    networks:
      - web
networks:
  web:
    external: true
The generated nginx-proxy configuration file, /etc/nginx/conf.d/default.conf, contains entries for my test server:
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
  default $http_x_forwarded_proto;
  '' $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
  default $http_x_forwarded_port;
  '' $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
  default upgrade;
  '' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
  default off;
  https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
                 '"$request" $status $body_bytes_sent '
                 '"$http_referer" "$http_user_agent"';
access_log off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
resolver 127.0.0.11;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
  server_name _; # This is just an invalid value which will never trigger on a real hostname.
  listen 80;
  access_log /var/log/nginx/access.log vhost;
  return 503;
}
# nextcloud.blabla.ddns.net
upstream nextcloud.blabla.ddns.net {
  ## Can be connected with "web" network
  # nginxnextcloud
  server 172.22.0.2:80;
}
server {
  server_name nextcloud.blabla.ddns.net;
  listen 80;
  access_log /var/log/nginx/access.log vhost;
  location / {
    proxy_pass http://nextcloud.blabla.ddns.net;
  }
}
Why is this minimal example not working?
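One thing worth checking: when nextcloud.blabla.ddns.net is requested, nginx-proxy logs nothing at all, which suggests the request never reaches it. That points at name resolution rather than the proxy configuration, since many DynDNS providers do not resolve arbitrary subdomains of a host like blabla.ddns.net. A quick check from outside the LAN (hostnames taken from the question):
nslookup blabla.ddns.net
nslookup nextcloud.blabla.ddns.net
If the second lookup fails or returns a different address, the proxy setup never gets a chance to run, which would match the missing log entries.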
I have an environment where two Tomcat containers are exposed, say dev and test, on ports 8080 and 8081 respectively. I am able to access the Tomcat instances with the host and port combinations below.
http://<ip>:8080
http://<ip>:8081
Now I am trying to set up an nginx container as a proxy that sends all /dev requests to the dev (8080) container and all /test requests to the test (8081) container.
Below is my docker-compose.yml
version: "3.5"
services:
web1:
image: "tomcat:latest"
container_name: "web1"
ports:
- "8080:8080"
web2:
image: "tomcat:latest"
container_name: "web2"
ports:
- "8081:8080"
nginx:
image: "nginx:latest"
container_name: "nginx"
ports:
- "8000:80"
volumes:
- "./nginx.conf:/etc/nginx/nginx.conf"
#- "./default.conf:/etc/nginx/conf.d/default.conf"
Below is my nginx.conf file
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
  worker_connections 1024;
}
http {
  include /etc/nginx/mime.types;
  default_type application/octet-stream;
  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';
  access_log /var/log/nginx/access.log main;
  sendfile on;
  #tcp_nopush on;
  keepalive_timeout 65;
  #gzip on;
  include /etc/nginx/conf.d/*.conf;
  server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    proxy_redirect off;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Scheme $scheme;
    client_max_body_size 0;
    location / {
    }
    location /dev {
      proxy_pass http://35.239.73.252:8080/;
    }
    location /test {
      proxy_pass http://35.239.73.252:8081/;
    }
  }
}
The problem: when I load the Tomcat containers directly, they work fine, but when they are accessed through nginx at the URI paths /dev and /test, the pages are broken and images and CSS are not loaded.
What could the issue be, and how do I fix it?
I believe you need a trailing "/" after both your location path and your target. Here's a working example from a project of mine:
location ^~ /ll/ {
  proxy_pass http://werther:8080/;
}
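For context on why the trailing slashes matter: with a location of /dev/ and a proxy_pass URI of /, nginx strips the /dev/ prefix before forwarding, so /dev/app/index.jsp reaches Tomcat as /app/index.jsp. A sketch for this setup, using the compose service name web1 instead of the hardcoded IP (assuming nginx and the Tomcats share a compose network):
location /dev/ {
    proxy_pass http://web1:8080/;
}
Note this alone may not fix the broken images and CSS: if the pages reference assets by absolute paths such as /css/app.css, the browser requests them outside /dev/, so the apps may also need to be deployed under a matching context path.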
I have an environment built upon Docker containers (in boot2docker). I have the following docker-compose.yml file to quickly set up nginx and Nexus servers:
version: '3.2'
services:
  nexus:
    image: stefanprodan/nexus
    container_name: nexus
    ports:
      - 8081:8081
      - 5000:5000
  nginx:
    image: nginx:latest
    container_name: nginx
    ports:
      - 5043:443
    volumes:
      - /opt/dm/nginx2/nginx.conf:/etc/nginx/nginx.conf:ro
Nginx has the following configuration (nginx.conf):
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
  worker_connections 1024;
}
http {
  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';
  access_log /var/log/nginx/access.log main;
  proxy_send_timeout 120;
  proxy_read_timeout 300;
  proxy_buffering off;
  keepalive_timeout 5 5;
  tcp_nodelay on;
  server {
    listen 80;
    server_name demo.com;
    return 301 https://$server_name$request_uri;
  }
  server {
    listen 443 ssl;
    server_name demo.com;
    # allow large uploads of files - refer to nginx documentation
    client_max_body_size 1024m;
    # optimize downloading files larger than 1G - refer to nginx doc before adjusting
    #proxy_max_temp_file_size 2048m
    #ssl on;
    #ssl_certificate /etc/nginx/ssl.crt;
    #ssl_certificate_key /etc/nginx/ssl.key;
    location / {
      proxy_pass http://nexus:8081/;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto "https";
    }
  }
}
Nexus seems to work very well. I can successfully call curl http://localhost:8081 on the Docker host machine; this returns the HTML of the Nexus login page. Now I want to try the nginx server. It is configured to listen on port 443, but SSL is disabled right now (I wanted to test it before diving into SSL configuration). As you can see, my nginx container maps container port 443 to host port 5043. Thus, I try the following curl command: curl -v http://localhost:5043/. Now I expect my HTTP request to be sent to nginx and proxied to Nexus via proxy_pass http://nexus:8081;. The nexus hostname is visible within the Docker container network and is accessible from the nginx container. Unfortunately, in response I receive:
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 5043 (#0)
> GET / HTTP/1.1
> Host: localhost:5043
> User-Agent: curl/7.49.1
> Accept: */*
>
* Empty reply from server
* Connection #0 to host localhost left intact
curl: (52) Empty reply from server
I checked the nginx logs (error and access), but they are empty. Can somebody help me solve this problem? It should be just a simple example of proxying requests, but maybe I've misunderstood some concept.
Do you have an upstream directive in your nginx conf (placed within the http directive)?
upstream nexus {
  server <Nexus_IP>:<Nexus_Port>;
}
Only then can nginx resolve it correctly. The docker-compose service name nexus is not injected into the nginx container at runtime.
You can try links in docker-compose:
https://docs.docker.com/compose/compose-file/#links
This gives you an alias for the linked container in your /etc/hosts. But you still need an upstream directive. Update: if the name is resolvable, you can also use it directly in nginx directives such as location/proxy_pass.
https://serverfault.com/questions/577370/how-can-i-use-environment-variables-in-nginx-conf
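A related sketch, assuming both containers share a compose network: instead of an upstream block, you can point nginx at Docker's embedded DNS server (127.0.0.11) and use a variable in proxy_pass, which makes nginx resolve the service name at request time rather than at startup:
resolver 127.0.0.11 valid=30s;
location / {
    set $nexus http://nexus:8081;
    proxy_pass $nexus;
}
Either way, the name nexus must be resolvable from inside the nginx container, e.g. because both services are on the same network.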
As #arnold's answer says, you are missing the upstream configuration in your nginx config. I see you are using the stefanprodan Nexus image; see his blog for the full configuration. Below you can find mine (remember to expose ports 8081 and 5000 of Nexus even though the entry point is 443). You also need to include the certificate, because the Docker client requires working SSL:
worker_processes 2;
events {
  worker_connections 1024;
}
http {
  error_log /var/log/nginx/error.log warn;
  access_log /dev/null;
  proxy_intercept_errors off;
  proxy_send_timeout 120;
  proxy_read_timeout 300;
  upstream nexus {
    server nexus:8081;
  }
  upstream registry {
    server nexus:5000;
  }
  server {
    listen 80;
    listen 443 ssl default_server;
    server_name <yourdomain>;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    ssl_certificate /etc/letsencrypt/live/<yourdomain>/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/<yourdomain>/privkey.pem;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 5m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_dhparam /etc/ssl/certs/dhparam.pem;
    ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:ECDHE-RSA-AES128-GCM-SHA256:AES256+EECDH:DHE-RSA-AES128-GCM-SHA256:AES256+EDH:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
    keepalive_timeout 5 5;
    proxy_buffering off;
    # allow large uploads
    client_max_body_size 1G;
    location / {
      # redirect to docker registry
      if ($http_user_agent ~ docker ) {
        proxy_pass http://registry;
      }
      proxy_pass http://nexus;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto "https";
    }
  }
}
The certificates are generated using Let's Encrypt (certbot). The rest of the configuration is there to get an A+ in the SSL Labs analysis, as explained here.
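With that configuration in place, the Docker client goes through nginx on port 443, so (assuming <yourdomain> resolves to this server) usage looks like:
docker login <yourdomain>
docker push <yourdomain>/my-image:tag
The user-agent check in the location block is what routes these Docker client requests to the registry connector on port 5000 while browsers still reach the Nexus UI.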
The 5000 port in your docker-compose is a dynamic port (because it has never been exposed by the image), so you cannot connect to port 5000; the mapping
ports:
  - 8081:8081
  - 5000:5000
is not effective on its own.
You can do it like this:
Build a new Dockerfile that exposes port 5000 (my resulting image is tagged feibor/nexus:3.16.2-1, as used below):
FROM sonatype/nexus3:3.16.2
EXPOSE 5000
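Then build and tag it (tag assumed to match the compose file below):
docker build -t feibor/nexus:3.16.2-1 .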
Use the new image to start the container and publish the ports:
version: "3.7"
services:
nexus:
image: 'feibor/nexus:3.16.2-1'
deploy:
placement:
constraints:
- node.hostname == node1
restart_policy:
condition: on-failure
ports:
- 8081:8081/tcp
- 5000:5000/tcp
volumes:
- /mnt/home/opt/nexus/nexus-data:/nexus-data:z
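After deploying, a quick sanity check that the registry connector is reachable on the published port (the /v2/ path is the standard Docker registry API root; host and port assumed from the compose file above):
curl -I http://localhost:5000/v2/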