I have a Docker stack running two containers: the first is Nginx, the second is my application.
The problem is that nginx shows a Bad Gateway error.
Here is the nginx config:
upstream example {
server mystack_app1;
# Also tried with just 'app1'
# server mystack_app2;
keepalive 32;
}
server {
listen 80;
server_name example;
location / {
proxy_pass http://example;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_connect_timeout 150;
proxy_send_timeout 100;
proxy_read_timeout 100;
proxy_buffers 4 32k;
client_max_body_size 8m;
client_body_buffer_size 128k;
}
}
Here is the docker-compose.yml:
version: "3"
services:
  app1:
    image: my-app:latest
    ports:
      - "9000:9000"
    networks:
      - webnet
  web:
    image: my-web:latest
    ports:
      - "81:80"
    networks:
      - webnet
    deploy:
      restart_policy:
        condition: on-failure
networks:
  webnet:
I use the following command to deploy the docker stack:
docker stack deploy -c docker-compose.yml mystack
I can access the application from the host's browser at localhost:9000 and it works fine.
Also, from the nginx container I can ping mystack_app1.
But when I access localhost:81, nginx shows 502 Bad Gateway.
Please help.
It looks like your upstream definition is not correct: with no port specified, nginx tries to connect to the default port 80 instead of port 9000.
Try
upstream example {
server mystack_app1:9000;
# Also tried with just 'app1'
# server mystack_app2;
keepalive 32;
}
By the way, I suggest you use container_name in your docker-compose file.
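For reference, a minimal sketch of that suggestion, using the service from your compose file (note that container_name only takes effect with plain docker-compose; docker stack deploy ignores it):
services:
  app1:
    image: my-app:latest
    container_name: app1  # fixed container name; honored by docker-compose, ignored in swarm mode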
I have a Docker Compose file running an application that uses NGINX as a reverse proxy. The proxy serves STIG Manager and Keycloak over HTTPS, but the additional container I want to add runs on a different port and is non-HTTPS.
#1 I want to add additional docker containers behind the proxy.
#2 I want to call the app using a DNS name.
Environment: (The server hosting docker)
gsil-docker1.gsil.mil
Compose File:
version: '3.7'
services:
  nginx:
    # image: nginx:1.23.1
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/nginx/nginx:1.23.1
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./certs/localhost/localhost.crt:/etc/nginx/cert.pem
      - ./certs/localhost/localhost.key:/etc/nginx/privkey.pem
      - ./certs/dod/Certificates_PKCS7_v5.9_DoD.pem.pem:/etc/nginx/dod-certs.pem
      - ./nginx/index.html:/usr/share/nginx/html/index.html
    ports:
      - "443:443"
  keycloak:
    # image: quay.io/keycloak/keycloak:19.0.2
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/keycloak/keycloak:19.0.2
    environment:
      - KEYCLOAK_ADMIN=admin
      - KEYCLOAK_ADMIN_PASSWORD=Pa55w0rd
      - KC_PROXY=edge
      - KC_HOSTNAME_URL=https://localhost/kc/
      - KC_HOSTNAME_ADMIN_URL=https://localhost/kc/
      - KC_SPI_X509CERT_LOOKUP_PROVIDER=nginx
      - KC_SPI_X509CERT_LOOKUP_NGINX_SSL_CLIENT_CERT=SSL-CLIENT-CERT
      - KC_SPI_TRUSTSTORE_FILE_FILE=/tmp/truststore.p12
      - KC_SPI_TRUSTSTORE_FILE_PASSWORD=password
    command: start --import-realm
    volumes:
      - ./certs/dod/Certificates_PKCS7_v5.9_DoD.pem.p12:/tmp/truststore.p12
      - ./kc/stigman_realm.json:/opt/keycloak/data/import/stigman_realm.json
      - ./kc/create-x509-user.jar:/opt/keycloak/providers/create-x509-user.jar
      # uncomment below to persist Keycloak data
      # - ./kc/h2:/opt/keycloak/data/h2
  stigman:
    # image: nuwcdivnpt/stig-manager:1.2.20
    # alternative image based on Ironbank Node.js
    image: nuwcdivnpt/stig-manager:latest-ironbank
    environment:
      - STIGMAN_OIDC_PROVIDER=http://keycloak:8080/realms/stigman
      - STIGMAN_CLIENT_OIDC_PROVIDER=https://localhost/kc/realms/stigman
      - STIGMAN_CLASSIFICATION=U
      - STIGMAN_DB_HOST=mysql
      - STIGMAN_DB_USER=stigman
      - STIGMAN_DB_PASSWORD=stigmanpw
      # uncomment below to fetch current STIG library from DISA and import it
      # - STIGMAN_INIT_IMPORT_STIGS=true
    init: true
  mysql:
    # image: mysql:8.0.21
    # alternative image from Ironbank
    image: registry1.dso.mil/ironbank/opensource/mysql/mysql8:8.0.31
    environment:
      - MYSQL_ROOT_PASSWORD=rootpw
      - MYSQL_USER=stigman
      - MYSQL_DATABASE=stigman
      - MYSQL_PASSWORD=stigmanpw
    # uncomment below to persist MySQL data
    volumes:
      - ./mysql-data:/var/lib/mysql
Nginx Config:
events {
worker_connections 4096; ## Default: 1024
}
pid /var/cache/nginx/nginx.pid;
http {
server {
listen 443 ssl;
server_name localhost;
root /usr/share/nginx/html;
client_max_body_size 100M;
ssl_certificate /etc/nginx/cert.pem;
ssl_certificate_key /etc/nginx/privkey.pem;
ssl_prefer_server_ciphers on;
ssl_client_certificate /etc/nginx/dod-certs.pem;
ssl_verify_client optional;
ssl_verify_depth 4;
error_log /var/log/nginx/error.log debug;
if ($return_unauthorized) { return 496; }
location / {
autoindex on;
ssi on;
}
location /stigman/ {
proxy_pass http://stigman:54000/;
}
location /kc/ {
proxy_pass http://keycloak:8080/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header ssl-client-cert $ssl_client_escaped_cert;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
}
}
# define which endpoints require mTLS
map_hash_bucket_size 128;
map $uri $secured_url {
default false;
"/kc/realms/stigman/protocol/openid-connect/auth" true;
}
map "$secured_url:$ssl_client_verify" $return_unauthorized {
default 0;
"true:FAILED" 1;
"true:NONE" 1;
"true:" 1;
}
}
I have tried adding settings to my docker-compose and nginx configs, but I was unable to make it work.
docker-compose addition:
networks:
  default:
    name: grafana_default
    external: true
nginx addition:
server {
listen 80;
server_name grafana.gsil.mil;
location / {
proxy_pass http://grafana.gsil.smil:3000/;
}
}
Additionally, I have created a CNAME DNS entry for grafana.gsil.mil and pointed it to gsil-docker1.gsil.mil
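In BIND-style zone-file syntax, that record would look roughly like this:
grafana.gsil.mil.    IN    CNAME    gsil-docker1.gsil.mil.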
The container apps are all running and I can reach each of them by going to:
gsil-docker1.gsil.mil/stigman
gsil-docker1.gsil.mil/kc
gsil-docker1.gsil.mil:3000
The docker-compose file for grafana:
version: '3.0'
volumes:
  grafana-data:
services:
  grafana:
    container_name: grafana
    image: registry1.dso.mil/ironbank/opensource/grafana/grafana:9.3.2
    environment:
      - grafana.config
    restart: always
    volumes:
      - grafana-data:/var/lib/grafana
    ports:
      - 3000:3000/tcp
I have done a lot of searching, but the examples I found tended to show HTTP on nginx with HTTP backend apps, and I was struggling to find something that would pull this all together. Can you have an HTTPS proxy with an HTTP backend app, or do I need to create certs and make all my backend apps run HTTPS?
The issue was simple to fix: I needed to publish port 80 for the nginx service in my docker-compose file. NGINX cannot proxy HTTP traffic when it is listening on HTTPS only, so HTTP had to be added as well.
version: '3.7'
services:
  nginx:
    ports:
      - "443:443"
      - "80:80"
My presumptions about these specific items were all correct:
-making docker aware of external networks (when the container you want to add/proxy is not part of the same network)
networks:
  default:
    name: grafana_default
    external: true
-adding DNS CNAME entries was correct.
I have created a CNAME DNS entry for grafana.gsil.mil and pointed it to gsil-docker1.gsil.mil
-the appropriate lines had to be added to nginx.conf for each additional container that you need to add.
server {
listen 80;
server_name grafana.gsil.mil;
location / {
proxy_pass http://grafana.gsil.smil:3000/;
}
}
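With port 80 published and that server block in place, a quick sanity check from the docker host could look like this (hypothetical command, assuming curl is available):
curl -H 'Host: grafana.gsil.mil' http://localhost/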
I'm trying to deploy a simple FastAPI app with Docker and an Nginx proxy on Google Cloud, using a plain SSH terminal window.
My nginx.conf:
access_log /var/log/nginx/app.log;
error_log /var/log/nginx/app.log;
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 128;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
proxy_set_header X-Original-URI $request_uri;
proxy_set_header Proxy "";
upstream app_server {
server example.com:8000;
}
server {
server_name example.com;
listen 80;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name example.com;
ssl_certificate /root/ssl/cert.pem;
ssl_certificate_key /root/ssl/key.pem;
location / {
proxy_pass "http://app_server";
}
}
My docker-compose.yml:
version: '3.8'
services:
  reverse-proxy:
    image: jwilder/nginx-proxy
    container_name: reverse-proxy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx:/etc/nginx/conf.d
      - ./ssl/cert1.pem:/root/ssl/cert.pem
      - ./ssl/privkey1.pem:/root/ssl/key.pem
      - ./ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem
    networks:
      - reverse-proxy
  web:
    environment: [.env]
    build: ./project
    ports:
      - 8000:8000
    command: gunicorn main:app -k uvicorn.workers.UvicornWorker -w 2 -b 0.0.0.0:8000
    volumes:
      - ./project:/usr/src/app
    networks:
      - reverse-proxy
      - back
networks:
  reverse-proxy:
    external:
      name: reverse-proxy
  back:
    driver: bridge
After running the docker-compose up command and going to the example.com address, I get this error:
*3 upstream timed out (110: Connection timed out) while connecting to upstream...
Also, I have opened the ports with the Google Cloud firewall service (checked with the netstat command) and configured my VM instance with the network parameters from this article.
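For reference, the ingress rule was created with something along these lines (hypothetical rule name, assuming the gcloud CLI):
gcloud compute firewall-rules create allow-http-https --direction=INGRESS --allow=tcp:80,tcp:443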
I don't understand why I receive a 504 Gateway Time-out, because my service works with a similar configuration on a plain VPS host, and it also works from inside the Google Cloud VM's SSH terminal when I use curl against localhost instead of the example.com domain. How can I run my service on the Google Cloud VM using only docker-compose?
In the Nginx config file, point the upstream at the web service name instead of the public domain:
upstream app_server {
server web:8000;
}
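Both services are attached to the reverse-proxy network in your compose file, so the name web is resolvable from inside the proxy container; a quick way to confirm, assuming getent is available in the image:
docker-compose exec reverse-proxy getent hosts web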
My docker compose looks like this:
version: '3.2'
services:
  mediawiki:
    image: mediawiki:lts
  nginx:
    image: nginx:stable-alpine
    depends_on:
      - mediawiki
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
    ports:
      - 80:80
#...
Here mediawiki is a Docker container that runs on port 80 inside Docker and does not appear to have a way to change the port number.
I'm trying to expose mediawiki through nginx, and the nginx config looks like this:
events {
}
http {
server {
listen 80;
location / {
client_max_body_size 2M;
real_ip_header X-Forwarded-For;
real_ip_recursive on;
proxy_pass http://mediawiki:80;
}
}
}
Since both nginx and mediawiki run on port 80, I can't map mediawiki as 80:80.
I've tried mapping it to another port, such as 7001:80, and replacing http://mediawiki:80 with http://mediawiki:7001 in the nginx config, but this produces a bad gateway error when loading the site URL on port 80.
How might I fix this?
Let's have a look at the reverse proxy setup I use.
version: '3.2'
services:
  mediawiki:
    image: mediawiki:lts
  nginx:
    build: .
    image: A_NEW_NAME:VERSION_TAG
    depends_on:
      - mediawiki
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./wiki.conf:/etc/sites-available/wiki.conf
    ports:
      - 80:80
This should be your wiki.conf contents:
server {
listen 80;
server_name THE_DOMAIN_NAME_OF_YOUR_MEDIAWIKI;
location / {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $http_host;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://mediawiki:80;
proxy_redirect off;
# Socket.IO Support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
And add a Dockerfile in the directory where your docker-compose file is:
FROM nginx:stable-alpine
COPY wiki.conf /etc/sites-available/
RUN cd /etc/sites-enabled/ && ln -s /etc/sites-available/wiki.conf
Keep your nginx.conf at its default values, or change some values on your own, but do not add any directives there to serve the wiki.
Replace THE_DOMAIN_NAME_OF_YOUR_MEDIAWIKI with the actual domain name; for example, if you own media.com and want the wiki to be reachable at wiki.media.com, use wiki.media.com.
Now you can run docker-compose up -d --build and see the result.
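To verify after the build, assuming wiki.media.com is the domain you substituted in and curl is available on the host:
curl -H 'Host: wiki.media.com' http://localhost/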
Change the published port for the mediawiki service to 8080, like
8080:80
and change the nginx listen port to 7001 inside the local nginx.conf, together with
proxy_pass http://mediawiki:8080;
which is mounted into the container via
./nginx.conf:/etc/nginx/nginx.conf
So nginx will run on port 7001 and mediawiki on 80.
version: '3.2'
services:
  mediawiki:
    image: mediawiki:lts
  nginx:
    image: nginx:stable-alpine
    depends_on:
      - mediawiki
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
    ports:
      - 80:7001
#...
Then access the app through nginx on port 80.
I'm having a problem getting my NGINX reverse proxy to work on Docker.
When I access:
local.lab - NGINX responds with the expected index.html page
127.0.0.1:2000 or 127.0.0.1:2001 or 127.0.0.1:2002 - the services work and I get the expected results
local.lab/a1 or local.lab/a2 or local.lab/a3 - I get a "502 Bad Gateway" error.
Detailed error from nginx log:
2021/02/25 18:20:48 [error] 30#30: *4 connect() failed (111: Connection refused) while connecting to upstream, client: 172.19.0.1, server: local.lab, request: "GET /a2 HTTP/2.0", upstream: "http://127.0.0.1:2006/", host: "www.local.lab"
I tried adding network_mode: host to the nginx service in docker-compose, without success.
I'm using docker compose:
version: '3.7'
services:
  nginx:
    container_name: lab-nginx
    image: nginx:latest
    restart: always
    depends_on:
      - http1
      - http2
      - http3
    volumes:
      - ./html:/usr/share/nginx/html/
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./error_log/error.log:/var/log/nginx/error.log
      - ./cert:/var/log/nginx/cert/
    ports:
      - 80:80
      - 443:443
  http1:
    container_name: lab-http1
    image: httpd:latest
    restart: always
    # build:
    #   context: ./apache_service
    ports:
      - 2000:80
      - 2005:443
    volumes:
      - ./apache/index1.html:/usr/local/apache2/htdocs/index.html
  http2:
    container_name: lab-http2
    image: httpd:latest
    restart: always
    ports:
      - 2001:80
      - 2006:443
    volumes:
      - ./apache/index2.html:/usr/local/apache2/htdocs/index.html
  http3:
    container_name: lab-http3
    image: httpd:latest
    restart: always
    ports:
      - 2002:80
      - 2007:443
    volumes:
      - ./apache/index3.html:/usr/local/apache2/htdocs/index.html
My nginx config:
worker_processes auto;
events { worker_connections 1024;}
error_log /var/log/nginx/error.log error;
http{
server {
listen 443 ssl http2;
server_name local.lab;
ssl_certificate /var/log/nginx/cert/local.lab.crt;
ssl_certificate_key /var/log/nginx/cert/local.lab.key;
ssl_protocols TLSv1.3;
location / {
root /usr/share/nginx/html;
index index.html;
}
location /a1 {
proxy_pass http://127.0.0.1:2000/;
proxy_set_header X-Forwarded-For $remote_addr;
}
location /a2 {
proxy_pass http://127.0.0.1:2001/;
proxy_set_header X-Forwarded-For $remote_addr;
}
location /a3 {
proxy_pass http://127.0.0.1:2002/;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
}
How can I fix this?
The reverse proxy configuration in NGINX should reference your services' internal ports, not the external ports they are mapped to in docker-compose.yml. The services all have different names and run in different containers, so they can all use the same internal port (80 in this case); reference them by service name, not by the loopback address. You still need to map them to different external ports, though, because you can't have more than one service per port on your host.
For example:
location /a1 {
proxy_pass http://http1:80/;
proxy_set_header X-Forwarded-For $remote_addr;
}
location /a2 {
proxy_pass http://http2:80/;
proxy_set_header X-Forwarded-For $remote_addr;
}
location /a3 {
proxy_pass http://http3:80/;
proxy_set_header X-Forwarded-For $remote_addr;
}
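To confirm that the service names resolve from inside the proxy container, something like this can help (assuming getent is available in the nginx image):
docker exec -it lab-nginx getent hosts http1 http2 http3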
I am trying to run an nginx proxy server with a Ktor Java server behind it. However, nginx throws "111: Connection refused" with the configurations below. I've tried the "change the upstream server name from localhost to the docker-compose service name" advice found on the web, but it didn't help.
Thank you in advance, and sorry for my poor English.
docker-compose.yml
version: "3.8"
services:
  nginx:
    image: nginx:1.19.3
    ports:
      - 80:80
      - 443:443
    volumes:
      - ./Nginx/logs:/var/log/nginx
      - ./Nginx/confs:/etc/nginx/conf.d
      - ./Nginx/confs:/etc/nginx/keys
  mariadb:
    image: mariadb:10.5.6
    ports:
      - 3306:3306
    volumes:
      - ./Mariadb/data:/var/lib/mysql
      - ./Mariadb/confs:/etc/mysql/conf.d
      - ./Mariadb/inits:/docker-entrypoint-initdb.d
    env_file:
      - .env
    environment:
      TZ: Asia/Seoul
      MYSQL_USER: dockhyub
  yangjin208:
    build: ./Yangjin208
    ports:
      - "3000:8080"
    env_file:
      - .env
      - ./Yangjin208/.env
    links:
      - mariadb:sql
yangjin208.conf, under ./Nginx/confs:
upstream yangjin208_app {
server yangjin208:3000;
}
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://yangjin208_app;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
}
localhost:3000 is accessible from the browser and has no problems.
So, I've found the problem: on the Docker internal network, containers are reached on their original (container) port, not the published port set in docker-compose.yml's "ports" section. I was using port 3000 (the published port from docker-compose) instead of 8080 (the container's original port), and that was why it didn't work. Thanks, everyone.
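In other words, the upstream block has to point at the container port, roughly:
upstream yangjin208_app {
    # inside the compose network, use the container's own port (8080), not the published port (3000)
    server yangjin208:8080;
}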
I think nginx isn't picking up yangjin208.conf. Rename yangjin208.conf to default.conf.
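If renaming the file is inconvenient, mounting just that file under the expected name has the same effect (a sketch replacing the directory mount, assuming the image's default conf.d include):
services:
  nginx:
    volumes:
      - ./Nginx/confs/yangjin208.conf:/etc/nginx/conf.d/default.conf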