I'm trying to serve one Django back-end and two ReactJS front-ends.
The back-end works well, but only one of the two front-ends works.
this is my nginx and front-end Dockerfile :
FROM node:lts-alpine3.12 as build1
WORKDIR /frontend
COPY ./frontend/package.json ./
COPY ./frontend/package-lock.json ./
RUN npm install
COPY ./frontend/ ./
RUN npm run build
FROM node:lts-alpine3.12 as build2
WORKDIR /frontend2
COPY ./frontend2/package.json ./
COPY ./frontend2/package-lock.json ./
RUN npm install
COPY ./frontend2/ ./
RUN npm run build
FROM nginx:1.18.0-alpine
COPY ./webserver/default.conf /etc/nginx/conf.d/default.conf
COPY --from=build1 /frontend/build /usr/share/nginx/html/frontend1
COPY --from=build2 /frontend2/build /usr/share/nginx/html/frontend2
this is my default.conf:
upstream api {
server backend:8000;
}
server {
listen 80;
server_name "localhost";
root /usr/share/nginx/html/frontend1;
location / {
try_files $uri /index.html;
}
location /api/ {
proxy_pass http://api;
}
}
server {
listen 8080 ;
server_name "localhost";
root /usr/share/nginx/html/frontend2;
location / {
try_files $uri /index.html;
}
location /api/ {
proxy_pass http://api;
}
}
and this is the results for port 80 :
and this is the result for port 8080:
Update :
this is my docker-compose :
version: "3.9"
services:
backend:
build:
context: ./backend
ports:
- "8000:8000"
command: gunicorn server.wsgi:application --bind 0.0.0.0:8000
volumes:
- staticfiles:/backend/staticfiles
nginx:
build:
context: .
dockerfile: ./webserver/Dockerfile
restart: always
volumes:
- staticfiles:/staticfiles
ports:
- "80:80"
depends_on:
- backend
volumes:
staticfiles:
Use different server names for the front-ends instead of localhost,
Use this default.conf
upstream api {
server backend:8000;
}
server {
listen 80;
server_name myapp.loc;
root /usr/share/nginx/html/frontend1;
location / {
try_files $uri /index.html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
server {
listen 80;
server_name newapp.loc;
root /usr/share/nginx/html/frontend2;
location / {
try_files $uri /index.html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
/etc/hosts
127.0.0.1 newapp.loc
127.0.0.1 myapp.loc
As I see in your docker-compose, you didn't connect your container port 8080 to your local port 8080.
So when you open localhost:8080 in your browser, you get connection error, because this port isn't open now.
please use this docker-compose, and let me know if you have any problem with it:
version: "3.9"
services:
backend:
build:
context: ./backend
ports:
- "8000:8000"
command: gunicorn server.wsgi:application --bind 0.0.0.0:8000
volumes:
- staticfiles:/backend/staticfiles
nginx:
build:
context: .
dockerfile: ./webserver/Dockerfile
restart: always
volumes:
- staticfiles:/staticfiles
ports:
- "80:80"
- "8080:8080"
depends_on:
- backend
volumes:
staticfiles:
Related
I create docker like this:
version: '3'
services:
nginx:
image: nginx
container_name: test1_nginx
restart: always
volumes:
- ./docker/nginx/nginx.conf:/etc/nginx/nginx.conf
ports:
- "80:80"
- "443:443"
site1:
restart: always
container_name: test1_site1
build: ./site1/
command: "npm run start"
ports:
- "3001:3001"
site2:
restart: always
container_name: test1_site2
build: ./site2/
command: "npm run start"
ports:
- "3002:3002"
with struct of project like this: struct of project
Here is ./docker/nginx/nginx.conf:
events {
worker_connections 1024;
}
http {
upstream site1 {
server localhost:3001;
}
upstream site2 {
server localhost:3002;
}
server {
listen 80;
listen [::]:80;
server_name localhost;
location / {
root /usr/share/nginx/html;
index index.html index.htm index.php;
}
location /site1 {
proxy_pass http://localhost:3001;
proxy_set_header Host $host;
}
location /site2 {
proxy_pass http://site2;
proxy_set_header Host $host;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
}
http://localhost:80 working fine
http://localhost:3001 working fine
http://localhost:3002 working fine
I want:
when I enter http://localhost/test1, it will pass proxy with http://localhost:3001
when I enter http://localhost/test3, it will pass proxy with http://localhost:3003
Please let me know how to do it.
Thank you
sorry for the question. I'm new with docker and nginx. I need some help
So, I have a docker with the nginx proxy and 3 dockers with 3 different applications
I don't know how to configure nginx at all to avoid bad gateway errors on my 3 applications. Can you please help me?
I can only find help for this problem on 1 application.
My docker-compose
version: '2.1'
services:
proxy:
container_name: proxy
hostname: proxy
build: .docker/nginx
ports:
- "80:80" #80:80
- "443:443"
volumes:
- ./certs:/etc/nginx/certs
- /var/run/docker.sock:/tmp/docker.sock:ro
networks:
- proxy
restart: always
networks:
proxy:
external: true
My DockerFile
FROM jwilder/nginx-proxy
RUN { \
echo 'client_max_body_size 500M;'; \
} > /etc/nginx/conf.d/my_proxy.conf
App1
version: '3'
services:
apache:
build: .docker/apache
container_name: app1_apache
environment:
VIRTUAL_HOST: "app1.local"
expose:
- "80"
volumes:
- .docker/config/vhosts:/etc/apache2/sites-enabled
- ../../app1:/var/www/app1.local
depends_on:
- php
networks:
- default
- proxy
php:
build: .docker/php
container_name: app1_php
environment:
VIRTUAL_HOST: "app1.local"
volumes:
- ../../app1:/var/www/app1.local
networks:
- default
- proxy
networks:
proxy:
external: true
Same file for App2 and App3
I found this solution but I don't understand how to implement it.
nginx docker container: 502 bad gateway response
I need to modify the nginx config.d configuration file but I don't see how I can do that.
my default config.d file
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/log/host.access.log main;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
So, I have to add
upstream app1 {
# insert your host's IP here
server 192.168.99.100:8080;
}
upstream app2 {
# insert your host's IP here
server 192.168.99.100:8080;
}
upstream app3 {
# insert your host's IP here
server 192.168.99.100:8080;
}
That's right ?
Do I also have to change the proxy_pass?
The nginx configuration is really obscure to me.
Thank you for your help
When I start the project with php artisan serve everything works fine, but when I start my project with docker-compose up -d there is an error: 403 Forbidden nginx/1.10.3
Nginx default file:
listen [::]:80;
listen 80;
root /var/www/html/public;
index index.html index.htm index.php;
server_name {{getenv "NGINX_SERVER_NAME"}};
server_tokens off;
charset utf-8;
location = /favicon.ico { log_not_found off; access_log off; }
location = /robots.txt { log_not_found off; access_log off; }
location / {
try_files $uri $uri/ /index.php$is_args$args;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/usr/local/var/run/php-fpm.sock;
}
error_page 404 /index.php;
location ~ /\.ht {
deny all;
}
add_header X-Served-By Bitpress.io;
include h5bp/basic.conf;
}
and here is my docker-compose File
docker-compose.yml
version: "3"
networks:
app-tier:
driver: bridge
services:
app:
image: test
container_name: site
build:
context: .
dockerfile: docker/Dockerfile
networks:
- app-tier
env_file:
- .docker.env
ports:
- 5050:80
volumes:
- .:/var/www/html
environment:
APP_ENV: local
CONTAINER_ROLE: app
scheduler:
image: test
container_name: scheduler
depends_on:
- app
env_file:
- .docker.env
volumes:
- .:/var/www/html
environment:
CONTAINER_ROLE: scheduler
queue:
image: test
container_name: queue
depends_on:
- app
env_file:
- .docker.env
volumes:
- .:/var/www/html
environment:
CONTAINER_ROLE: queue
I've seen that the permissions of the directories belong to root.
I have tried to change it with the command RUN chown -R www-data:www-data /var/www/html but it does not work.
I just updated what you have, but it won't fix 100% of your issues; some other things have to be done too, but without all the information I cannot do more.
You may need to add php-fpm into your docker-compose.yml
nginx.conf
server {
listen [::]:80;
listen 80;
# will be remove if you run everything inside container
root /var/www/html/public;
# will be remove if you run everything inside container
index index.html index.htm index.php;
server_name {{getenv "NGINX_SERVER_NAME"}};
server_tokens off;
charset utf-8;
location = /favicon.ico { log_not_found off; access_log off; }
location = /robots.txt { log_not_found off; access_log off; }
# will be remove
# location / {
# try_files $uri $uri/ /index.php$is_args$args;
# }
# Add this, now nginx only redirect request to expose socket from docker
location / {
proxy_pass http://localhost:5050;
proxy_set_header X-Served-By Bitpress.io;
}
location ~ \.php$ {
include snippets/fastcgi-php.conf;
fastcgi_pass unix:/usr/local/var/run/php-fpm.sock;
}
# will be remove if you run everything inside container
error_page 404 /index.php;
location ~ /\.ht {
deny all;
}
# will be remove if you run everything inside container
add_header X-Served-By Bitpress.io;
include h5bp/basic.conf;
}
docker-compose.yml
version: "3"
networks:
app-tier:
driver: bridge
services:
app:
image: test
container_name: site
build:
context: .
dockerfile: docker/Dockerfile
networks:
- app-tier
env_file:
- .docker.env
ports:
- 5050:80
volumes:
- .:/var/www/html
# - /absolute/path/better:/var/www/html
environment:
APP_ENV: local
CONTAINER_ROLE: app
scheduler:
image: test
container_name: scheduler
networks: # <-- add this
- app-tier # <-- add this
depends_on:
- app
env_file:
- .docker.env
volumes:
- .:/var/www/html
# - /absolute/path/better:/var/www/html
environment:
CONTAINER_ROLE: scheduler
queue:
image: test
container_name: queue
networks: # <-- add this
- app-tier # <-- add this
depends_on:
- app
env_file:
- .docker.env
volumes:
- .:/var/www/html
# - /absolute/path/better:/var/www/html
environment:
CONTAINER_ROLE: queue
You may have an issue between env_file: and CONTAINER_ROLE over which one has priority: your 3 containers share the same .docker.env, which may be a problem. It may be a good idea to have:
.docker.app.env
.docker.scheduler.env
.docker.queue.env
I tried to start some services via docker-compose. One of them is an nginx reverse proxy handling different paths. One path ("/react") goes to a containerized react_app with an nginx on port 80. On its own, the reverse proxy works correctly. Also, if I serve the nginx of the react_app on port 80, everything works fine. Combining both without changing anything in the config leads to 404s for static files like CSS and JS.
Setup #1
Correct forward for path /test to Google.
docker-compose.yml
version: "3"
services:
#react_app:
# container_name: react_app
# image: react_image
# build: .
reverse-proxy:
image: nginx:latest
container_name: reverse-proxy
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
ports:
- '80:80'
nginx.conf (reverse-proxy)
location /test {
proxy_pass http://www.google.com/;
}
Setup #2
No reverse proxy. Correct answer from nginx inside of container react_app.
docker-compose.yml
version: "3"
services:
react_app:
container_name: react_app
image: react_image
build: .
#reverse-proxy:
# image: nginx:latest
# container_name: reverse-proxy
# volumes:
# - ./nginx.conf:/etc/nginx/nginx.conf
# ports:
# - '80:80'
Setup #3 (not working!)
Reverse proxy and React app with nginx. Loads index.html, but fails to load files in /static
nginx.conf (reverse-proxy)
location /react {
proxy_pass http://react_app/;
}
docker-compose.yml
version: "3"
services:
react_app:
container_name: react_app
image: react_image
build: .
reverse-proxy:
image: nginx:latest
container_name: reverse-proxy
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
ports:
- '80:80'
Activating both systems leads to failing static content. It seems to me that the reverse proxy tries to serve the files but fails (for good reason), because there is no log entry in react_app's nginx. Here's the config from the react_app nginx; perhaps I'm missing something.
nginx.conf (inside react_app container)
events {}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
server {
listen 80;
server_name localhost;
root /usr/share/nginx/html;
location / {
try_files $uri /index.html;
}
}
}
--> Update
This is a rather unsatisfying workaround - but it works. Although now React's routing is messed up. I cannot reach /react/login
http {
server {
server_name services;
location /react {
proxy_pass http://react_app/;
}
location /static/css {
proxy_pass http://react_app/static/css;
add_header Content-Type text/css;
}
location /static/js {
proxy_pass http://react_app/static/js;
add_header Content-Type application/x-javascript;
}
}
}
If you check the paths of the missing static files in your browser, you'll notice their relative paths are not what you expect. You can fix this by adding sub filters inside your nginx reverse proxy configuration.
http {
server {
server_name services;
location /react {
proxy_pass http://react_app/;
######## Add the following ##########
sub_filter 'action="/' 'action="/react/';
sub_filter 'href="/' 'href="/react/';
sub_filter 'src="/' 'src="/react/';
sub_filter_once off;
#####################################
}
}
}
This will update the relative paths to your static files.
I'm trying to set up a simple web stack locally on my Mac.
nginx to serve as a reverse proxy
react web app #1 to be served on localhost
react web app #2 to be served on demo.localhost
I'm using docker-compose to spin all the services at once, here's the file:
version: "3"
services:
nginx:
container_name: nginx
build: ./nginx/
ports:
- "80:80"
networks:
- backbone
landingpage:
container_name: landingpage
build: ./landingpage/
networks:
- backbone
expose:
- 3000
frontend:
container_name: frontend
build: ./frontend/
networks:
- backbone
expose:
- 3001
networks:
backbone:
driver: bridge
and here's the nginx config file (copied into the container with a COPY command in the Dockerfile):
worker_processes 1;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
gzip on;
gzip_http_version 1.1;
gzip_comp_level 2;
gzip_types text/plain text/css
application/x-javascript text/xml
application/xml application/xml+rss
text/javascript;
upstream landingpage {
server landingpage:3000;
}
upstream frontend {
server frontend:3001;
}
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://landingpage;
}
}
server {
listen 80;
server_name demo.localhost;
location / {
proxy_pass http://frontend;
}
}
}
I can successfully run docker-compose up, but only localhost opens the web app, while demo.localhost does not.
I've also changed the hosts file contents on my Mac so I have
127.0.0.1 localhost
127.0.0.1 demo.localhost
to no avail.
I am afraid I'm missing something as I'm no expert in web development nor docker or nginx!
For reference: we were able to run this remotely using AWS ligthsail, using the following settings
worker_processes 1;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
gzip on;
gzip_http_version 1.1;
gzip_comp_level 2;
gzip_types text/plain text/css
application/x-javascript text/xml
application/xml application/xml+rss
text/javascript;
upstream landingpage {
server landingpage:5000;
}
upstream frontend {
server frontend:5000;
}
server {
listen 80;
if ($http_x_forwarded_proto != 'https') {
return 301 https://$host$request_uri;
}
server_name domain.com www.domain.com;
location / {
proxy_pass http://landingpage;
}
}
server {
listen 80;
if ($http_x_forwarded_proto != 'https') {
return 301 https://$host$request_uri;
}
server_name demo.domain.com www.demo.domain.com;
location / {
add_header X-Robots-Tag "noindex, nofollow, nosnippet, noarchive, notranslate, noimageindex";
proxy_pass http://frontend;
}
}
}
with the following dockerfile for both react apps (basically exposing port 5000 for both services)
FROM node:latest
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY package.json /usr/src/app/
RUN npm install --verbose
COPY . /usr/src/app
RUN npm run build --production
RUN npm install -g serve
EXPOSE 5000
CMD serve -s build
Unfortunately I cannot provide more details on doing this on a local machine
This is working for me. The difference might be that I'm using a fake domain name, but I can't say for sure. I'm also using ssl, because I couldn't get Firefox to access the fake domain via http. I'm routing the subdomain to Couchdb. The webclient service is the parcel-bundler development server.
/etc/hosts
127.0.0.1 example.local
127.0.0.1 www.example.local
127.0.0.1 db.example.local
develop/docker-compose.yaml
version: '3.5'
services:
nginx:
build:
context: ../
dockerfile: develop/nginx/Dockerfile
ports:
- 443:443
couchdb:
image: couchdb:3
volumes:
- ./couchdb/etc:/opt/couchdb/etc/local.d
environment:
- COUCHDB_USER=admin
- COUCHDB_PASSWORD=password
webclient:
build:
context: ../
dockerfile: develop/web-client/Dockerfile
volumes:
- ../clients/web/src:/app/src
environment:
- CLIENT=web
- COUCHDB_URL=https://db.example.local
develop/nginx/Dockerfile
FROM nginx
COPY develop/nginx/conf.d/* /etc/nginx/conf.d/
COPY develop/nginx/ssl/certs/* /etc/ssl/example.local/
develop/nginx/conf.d/default.conf
server {
listen 443 ssl;
ssl_certificate /etc/ssl/example.local/server.crt;
ssl_certificate_key /etc/ssl/example.local/server.key.pem;
server_name example.local www.example.local;
location / {
proxy_pass http://webclient:1234;
}
}
server {
listen 443 ssl;
ssl_certificate /etc/ssl/example.local/server.crt;
ssl_certificate_key /etc/ssl/example.local/server.key.pem;
server_name db.example.local;
location / {
proxy_pass http://couchdb:5984/;
}
}
develop/web-client/Dockerfile
FROM node:12-alpine
WORKDIR /app
COPY clients/web/*.config.js ./
COPY clients/web/package*.json ./
RUN npm install
CMD ["npm", "start"]
Here is the blog that shows how to generate the self-signed certs.