Following is my Docker Compose file & NGINX conf file.
The application seems to work and NGINX is also up, but the proxy_pass setting doesn't seem to work properly.
File docker-compose.yaml
networks:
  webapp:

services:
  web:
    image: nginx
    volumes:
      - ./data/ntemplates:/etc/nginx/templates
      - ./webapp.conf:/etc/nginx/conf.d/webapp.conf
    ports:
      - "8080:80"
    networks:
      - webapp
  pyweb:
    build: .
    ports:
      - "5000:5000"
    networks:
      - webapp
  redis:
    image: "redis:alpine"
    networks:
      - webapp
File webapp.conf
server {
    listen 80;
    listen [::]:80;
    server_name localhost;

    location / {
        proxy_pass "http://pyweb_1:5000/";
    }

    #error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
}
Service pyweb is working properly if accessed directly at http://pyweb_1:5000.
I created this app based on the Docker getting started page.
For completeness, the other files are below; they seem to be working just fine.
File Dockerfile
FROM python:3.7-alpine
WORKDIR /code
ENV FLASK_APP=app.py
ENV FLASK_RUN_HOST=0.0.0.0
RUN apk add --no-cache gcc musl-dev linux-headers
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt
EXPOSE 5000
COPY . .
CMD ["flask", "run"]FROM python:3.7-alpine
WORKDIR /code
ENV FLASK_APP=app.py
ENV FLASK_RUN_HOST=0.0.0.0
RUN apk add --no-cache gcc musl-dev linux-headers
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt
EXPOSE 5000
COPY . .
CMD ["flask", "run"]
File requirements.txt
flask
redis
File app.py
import time
import redis
from flask import Flask
app = Flask(__name__)
cache = redis.Redis(host='redis', port=6379)
def get_hit_count():
    retries = 5
    while True:
        try:
            return cache.incr('hits')
        except redis.exceptions.ConnectionError as exc:
            if retries == 0:
                raise exc
            retries -= 1
            time.sleep(0.5)

@app.route('/')
def hello():
    count = get_hit_count()
    return 'Hello World! I have been seen {} times.\n'.format(count)
EDIT:
You're currently not using your nginx configuration; I didn't read your docker-compose file carefully enough. You can fix it by mapping webapp.conf to /etc/nginx/conf.d/default.conf, e.g.:
services:
  web:
    image: nginx
    volumes:
      - ./data/ntemplates:/etc/nginx/templates
      - ./webapp.conf:/etc/nginx/conf.d/default.conf
    ports:
      - "8080:80"
    depends_on:
      - pyweb
    networks:
      - webapp
There are 2 issues:
you don't know what container name will be used by docker-compose
you don't know the order used to start the containers
docker-compose allows you to solve the first issue in 2 ways:
define a container_name subsection
use the service name
This means that you can simply use proxy_pass "http://pyweb:5000/"; in your nginx setup.
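For example, the location block from the question's webapp.conf would then look like this (only the hostname changes):
location / {
    proxy_pass "http://pyweb:5000/";
}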
The second issue can be fixed by adding a depends_on subsection in the nginx service. e.g.
services:
  web:
    image: nginx
    volumes:
      - ./data/ntemplates:/etc/nginx/templates
      - ./webapp.conf:/etc/nginx/conf.d/webapp.conf
    depends_on:
      - pyweb
    ports:
      - "8080:80"
    networks:
      - webapp
Nevertheless, depends_on might not be enough, since it does not check the service's health; it only makes sure the container has started (as stated in the documentation).
You'll need another way to check whether the service is actually ready.
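One way, if your Compose version supports the long depends_on syntax, is to add a healthcheck to pyweb and make web wait for it to become healthy. This is only a sketch; the wget probe is an assumption about what "ready" means for your app:
services:
  web:
    image: nginx
    depends_on:
      pyweb:
        condition: service_healthy
  pyweb:
    build: .
    healthcheck:
      # busybox wget is available in python:3.7-alpine
      test: ["CMD-SHELL", "wget -qO- http://localhost:5000/ || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 5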
Related
So I have a basic frontend and backend. The backend relies on some environment variables and this is my docker-compose.yml.
version: "3.9"
services:
backend:
env_file:
- .env
build:
context: ./backend
container_name: fastapi-api
ports:
- 80:80
frontend:
build:
context: ./frontend
container_name: vue-ui
ports:
- 8080:8080
links:
- backend
This gives me ERR_EMPTY_RESPONSE when I go to http://127.0.0.1:8080/; however, when I run the individual Dockerfiles for my frontend and backend, everything works fine.
My frontend
FROM node:lts-alpine
# install simple http server for serving static content
RUN npm install -g http-server
# make the 'frontend' folder the current working directory
WORKDIR /frontend
# copy both 'package.json' and 'package-lock.json' (if available)
COPY package*.json ./
# install project dependencies
RUN npm install
# copy project files and folders to the current working directory (i.e. 'app' folder)
COPY . .
# build app for production with minification
RUN npm run build
EXPOSE 8080
CMD [ "http-server", "dist" ]
My backend
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Sebastian Ramirez <tiangolo@gmail.com>"
WORKDIR /backend
COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt
COPY . .
EXPOSE 80
This is what I see from running docker ps.
This is what's happening: frontend requests are being sent to the wrong place.
I want them to go here.
So requests should go to port 80, not port 8000.
This is what I see from dev tools.
However, this is my code:
axios
  .post(`http://127.0.0.1:80/city/`, {
    city_name: this.current_city
  })
Where are the extra 0s coming from?
This is what happens when I run the two containers separately.
Looking at the docker ps output, I would guess that you have accidentally swapped the ports for the backend and frontend in your configuration: the frontend has unmapped port 80 and the backend has unmapped port 8080.
Try this one:
version: "3.9"
services:
backend:
env_file:
- .env
build:
context: ./backend
container_name: fastapi-api
ports:
- 8080:8080
frontend:
build:
context: ./frontend
container_name: vue-ui
ports:
- 80:80
links:
- backend
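After recreating the containers, you can double-check which host ports actually ended up mapped, for example with:
docker ps --format "table {{.Names}}\t{{.Ports}}"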
I have built a docker-compose file for my Node.js application, which has been dockerized, but I don't know how to make API calls to that Node.js app while it is running as a Docker container. Please help me with this.
My Dockerfile:
FROM node:10.15-slim
ENV NODE_ENV=production
WORKDIR /app
COPY package.json package-lock*.json ./
RUN npm install && npm cache clean --force
COPY . .
CMD ["node", "./bin/www"]
My Docker-compose file:
version: '2.4'
services:
  express:
    build:
      context: .
      dockerfile: Dockerfile
    command: /app/node_modules/.bin/nodemon ./bin/www
    ports:
      - 3000:3000
    volumes:
      - .:/app
    environment:
      - DEBUG=sample-express:*
      - NODE_ENV=development
You'll need to expose the port from docker on which your application is running.
Let's say your application is running on port 8080 inside Docker; here's how you can expose that specific port:
EXPOSE 8080
Then you'll need to map the port exposed by Docker to a port on your host. Here's how you can do it with docker:
docker run -p 49160:8080 -d docker_image
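Once the port is mapped, you call the API from the host like any other HTTP service, e.g. (assuming the app responds on its root path; adjust to your actual routes):
curl http://localhost:49160/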
And if you're working with docker-compose, you'll do it like this:
version: '3'
services:
  nodejs:
    build:
      context: .
      dockerfile: Dockerfile
    image: nodejs
    container_name: nodejs
    ports:
      - "8080:8080"
UPDATE
Let's say you want to send /api requests to the back-end server. This is how you'd do it in the nginx conf:
server {
    listen 80;

    location /api {
        proxy_pass http://backend:8080/;
    }
}
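Note that the hostname in proxy_pass has to match a service name on the same Docker network; with the compose file above that service is called nodejs, so a sketch under that assumption would be:
location /api {
    # "nodejs" is the compose service name from the example above
    proxy_pass http://nodejs:8080/;
}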
I hope it helps.
I have an application with 4 services. One of them is Nginx which will act as a proxy.
I use docker compose to run the services. In nginx when I specify a path and where to proxy I want to be able to use the service name. This is what I have done so far.
version: '3'
services:
  go_app:
    image: go_app
    depends_on:
      - mysql
    ports:
      - "8000:8000"
  mysql:
    image: mysql_db
    ports:
      - "3306:3306"
  flask_app:
    image: flask_app
    ports:
      - "8080:8080"
  nginx:
    image: nginx_app
    ports:
      - "80:80"
    depends_on:
      - mysql
      - flask_app
      - go_app
With the above I create all services. They all work on their respective ports. I want Nginx to listen on port 80 and proxy as defined in the config:
server {
    listen 0.0.0.0:80;
    server_name localhost;

    location / {
        proxy_pass http://${FLASK_APP}:8080/;
    }
}
You may ask where FLASK_APP comes from. I specify it inside the nginx Docker image:
FROM nginx
ENV FLASK_APP=flask_app
RUN rm /etc/nginx/conf.d/default.conf
COPY config/default.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
The Nginx container keeps failing with the following error:
[emerg] 1#1: unknown "flask_app" variable
nginx: [emerg] unknown "flask_app" variable
The way I understand docker compose, flask_app should resolve as the flask_app service.
What am I doing wrong/misunderstanding?
The issue is that nginx does not read environment variables.
see https://github.com/docker-library/docs/tree/master/nginx#using-environment-variables-in-nginx-configuration
A solution: you can modify your nginx Dockerfile like this:
FROM nginx
ENV FLASK_APP=flask_app
RUN rm /etc/nginx/conf.d/default.conf
COPY default.conf /etc/nginx/conf.d/default.template
EXPOSE 80
CMD ["/bin/bash","-c","envsubst < /etc/nginx/conf.d/default.template > /etc/nginx/conf.d/default.conf && exec nginx -g 'daemon off;'"]
The COPY command is modified to copy your configuration as a template.
The last line is modified to do a substitution using your environment variables before starting nginx.
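One caveat: envsubst with no arguments replaces every $-prefixed token, which would also mangle nginx's own variables (such as $host) if they ever appear in the template. If that becomes a problem, you can restrict substitution to just FLASK_APP, e.g.:
CMD ["/bin/bash","-c","envsubst '${FLASK_APP}' < /etc/nginx/conf.d/default.template > /etc/nginx/conf.d/default.conf && exec nginx -g 'daemon off;'"]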
I'm trying to make a reverse proxy for my Flask application and dockerize it with nginx, gunicorn, Docker and docker-compose. Before, the nginx part was in the same container as the web app; I'm trying to separate it.
My docker-compose YAML file is:
version: '3.6'
services:
  nginx:
    restart: always
    build: ./nginx/
    ports:
      - 8008:8008
    networks:
      - web_net
  flask_min:
    build: .
    image: flask_min
    container_name: flask_min
    expose:
      - "8008"
    networks:
      - web_net
    depends_on:
      - nginx
networks:
  web_net:
    driver: bridge
My Dockerfile is:
FROM python:3.6
MAINTAINER aurelien beliard (email#domain.com)
RUN apt update
COPY . /usr/flask_min
WORKDIR /usr/flask_min
RUN useradd -r -u 20979 -ms /bin/bash aurelien.beliard
RUN pip3 install -r requirements.txt
CMD gunicorn -w 3 -b :8008 app:app
My nginx Dockerfile is:
FROM nginx
COPY ./flask_min /etc/nginx/sites-available/
RUN mkdir /etc/nginx/sites-enabled
RUN ln -s /etc/nginx/sites-available/flask_min /etc/nginx/sites-enabled/flask_min
My nginx config file, in /etc/nginx/sites-available and sites-enabled, is named flask_min:
server {
    listen 8008;
    server_name http://192.168.16.241/;
    charset utf-8;

    location / {
        proxy_pass http://flask_min:8008;
    }
}
The requirements.txt file is:
Flask==0.12.2
grequests==0.3.0
gunicorn==19.7.1
Jinja2==2.10
The 2 containers are created fine and gunicorn starts well, but I can't access the application, and there is nothing in the nginx access and error logs.
If you have any idea, it will be very much appreciated.
PS: sorry for the mistakes, English is not my native language.
As mentioned in Maxm's answer, flask_min currently depends on nginx starting up first. One way to fix it is to reverse the dependency order, but I think there's a more clever solution that doesn't require the dependency.
Nginx tries to do some optimization by caching the DNS results of proxy_pass, but you can make it more flexible by setting the target to a variable. This allows you to freely restart flask without having to also restart nginx.
Here's an example:
resolver 127.0.0.11 ipv6=off;
set $upstream http://flask_min:8008;
proxy_pass $upstream;
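For context, the whole server block might then look roughly like this (127.0.0.11 is Docker's embedded DNS resolver on user-defined networks; the rest mirrors the question's config):
server {
    listen 8008;
    server_name localhost;

    resolver 127.0.0.11 ipv6=off;

    location / {
        set $upstream http://flask_min:8008;
        proxy_pass $upstream;
    }
}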
server_name should just be the host; try localhost or just _.
You can also list multiple hosts: server_name 192.168.16.241 localhost;
The depends_on should be on nginx, not flask_min. Remove it from flask_min and add the following to the nginx service:
depends_on:
  - flask_min
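The nginx service from your compose file would then look roughly like this:
nginx:
  restart: always
  build: ./nginx/
  ports:
    - 8008:8008
  networks:
    - web_net
  depends_on:
    - flask_min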
See if that works, let me know if you run into any more snags.
I am trying to use jwilder/nginx-proxy as a reverse proxy for my angular2 app that is broken down into 3 containers (angular, express and database).
I have tried different configurations to proxy requests to my app on port 80; however, when I try to run docker-compose I get:
ERROR: for angular Cannot start service angular: driver failed programming
external connectivity on endpoint example_angular_1
(335ce6d0c775b7837eb436fff97bbb56bfdcaece22d51049e1eb4bf5ce45553c): Bind for
0.0.0.0:80 failed: port is already allocated
While the message makes it pretty clear that there is a conflict on port 80, I cannot figure out a way around it. It works just fine when I set my angular container to work on port 4200, but then I have to specify the port number in the URL every time I want to visit the page. I am using the reverse proxy because this is not the only app that will be running in my environment.
Below is my docker-compose.yml
version: '3'
services:
  nginx-proxy:
    image: jwilder/nginx-proxy
    container_name: nginx-proxy
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
  angular:
    build: client
    ports:
      - "80"
    environment:
      - VIRTUAL_HOST=example.com
      - VIRTUAL_PORT=80
    restart: always
  express:
    build: server
    ports:
      - "3000:3000"
    links:
      - database
    restart: always
  database:
    image: mongo
    ports:
      - "27017:27017"
    restart: always
networks:
  default:
    external:
      name: nginx-proxy
And the Dockerfile for the angular container:
FROM node:8-alpine as builder
COPY package.json package-lock.json ./
RUN npm set progress=false && npm config set depth 0 && npm cache clean --force
RUN npm i && mkdir /ng-app && cp -R ./node_modules ./ng-app
WORKDIR /ng-app
COPY . .
RUN $(npm bin)/ng build --prod --build-optimizer
FROM nginx:1.13.3-alpine
COPY nginx/default.conf /etc/nginx/conf.d/
RUN rm -rf /usr/share/nginx/html/*
COPY --from=builder /ng-app/dist /usr/share/nginx/html
CMD ["nginx", "-g", "daemon off;"]
EXPOSE 80
The problem is that you're trying to open port 80 on the host twice: once for nginx-proxy and once for angular. Remove the "ports 80" from angular.
The browser will speak to the container on the virtual_port that you set.
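With that change, the angular service would look roughly like this (no host port published; nginx-proxy reaches the container over the shared network using VIRTUAL_PORT):
angular:
  build: client
  environment:
    - VIRTUAL_HOST=example.com
    - VIRTUAL_PORT=80
  restart: always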
Maybe you can direct the request to the backend through an API endpoint.
If you want to use nginx as a reverse proxy, you need to access it on port 80. Then modify the nginx config to redirect to your angular container and port (81, for example). Try this: "proxy_pass http://angular:81;". This should work.