I am using active storage in rails 6 with Docker and Nginx. I am uploading an image through the rails console, i.e.,
object.images.attach(io: File.open("#{Rails.root}/image_path"), filename: "image.jpg")
It is uploading successfully, but the file is not getting stored in the specified location i.e., Rails.root.join("storage"). And for that, I am not getting the image files.
I am not sure if the issue is with Docker or Nginx or ActiveStorage
Please help...
app.Dockerfile
FROM ruby:3.0.0
RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY . /app
COPY ./entrypoint.sh /app
RUN chmod +x ./entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
EXPOSE 3000
CMD ["bundle", "exec", "puma", "-C", "config/puma.rb", "-p", "3000"]
nginx.Dockerfile
FROM nginx
RUN apt-get update -qq && apt-get -y install apache2-utils
ENV RAILS_ROOT /app
WORKDIR $RAILS_ROOT
RUN mkdir log
COPY public public/
COPY ./nginx.conf /tmp/docker.nginx
RUN envsubst '$RAILS_ROOT' < /tmp/docker.nginx > /etc/nginx/conf.d/default.conf
EXPOSE 3000
CMD [ "nginx", "-g", "daemon off;" ]
nginx.conf
upstream app {
server 'app:3000';
}
server {
listen 3000;
server_name localhost;
keepalive_timeout 5;
root /app/public;
access_log /app/log/nginx.access.log;
error_log /app/log/nginx.error.log info;
if (-f $document_root/maintenance.html) {
rewrite ^(.*)$ /maintenance.html last;
break;
}
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
if (-f $request_filename) {
break;
}
if (-f $request_filename/index.html) {
rewrite (.*) $1/index.html break;
}
if (-f $request_filename.html) {
rewrite (.*) $1.html break;
}
if (!-f $request_filename) {
proxy_pass http://app;
break;
}
}
location ~ ^(?!/rails/).+\.(jpg|jpeg|gif|png|ico|json|txt|xml)$ {
gzip_static on;
expires max;
add_header Cache-Control public;
try_files $uri =404;
error_page 404 /404.html;
}
location = /500.html {
root /app/current/public;
}
}
docker-compose.yml
version: '3.9'
services:
db:
image: mariadb:10.3.29
restart: unless-stopped
volumes:
- .:/my_app
- db-volume:/var/lib/mysql
ports:
- '3306:3306'
environment:
MARIADB_DATABASE_NAME: database
MARIADB_ROOT_USERNAME: username
MARIADB_ROOT_PASSWORD: password
networks:
- network_name
app:
build:
context: .
dockerfile: app.Dockerfile
container_name: container_name
command: bash -c "bundle exec puma -C config/puma.rb -p 3000"
restart: unless-stopped
volumes:
- .:/my_app
- bundle-volume:/usr/local/bundle
depends_on:
- db
networks:
- network_name
nginx:
build:
context: .
dockerfile: nginx.Dockerfile
restart: unless-stopped
depends_on:
- app
ports:
- 3000:3000
networks:
- network_name
volumes:
db-volume:
bundle-volume:
networks:
network_name:
config/storage.yml
test:
service: Disk
root: <%= Rails.root.join("tmp/storage") %>
local:
service: Disk
root: <%= Rails.root.join("storage") %>
I believe there is a naming convention issue in your docker-compose.yml
services:
db:
image: mariadb:10.3.29
restart: unless-stopped
volumes:
- .:/my_app
- db-volume:/var/lib/mysql
In volumes, as you have given the folder name my_app so files are created for the service my_app service, not for app service.
So .:/my_app should be changed to your application service name i.e .:/app. Modify all the volumes and it should work.
Related
I couldn't find an answer to my question from other similar questions.
So, I have two docker containers:
Next.JS web-app
nginx reverse proxy
NextJS container without nginx reverse proxy worked as expected.
Even more, I can log in to the nginx container with docker exec -it nginx sh and with curl read whose static files on Next.JS container. I also see static files in the folder from a shared volume.
I run them with docker-compose:
volumes:
nextjs-build:
version: '3.9'
services:
nginx:
image: arm64v8/nginx:alpine
container_name: nginx
ports:
- "80:80"
- "443:443"
networks:
- blog
restart: unless-stopped
depends_on:
- website-front
volumes:
- type: volume
source: nextjs-build
target: /nextjs
read_only: true
- type: bind
source: /etc/ssl/private/blog-ssl
target: /etc/ssl/private/
read_only: true
- type: bind
source: ./nginx/includes
target: /etc/nginx/includes
read_only: true
- type: bind
source: ./nginx/conf.d
target: /etc/nginx/conf.d
read_only: true
- type: bind
source: ./nginx/dhparam.pem
target: /etc/nginx/dhparam.pem
read_only: true
- type: bind
source: ./nginx/nginx.conf
target: /etc/nginx/nginx.conf
read_only: true
website-front:
build: ./website
container_name: website-front
ports:
- "3000"
networks:
- blog
restart: unless-stopped
volumes:
- nextjs-build:/app/.next
networks:
blog:
external:
name: nat
my nginx configs:
upstream nextjs_upstream {
server website-front:3000;
}
server {
listen 443 http2 ssl;
listen [::]:443 http2 ssl;
server_name website_url;
ssl_certificate /etc/ssl/private/chain.crt;
ssl_certificate_key /etc/ssl/private/server.key;
ssl_trusted_certificate /etc/ssl/private/ca.ca-bundle;
# access_log /var/log/nginx/host.access.log main;
# security
include includes/security.conf;
include includes/general.conf;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
location /_next {
proxy_pass http://nextjs_upstream/_next/;
}
location / {
proxy_pass http://nextjs_upstream;
}
}
Tried multiple nginx configurations for static route:
localhost /_next {
root /nextjs;
}
NextJS dockerfile:
FROM node:alpine AS builder
# this ensures we fix simlinks for npx, Yarn, and PnPm
RUN apk add --no-cache libc6-compat
RUN corepack disable && corepack enable
WORKDIR /app
COPY ./ ./
RUN yarn install --frozen-lockfile
RUN yarn build
ENV NODE_ENV production
CMD chown -R node:node /app/.next
EXPOSE 3000
USER node
CMD [ "yarn", "start" ]
With that config I can see my website, but for static files I got 404 through upstream.
So, the problem was in a wrong path for mime.types file in nginx.conf and default location paths in includes/general.conf file.
After I changed it from: mime.types to /etc/nginx/mime.types it started working again.
I am trying to use nginx with docker-compose to route traffic for two different apps with different domain names. I want to be able to go to publisher.dev but I can only access that app from localhost:3000 (this is a react app) and I have another app which I want to access from widget.dev but I can only access from localhost:8080 (this is a Preact app). This is my folder structure and configs:
|-docker-compose.yml
|-nginx
|--default.conf
|--Dockerfile.dev
|-publisher
|--// react app
|--Dockerfile.dev
|-widget
|--// preact app (widget)
|--Dockerfile.dev
# default.conf
upstream publisher {
server localhost:3000;
}
upstream widget {
server localhost:8080;
}
server {
listen 80;
server_name publisher.dev;
location / {
proxy_pass http://publisher/;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
server {
listen 80;
server_name widget.dev;
location / {
proxy_pass http://widget/;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
nginx Dockerfile.dev
FROM nginx:stable-alpine
COPY ./default.conf /etc/nginx/conf.d/default.conf
publisher Dockerfile.dev (same as widget Dockerfile.dev)
# Specify the base image
FROM node:16-alpine
# Specify the working directory inside the container
WORKDIR /app
# copy the package json from your local hard drive to the container
COPY ./package.json ./
# install dependencies
RUN npm install
# copy files from local hard drive into container
# by copying the package.json and running npm install before copy files,
# this insures that a change to a file does not cause a re-run of npm-install
COPY ./ ./
# command to run when the container starts up
CMD ["npm", "run", "start"]
# build this docker container with:
# docker build -f Dockerfile.dev .
# run this container with:
# docker run <container id>
docker-compose.yml
version: '3'
services:
nginx:
build:
dockerfile: Dockerfile.dev
context: ./nginx
ports:
- 3050:80
restart: always
depends_on:
- publisher
- widget
publisher:
stdin_open: true
build:
dockerfile: Dockerfile.dev
context: ./publisher
volumes:
- /app/node_modules
- ./publisher:/app
ports:
- 3000:3000
environment:
VIRTUAL_HOST: publisher.dev
widget:
stdin_open: true
build:
dockerfile: Dockerfile.dev
context: ./widget
volumes:
- /app/node_modules
- ./widget:/app
ports:
- 8080:8080
environment:
VIRTUAL_HOST: widget.dev
hosts file
127.0.0.1 publisher.dev
127.0.0.1 widget.dev
why is your upstream trying to connect with
publisher and widget, shouldn't they connect to localhost:3000 and localhost:8080, let upstream server name be publisher and widget but connect them to localhost.
upstream publisher {
#server publisher:3000;
server localhost:3000;
}
I am trying to serve several Dash/Flask apps using docker-compose and nginx. Currently my set-up looks like this:
The Dash app is using host 0.0.0.0 and port 8050:
if __name__ == '__main__':
app.run_server(host='0.0.0.0',debug=True, port=8050)
In the Dockerfile of the app port 8050 is exposed:
FROM python:3.9
# Copy function code
COPY lp_scr_design_app.py /
COPY assets/ assets/
COPY data/ data/
# Install the function's dependencies using file requirements.txt
# from your project folder.
COPY requirements.txt ./
RUN pip install --trusted-host files.pythonhosted.org --trusted-host pypi.org --trusted-host pypi.python.org -r requirements.txt
EXPOSE 8050
# Start app
CMD ["python", "lp_scr_design_app.py"]
Then nginx is configured such that it passes this app through for the location /:
server {
listen 80;
server_name docker_flask_gunicorn_nginx;
location / {
proxy_pass http://lp_scr_design_app:8050;
# Do not change this
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /static {
rewrite ^/static(.*) /$1 break;
root /static;
}
}
with a Dockerfile like this:
FROM nginx:1.15.8
RUN rm /etc/nginx/nginx.conf
COPY nginx.conf /etc/nginx/
RUN rm /etc/nginx/conf.d/default.conf
COPY project.conf /etc/nginx/conf.d/
Finally in the docker-compose both apps are orchestrated like this:
version: '3'
services:
lp_scr_design_app:
container_name: lp_scr_design_app
restart: always
build: ./lp_scr_design_app
ports:
- "8050:8050"
command: gunicorn -w 1 -b :8050 app:server
nginx:
container_name: nginx
restart: always
build: ./nginx
ports:
- "80:80"
depends_on:
- lp_scr_design_app
I can now build and run docker-compose successfully without any issues. However if I try to open the root directory / in a browser I get (after a while) a 502 Bad Gateway from nginx.
Where did I go wrong with my set-up here?
I have a vue app running on the front-end with spring boot backend both on different containers.
I want to dockerize my vuejs app to pass environment variables from the docker-compose file to nginx.
My problem is that my nginx conf file is not picking up environment variables from docker-compose.
Docker Compose File
backend-service:
container_name: backend-service
image: backend-service-local
networks:
- app-network
ports:
- 8081:8080
restart: on-failure
depends_on:
postgresdb:
condition: service_healthy
vue-app:
container_name: vue-app
image: vue-app-local
networks:
- app-network
ports:
- 8080:80
environment:
VUE_APP_BASE_URL: http://backend-service:8080
restart: on-failure
depends_on:
backend-service:
condition: service_started
DOCKER FILE
# build stage
FROM node:lts-alpine as build-stage
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
RUN npm run build
# production stage
FROM nginx:stable-alpine as production-stage
COPY --from=build-stage /app/dist /usr/share/nginx/html
COPY nginx.conf /etc/nginx/nginx.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
NGINX CONF
# Run as a less privileged user for security reasons.
user nginx;
# #worker_threads to run;
# "auto" sets it to the #CPU_cores available in the system, and
# offers the best performance.
worker_processes auto;
events { worker_connections 1024; }
http {
sendfile on;
upstream docker-backend {
server ${VUE_APP_BASE_URL};
}
server {
# Hide nginx version information.
server_tokens off;
listen 80;
root /usr/share/nginx/html;
include /etc/nginx/mime.types;
location / {
try_files $uri $uri/ /index.html;
}
location /api/ {
proxy_pass http://docker-backend;
}
}
}
Please advise nginx docker image docs in the Using environment variables in nginx configuration section of the page.
The way the nginx docker image deals with environment variables is injecting them in runtime using the configs in the linked page
I have dockerizd django app with gunicorn and nginx. The app itself works at http://127.0.0.1:8000 but without static/media files, error:
172.24.0.1 - - [08/May/2019:13:25:50 +0000] "GET /static/js/master.js HTTP/1.1" 404 77 "http://127.0.0.1:8000/"
If I try to access files on port 80, they are served just fine.
Dockerfile:
FROM python:3.6-alpine
RUN apk --update add \
build-base \
postgresql \
postgresql-dev \
libpq \
# pillow dependencies
jpeg-dev \
zlib-dev
RUN mkdir /www
WORKDIR /www
COPY requirements.txt /www/
RUN pip install -r requirements.txt
ENV PYTHONUNBUFFERED 1
COPY . /www/
docker-compose.yml
version: "3"
services:
web:
build: .
restart: on-failure
volumes:
- .:/www
env_file:
- ./.env
command: >
sh -c "python manage.py collectstatic --noinput &&
gunicorn --bind 0.0.0.0:8000 portfolio.wsgi:application --access-logfile '-'"
expose:
- "8000"
ports:
- "8000:8000"
nginx:
image: "nginx"
restart: always
volumes:
- ./nginx/conf.d:/etc/nginx/conf.d
- ./static:/var/www/portfolio/static
- ./media:/var/www/portfolio/media
links:
- web
ports:
- "80:80"
nginx.conf
server {
listen 80;
server_name 127.0.0.1;
# serve static files
location /static/ {
root /var/www/portfolio;
}
# serve media files
location /media/ {
root /var/www/portfolio;
}
# pass requests for dynamic content to gunicorn
location / {
pproxy_pass http://web:8000;
proxy_set_header Host $server_name;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
What I want is static and media files to load with my web app on 127.0.0.1. It seems to me that there might be a problem with proxy_pass, but I can't figure it out.
Any ideas?
This seems to be the culprit: proxy_pass http://127.0.0.1:8000;
This line makes Nginx look for a service on port 8000 inside the Nginx container. localhost / 127.0.0.1 inside a container always means "the container itself" and not the Docker host.
You are running both services in the same Docker network, so this should work for you:
proxy_pass http://web:8000;
I see you are running two containers and nginx could not connect to python container as the ip address you gave is bound to inside the container. you might need to add extra_hosts: in docker-compose to nginx part at which it will be able to connect to other container.
If you set nice logging in nginx you will realize that it is not running on 127.0.0.1 since its a compose service. So you need to check out on which IP your compose network runs and that is where you find nginx.