Here is my docker-compose file
It does not even run the wait_for_db command.
I tried putting the commands in a bash script, but that didn't work either.
Could somebody help me write these commands?
I want to run manage.py commands and also
run celery and celery beat.
version: "3.7"
services:
web:
build: .
command: >
sh -c "
python app/manage.py wait_for_db &&
python app/manage.py makemigrations &&
python app/manage.py makemigrations csvreader &&
python app/manage.py migrate &&
python app/manage.py wait_for_migrate &&
      python app/manage.py create_admin --username admin --password admin --noinput --email admin@admin.com &&
python app/manage.py runserver 0.0.0.0:8000 &
celery -A app --workdir app worker --loglevel=info &
celery -A app --workdir app beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler "
volumes:
- .:/djangoapp
ports:
- "8000:8000"
- "23:22"
depends_on:
- db
- broker
environment:
- DB_HOST=db
- DB_PORT=5432
- DB_NAME=mycsv
- DB_USER=postgres
- DB_PASSWORD=password
      - CELERY_BROKER=amqp://admin:password@broker:5672//
restart: on-failure
db:
image: postgres:13.3-alpine
environment:
- POSTGRES_DB=mycsv
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=password
ports:
- "5432:5432"
broker:
image: rabbitmq
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=password
And here is my Dockerfile
FROM python:3.8
ENV PYTHONUNBUFFERED=1
WORKDIR /djangoapp
RUN apt-get update
RUN apt-get install -y python3-dev build-essential
COPY requirements.txt requirements.txt
RUN pip install -U pip setuptools wheel
RUN pip install -r requirements.txt
EXPOSE 8000
EXPOSE 22
COPY . /djangoapp
It seems that none of the commands are being run
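For what it's worth, in a single sh -c string like the one above every && step runs only if the previous one succeeded, and the & after runserver backgrounds it, so if any early command fails the rest are silently skipped and only the last celery invocation stays in the foreground. A common alternative is to give each long-running process its own compose service. A minimal sketch under services:, reusing the commands from the question (illustrative, not a verified drop-in fix):

web:
  build: .
  command: >
    sh -c "python app/manage.py wait_for_db &&
           python app/manage.py migrate &&
           python app/manage.py runserver 0.0.0.0:8000"
worker:
  build: .
  command: celery -A app --workdir app worker --loglevel=info
  depends_on:
    - web
beat:
  build: .
  command: celery -A app --workdir app beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
  depends_on:
    - web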
This error message appears for 3 images in my composed Docker container:
exec /usr/bin/entrypoint.sh: no such file or directory
All affected images are Ruby services:
the Sidekiq, Webpack (run by the Ruby executable), and Web (Rails) services.
I have tried changing every command to load the Gemfile environment using bundle exec, but nothing worked.
Dockerfile
FROM ruby:2.6.6
RUN apt-get update -qq \
&& apt-get install -y curl build-essential libpq-dev postgresql \
nodejs postgresql-client &&\
curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list && \
apt-get update && apt-get install -y nodejs yarn
ADD . /app
WORKDIR /app
RUN gem install bundler:2.3.22
RUN bundle install
RUN yarn install --check-files
RUN gem install foreman
COPY entrypoint.sh /usr/bin/
RUN chmod +x /usr/bin/entrypoint.sh
ENTRYPOINT ["entrypoint.sh"]
EXPOSE 80
CMD ["bash"]
docker-compose.yml
version: '3.3'
services:
db:
image: postgres
ports:
- 5423:5432
volumes:
- ./tmp/db:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: *****
redis:
image: redis
ports:
- "6379:6379"
volumes:
- 'redis:/data'
depends_on:
- db
webpack:
build: .
command: sh -c 'rm -rf public/packs/* || true && bin/webpack-dev-server --host 0.0.0.0 --port 3035 -w'
volumes:
- .:/app
- /app/node_modules
ports:
- "3035:3035"
depends_on:
- db
web:
build: .
command: bash -c "rm -f tmp/pids/server.pid && rails s -b 0.0.0.0 -p 80"
volumes:
- .:/app
ports:
- "80:80"
depends_on:
- db
- redis
- webpack
- chrome
env_file: .env_docker
environment:
RAILS_ENV: development
RAILS_MAX_THREADS: 5
sidekiq:
build: .
command: bundle exec sidekiq -C config/sidekiq.yml
volumes:
- .:/app
depends_on:
- db
- redis
env_file: .env_docker
environment:
RAILS_MAX_THREADS: 5
chrome:
image: selenium/standalone-chrome
ports:
- "4444:4444"
volumes:
- /dev/shm:/dev/shm
depends_on:
- db
- redis
- webpack
- sidekiq
volumes:
redis:
postgres:
This looks the same as the question entrypoint.sh exec: #: not found, but that did not resolve it.
I really want to switch my development OS from Debian to Windows and work only with containers; I am not looking for Linux or WSL alternatives.
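For what it's worth, exec /usr/bin/entrypoint.sh: no such file or directory usually means the kernel could not resolve the script's interpreter rather than the file itself being absent; a script saved with Windows (CRLF) line endings has an invisible \r after the shebang, which produces exactly this error. A hedged check and workaround in the Dockerfile (the sed call is illustrative; dos2unix or a .gitattributes rule would work equally well):

COPY entrypoint.sh /usr/bin/
RUN sed -i 's/\r$//' /usr/bin/entrypoint.sh \
 && chmod +x /usr/bin/entrypoint.sh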
I have a Django API that is completely dockerized, and it works locally as well as in my Heroku deployment for production. However, when I try to connect the Git repo to Portainer, it pulls it successfully but does not publish ports for all the images: it only gives the port for the pgadmin image, not for the database, the redis image, the nginx, or the Django web service itself. These are things I need to get the whole thing working. I'm not sure what's wrong or what to do about it.
This is my docker-compose.yml file:-
version: "3.9"
services:
nginx:
build: ./nginx
ports:
- 8001:80
volumes:
- static-data:/vol/static
depends_on:
- web
restart: "on-failure"
redis:
image: redis:latest
ports:
- 6379:6379
volumes:
- ./config/redis.conf:/redis.conf
command: ["redis-server", "/redis.conf"]
restart: "on-failure"
db:
image: postgres:13
volumes:
- ./data/db:/var/lib/postgresql/data
env_file:
- database.env
restart: always
web:
build: .
command: bash -c "python manage.py makemigrations && python manage.py migrate && python manage.py runserver 0.0.0.0:8101"
container_name: vidhya_io_api
volumes:
- .:/shuddhi
ports:
- 8101:8101
depends_on:
- db
- redis
restart: "on-failure"
volumes:
  database-data: # named volumes can be managed more easily using docker-compose
static-data:
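(One aside on this file: the named volume database-data declared at the bottom is never referenced by any service; the db service mounts the bind path ./data/db instead, so the comment about named volumes does not actually apply here.)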
This is the Dockerfile:-
FROM python:3.8.3
LABEL maintainer="https://github.com/ryarasi"
# ENV MICRO_SERVICE=/app
# RUN addgroup -S $APP_USER && adduser -S $APP_USER -G $APP_USER
# set work directory
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
COPY ./requirements.txt /requirements.txt
# create root directory for our project in the container
RUN mkdir /shuddhi
# COPY ./scripts /scripts
WORKDIR /shuddhi
# Copy the current directory contents into the container at /shuddhi
ADD . /shuddhi/
# Install any needed packages specified in requirements.txt
# This is to create the collectstatic folder for whitenoise
RUN pip install --upgrade pip && \
pip install --no-cache-dir -r /requirements.txt && \
mkdir -p /vol/web/static && \
mkdir -p /vol/web/media
# ENV PATH="/scripts:$PATH"
# CMD ["run.sh"]
CMD python manage.py wait_for_db && python manage.py collectstatic --noinput && python manage.py migrate && gunicorn shuddhi.wsgi:application --bind 0.0.0.0:8101
After I set up the stack and run it, I can see that the published ports are missing for all the images other than the Redis image.
I have no idea why this is happening.
What should I do to get it all published and working?
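One thing worth checking, offered as a guess rather than a confirmed fix: Portainer's published-ports column only shows host mappings declared under ports:, and in the file above the db service declares none, so it can never show one. If the database should be reachable from the host, a minimal addition would look like this (5432:5432 is the conventional mapping, adjust as needed):

db:
  image: postgres:13
  ports:
    - 5432:5432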
I have a development environment where I run npm run serve in my local terminal and then docker-compose up -d in a different terminal to run the services I need to start my system.
I have an instance where I am attempting to run front-end tests inside of a running container using NightwatchJS, and for some reason the test runner is not accessing the files loaded from npm run serve. Quite literally, when I print out a screenshot from the test runner, the page looks as if I had canceled npm run serve; however, when I go to 127.0.0.1 in my browser, everything loads as usual.
I think my issue is that the test is being run inside of a docker container like so:
docker-compose exec web bash -c "npx nightwatch ...file"
where that specific instance is not running npm run serve, but I am confused as to why it works when I hit the page in the browser myself. I have tried exposing ports in the Dockerfile, but that does not work.
Can anybody point me in the right direction?
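For context on the symptom: inside a container, 127.0.0.1 refers to the container itself, not to the host machine where npm run serve is listening, so a test run via docker-compose exec web cannot reach the dev server on the container's loopback even though a browser on the host can. A hedged sketch of one way to point Nightwatch at the host instead (host.docker.internal is Docker Desktop's alias for the host; the port 8080 and the config layout are assumptions, not taken from the question):

// nightwatch.conf.js (illustrative)
module.exports = {
  test_settings: {
    default: {
      // reach the host-side dev server instead of the container's own loopback
      launch_url: 'http://host.docker.internal:8080',
    },
  },
};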
Here is my Dockerfile:
FROM python:3.8.5-slim-buster
# the first 2 prevent Python from writing out pyc files or from buffering stdin/stdout
# the others are Node
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV NVM_DIR /usr/local/nvm
ENV NODE_VERSION 12.7.0
ENV NODE_PATH $NVM_DIR/v$NODE_VERSION/lib/node_modules
ENV PATH $NVM_DIR/versions/node/v$NODE_VERSION/bin:$PATH
# the man1 directory is not present for slim-buster so we add that and then install all of the default system based dependencies
# NOTE...TOP LAYERS ARE CACHED FIRST!!!!
RUN mkdir -p /usr/share/man/man1 \
&& apt-get clean && apt-get update -y && apt-get install pdftk-java curl git -y \
&& curl --silent -o- https://raw.githubusercontent.com/creationix/nvm/v0.31.2/install.sh | bash \
&& apt-get install zlib1g-dev libjpeg-dev python3-pythonmagick inkscape xvfb poppler-utils libfile-mimeinfo-perl qpdf libimage-exiftool-perl ufraw-batch ffmpeg gcc procps -y \
&& apt-get clean && apt-get autoclean
# SELENIUM
# get wget...
# Adding trusting keys to apt for repositories
RUN apt-get install gnupg -y && apt-get install wget -y \
&& wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
&& sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list' \
&& apt-get update -y \
&& apt-get install google-chrome-stable -y \
&& apt-get install unzip -yqq
# Set up Chromedriver Env Vars
ENV CHROMEDRIVER_VERSION 87.0.4280.20
ENV CHROMEDRIVER_DIR /chromedriver
# make directory for it...
RUN mkdir $CHROMEDRIVER_DIR
# Download and install Chromedriver
RUN wget -q --continue -P $CHROMEDRIVER_DIR "http://chromedriver.storage.googleapis.com/$CHROMEDRIVER_VERSION/chromedriver_linux64.zip" \
&& unzip $CHROMEDRIVER_DIR/chromedriver* -d $CHROMEDRIVER_DIR \
&& rm "$CHROMEDRIVER_DIR/chromedriver_linux64.zip"
# Put Chromedriver into the PATH
ENV PATH $CHROMEDRIVER_DIR:$PATH
# Set display port as an environment variable
ENV DISPLAY=:99
# SELENIUM
## NIGHTMARE
#RUN apt-get install wget -y && wget http://selenium-release.storage.googleapis.com/2.44/selenium-server-standalone-2.44.0.jar -P /bin/
#RUN apt install default-jre -y
#RUN apt-get install -y xvfb x11-xkb-utils xfonts-100dpi xfonts-75dpi xfonts-scalable xfonts-cyrillic x11-apps clang libdbus-1-dev libgtk2.0-dev libnotify-dev libgconf2-dev libasound2-dev libcap-dev libcups2-dev libxtst-dev libxss1 libnss3-dev gcc-multilib g++-multilib
# ensure node is installed, and at the end, make the working directory
RUN . $NVM_DIR/nvm.sh \
&& nvm install $NODE_VERSION \
&& nvm alias default $NODE_VERSION \
&& nvm use default \
&& mkdir /code
# set working directory to /code...it was just made for this purpose
WORKDIR /code
# possible that these will cache so separate them from COPY . /code/
COPY requirements.txt /code/
# now install, this will normally also cache
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
# place this at the end because the code will always change...this will almost never cache...
COPY . /code/
EXPOSE 8001
EXPOSE 8888
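(Note on the last two lines: EXPOSE is documentation only; it does not publish a port. A port is only reachable from the host through a ports: mapping in the compose file or -p on docker run, which is likely why adding EXPOSE lines alone had no visible effect.)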
Here is my compose file:
version: '3.4'
services:
redis:
image: redis
ports:
- "6379"
restart: unless-stopped
networks:
main:
aliases:
- redis
postgres:
image: postgres:12
ports:
- "5432:5432"
env_file: ./.env
restart: unless-stopped
volumes:
- pgdata:/var/lib/postgresql/data
networks:
main:
aliases:
- postgres
#access by going to localhost:16543
  #when adding a server to the server list
#the hostname is postgres
#the username is postgres
#the password is postgres
pgadmin:
image: dpage/pgadmin4
links:
- postgres
depends_on:
- postgres
env_file: ./.env
restart: unless-stopped
ports:
- "16543:80"
networks:
main:
aliases:
- pgadmin
celery:
build:
network: host
context: .
dockerfile: Dockerfile-dev # use docker-dev because production npm installs and npm builds
command: python manage.py celery
env_file: ./.env
restart: unless-stopped
volumes:
- .:/code
- tmp:/tmp
links:
- redis
depends_on:
- redis
networks:
main:
aliases:
- celery
web:
build:
network: host
context: .
dockerfile: Dockerfile-dev
command: python manage.py runserver 0.0.0.0:8000
volumes:
- .:/code
- tmp:/tmp
ports:
- "8000:8000"
env_file: ./.env
restart: unless-stopped
links:
- postgres
- redis
- celery
- pgadmin
depends_on:
- postgres
- redis
- celery
- pgadmin
networks:
main:
aliases:
- web
volumes:
pgdata:
tmp:
networks:
main:
I am using docker and pipenv for my virtual environment and I am getting the following error when I run docker-compose up:
ModuleNotFoundError: No module named 'rest_auth'
I tried pip install django-rest-auth and pipenv install django-rest-auth and also added the following to my INSTALLED_APPS
# Django REST Framework Apps
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# Other Package Apps
"storages",
# Django REST Framework Apps
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
# Internal Apps
"authentication",
]
Expected: run the Docker container and access the backend on localhost:8000
Actual: docker-compose up > ModuleNotFoundError: No module named 'rest_auth'
Dockerfile:
FROM python:3.7
ENV PYTHONUNBUFFERED 1
RUN apt-get update -y && \
apt-get install -y postgresql postgresql-contrib && \
apt-get clean
RUN mkdir /code
WORKDIR /code
ADD . /code/
RUN pip install pipenv
RUN pipenv install --system
ENTRYPOINT ["./docker-entrypoint.sh"]
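(A side note on layer caching in this Dockerfile: because ADD . /code/ comes before pipenv install --system, any source change invalidates the install layer and forces a full dependency reinstall on every build. A common reordering, sketched under the assumption that Pipfile and Pipfile.lock sit at the build-context root:

COPY Pipfile Pipfile.lock /code/
RUN pip install pipenv && pipenv install --system
ADD . /code/
)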
docker-entrypoint.sh:
#!/bin/bash
case "$1" in
web_app)
    until psql postgres://postgres:$POSTGRES_PASSWORD@db -c '\q'; do
>&2 echo "Postgres is unavailable - sleeping"
sleep 1
done
>&2 echo "Postgres is up!"
case "$2" in
migrate)
python manage.py migrate
;;
static)
python manage.py collectstatic --clear --noinput
python manage.py collectstatic --noinput
;;
migrate_and_static)
python manage.py migrate
python manage.py collectstatic --clear --noinput
python manage.py collectstatic --noinput
;;
esac
case "$3" in
prod)
echo "Starting Gunicorn."
exec gunicorn service_health.wsgi:application \
--bind 0.0.0.0:8000 \
--workers 3 \
--access-logfile '-'
;;
local)
pipenv install --system
echo "Starting local server"
python manage.py runserver 0.0.0.0:8000
;;
esac
;;
esac
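(For reference, the api service's command: web_app migrate local in the compose file below maps onto $1, $2 and $3 in this script: wait for Postgres, run migrate, then start the development server.)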
docker-compose.yml:
version: '3'
services:
postgres:
image: postgres:11.1
environment:
- POSTGRES_PASSWORD=password
ports:
- "5432:5432"
api:
build:
context: backend
environment:
- POSTGRES_PASSWORD=password
volumes:
- $PWD/backend:/code
ports:
- 8000:8000
links:
- postgres:db
command: web_app migrate local
frontend:
build:
context: frontend
volumes:
- $PWD/frontend:/code
environment:
- NODE_ENV=development
ports:
- 3000:3000
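One likely explanation, offered as a guess: pipenv install --system runs at image build time, so a package added to the Pipfile after the image was built will not be present until the image is rebuilt; docker-compose up by itself reuses the cached image. A minimal check:

docker-compose build api
docker-compose up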
I had the same error and fixed it like this:
First I ran:
pip uninstall django-rest-auth
then:
pip3 install django-rest-auth
Hope this works.
I am trying to set up Docker and GeoDjango. Upon docker-compose up I get the following error:
django.core.exceptions.ImproperlyConfigured: Could not find the GDAL library (tried "gdal", "GDAL", "gdal2.2.0", "gdal2.1.0", "gdal2.0.0", "gdal1.11.0", "gdal1.10.0", "gdal1.9.0"). Is GDAL installed? If it is, try setting GDAL_LIBRARY_PATH in your settings.
GDAL is a library that can be found in the wooyek/geodjango image.
Dockerfile
FROM wooyek/geodjango
ENV PYTHONUNBUFFERED 1
RUN mkdir /code
WORKDIR /code
ADD requirements.txt /code/
RUN pip install -r requirements.txt
ADD . /code/
docker-compose
services:
web:
build: .
container_name: web
command: python3 manage.py runserver 0.0.0.0:8000
volumes:
- .:/code
ports:
- "8000:8000"
depends_on:
- db
db:
image: mdillon/postgis
#command: -e POSTGRES_USER=johndoe -e POSTGRES_PASSWORD=myfakedata -e POSTGRES_DB=myfakedata library/postgres
environment:
- POSTGRES_USER=johndoe
- POSTGRES_PASSWORD=myfakedata
- POSTGRES_DB=myfakedata
ports:
- "5435:5432"
adminer:
image: adminer
restart: always
ports:
- 8080:8080
Try adding the following in your Dockerfile:
RUN apt-get update &&\
apt-get install -y binutils libproj-dev gdal-bin
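(For what it's worth, binutils, libproj-dev and gdal-bin are the same packages the GeoDjango installation docs list for Debian/Ubuntu.)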
You can add the following to your Dockerfile:
# Install GDAL dependencies
RUN apt-get install -y libgdal-dev g++ --no-install-recommends && \
apt-get clean -y
# Update C env vars so compiler can find gdal
ENV CPLUS_INCLUDE_PATH=/usr/include/gdal
ENV C_INCLUDE_PATH=/usr/include/gdal
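If the library still cannot be found after installing the packages above, the error message's own suggestion also works: point Django at the shared object explicitly. A hedged example (the exact path varies by distribution; verify it with something like find /usr -name "libgdal.so*" before hardcoding it):

# settings.py
GDAL_LIBRARY_PATH = '/usr/lib/x86_64-linux-gnu/libgdal.so'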