I have set up a container registry on DigitalOcean to deploy my Django application.
I have installed doctl and set up the API credentials.
If I run the command:
doctl auth list
I can see my auth context listed as the current one, and it has Read and Write permissions.
But now, when I try to run:
docker-compose -f docker-compose.staging.yml push
I get this error:
ERROR: error parsing HTTP 400 response body: unexpected end of JSON input: ""
Here's my docker-compose.staging.yml file:
version: '3.8'
services:
  web:
    build:
      context: ./app
      dockerfile: Dockerfile.prod
    image: registry.digitalocean.com/pythonist-do:web
    command: gunicorn Pythonist.wsgi:application --bind 0.0.0.0:8000
    volumes:
      - static_volume:/home/app/web/staticfiles
      - media_volume:/home/app/web/mediafiles
    expose:
      - 8000
    env_file:
      - ./.env.staging
  nginx-proxy:
    container_name: nginx-proxy
    build: nginx
    image: registry.digitalocean.com/pythonist-do:nginx-proxy
    restart: always
    ports:
      - 443:443
      - 80:80
    volumes:
      - static_volume:/home/app/web/staticfiles
      - media_volume:/home/app/web/mediafiles
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - web
  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - ./.env.staging.proxy-companion
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - acme:/etc/acme.sh
    depends_on:
      - nginx-proxy
volumes:
  static_volume:
  media_volume:
  certs:
  html:
  vhost:
  acme:
You need to change the image tag.
Instead of
registry.digitalocean.com/registry-name:tag
use
registry.digitalocean.com/registry-name/image-name:tag
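Applied to the compose file above, that means giving each service its own image repository under the registry. A minimal sketch, assuming you keep "web" and "nginx-proxy" as the repository names and use a "latest" tag (both are assumptions, pick whatever names and tags fit your setup):
services:
  web:
    # hypothetical repository name "web" and tag "latest"
    image: registry.digitalocean.com/pythonist-do/web:latest
  nginx-proxy:
    # hypothetical repository name "nginx-proxy" and tag "latest"
    image: registry.digitalocean.com/pythonist-do/nginx-proxy:latest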
Related
I have a docker-compose.yml file which contains frontend, backend, testing, postgres, and pgadmin containers. All of the containers except testing are able to communicate with each other, but the testing container fails to communicate with the backend and frontend containers in docker-compose.
version: '3.7'
services:
  frontend:
    container_name: test-frontend
    build:
      context: ./frontend
      dockerfile: Dockerfile.local
    ports:
      - '3000:3000'
    networks:
      - test-network
    environment:
      # For the frontend these can only be applied during the build
      # (they are baked in when the TypeScript is compiled).
      # You have to rebuild manually without cache if any of them change, at least for prod mode.
      - REACT_APP_BACKEND_API=http://localhost:8000/api/v1
      - REACT_APP_GOOGLE_CLIENT_ID=1234567dfghjjnfd
      - CI=true
      - CHOKIDAR_USEPOLLING=true
  postgres:
    image: postgres
    environment:
      POSTGRES_USER: dev
      POSTGRES_PASSWORD: dev
      PGDATA: /data/postgres
    volumes:
      - postgres:/data/postgres
    ports:
      - "5432:5432"
    networks:
      - test-network
    restart: unless-stopped
  pgadmin:
    image: dpage/pgadmin4
    environment:
      PGADMIN_DEFAULT_EMAIL: "dev@dev.com"
      PGADMIN_DEFAULT_PASSWORD: dev
    volumes:
      - pgadmin:/root/.pgadmin
      - ./pgadmin-config/servers.json:/pgadmin4/servers.json
    ports:
      - "5050:80"
    networks:
      - test-network
    restart: unless-stopped
  backend:
    container_name: test-backend
    build:
      context: ./backend
      dockerfile: Dockerfile.local
    ports:
      - '8000:80'
    volumes:
      - ./backend:/app
    command: >
      bash -c "alembic upgrade head
      && exec /start-reload.sh"
    networks:
      - test-network
    depends_on:
      - postgres
    environment:
      - GOOGLE_APPLICATION_CREDENTIALS=/app/.secret/secret.json
      - APP_DB_CONNECTION_STRING=postgresql+psycopg2://dev:dev@postgres:5432/postgres
      - LOG_LEVEL=debug
      - SQLALCHEMY_ECHO=True
      - AUTH_ENABLED=True
      - CORS=*
      - GCP_ALLOWED_DOMAINS=*
  testing:
    container_name: test-testing
    build:
      context: ./testing
      dockerfile: Dockerfile
    volumes:
      - ./testing:/isp-app
    command: >
      bash -c "/wait
      && robot ."
    networks:
      - test-network
    depends_on:
      - backend
      - frontend
    environment:
      - WAIT_HOSTS= frontend:3000, backend:8000
      - WAIT_TIMEOUT= 3000
      - WAIT_SLEEP_INTERVAL=300
      - WAIT_HOST_CONNECT_TIMEOUT=300
volumes:
  postgres:
  pgadmin:
networks:
  test-network:
    driver: bridge
All the containers are attached to test-network. When the testing container tries to connect to frontend:3000 or backend:8000, it throws "Host [ backend:8000] not yet available".
How can I fix it?
This is how my docker-compose file looks:
version: '3.3'
services:
  frontend:
    ports:
      - 8000:8000
    build: ./frontend
  api:
    build: ./api
    ports:
      - 8080:8080
  reverse-proxy:
    image: nginx:1.21
    depends_on:
      - api
      - frontend
    volumes:
      - ./nginx-conf:/etc/nginx/conf.d
    ports:
      - 80:80
      - 443:443
If I run docker-compose build on the above file, I get the following error:
ERROR: compose.cli.main.main: yaml.scanner.ScannerError: while scanning for the next token
found character '\t' that cannot start any token
in "./docker-compose.yaml", line 3, column 1
I have tried changing the version as well as correcting the spaces/tabs, but nothing works.
The indentation of services and version should be at the same level, so your docker-compose file should look like this:
version: '3.3'
services:
  frontend:
    ports:
      - 8000:8000
    build: ./frontend
  api:
    build: ./api
    ports:
      - 8080:8080
  reverse-proxy:
    image: nginx:1.21
    depends_on:
      - api
      - frontend
    volumes:
      - ./nginx-conf:/etc/nginx/conf.d
    ports:
      - 80:80
      - 443:443
My docker-compose.yml looks like the one below. When I run docker-compose up, I get the following error.
ERROR: In file './docker-compose.yml', the service name True must be a quoted string, i.e. 'True'.
version: '3'
services:
  db:
    restart: always
    image: postgres:9.6-alpine
    container_name: pleroma_postgres
    networks:
      - pleroma
    volumes:
      - ./postgres:/var/lib/postgresql/data
  web:
    build: .
    image: pleroma
    container_name: pleroma_web
    restart: always
    environment:
      - VIRTUAL_HOST=<myplaceholderhost>
      - VIRTUAL_PORT=4000
      - LETSENCRYPT_HOST=<myplaceholderhost>
      - LETENCRYPT_EMAIL=<myplaceholderemail>
    expose:
      - "4000"
    volumes:
      - ./uploads:/pleroma/uploads
    depends_on:
      - db
  nginx:
    image: jwilder/nginx-proxy
    container_name: nginx
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - /apps/docker-articles/nginx/vhost.d:/etc/nginx/vhost.d
      - /apps/docker-articles/nginx/certs:/etc/nginx/certs:ro
      - /apps/docker-articles/nginx/html:/usr/share/nginx/html
    restart: always
    ports:
      - "80:80"
      - "443:443"
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    networks:
      - pleroma
  letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion:v1.5
    container_name: letsencrypt
    volumes_from:
      - nginx
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /apps/docker-articles/nginx/vhost.d:/etc/nginx/vhost.d
      - /apps/docker/articles/nginx/certs:/etc/nginx/certs:rw
      - /apps/docker-articles/nginx/html:/usr/share/nginx/html
networks:
  pleroma:
My Docker version is:
Docker version 18.06.1-ce, build e68fc7a
My docker-compose version is:
docker-compose version 1.23.1, build b02f1306
I'm running CoreOS version 1911.3.0.
I ended up resolving this issue by modifying the nginx and letsencrypt portions of my docker-compose.yml file as follows:
nginx:
  image: jwilder/nginx-proxy
  container_name: nginx
  volumes:
    - /var/run/docker.sock:/tmp/docker.sock:ro
    - /apps/docker-articles/nginx/vhost.d:/etc/nginx/vhost.d
    - /apps/docker-articles/nginx/certs:/etc/nginx/certs:ro
    - /apps/docker-articles/nginx/html:/usr/share/nginx/html
  restart: always
  ports:
    - "80:80"
    - "443:443"
  labels:
    - "NGINX_PROXY_CONTAINER=true"
  networks:
    - pleroma
letsencrypt:
  image: jrcs/letsencrypt-nginx-proxy-companion:v1.5
  container_name: letsencrypt
  environment:
    - NGINX_PROXY_CONTAINER=true
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock:ro
    - /apps/docker-articles/nginx/vhost.d:/etc/nginx/vhost.d
    - /apps/docker/articles/nginx/certs:/etc/nginx/certs:rw
    - /apps/docker-articles/nginx/html:/usr/share/nginx/html
It seems "volumes_from" is deprecated in docker-compose v3. As well as I had forgotted quotes around my label and needed to set my environment within letsencrypt.
In a CentOS environment, your .yml file directory must be /usr/local/bin.
I have found an awesome repo that uses docker-compose; however, I would like to use it with Docker Cloud Stacks (I am a newbie).
What would I have to do to swap it over?
docker-compose:
version: "2"
services:
mariadb:
image: wodby/mariadb:10.2-3.0.2
php:
# 1. Images with vanilla WordPress – wodby/wordpress:[WP_VERSION]-[PHP_VERSION]-[STABILITY_TAG].
image: wodby/wordpress:4-7.2-3.3.1
environment:
PHP_SENDMAIL_PATH: /usr/sbin/sendmail -t -i -S mailhog:1025
PHP_FPM_CLEAR_ENV: "no"
volumes:
- codebase:/var/www/html
nginx:
image: wodby/wordpress-nginx:4-1.13-3.0.2
environment:
NGINX_STATIC_CONTENT_OPEN_FILE_CACHE: "off"
NGINX_ERROR_LOG_LEVEL: debug
NGINX_BACKEND_HOST: php
volumes:
- codebase:/var/www/html
depends_on:
- php
labels:
- 'traefik.backend=nginx'
- 'traefik.port=80'
- 'traefik.frontend.rule=Host:wp.docker.localhost'
varnish:
image: wodby/wordpress-varnish:4.1-2.3.1
depends_on:
- nginx
environment:
VARNISH_SECRET: secret
VARNISH_BACKEND_HOST: nginx
VARNISH_BACKEND_PORT: 80
labels:
- 'traefik.backend=varnish'
- 'traefik.port=6081'
- 'traefik.frontend.rule=Host:varnish.wp.docker.localhost'
redis:
image: wodby/redis:4.0-2.1.4
blackfire:
image: blackfire/blackfire
environment:
BLACKFIRE_SERVER_ID:
BLACKFIRE_SERVER_TOKEN:
mailhog:
image: mailhog/mailhog
labels:
- 'traefik.backend=mailhog'
- 'traefik.port=8025'
- 'traefik.frontend.rule=Host:mailhog.wp.docker.localhost'
portainer:
image: portainer/portainer
command: --no-auth -H unix:///var/run/docker.sock
volumes:
- /var/run/docker.sock:/var/run/docker.sock
labels:
- 'traefik.backend=portainer'
- 'traefik.port=9000'
- 'traefik.frontend.rule=Host:portainer.wp.docker.localhost'
traefik:
image: traefik
command: -c /dev/null --web --docker --logLevel=INFO
ports:
- '8000:80'
# - '8080:8080' # Dashboard
volumes:
- /var/run/docker.sock:/var/run/docker.sock
volumes:
codebase:
Docker is running and I want to run a container on Windows 10. When I run docker-compose from Windows PowerShell, some of the download jobs complete, then an error occurs and the container cannot run. It seems that the jupyter service fails to build or cannot open a directory. Could anyone help me with this problem? The command and the error are as follows:
PS C:\Users\mmva> cd C:\Users\mmva\Documents\GitHub\CerebralCortex-DockerCompose
PS C:\Users\mmva\Documents\GitHub\CerebralCortex-DockerCompose> docker-compose up
Building jupyter
Step 1/19 : FROM jupyter/jupyterhub
latest: Pulling from jupyter/jupyterhub
efd26ecc9548: Extracting [==================================================>] 51.34MB/51.34MB
a3ed95caeb02: Download complete
298ffe4c3e52: Download complete
758b472747c8: Download complete
8b9809a68afc: Download complete
93b253b5483d: Download complete
ef8136abb53c: Download complete
ERROR: Service 'jupyter' failed to build: failed to register layer: re-exec error: exit status 1: output: Failed to OpenForBackup failed in Win32: open \\?\C:\ProgramData\Docker\windowsfilter\eb9ac9d604f051d5490a876043809e7929197356387569bc50a3694b77d1b721\usr\share\man\man3\Locale::gettext.3pm.gz: The filename, directory name, or volume label syntax is incorrect. (0x1f) \\?\C:\ProgramData\Docker\windowsfilter\eb9ac9d604f051d5490a876043809e7929197356387569bc50a3694b77d1b721\usr\share\man\man3\Locale::gettext.3pm.gz
My Docker version is 17.09.0-ce-win33 (13620).
I think the docker-compose file version is 3.
The content of the docker-compose file:
version: '3'
# IPTABLES RULES IF NECESSARY
#-A INPUT -i br+ -j ACCEPT
#-A INPUT -i docker0 -j ACCEPT
#-A OUTPUT -o br+ -j ACCEPT
#-A OUTPUT -o docker0 -j ACCEPT
# The .env file is for production use with server-specific configurations
services:
  # Frontend web proxy for accessing services and providing TLS encryption
  nginx:
    build: ./nginx
    container_name: md2k-nginx
    restart: always
    volumes:
      - ./nginx/site:/var/www
      - ./nginx/nginx-selfsigned.crt:/etc/ssh/certs/ssl-cert.crt
      - ./nginx/nginx-selfsigned.key:/etc/ssh/certs/ssl-cert.key
    ports:
      - "443:443"
      - "80:80"
    links:
      - apiserver
      - grafana
      - jupyter
  apiserver:
    build: ../CerebralCortex-APIServer
    container_name: md2k-api-server
    restart: always
    expose:
      - 80
    links:
      - mysql
      - kafka
      - minio
    depends_on:
      - mysql
    environment:
      - MINIO_HOST=${MINIO_HOST:-minio}
      - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-ZngmrLWgbSfZUvgocyeH}
      - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-IwUnI5w0f5Hf1v2qVwcr}
      - MYSQL_HOST=${MYSQL:-mysql}
      - MYSQL_DB_USER=${MYSQL_ROOT_USER:-root}
      - MYSQL_DB_PASS=${MYSQL_ROOT_PASSWORD:-random_root_password}
      - KAFKA_HOST=${KAFKA_HOST:-kafka}
      - JWT_SECRET_KEY=${MINIO_SECRET_KEY:-IwUnI5w0f5Hf1v2qVwcr}
      - FLASK_HOST=${FLASK_HOST:-0.0.0.0}
      - FLASK_PORT=${FLASK_PORT:-80}
      - FLASK_DEBUG=${FLASK_DEBUG:-False}
    volumes:
      - ./data:/data
  # Data vizualizations
  grafana:
    image: "grafana/grafana"
    container_name: md2k-grafana
    restart: always
    ports:
      - "3000:3000"
    links:
      - influxdb
    environment:
      - GF_SERVER_ROOT_URL=%(protocol)s://%(domain)s:%(http_port)s/grafana/
      # - GF_INSTALL_PLUGINS=raintank-worldping-app,grafana-clock-panel,grafana-simple-json-datasource
    volumes:
      - timeseries-storage:/var/lib/grafana
      # - timeseries-storage:/etc/grafana
  influxdb:
    image: "influxdb:alpine"
    container_name: md2k-influxdb
    restart: always
    ports:
      - "8086:8086"
    volumes:
      - timeseries-storage:/var/lib/influxdb
  # Data Science Dashboard Interface
  jupyter:
    build: ./jupyterhub
    container_name: md2k-jupyterhub
    ports:
      - 8000
    restart: always
    network_mode: "host"
    pid: "host"
    environment:
      TINI_SUBREAPER: 'true'
    volumes:
      - ./jupyterhub/conf:/srv/jupyterhub/conf
    command: jupyterhub --no-ssl --config /srv/jupyterhub/conf/jupyterhub_config.py
  # Cerebral Cortex backend
  kafka:
    image: wurstmeister/kafka:0.10.2.0
    container_name: md2k-kafka
    restart: always
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: ${MACHINE_IP:-10.0.0.1}
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_MESSAGE_MAX_BYTES: 2000000
      KAFKA_CREATE_TOPICS: "filequeue:4:1,processed_stream:16:1"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - data-storage:/kafka
    depends_on:
      - zookeeper
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: md2k-zookeeper
    restart: always
    ports:
      - "2181:2181"
  mysql:
    image: "mysql:5.7"
    container_name: md2k-mysql
    restart: always
    ports:
      - 3306:3306 # Default mysql port
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-random_root_password}
      - MYSQL_DATABASE=${MYSQL_DATABASE:-cerebralcortex}
      - MYSQL_USER=${MYSQL_USER:-cerebralcortex}
      - MYSQL_PASSWORD=${MYSQL_PASSWORD:-cerebralcortex_pass}
    volumes:
      - ./mysql/initdb.d:/docker-entrypoint-initdb.d
      - metadata-storage:/var/lib/mysql
  minio:
    image: "minio/minio"
    container_name: md2k-minio
    restart: always
    ports:
      - 9000:9000 # Default minio port
    environment:
      - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-ZngmrLWgbSfZUvgocyeH}
      - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-IwUnI5w0f5Hf1v2qVwcr}
    command: server /export
    volumes:
      - object-storage:/export
  cassandra:
    build: ./cassandra
    container_name: md2k-cassandra
    restart: always
    ports:
      - 9160:9160 # Thrift client API
      - 9042:9042 # CQL native transport
    environment:
      - CASSANDRA_CLUSTER_NAME=cerebralcortex
    volumes:
      - data-storage:/var/lib/cassandra
volumes:
  object-storage:
  metadata-storage:
  data-storage:
  temp-storage:
  timeseries-storage:
  user-storage:
  log-storage: