Docker - Slow Network Conditions - docker

I have a docker-compose setup with several services, like so:
version: '3.6'
services:
web:
build:
context: ./services/web
dockerfile: Dockerfile-dev
volumes:
- './services/web:/usr/src/app'
ports:
- 5001:5000
environment:
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web-db
- redis
web-db:
build:
context: ./services/web/project/db
dockerfile: Dockerfile
ports:
- 5435:5432
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
nginx:
build:
context: ./services/nginx
dockerfile: Dockerfile-dev
restart: always
ports:
- 80:80
depends_on:
- web
- client
#- redis
client:
build:
context: ./services/client
dockerfile: Dockerfile-dev
volumes:
- './services/client:/usr/src/app'
- '/usr/src/app/node_modules'
ports:
- 3000:3000
environment:
- NODE_ENV=development
- REACT_APP_WEB_SERVICE_URL=${REACT_APP_WEB_SERVICE_URL}
depends_on:
- web
swagger:
build:
context: ./services/swagger
dockerfile: Dockerfile-dev
volumes:
- './services/swagger/swagger.json:/usr/share/nginx/html/swagger.json'
ports:
- 3008:8080
environment:
- URL=swagger.json
depends_on:
- web
scrapyrt:
image: vimagick/scrapyd:py3
restart: always
ports:
- '9080:9080'
volumes:
- ./services/web:/usr/src/app
working_dir: /usr/src/app/project/api
entrypoint: /usr/src/app/entrypoint-scrapyrt.sh
depends_on:
- web
redis:
image: redis:5.0.3-alpine
restart: always
expose:
- '6379'
ports:
- '6379:6379'
monitor:
image: dev3_web
ports:
- 5555:5555
command: flower -A celery_worker.celery --port=5555 --broker=redis://redis:6379/0
depends_on:
- web
- redis
worker-analysis:
image: dev3_web
restart: always
volumes:
- ./services/web:/usr/src/app
- ./services/web/celery_logs:/usr/src/app/celery_logs
command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/worker_analysis.log -Q analysis
environment:
- CELERY_BROKER=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web
- redis
- web-db
links:
- redis:redis
- web-db:web-db
worker-scraping:
image: dev3_web
restart: always
volumes:
- ./services/web:/usr/src/app
- ./services/web/celery_logs:/usr/src/app/celery_logs
command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/worker_scraping.log -Q scraping
environment:
- CELERY_BROKER=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web
- redis
- web-db
links:
- redis:redis
- web-db:web-db
worker-emailing:
image: dev3_web
restart: always
volumes:
- ./services/web:/usr/src/app
- ./services/web/celery_logs:/usr/src/app/celery_logs
command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/worker_emailing.log -Q email
environment:
- CELERY_BROKER=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web
- redis
- web-db
links:
- redis:redis
- web-db:web-db
worker-learning:
image: dev3_web
restart: always
volumes:
- ./services/web:/usr/src/app
- ./services/web/celery_logs:/usr/src/app/celery_logs
command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/worker_ml.log -Q machine_learning
environment:
- CELERY_BROKER=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web
- redis
- web-db
links:
- redis:redis
- web-db:web-db
worker-periodic:
image: dev3_web
restart: always
volumes:
- ./services/web:/usr/src/app
- ./services/web/celery_logs:/usr/src/app/celery_logs
command: celery beat -A celery_worker.celery --schedule=/tmp/celerybeat-schedule --loglevel=DEBUG --pidfile=/tmp/celerybeat.pid
environment:
- CELERY_BROKER=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- FLASK_ENV=development
- APP_SETTINGS=project.config.DevelopmentConfig
- DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
- DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
- SECRET_KEY=my_precious
depends_on:
- web
- redis
- web-db
links:
- redis:redis
- web-db:web-db
docker-compose -f docker-compose-dev.yml up -d and docker ps give me:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
396d7a1a5443 dev3_nginx "nginx -g 'daemon of…" 23 hours ago Up 18 minutes 0.0.0.0:80->80/tcp dev3_nginx_1
8ec7a51e2c2a dev3_web "celery worker -A ce…" 24 hours ago Up 19 minutes dev3_worker-analysis_1
e591e6445c64 dev3_web "celery worker -A ce…" 24 hours ago Up 19 minutes dev3_worker-learning_1
4d1fd17be3cb dev3_web "celery worker -A ce…" 24 hours ago Up 19 minutes dev3_worker-scraping_1
d25c40060fed dev3_web "celery beat -A cele…" 24 hours ago Up 17 seconds dev3_worker-periodic_1
76df1a600afa dev3_web "celery worker -A ce…" 24 hours ago Up 18 minutes dev3_worker-emailing_1
3442b0ce5d56 vimagick/scrapyd:py3 "/usr/src/app/entryp…" 24 hours ago Up 20 minutes 6800/tcp, 0.0.0.0:9080->9080/tcp dev3_scrapyrt_1
81d3ccea4de4 dev3_client "npm start" 24 hours ago Up 19 minutes 0.0.0.0:3000->3000/tcp dev3_client_1
aff5ecf951d2 dev3_web "flower -A celery_wo…" 24 hours ago Up 10 seconds 0.0.0.0:5555->5555/tcp dev3_monitor_1
864f17f39d54 dev3_swagger "/start.sh" 24 hours ago Up 19 minutes 80/tcp, 0.0.0.0:3008->8080/tcp dev3_swagger_1
e69476843236 dev3_web "/usr/src/app/entryp…" 24 hours ago Up 19 minutes 0.0.0.0:5001->5000/tcp dev3_web_1
22fd91b1ab6e redis:5.0.3-alpine "docker-entrypoint.s…" 24 hours ago Up 20 minutes 0.0.0.0:6379->6379/tcp dev3_redis_1
3a0b2115dd8e dev3_web-db "docker-entrypoint.s…" 24 hours ago Up 19 minutes 0.0.0.0:5435->5432/tcp dev3_web-db_1
They are all up, but I'm facing exceedingly slow network conditions, with a lot of instability. I have tried to check connectivity between containers and catch some eventual lag, like so:
docker container exec -it e69476843236 ping aff5ecf951d2
PING aff5ecf951d2 (172.18.0.13): 56 data bytes
64 bytes from 172.18.0.13: seq=0 ttl=64 time=0.504 ms
64 bytes from 172.18.0.13: seq=1 ttl=64 time=0.254 ms
64 bytes from 172.18.0.13: seq=2 ttl=64 time=0.191 ms
64 bytes from 172.18.0.13: seq=3 ttl=64 time=0.168 ms
but timing seems alright by these tests, though now and then I get ping: bad address 'aff5ecf951d2' when some service goes down.
Sometimes I get this error:
ERROR: An HTTP request took too long to complete. Retry with --verbose to obtain debug information.
If you encounter this issue regularly because of slow network conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher value (current value: 60).
And too many times I just have to restart Docker in order to make it work.
How can I docker inspect deeper into slow network conditions and figure out what is wrong? Can network issues be related to volumes?

The problem manifested itself as the number of containers and the app's complexity grew (something you should always be aware of).
In my case, I had changed one of the images from Alpine to Slim-Buster (Debian), which is significantly larger.
Turns out I could fix that by simply going to Docker 'Preferences':
clicking on 'Advanced' and increasing memory allocation.
Now it runs smoothly again.

Related

How to run zookeeper & kafka-server on docker avoiding `Connection refused` exception?

I am trying to run a kafka-spark streaming application on Docker. Below is my project structure.
Dockerfile contents:
from gcr.io/datamechanics/spark:platform-3.1-dm14
ENV PYSPARK_MAJOR_PYTHON_VERSION=3
WORKDIR /opt/application/
RUN wget https://jdbc.postgresql.org/download/postgresql-42.2.5.jar
RUN mv postgresql-42.2.5.jar /opt/spark/jars
COPY requirements.txt .
RUN pip3 install -r requirements.txt
COPY main.py .
Docker-compose.yml contents:
version: "2"
services:
spark:
image: docker.io/bitnami/spark:3
environment:
- SPARK_MODE=master
- SPARK_RPC_AUTHENTICATION_ENABLED=no
- SPARK_RPC_ENCRYPTION_ENABLED=no
- SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
- SPARK_SSL_ENABLED=no
ports:
- '8080:8080'
spark-worker:
image: docker.io/bitnami/spark:3
environment:
- SPARK_MODE=worker
- SPARK_MASTER_URL=spark://spark:7077
- SPARK_WORKER_MEMORY=1G
- SPARK_WORKER_CORES=1
- SPARK_RPC_AUTHENTICATION_ENABLED=no
- SPARK_RPC_ENCRYPTION_ENABLED=no
- SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
- SPARK_SSL_ENABLED=no
zookeeper:
image: confluentinc/cp-zookeeper:latest
container_name: zookeeper
ports:
- "22181:22181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOOKEEPER_CLIENT_PORT=2181
- ZOOKEEPER_TICK_TIME=2000
kafka-server1:
image: confluentinc/cp-kafka:latest
ports:
- '9092:9092'
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
- ALLOW_PLAINTEXT_LISTENER=yes
depends_on:
- zookeeper-server
I was able to pull the images and create containers successfully.
(venv) (base) johnny@Johnnys-MBP~/PyCharmProjects/dockerpractice/Johnny> docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
d47d4b091b2c confluentinc/cp-zookeeper:latest "/etc/confluent/dock…" 16 minutes ago Up 12 minutes 2181/tcp, 2888/tcp, 3888/tcp, 0.0.0.0:22181->22181/tcp zookeeper
59c06d3cf754 confluentinc/cp-kafka:latest "/etc/confluent/dock…" 48 minutes ago Up 48 minutes 9092/tcp, 0.0.0.0:29092->29092/tcp kafka
26794da88c7d bitnami/spark:3 "/opt/bitnami/script…" About an hour ago Up About an hour dockerpractice-spark-worker-1
5a308035bd18 bitnami/spark:3 "/opt/bitnami/script…" About an hour ago Up About an hour 0.0.0.0:8080->8080/tcp dockerpractice-spark-1
I connected to the kafka zookeeper image and started it like below:
docker run -i -t confluentinc/cp-zookeeper:latest /bin/bash
zookeeper-server-start /etc/kafka/zookeeper.properties
The above command starts zookeeper with no exceptions and I tried to start my kafka-server in the same way:
docker run -i -t confluentinc/cp-kafka:latest /bin/bash
kafka-server-start /etc/kafka/server.properties
But when I submit the command for kafka-server, I see the below exception:
[2022-04-04 18:13:11,276] WARN Session 0x0 for sever localhost/127.0.0.1:2181, Closing socket connection. Attempting reconnect except it is a SessionExpiredException. (org.apache.zookeeper.ClientCnxn)
java.net.ConnectException: Connection refused
at java.base/sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at java.base/sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:777)
at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:344)
at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1290)
I tried various combinations like changing the ip address & ports of kafka server to new ones and by creating multiple kafka servers but all of them are resulting in same exception.
Could anyone let me know what is the mistake I am doing here and how can I correct it?

Docker - Symfony5/Mercure : Impossible to reach mercure hub

I try with no success to access to Mercure's hub through my browser at this URL :
http://localhost:3000 => ERR_CONNECTION_REFUSED
I use Docker for my development. Here's my docker-compose.yml :
# docker/docker-compose.yml
version: '3'
services:
database:
container_name: test_db
build:
context: ./database
environment:
- MYSQL_DATABASE=${DATABASE_NAME}
- MYSQL_USER=${DATABASE_USER}
- MYSQL_PASSWORD=${DATABASE_PASSWORD}
- MYSQL_ROOT_PASSWORD=${DATABASE_ROOT_PASSWORD}
ports:
- "3309:3306"
volumes:
- ./database/init.sql:/docker-entrypoint-initdb.d/init.sql
- ./database/data:/var/lib/mysql
php-fpm:
container_name: test_php
build:
context: ./php-fpm
depends_on:
- database
environment:
- APP_ENV=${APP_ENV}
- APP_SECRET=${APP_SECRET}
- DATABASE_URL=mysql://${DATABASE_USER}:${DATABASE_PASSWORD}@database:3306/${DATABASE_NAME}?serverVersion=5.7
volumes:
- ./src:/var/www
nginx:
container_name: test_nginx
build:
context: ./nginx
volumes:
- ./src:/var/www
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
- ./nginx/sites/:/etc/nginx/sites-available
- ./nginx/conf.d/:/etc/nginx/conf.d
- ./logs:/var/log
depends_on:
- php-fpm
ports:
- "8095:80"
caddy:
container_name: test_mercure
image: dunglas/mercure
restart: unless-stopped
environment:
MERCURE_PUBLISHER_JWT_KEY: '!ChangeMe!'
MERCURE_SUBSCRIBER_JWT_KEY: '!ChangeMe!'
PUBLISH_URL: '${MERCURE_PUBLISH_URL}'
JWT_KEY: '${MERCURE_JWT_KEY}'
ALLOW_ANONYMOUS: '${MERCURE_ALLOW_ANONYMOUS}'
CORS_ALLOWED_ORIGINS: '${MERCURE_CORS_ALLOWED_ORIGINS}'
PUBLISH_ALLOWED_ORIGINS: '${MERCURE_PUBLISH_ALLOWED_ORIGINS}'
ports:
- "3000:80"
I have executed successfully :
docker-compose up -d
docker ps -a :
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0e4a72fe75b2 dunglas/mercure "caddy run --config …" 2 hours ago Up 2 hours 443/tcp, 2019/tcp, 0.0.0.0:3000->80/tcp, :::3000->80/tcp test_mercure
724fe920ebef nginx "/docker-entrypoint.…" 3 hours ago Up 3 hours 0.0.0.0:8095->80/tcp, :::8095->80/tcp test_nginx
9e63fddf50ef php-fpm "docker-php-entrypoi…" 3 hours ago Up 3 hours 9000/tcp test_php
e7989b26084e database "docker-entrypoint.s…" 3 hours ago Up 3 hours 0.0.0.0:3309->3306/tcp, :::3309->3306/tcp test_db
I can reach http://localhost:8095 to access to my Symfony app but I don't know on which URL I am supposed to reach my Mercure's hub.
Thanks for your help!
I tried for months to get symfony + nginx + mysql + phpmyadmin + mercure + docker to work both locally for development and in production (obviously). To no avail.
And, while this isn't directly answering your question. The only way I can contribute is with an "answer", as I don't have enough reputation to only comment, or I would have done that.
If you're not tied to nginx for any reason besides a means of a web server, and can replace it with caddy, I have a repo that is symfony + caddy + mysql + phpmyadmin + mercure + docker that works with SSL both locally and in production.
https://github.com/thund3rb1rd78/symfony-mercure-website-skeleton-dockerized

failed Error: Calling enrollment endpoint failed with error [Error: connect ECONNREFUSED 127.0.0.1:7054] | ca_peerOrg1 and ca_peerOrg2 is not running

I am trying to run hyperledger fabric using container. Here is my docker-compose yml.
docker-compose.yaml
version: '2'
services:
ca.org1.sample.com:
image: hyperledger/fabric-ca:1.4
environment:
- FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
- FABRIC_CA_SERVER_CA_NAME=ca-org1
- FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.sample.com-cert.pem
- FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/0c7af57d616f614fd42250b8ba14a0c777220874d328ecbd1464a47ef3f85b1a_sk
- FABRIC_CA_SERVER_TLS_ENABLED=true
- FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org1.sample.com-cert.pem
- FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/0c7af57d616f614fd42250b8ba14a0c777220874d328ecbd1464a47ef3f85b1a_sk
ports:
- "7054:7054"
command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
volumes:
- ./channel/crypto-config/peerOrganizations/org1.sample.com/ca/:/etc/hyperledger/fabric-ca-server-config
container_name: ca_peerOrg1
ca.org2.sample.com:
image: hyperledger/fabric-ca:1.4
environment:
- FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
- FABRIC_CA_SERVER_CA_NAME=ca-org2
- FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org2.sample.com-cert.pem
- FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/fc399b786271e773cc0011593c6bcae7c4b4ae0f4a595ebf0883154bddb4daa7_sk
- FABRIC_CA_SERVER_TLS_ENABLED=true
- FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org2.sample.com-cert.pem
- FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/fc399b786271e773cc0011593c6bcae7c4b4ae0f4a595ebf0883154bddb4daa7_sk
ports:
- "8054:7054"
command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
volumes:
- ./channel/crypto-config/peerOrganizations/org2.sample.com/ca/:/etc/hyperledger/fabric-ca-server-config
container_name: ca_peerOrg2
orderer.sample.com:
container_name: orderer.sample.com
image: hyperledger/fabric-orderer:1.4
environment:
- FABRIC_LOGGING_SPEC=debug
- ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
- ORDERER_GENERAL_GENESISMETHOD=file
- ORDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block
- ORDERER_GENERAL_LOCALMSPID=OrdererMSP
- ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/crypto/orderer/msp
- ORDERER_GENERAL_TLS_ENABLED=true
- ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/crypto/orderer/tls/server.key
- ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/crypto/orderer/tls/server.crt
- ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/crypto/orderer/tls/ca.crt, /etc/hyperledger/crypto/peerOrg1/tls/ca.crt, /etc/hyperledger/crypto/peerOrg2/tls/ca.crt]
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderers
command: orderer
ports:
- 7050:7050
volumes:
- ./channel:/etc/hyperledger/configtx
- ./channel/crypto-config/ordererOrganizations/sample.com/orderers/orderer.sample.com/:/etc/hyperledger/crypto/orderer
- ./channel/crypto-config/peerOrganizations/org1.sample.com/peers/peer0.org1.sample.com/:/etc/hyperledger/crypto/peerOrg1
- ./channel/crypto-config/peerOrganizations/org2.sample.com/peers/peer0.org2.sample.com/:/etc/hyperledger/crypto/peerOrg2
peer0.org1.sample.com:
container_name: peer0.org1.sample.com
extends:
file: base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer0.org1.sample.com
- CORE_PEER_LOCALMSPID=Org1MSP
- CORE_PEER_ADDRESS=peer0.org1.sample.com:7051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.sample.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.sample.com:7051
ports:
- 7051:7051
- 7053:7053
volumes:
- ./channel/crypto-config/peerOrganizations/org1.sample.com/peers/peer0.org1.sample.com/:/etc/hyperledger/crypto/peer
depends_on:
- orderer.sample.com
peer1.org1.sample.com:
container_name: peer1.org1.sample.com
extends:
file: base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer1.org1.sample.com
- CORE_PEER_LOCALMSPID=Org1MSP
- CORE_PEER_ADDRESS=peer1.org1.sample.com:7051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.sample.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.sample.com:7051
ports:
- 7056:7051
- 7058:7053
volumes:
- ./channel/crypto-config/peerOrganizations/org1.sample.com/peers/peer1.org1.sample.com/:/etc/hyperledger/crypto/peer
depends_on:
- orderer.sample.com
peer0.org2.sample.com:
container_name: peer0.org2.sample.com
extends:
file: base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer0.org2.sample.com
- CORE_PEER_LOCALMSPID=Org2MSP
- CORE_PEER_ADDRESS=peer0.org2.sample.com:7051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org2.sample.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.sample.com:7051
ports:
- 8051:7051
- 8053:7053
volumes:
- ./channel/crypto-config/peerOrganizations/org2.sample.com/peers/peer0.org2.sample.com/:/etc/hyperledger/crypto/peer
depends_on:
- orderer.sample.com
peer1.org2.sample.com:
container_name: peer1.org2.sample.com
extends:
file: base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer1.org2.sample.com
- CORE_PEER_LOCALMSPID=Org2MSP
- CORE_PEER_ADDRESS=peer1.org2.sample.com:7051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.sample.com:7051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org2.sample.com:7051
ports:
- 8056:7051
- 8058:7053
volumes:
- ./channel/crypto-config/peerOrganizations/org2.sample.com/peers/peer1.org2.sample.com/:/etc/hyperledger/crypto/peer
depends_on:
- orderer.sample.com
base.yaml file
version: '2'
services:
peer-base:
image: hyperledger/fabric-peer:1.4
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=artifacts_default
- FABRIC_LOGGING_SPEC=ERROR
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_GOSSIP_SKIPHANDSHAKE=true
- CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/crypto/peer/msp
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/crypto/peer/tls/server.key
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/crypto/peer/tls/server.crt
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/crypto/peer/tls/ca.crt
logging:
driver: "json-file"
options:
max-file: "2"
max-size: "5m"
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
volumes:
- /var/run/:/host/var/run/
After running these containers, I can not see running ca_peerOrg1 and ca_peerOrg2 containers.
➜ sample_network git:(master) ✗ docker container ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
17e106be6872 hyperledger/fabric-peer:1.4 "peer node start" 42 seconds ago Up 35 seconds 0.0.0.0:8051->7051/tcp, 0.0.0.0:8053->7053/tcp peer0.org2.sample.com
fd72c0b378e3 hyperledger/fabric-peer:1.4 "peer node start" 42 seconds ago Up 35 seconds 0.0.0.0:7051->7051/tcp, 0.0.0.0:7053->7053/tcp peer0.org1.sample.com
f0198beef653 hyperledger/fabric-peer:1.4 "peer node start" 42 seconds ago Up 35 seconds 0.0.0.0:7056->7051/tcp, 0.0.0.0:7058->7053/tcp peer1.org1.sample.com
a9d2a0fabe6d hyperledger/fabric-peer:1.4 "peer node start" 42 seconds ago Up 35 seconds 0.0.0.0:8056->7051/tcp, 0.0.0.0:8058->7053/tcp peer1.org2.sample.com
8173c3c09e6a hyperledger/fabric-orderer:1.4 "orderer" 49 seconds ago Up 43 seconds 0.0.0.0:7050->7050/tcp orderer.sample.com
Here ca_peerOrg1 and ca_peerOrg2 are not running. So when I try to register from the Node SDK, I am getting the following error,
POST request Enroll on Org1 ...
{"success":false,"message":"failed Error: Calling enrollment endpoint failed with error [Error: connect ECONNREFUSED 127.0.0.1:7054]"}
ORG1 token is null
POST request Enroll on Org2 ...
{"success":false,"message":"failed Error: Calling enrollment endpoint failed with error [Error: connect ECONNREFUSED 127.0.0.1:8054]"}
ORG2 token is null
Please help me to fix this issue.
Check whether the FABRIC_CA_SERVER_CA_KEYFILE and FABRIC_CA_SERVER_TLS_KEYFILE of CA for each organization is properly written. in docker-compose.yaml
# ca_peerOrg1
ls ./channel/crypto-config/peerOrganizations/org1.sample.com/ca/
# ca_peerOrg2
ls ./channel/crypto-config/peerOrganizations/org2.sample.com/ca/
Among the result values operated by the above command, the name of the *_sk file should be written as the FABRIC_CA_SERVER_CA_KEYFILE and FABRIC_CA_SERVER_TLS_KEYFILE
If you share the logs from the following commands, I can give a clearer answer.
docker logs ca_peerOrg1
docker logs ca_peerOrg2

Multiple docker container the same ports

I got the following containers:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f75e2629d5b5 sameersbn/gitlab:10.2.2 "/sbin/entrypoint.sh…" 12 minutes ago Up 11 minutes 80/tcp, 0.0.0.0:22->22/tcp, 443/tcp gitlab_app_1
8fc5b1cec6d5 sameersbn/redis:latest "/sbin/entrypoint.sh…" 12 minutes ago Up 12 minutes 6379/tcp gitlab_redis_1
44db2400787d sameersbn/postgresql:9.6-2 "/sbin/entrypoint.sh" 12 minutes ago Up 12 minutes 5432/tcp gitlab_postgresql_1
31a3423a66c7 nextcloud_web "nginx -g 'daemon of…" 37 minutes ago Up 37 minutes 80/tcp, 443/tcp nextcloud_web_1
14334d36116a nextcloud_app "/entrypoint.sh php-…" 37 minutes ago Up 37 minutes 9000/tcp nextcloud_app_1
258d317934a7 nextcloud_cron "/cron.sh" 37 minutes ago Up 37 minutes 9000/tcp nextcloud_cron_1
c66f31c762d8 mariadb "docker-entrypoint.s…" 37 minutes ago Up 37 minutes 3306/tcp nextcloud_db_1
53e8fa0e5a9f redis "docker-entrypoint.s…" 37 minutes ago Up 37 minutes 6379/tcp nextcloud_redis_1
e4c147824046 tvial/docker-mailserver:latest "/bin/sh -c 'supervi…" About an hour ago Up 33 minutes 0.0.0.0:25->25/tcp, 110/tcp, 0.0.0.0:143->143/tcp, 0.0.0.0:587->587/tcp, 465/tcp, 995/tcp, 0.0.0.0:993->993/tcp, 4190/tcp mail_mail_1
4d99cf5a542a nginx "nginx -g 'daemon of…" About an hour ago Up 33 minutes 80/tcp, 443/tcp mail_ssl_1
a4e9b76b91df jrcs/letsencrypt-nginx-proxy-companion "/bin/bash /app/entr…" About an hour ago Up About an hour main_letsencrypt_1
334d501060b4 jwilder/nginx-proxy "/app/docker-entrypo…" About an hour ago Up 22 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp main_nginx-proxy_1
2ff7189e6272 mattermost_db "/entrypoint.sh post…" About an hour ago Up About an hour 5432/tcp mattermost_db_1
4d99ebc5ec02 mattermost_app "/entrypoint.sh plat…" About an hour ago Up About an hour 80/tcp mattermost_app_1
94007cb05dd3 mattermost_web "/entrypoint.sh" About an hour ago Up About an hour 80/tcp, 443/tcp mattermost_web_1
As you can see, there are multiple containers with the same ports, for example nextcloud_redis and gitlab_redis. Since I am using nginx-proxy, I have to put all containers into the same network. This causes problems because, for example, the gitlab_app tries to use the nextcloud_redis.
In the following the docker-compose.
Nextcloud
version: '3'
services:
db:
image: mariadb
# image: mysql
restart: always
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=XXX
env_file:
- db.env
redis:
image: redis
restart: always
app:
build: ./app
restart: always
volumes:
- ./data:/var/www/html
environment:
- MYSQL_HOST=db
env_file:
- db.env
depends_on:
- db
- redis
web:
build: ./web
restart: always
expose:
- "443"
- "80"
volumes:
- ./data:/var/www/html:ro
environment:
- VIRTUAL_HOST=cloud.XXX.net
- LETSENCRYPT_HOST=cloud.XXX.net
- LETSENCRYPT_EMAIL=admin@XXX.net
depends_on:
- app
cron:
build: ./app
restart: always
volumes:
- ./data:/var/www/html
entrypoint: /cron.sh
depends_on:
- db
- redis
networks:
default:
external:
name: reverse-proxy
Gitlab
version: '2'
services:
redis:
restart: always
image: sameersbn/redis:latest
command:
- --loglevel warning
volumes:
- ./redis:/var/lib/redis:Z
postgresql:
restart: always
image: sameersbn/postgresql:9.6-2
volumes:
- ./postgresql:/var/lib/postgresql:Z
environment:
- DB_USER=gitlab
- DB_PASS=XXX
- DB_NAME=gitlabhq_production
- DB_EXTENSION=pg_trgm
app:
restart: always
image: sameersbn/gitlab:10.2.2
depends_on:
- redis
- postgresql
expose:
- "80"
ports:
- "22:22"
volumes:
- ./data:/home/git/data:Z
environment:
- VIRTUAL_HOST=gitlab.XXX.net
- DEBUG=false
- DB_ADAPTER=postgresql
- DB_HOST=postgresql
- DB_PORT=5432
- DB_USER=gitlab
- DB_PASS=XXX
- DB_NAME=gitlabhq_production
- REDIS_HOST=redis
- REDIS_PORT=6379
- TZ=Europe/Berlin
- GITLAB_TIMEZONE=Berlin
- GITLAB_HTTPS=true
- SSL_SELF_SIGNED=true
[...]
- OAUTH_CROWD_SERVER_URL=
- OAUTH_CROWD_APP_NAME=
- OAUTH_CROWD_APP_PASSWORD=
- OAUTH_AUTH0_CLIENT_ID=
- OAUTH_AUTH0_CLIENT_SECRET=
- OAUTH_AUTH0_DOMAIN=
- OAUTH_AZURE_API_KEY=
- OAUTH_AZURE_API_SECRET=
- OAUTH_AZURE_TENANT_ID=
networks:
default:
external:
name: reverse-proxy
Whats the best solution to fix this issue?
Try use networks:
(At the moment I have nowhere to try it, so I hope it works, or at least helps you decipher the dilemma)
version: '3'
services:
db:
image: mariadb
# image: mysql
restart: always
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=XXX
env_file:
- db.env
networks:
- net_internal_next
redis:
image: redis
restart: always
networks:
- net_internal_next
app:
build: ./app
restart: always
volumes:
- ./data:/var/www/html
environment:
- MYSQL_HOST=db
env_file:
- db.env
depends_on:
- db
- redis
networks:
- net_internal_next
web:
build: ./web
restart: always
expose:
- "443"
- "80"
volumes:
- ./data:/var/www/html:ro
environment:
- VIRTUAL_HOST=cloud.XXX.net
- LETSENCRYPT_HOST=cloud.XXX.net
- LETSENCRYPT_EMAIL=admin@XXX.net
depends_on:
- app
networks:
- net_internal_next
- net_external_next
cron:
build: ./app
restart: always
volumes:
- ./data:/var/www/html
entrypoint: /cron.sh
depends_on:
- db
- redis
networks:
- net_internal_next
networks:
net_external_next:
external:
name: reverse-proxy
net_internal_next:
driver: bridge
--
version: '2'
services:
redis:
restart: always
image: sameersbn/redis:latest
command:
- --loglevel warning
volumes:
- ./redis:/var/lib/redis:Z
networks:
- net_internal_git
postgresql:
restart: always
image: sameersbn/postgresql:9.6-2
volumes:
- ./postgresql:/var/lib/postgresql:Z
environment:
- DB_USER=gitlab
- DB_PASS=XXX
- DB_NAME=gitlabhq_production
- DB_EXTENSION=pg_trgm
networks:
- net_internal_git
app:
restart: always
image: sameersbn/gitlab:10.2.2
depends_on:
- redis
- postgresql
expose:
- "80"
ports:
- "22:22"
volumes:
- ./data:/home/git/data:Z
environment:
- VIRTUAL_HOST=gitlab.XXX.net
- DEBUG=false
- DB_ADAPTER=postgresql
- DB_HOST=postgresql
- DB_PORT=5432
- DB_USER=gitlab
- DB_PASS=XXX
- DB_NAME=gitlabhq_production
- REDIS_HOST=redis
- REDIS_PORT=6379
- TZ=Europe/Berlin
- GITLAB_TIMEZONE=Berlin
- GITLAB_HTTPS=true
- SSL_SELF_SIGNED=true
[...]
- OAUTH_CROWD_SERVER_URL=
- OAUTH_CROWD_APP_NAME=
- OAUTH_CROWD_APP_PASSWORD=
- OAUTH_AUTH0_CLIENT_ID=
- OAUTH_AUTH0_CLIENT_SECRET=
- OAUTH_AUTH0_DOMAIN=
- OAUTH_AZURE_API_KEY=
- OAUTH_AZURE_API_SECRET=
- OAUTH_AZURE_TENANT_ID=
networks:
- net_internal_git
- net_external_git
networks:
net_external_git:
external:
name: reverse-proxy
net_internal_git:
driver: bridge

Docker multiple MYSQL containers

Docker newbie here.
What I'm trying to achieve is to run multiple MySQL containers with docker compose, in addition to an nginx, a PHP and a PhpMyAdmin container.
This is my docker-compose.yml:
version: '3'
services:
server:
build:
context: ./
dockerfile: server.docker
volumes:
- ./:/var/www
ports:
- "8080:80"
links:
- app
app:
build:
context: ./
dockerfile: app.docker
volumes:
- ./:/var/www
links:
- db_callcenter
- db_forecast
- db_logistics
- db_products
- db_rm
- db_rma
- db_settings
- db_tasks
- db_users
db_callcenter:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_callcenter"
ports:
- "33061:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_forecast:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_forecast"
ports:
- "33062:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_logistics:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_logistics"
ports:
- "33063:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_products:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_products"
ports:
- "33064:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_rm:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_rm"
ports:
- "33065:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_rma:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_rma"
ports:
- "33066:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_settings:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_settings"
ports:
- "33067:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_tasks:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_tasks"
ports:
- "33068:3306"
volumes:
- mysql_bkp:/var/lib/mysql
db_users:
image: mysql:5.7
environment:
- "MYSQL_ROOT_PASSWORD=secret"
- "MYSQL_DATABASE=prj_users"
ports:
- "33069:3306"
volumes:
- mysql_bkp:/var/lib/mysql
pma:
image: phpmyadmin/phpmyadmin
environment:
- "PMA_USER=root"
- "PMA_PASSWORD=secret"
ports:
- "8001:80"
links:
- db_callcenter
- db_forecast
- db_logistics
- db_products
- db_rm
- db_rma
- db_settings
- db_tasks
- db_users
volumes:
mysql_bkp:
But none of the MySQL containers are created. When I run docker ps I get:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0228e9c8a267 phpmyadmin/phpmyadmin "/run.sh phpmyadmin" About a minute ago Up 30 seconds 0.0.0.0:8001->80/tcp prj_pma_1
e6c6b11905f1 prj_server "nginx -g 'daemon ..." 22 minutes ago Up 2 minutes 0.0.0.0:8080->80/tcp prj_server_1
2e7dd484c6e5 prj_app "docker-php-entryp..." 24 minutes ago Up 2 minutes 9000/tcp prj_app_1
UPDATE:
docker logs shows:
Unable to lock ./ibdata1 error: 11
or
InnoDB: Unsupported redo log format.
I don't know what I'm doing wrong, or how I should start debugging. Any help would be mostly appreciated.
You can't have multiple mysql processes sharing the same data directory. In your compose file, every database container is using the same mysql_bkp volume. You will need to either create one volume per container, or configure mysql to use a unique subdirectory of /var/lib/mysql for storing data.
If you simply remove the volumes: key from each database service, they will all get a unique anonymous volume (because that's how the mysql image is configured).
Alternatively, you can declare and mount a separate volume for each service:
services:
db_logistics:
image: mysql:5.7
volumes:
- mysql_bkp_logistics:/var/lib/mysql
db_products:
image: mysql:5.7
volumes:
- mysql_bkp_products:/var/lib/mysql
volumes:
mysql_bkp_logistics:
mysql_bkp_products:
Etc.

Resources