Hi, I'm running a Docker container with nginx and got the following error:
2019/08/09 11:37:18 [emerg] 1#1: invalid number of arguments in
"upstream" directive in /etc/nginx/conf.d/default.conf:61 nginx:
[emerg] invalid number of arguments in "upstream" directive in
/etc/nginx/conf.d/default.conf:61
My docker compose looks like this:
# #version 2018-01-15
# #author -----
version: "3.7"

networks:
  proxy:
    external: true

volumes:
  # curl https://raw.githubusercontent.com/jwilder/nginx-proxy/master/nginx.tmpl > /var/lib/docker/volumes/proxy_tmpl/_data/nginx.tmpl
  conf:
  vhost:
  certs:
  html:
  tmpl:

services:
  # Nginx proxy
  nginx:
    image: nginx
    networks:
      - proxy
    ports:
      # Quoted so the host:container pairs cannot be misread as YAML numbers.
      - "80:80"
      - "443:443"
    volumes:
      - conf:/etc/nginx/conf.d # nginx config
      - vhost:/etc/nginx/vhost.d # changed configuration of vhosts (needed by Let's Encrypt)
      - html:/usr/share/nginx/html # challenge files
      - certs:/etc/nginx/certs:ro # Let's Encrypt certificates
      - /var/run/docker.sock:/tmp/docker.sock:ro # docker service
    environment:
      - ENABLE_IPV6=true
    deploy:
      mode: global
      placement:
        constraints: [node.role == manager]
      restart_policy:
        condition: on-failure
        max_attempts: 5
        window: 120s
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 32M
      labels:
        de.blubbbbb.meta.description: "Nginx"
        de.blubbbbb.meta.maintainer: "-----"
        de.blubbbbb.meta.version: "2018-01-15"
        com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: ""
    # see also: https://wiki.ssdt-ohio.org/display/rtd/Adjusting+nginx-proxy+Timeout+Configuration

  # Docker-gen
  # NOTE(review): the reported 'invalid number of arguments in "upstream"'
  # error means the generated default.conf contains "upstream {" with an
  # empty name — presumably one of the proxied containers (e.g. adminer)
  # is picked up by the template without a usable VIRTUAL_HOST name.
  # Verify nginx.tmpl and the labels/env of the proxied services.
  dockergen:
    # https://hub.docker.com/r/helder/docker-gen
    image: helder/docker-gen
    networks:
      - proxy
    volumes:
      - conf:/etc/nginx/conf.d
      - vhost:/etc/nginx/vhost.d
      - html:/usr/share/nginx/html
      - certs:/etc/nginx/certs:ro
      - tmpl:/etc/docker-gen/templates:ro # docker-gen templates
      - /var/run/docker.sock:/tmp/docker.sock:ro # docker service
    environment:
      ENABLE_IPV6: ""
    command: -notify "docker-label-sighup com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy" -watch -wait 10s:30s /etc/docker-gen/templates/nginx.tmpl /etc/nginx/conf.d/default.conf
    deploy:
      mode: global
      placement:
        constraints: [node.role == manager]
      restart_policy:
        condition: on-failure
        max_attempts: 5
        window: 120s
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 32M
      labels:
        de.blubbbbb.meta.description: "Docker-gen"
        de.blubbbbb.meta.maintainer: "-----"
        de.blubbbbb.meta.version: "2018-01-15"
        com.github.jrcs.letsencrypt_nginx_proxy_companion.docker_gen: ""

  # Lets Encrypt
  letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    networks:
      - proxy
    volumes:
      - conf:/etc/nginx/conf.d
      - vhost:/etc/nginx/vhost.d
      - html:/usr/share/nginx/html
      - certs:/etc/nginx/certs:rw
      - /var/run/docker.sock:/var/run/docker.sock:ro
    deploy:
      mode: global
      placement:
        constraints: [node.role == manager]
      restart_policy:
        condition: on-failure
        max_attempts: 5
        window: 120s
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 32M
      labels:
        de.blubbbbb.meta.description: "Letsencrypt Nginx Proxy Companion"
        de.blubbbbb.meta.maintainer: "-----"
        de.blubbbbb.meta.version: "2018-01-15"
I run it like this:
docker stack deploy proxy -c docker-compose.yml
What could be the issue? Thanks in advance.
upstream part from conf
# NOTE(review): the "upstream" directive requires exactly one argument — a
# name, e.g. "upstream my-backend {". The missing name below is exactly what
# nginx reports as:
#   [emerg] invalid number of arguments in "upstream" directive
# This block was generated by docker-gen; fix the template/labels so every
# upstream gets a non-empty name, rather than editing this generated file.
upstream {
# Cannot connect to network of this container
server 127.0.0.1 down;
# Cannot connect to network of this container
server 127.0.0.1 down;
# Cannot connect to network of this container
server 127.0.0.1 down;
## Can be connected with "proxy" network
# tools_adminer.1.n1j3poc9mo507somuhyf7adrd
server 10.0.35.3:8080;
# Cannot connect to network of this container
server 127.0.0.1 down;
# Cannot connect to network of this container
server 127.0.0.1 down;
}
In my case there was an Adminer container that was blocking nginx.
Related
So I have deployed my stack application and everything is working as expected: three container replicas are running. Now I access phpMyAdmin and try to log in to MySQL, but I get the error: mysqli::real_connect(): php_network_getaddresses: getaddrinfo failed: Temporary failure in name resolution
Both the phpmyadmin and mysql container are on the same network.
version: "3.9"

# FIX: the top-level key must be "services:" (plural) — the original file had
# "service:", which the compose schema rejects / silently mis-parses.
services:
  db:
    image: mysql
    #container_name: mysql_db
    command: --default-authentication-plugin=mysql_native_password
    # NOTE(review): "restart:" is ignored by "docker stack deploy";
    # deploy.restart_policy below is what swarm actually uses.
    restart: always
    secrets:
      - mysql_root_password
      - mysql_database
      - mysql_user
      - mysql_password
    environment:
      MYSQL_ROOT_PASSWORD_FILE: /run/secrets/mysql_root_password
      MYSQL_DATABASE_FILE: /run/secrets/mysql_database
      MYSQL_USER_FILE: /run/secrets/mysql_user
      MYSQL_PASSWORD_FILE: /run/secrets/mysql_password
    ports:
      - "9906:3306"
    networks:
      - back-tier
    volumes:
      - alpine-db_backup:/var/lib/mysql
      - alpine-mysql_logs:/var/log/mysql
      - alpine-mysql_cnf:/etc/mysql
    deploy:
      replicas: 3
      placement:
        constraints: [node.role == manager]
      resources:
        reservations:
          memory: 128M
        limits:
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 30s
        max_attempts: 10
        window: 60s
      update_config:
        parallelism: 1
        delay: 10s
        max_failure_ratio: 0.3

  phpmyadmin:
    image: phpmyadmin
    #container_name: phpmyadmin
    ports:
      # Quoted so the host:container pair cannot be misread as a YAML number.
      - "8080:80"
    environment:
      # Use the service name without the stack-name prefix for DNS resolution.
      PMA_HOST: db
      PMA_PORT: 3306
      PMA_ARBITRARY: 1
    depends_on:
      - db
    networks:
      - back-tier
      - front-tier
    deploy:
      replicas: 2
      resources:
        limits:
          cpus: '0.50'
          memory: 50M
        reservations:
          cpus: '0.25'
          memory: 20M
      restart_policy:
        condition: on-failure
        delay: 30s
        max_attempts: 10

networks:
  front-tier:
    driver: overlay
  back-tier:
    driver: overlay

# NOTE(review): the secrets (mysql_root_password, ...) and named volumes
# (alpine-db_backup, ...) referenced above also need top-level "secrets:" and
# "volumes:" definitions — not shown in this excerpt; confirm they exist.
For containers on the same network, to get another service's name resolved, you should use its name without the stack name as prefix. So, your PMA_HOST should be db, not titanfxbmp_db.
version: "3.9"
services:
  db:
    image: mysql
    # ... (rest of the db service unchanged)
  phpmyadmin:
    image: phpmyadmin
    # ... (rest of the phpmyadmin service unchanged)
    environment:
      # Reference the sibling service by its compose service name, without the
      # stack-name prefix.
      PMA_HOST: db
      PMA_PORT: 3306
Created a simple Traefik instance with 2 services, only by http. I'm getting Gateway timeout in both instances, this is my only file where I created my services and traefik proxy.
version: '3.4'

services:
  reverse-proxy:
    image: traefik:2.0 # The official Traefik docker image
    ports:
      - "80:80" # The HTTP port
      - "10553:8080" # The Web UI (enabled by --api)
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock # So that Traefik can listen to the Docker events
    networks:
      - default
    command:
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--providers.docker.network=demo_swarm_network"
      - "--providers.docker.exposedbydefault=false"
      - "--providers.docker.swarmMode=true"
      - "--entrypoints.web.address=:80"
    deploy:
      mode: global
      placement:
        constraints:
          - node.role == manager
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure

  xxxxx-authentication-api:
    image: xxxx_authentication_api_nightly:9999
    deploy:
      # Swarm mode: Traefik reads labels from the deploy section.
      labels:
        - "traefik.enable=true"
        - "traefik.docker.lbswarm=true"
        - "traefik.docker.network=demo_swarm_network"
        - "traefik.http.routers.authenticationapi.rule=PathPrefix(`/api/authentication`)"
        - "traefik.http.routers.authenticationapi.entrypoints=web"
        - "traefik.http.services.xxxxx-authentication-api.loadbalancer.server.port=3000"
        - "traefik.http.services.xxxxx-authentication-api.loadbalancer.server.scheme=http"
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
        order: stop-first
    command: node ./server.js
    environment:
      - NODE_ENV=authentication
      - LOG_LEVEL=info
      - NODE_CONFIG_DIR=./config
    networks:
      - default
    ports:
      - "3000"

  xxxxx-authentication-app:
    image: xxxxx_authentication_app_nightly:9999
    deploy:
      labels:
        - "traefik.enable=true"
        - "traefik.docker.lbswarm=true"
        - "traefik.docker.network=demo_swarm_network"
        - "traefik.http.routers.authenticationapp.rule=PathPrefix(`/authentication`)"
        - "traefik.http.routers.authenticationapp.entrypoints=web"
        - "traefik.http.services.xxxxx-authentication-app.loadbalancer.server.port=80"
        - "traefik.http.services.xxxxx-authentication-app.loadbalancer.server.scheme=http"
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
        order: stop-first
    networks:
      - default
    ports:
      - "80"

# NOTE(review): the gateway timeouts reported for this stack were resolved by
# recreating demo_swarm_network with the overlay driver, e.g.:
#   docker network create --driver overlay --attachable demo_swarm_network
networks:
  default:
    external:
      name: demo_swarm_network
The services are up and running, and so are the containers. Traefik is also running; it's just that when I try localhost:80/api/authentication or localhost:80/authentication I get a gateway timeout.
Where is traefik sending my requests ? I've confirmed in the host ports, that the apps in both endpoints are running.
What's missing in my configuration ?
Huzzah! The timeouts disappeared when I updated the demo_swarm_network network to use the overlay driver.
I'm trying to restore fabric-network with old blockchain data and for same I followed below steps.
Backup process
1. Stopped docker swarm network.
2. created a directory `bchain_backup` and under this directory I have created sub-directories for every node like orderer1, orderer2 and so on.
3. then I copied the data from container to `bchain_backup` directory
--> "docker cp container_name:/var/hyperledger/production bchain_backup/orderer1
--> executed above step for every node
Restoration process
1. copied all the certs and channel-artifacts
2. mapped '/bchain_backup/orderer1/production:/var/hyperledger/production' in compose-file.
3. performed step 2 for every node.
When I tried to start the network, I got the error below:
with Orderer node
panic: Error opening leveldb: open /var/hyperledger/production/orderer/index/LOCK: permission denied
With peer node
panic: Error opening leveldb: open /var/hyperledger/production/ledgersData/ledgerProvider/LOCK: permission denied
Using couchDB
Using Docker-swarm on GCP Ubuntu 18.04 instance
docker-orderer1.yaml file
version: '3.7'

volumes:
  orderer1.example.com:

# set external: true and now network name is "networks.test-network.name" instead of "networks.test-network.external.name"
networks:
  testchain-network:
    external: true
    name: testchain-network

services:
  orderer1:
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 5
      placement:
        constraints:
          - node.hostname == gcloud1
      resources:
        limits:
          cpus: '0.50'
          memory: 1000M
        reservations:
          cpus: '0.25'
          memory: 50M
    hostname: orderer1.example.com
    image: hyperledger/fabric-orderer:1.4.4
    # NOTE(review): the container runs as ${UID}:${GID}; restored backup data
    # copied with "docker cp" is typically owned by root, which produces
    # "open .../LOCK: permission denied" — chown -R the restored directories
    # to this UID:GID (or use named volumes) before starting.
    user: "${UID}:${GID}"
    #healthcheck:
    #  testchain: ["CMD","curl","-f","http://orderer1.example.com:4443/"]
    #  interval: 1m30s
    #  timeout: 10s
    #  retries: 3
    #  start_period: 1m
    environment:
      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=testchain-network
      - ORDERER_HOST=orderer1.example.com
      - ORDERER_GENERAL_LOGLEVEL=info
      - FABRIC_LOGGING_SPEC=warning
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      - ORDERER_GENERAL_GENESISPROFILE=OrdererOrg
      - CONFIGTX_ORDERER_ADDRESSES=[127.0.0.1:7050]
      - ORDERER_OPERATIONS_LISTENADDRESS=0.0.0.0:4443
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      #- ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      #- ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - CORE_CHAINCODE_LOGGING_LEVEL=DEBUG
      - CORE_CHAINCODE_LOGGING_SHIM=DEBUG
      - ORDERER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin#example.com/tls/ca.crt
      - ORDERER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin#example.com/tls/client.crt
      - ORDERER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin#example.com/tls/client.key
      - GODEBUG=netdns=go
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/channel-artifacts/:/var/hyperledger/configs:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer.genesis.block:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/ordererOrganizations/example.com/orderers/orderer1.example.com/msp:/var/hyperledger/orderer/msp:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/ordererOrganizations/example.com/orderers/orderer1.example.com/tls/:/var/hyperledger/orderer/tls:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/ordererOrganizations/example.com/users:/var/hyperledger/users:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/backup_blockchain/orderer1/production/orderer:/var/hyperledger/production/orderer
    ports:
      - published: 7050
        target: 7050
        # mode: host
      #- 7050:7050
      - published: 4443
        target: 4443
        # mode: host
    networks:
      testchain-network:
        aliases:
          - orderer1.example.com
docker-peer0-org1.yaml
version: '3.7'

volumes:
  peer0.org1.example.com:

networks:
  testchain-network:
    external: true
    name: testchain-network

services:
  org1peer0couchdb:
    hostname: couchdb.peer0.org1.example.com
    image: hyperledger/fabric-couchdb:0.4.18
    user: "${UID}:${GID}"
    environment:
      - COUCHDB_USER=couchdb
      - COUCHDB_PASSWORD=couchdb123
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 5
      placement:
        constraints:
          - node.hostname == gcloud1
    ports:
      - published: 5984
        target: 5984
        # mode: host
    networks:
      testchain-network:
        aliases:
          - couchdb.peer0.org1.example.com

  org1peer0:
    hostname: peer0.org1.example.com
    image: hyperledger/fabric-peer:1.4.4
    # NOTE(review): the restored /var/hyperledger/production data must be
    # owned by this UID:GID, otherwise leveldb fails with
    # "open .../LOCK: permission denied" — chown -R before starting.
    user: "${UID}:${GID}"
    environment:
      - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
      # the following setting starts chaincode containers on the same
      # bridge network as the peers
      # https://docs.docker.com/compose/networking/
      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=testchain-network
      - CORE_VM_DOCKER_ATTACHSTDOUT=true
      - CORE_PEER_ID=peer0.org1.example.com
      - CORE_PEER_ADDRESS=peer0.org1.example.com:7051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:7051
      - CORE_PEER_CHAINCODEADDRESS=peer0.org1.example.com:7052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
      - CORE_CHAINCODE_BUILDER=hyperledger/fabric-ccenv:1.4.4
      - CORE_CHAINCODE_GOLANG_RUNTIME=hyperledger/fabric-baseos:0.4.18
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.example.com:8051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051
      - CORE_PEER_LOCALMSPID=Org1MSP
      - FABRIC_LOGGING_SPEC=info
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_GOSSIP_USELEADERELECTION=true
      - CORE_PEER_ADDRESSAUTODETECT=true
      - CORE_PEER_GOSSIP_ORGLEADER=false
      - CORE_PEER_PROFILE_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
      - CORE_CHAINCODE_LOGGING_LEVEL=DEBUG
      - CORE_CHAINCODE_LOGGING_SHIM=DEBUG
      - CORE_LOGGING_CAUTHDSL=warning
      - CORE_LOGGING_GOSSIP=warning
      - CORE_LOGGING_LEDGER=info
      - CORE_LOGGING_MSP=warning
      - CORE_LOGGING_POLICIES=warning
      - CORE_LOGGING_GRPC=DEBUG
      - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:7443
      # Client certs
      - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin#org1.example.com/tls/ca.crt
      - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin#org1.example.com/tls/client.crt
      - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin#org1.example.com/tls/client.key
      # CouchDB
      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=couchdb
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=couchdb123
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb.peer0.org1.example.com:5984
      - GODEBUG=netdns=go
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
    command: peer node start
    volumes:
      - /var/run/:/host/var/run/:rw
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/etc/hyperledger/fabric/msp:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/etc/hyperledger/fabric/tls:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/bchain_network/crypto-config/peerOrganizations/org1.example.com/users:/var/hyperledger/users:ro
      - /home/delta/GoWorkspace/src/github.com/testchain/backup_blockchain/peer0org1/production:/var/hyperledger/production
      #- ../chaincode/:/opt/gopath/src/github.com/chaincode
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 5
      placement:
        constraints:
          - node.hostname == gcloud1
    ports:
      - published: 7051
        target: 7051
        # mode: host
      - published: 7052
        target: 7052
        # mode: host
      - published: 7443
        target: 7443
        # mode: host
    networks:
      testchain-network:
        aliases:
          - peer0.org1.example.com
- /home/delta/GoWorkspace/src/github.com/testchain/backup_blockchain/orderer1/production/orderer:/var/hyperledger/production/orderer
Instead of above host path mount
Please create a docker volume for each entity(orderer1, orderer2, etc) and copy all data to the volume and map the volume instead of host path
Usage: docker volume COMMAND
Manage volumes
Commands:
create Create a volume
inspect Display detailed information on one or more volumes
ls List volumes
prune Remove all unused local volumes
rm Remove one or more volumes
Run 'docker volume COMMAND --help' for more information on a command.
It seems like a permission issue; also check resources such as CPU and RAM.
usage
I'm trying to use Traefik to load-balance my web apps via Docker swarm.
I have installed sample application like joomla in swarm mode behind traefik. Joomla works fine when the application is deployed on the same node as traefik (ie, manager), and I can access it through the browser by hitting the manager's node IP. But, if the service gets deployed on the worker node with no container in the manager node, while the service is up and running without any issue, but I am not able to see anything on the browser (hitting the manager or worker IP)
My traefik.toml file:
# traefik.toml — static configuration for Traefik v1.7.
defaultEntryPoints = ["http"]
loglevel = "INFO"
sendAnonymousUsage = true
# Docker provider: Traefik watches the local Docker socket for backends.
# NOTE(review): there is no swarm-mode flag here — in v1.7, routing to
# services deployed with "docker stack deploy" needs "swarmMode = true"
# under [docker] (and labels in the service's deploy: section); without it
# Traefik only sees containers on its own node — confirm against the
# symptom that only manager-node containers are reachable.
[docker]
endpoint = "unix:///var/run/docker.sock"
exposedByDefault = false
# API / dashboard, served on the "dashboard" entrypoint (:8080 below).
[api]
dashboard = true
entrypoint = "dashboard"
[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.dashboard]
address = ":8080"
--------------------------------
My traefik.yml file:
version: '3'

services:
  traefik:
    image: traefik:v1.7 # The official Traefik docker image
    # NOTE(review): "restart:" is ignored by "docker stack deploy";
    # deploy.restart_policy below is what swarm uses.
    restart: always
    ports:
      # Quoted so host:container pairs are not misread as YAML numbers.
      - "80:80" # The HTTP port
      - "9090:8080" # The Web UI (enabled by --api)
    # NOTE(review): in swarm mode, Traefik v1.7 reads labels from the deploy:
    # section of services (and needs swarmMode enabled in traefik.toml);
    # service-level labels like these are only seen for plain containers.
    labels:
      - traefik.frontend.rule=Host:traefik.dpaas1.pune.cdac.in
      - traefik.port=8080
      - traefik.enable=true
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock # So that Traefik can listen to the Docker events
      - ${PWD}/traefik.toml:/etc/traefik/traefik.toml
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
        max_attempts: 3
      placement:
        constraints: [node.role == manager]
      update_config:
        delay: 2s
    networks:
      - net

networks:
  net:
    external: true
My joomla.yml file:
version: '3'

services:
  joomla:
    image: joomla
    # NOTE(review): "restart:" and "links:" are both ignored by
    # "docker stack deploy"; swarm services resolve each other by service
    # name on the shared network, and deploy.restart_policy governs restarts.
    restart: always
    links:
      - joomladb:mysql
    volumes:
      - joomla1-www:/var/www/html
    deploy:
      mode: replicated
      replicas: 3
      restart_policy:
        condition: on-failure
        max_attempts: 3
      placement:
        constraints: [node.role == manager]
      update_config:
        delay: 2s
      # Traefik labels under deploy: so they are visible in swarm mode.
      labels:
        - traefik.frontend.rule=Host:joomla1.dpaas1.pune.cdac.in
        - traefik.port=80
        - traefik.enable=true
        - traefik.backend.loadbalancer.sticky=true
    environment:
      JOOMLA_DB_HOST: 10.208.26.162
      JOOMLA_DB_PASSWORD: root
    tty: true
    networks:
      - net

networks:
  net:
    external: true

volumes:
  joomla1-www:
_______________________
```_____________ **
My traefik Dashboard:
[![Traefik logs and dashboard][1]][1]
[1]: https://i.stack.imgur.com/tcoGu.png
I'm quite new to the Docker world.
I have a local virtulbox setup:
vm1=swarm manager (mysql,visualizer) IP: 192.168.99.100
vm2= wordpress service IP: 192.168.99.101
I can reach the application on both IP’s 100/101. But I would like to also use the localhost in order to port forward localhost to NET since 192.168.99.0 subnet is HOST only.
In VBOX I have portforwarding set like this for the NAT interface on the machine where apache runs:
HOST PORT 8888 / GUEST PORT 8888
Currently the YAML looks like this:
version: '3.4'

services:
  wordpress:
    image: wordpress
    # FIX: removed the self-referential "- wordpress" entry — a service
    # cannot depend on itself (and depends_on is ignored by swarm anyway).
    depends_on:
      - mysql
    deploy:
      placement:
        constraints: [node.labels.application==true]
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
    ports:
      # Quoted so host:container pairs are not misread as YAML numbers.
      - "80:80"
    environment:
      WORDPRESS_DB_PASSWORD: "12345"
    networks:
      - wordpress_net

  mysql:
    image: mysql:latest
    volumes:
      - "/mnt/sda1/var/lib/docker/volumes/mysql_data/_data:/var/lib/mysql"
    deploy:
      placement:
        constraints: [node.role == manager]
    environment:
      MYSQL_ROOT_PASSWORD: "12345"
    networks:
      - wordpress_net

  visualizer:
    image: dockersamples/visualizer:stable
    ports:
      - "8080:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
    deploy:
      placement:
        constraints: [node.role == manager]
    networks:
      - wordpress_net

networks:
  wordpress_net:
How can I attach the eth0 interface to the container, so that both the swarm network and the NAT-ed network are reachable?
I was trying something like this but without success:
# NOTE(review): this excerpt omits "version:" and the top-level
# "networks: wordpress_net:" definition — confirm they exist in the full file.
services:
  wordpress:
    image: wordpress
    # FIX: removed the self-referential "- wordpress" entry — a service
    # cannot depend on itself (and depends_on is ignored by swarm anyway).
    depends_on:
      - mysql
    deploy:
      placement:
        constraints: [node.labels.application==true]
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
    ports:
      # Routing-mesh publication: port 80 reachable on every swarm node.
      - target: 80
        published: 80
        protocol: tcp
        mode: ingress
      # Host-mode publication: port 8888 bound directly on the node that
      # runs the container (matches the VirtualBox 8888 port forward).
      - target: 80
        published: 8888
        protocol: tcp
        mode: host
    environment:
      WORDPRESS_DB_PASSWORD: "12345"
    networks:
      - wordpress_net
Thanks !