docker compose: No assignment of host names when bridge is used

In order to make every container part of the default bridge network, I added network_mode: bridge to every service. The containers do join the bridge network, but they cannot reach each other by hostname. Below is the config.
docker-compose.yml
version: '2'
services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
    network_mode: bridge
    hostname: elasticsearch
  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      - "5000:5000"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    network_mode: bridge
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    ports:
      - "5601:5601"
    network_mode: bridge
    depends_on:
      - elasticsearch
Docker compose up
$ docker-compose up -d
Creating docker-elk_elasticsearch_1 ... done
Creating docker-elk_kibana_1 ... done
Creating docker-elk_logstash_1 ... done
Docker network inspect
$ docker network inspect bridge
[
    {
        "Name": "bridge",
        "Id": "f561a85fb2b22bbf251545c7021d57020cf152bd3a5c3c061c7d6b0cb4e267e5",
        "Created": "2018-09-19T07:02:49.36259364Z",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "0aedc2ce900b8a51f028e58a85c8db9480fd2816874a608540a899a4daab32fd": {
                "Name": "docker-elk_kibana_1",
                "EndpointID": "df3af338e0accb880ccc44323e5581064ee8ef84574485f1928d12dc415b598e",
                "MacAddress": "02:42:ac:11:00:05",
                "IPv4Address": "172.17.0.5/16",
                "IPv6Address": ""
            },
            "3f2088847bd8e958a047093b1af879c91c4071f57f0105bb7bf80fb8df832d41": {
                "Name": "docker-elk_logstash_1",
                "EndpointID": "6588b7eece43144833ae2f9ffe753e3cc6c70d0891a587c3e9a4e9ca84993532",
                "MacAddress": "02:42:ac:11:00:06",
                "IPv4Address": "172.17.0.6/16",
                "IPv6Address": ""
            },
            "ace35bb6fadd50823f64e9075b5972e6e3b24e8b73273a41e7a48f9eeff89da1": {
                "Name": "roach",
                "EndpointID": "dd058e3e9f46b2459f14a2e5bdf96eae277e81dcf7ac2e6ac1c97d8220ead30d",
                "MacAddress": "02:42:ac:11:00:03",
                "IPv4Address": "172.17.0.3/16",
                "IPv6Address": ""
            },
            "f90378063d2a0157110b77af39f2526347f1ea9634839e0d2c0c584fb14ff957": {
                "Name": "docker-elk_elasticsearch_1",
                "EndpointID": "294a2f67196788135f370bbf83526395ba4401afb25db9eb0b59fba7fd358912",
                "MacAddress": "02:42:ac:11:00:04",
                "IPv4Address": "172.17.0.4/16",
                "IPv6Address": ""
            },
            "f954c218e5ab15c83c2a0e2c848549c18879613f6f46d07f7ebf71cc89b6e55b": {
                "Name": "rabbitmq",
                "EndpointID": "e675ddc6076fe2256553e8b367a82aa36f488457e06ae6cf969c2e04feeb9fb8",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
Docker inspect elasticsearch
$ docker inspect docker-elk_elasticsearch_1
"NetworkSettings": {
"Bridge": "",
"SandboxID": "31a438f8fcb3dd8efca37260e77d346f21239b36d8bb30f5f08db4b79880a5c9",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"9200/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9200"
}
],
"9300/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9300"
}
]
},
"SandboxKey": "/var/run/docker/netns/31a438f8fcb3",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "294a2f67196788135f370bbf83526395ba4401afb25db9eb0b59fba7fd358912",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:04",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "f561a85fb2b22bbf251545c7021d57020cf152bd3a5c3c061c7d6b0cb4e267e5",
"EndpointID": "294a2f67196788135f370bbf83526395ba4401afb25db9eb0b59fba7fd358912",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.4",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:04",
"DriverOpts": null
}
}
}
Kibana logs, where Elasticsearch is unreachable:
$ docker logs docker-elk_kibana_1
{"type":"log","#timestamp":"2018-09-20T05:27:05Z","tags":["warning","elasticsearch","admin"],"pid":1,"message":"Unable to revive connection: http://elasticsearch:9200/"}
{"type":"log","#timestamp":"2018-09-20T05:27:05Z","tags":["warning","elasticsearch","admin"],"pid":1,"message":"No living connections"}
However, everything works fine with the config below, where I haven't provided any network-related configuration.
version: '2'
services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      - "5000:5000"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

Containers on the default bridge network cannot resolve each other by hostname; they can only reach each other by IP address. You can find this in the docs here: https://docs.docker.com/network/bridge/#differences-between-user-defined-bridges-and-the-default-bridge.
Containers on the default bridge network can only access each other by IP addresses, unless you use the --link option, which is considered legacy. On a user-defined bridge network, containers can resolve each other by name or alias.
The solution is to define a custom bridge network in the Compose file with networks, as described here: https://docs.docker.com/compose/compose-file/#networks, and add every container to this user-defined network. On that network, containers can resolve each other by name.
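As a minimal sketch of that change applied to the Compose file above (the network name elk is illustrative; any name works):

version: '2'
services:
  elasticsearch:
    # build, volumes, ports, environment as before; network_mode and hostname removed
    networks:
      - elk
  logstash:
    # as before, with network_mode removed
    networks:
      - elk
  kibana:
    # as before, with network_mode removed
    networks:
      - elk
networks:
  elk:
    driver: bridge

On this user-defined network, Kibana reaches Elasticsearch at http://elasticsearch:9200 because Compose registers each service name as a DNS alias; no hostname: entries are needed.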

Related

docker does not connect to bridge network

I have a container and I want to connect to a DB; the Docker host machine has the IP X.X.2.26 and the database X.X.2.27. I tried to connect the network in bridge mode, but I can't connect to the database. The host machine does have a connection to the database.
This is my docker-compose.yml
version: '3.7'

networks:
  sfp:
    name: sfp
    driver: bridge

services:
  sfpapi:
    image: st/sfp-api:${VERSION-latest}
    container_name: "sfp-api"
    restart: always
    ports:
      - "8082:8081"
    networks:
      - sfp
    environment:
      - TZ=America/Mexico_City
      - SPRING_DATASOURCE_URL
      - SPRING_DATASOURCE_USERNAME
      - SPRING_DATASOURCE_PASSWORD

  app:
    image: st/sfp-app:${VERSION-latest}
    container_name: "app"
    restart: always
    ports:
      - "8081:80"
    networks:
      - sfp
    environment:
      - API_HOST
If I check the networks, the network was created successfully.
docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
86a58ac8a053   bridge    bridge    local
1890c6433c09   host      host      local
bab0a88222a3   none      null      local
01a411ad42df   sfp       bridge    local
But when I inspect the network, I don't see any attached containers:
docker network inspect bridge
[
    {
        "Name": "bridge",
        "Id": "86a58ac8a05398bb827252b2dbe4c99e52aedf0896be6aa6c4358c41cf0e766e",
        "Created": "2022-04-06T12:50:09.922881204-05:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {},
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "false",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
This is the inspect of the container:
docker inspect --format "{{ json .NetworkSettings.Networks }}" sfp-api
{"sfp":{"IPAMConfig":null,"Links":null,"Aliases":["sfpapi","bab30efe892b"],"NetworkID":"2076ee845b06df6ace975e1cf3fd360eb174ee97a9ae608911c243b08e98aa42","EndpointID":"3837a6f55449a59267aea7bbafc754d0fab6fedad282e280cce9d880d0c299a7","Gateway":"172.26.0.1","IPAddress":"172.26.0.3","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:1a:00:03","DriverOpts":null}}

docker-compose service cannot access another service through its name

I am quite new to Docker, but after watching a few tutorials I gathered that if I wanted my services to share common code I had to use docker-compose.
This is my directory structure:
- project
  - docker-compose.yml
  - packages
    - common
      - src
        - eureka
          - eureka-helper.js
    - gateway
      - Dockerfile
      - src
        - (some more directories and files)
        - server.js
    - users
      - Dockerfile
      - src
        - app.js
        - (some more directories and files)
version: '3'
services:
  eureka:
    image: springcloud/eureka
    ports:
      - "8761:8761"
  gateway:
    build:
      context: ./
      dockerfile: ./packages/gateway/Dockerfile
    ports:
      - "3000:3000"
    links:
      - eureka
  users:
    build:
      context: ./
      dockerfile: ./users/Dockerfile
    ports:
      - "3004:3004"
    links:
      - mongo
      - eureka
  mongo:
    image: mongo
    volumes:
      - C:\Users\myUser\docker\mongodb-data:/data/db
    ports:
      - "27017:27017"
My server.js (inside gateway) and app.js (inside users) call a function inside eureka-helper that registers with Eureka.
eurekaHelper.registerWithEureka('gateway', eureka, port);
But it seems that server.js doesn't have a value for this eureka:
gateway_1 | /opt/app/gateway/server.js:27
gateway_1 | eurekaHelper.registerWithEureka('gateway', eureka, port);
gateway_1 | ^
gateway_1 |
gateway_1 | ReferenceError: eureka is not defined
As per this: https://docs.docker.com/compose/networking/, it doesn't seem that I really need the link in docker-compose:
Links allow you to define extra aliases by which a service is reachable from another service. They are not required to enable services to communicate - by default, any service can reach any other service at that service’s name.
... but gateway is not able to reach eureka anyway.
Any idea if I'm missing something?
When I do inspect in gateway:
"Networks": {
"*project*_default": {
"IPAMConfig": null,
"Links": null,
"Aliases": [
"gateway",
"a358b371de9d"
],
"NetworkID": "d986ee77bf6181e9dd59711650359dc49fd8ce3fc34c6772133a0049ab55bd3b",
"EndpointID": "9633a7affe435793345342d4f6d00d5bef43d2267676448dfde0f97d75d5f7af",
"Gateway": "172.19.0.1",
"IPAddress": "172.19.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:13:00:03",
"DriverOpts": null
}
}
When I do inspect in eureka:
"Networks": {
"*project*_default": {
"IPAMConfig": null,
"Links": null,
"Aliases": [
"eureka",
"17a218bf6e9e"
],
"NetworkID": "d986ee77bf6181e9dd59711650359dc49fd8ce3fc34c6772133a0049ab55bd3b",
"EndpointID": "43f4a9eb9efd4d30ad65d2c943065b8fb32ed50822bd553e07a2b0020c173fec",
"Gateway": "172.19.0.1",
"IPAddress": "172.19.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:13:00:02",
"DriverOpts": null
}
}
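One thing the two outputs above show is that both containers are on *project*_default with the aliases gateway and eureka, so Compose's DNS should already resolve eureka from the gateway container. The stack trace, however, is a JavaScript ReferenceError: the identifier eureka is not defined in server.js, which is a code issue rather than a networking one. Assuming registerWithEureka takes the Eureka host as its second argument, passing the service name as a string would lean on Compose DNS:

eurekaHelper.registerWithEureka('gateway', 'eureka', port);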

Why docker container does not listen the port and I can't ping it from another container

I need to ping one container (k4fntr_sl_listener) from another, and I am trying to do it with the container name and ports, but it is not working.
networks:
  backend-network:
    driver: bridge
  frontend-network:
    driver: bridge

volumes:
  redis-data:
  pg-data:

services:
  &app-service app: &app-service-template
    container_name: k4fntr_app
    build:
      context: ./docker/php-fpm
      args:
        UID: ${UID?Use your user ID}
        GID: ${GID?Use your group ID}
        USER: ${USER?Use your user name}
    user: "${UID}:${GID}"
    hostname: *app-service
    volumes:
      - /etc/passwd/:/etc/passwd:ro
      - /etc/group/:/etc/group:ro
      - ./:/var/www/k4fntr
    environment:
      APP_ENV: "${APP_ENV}"
      CONTAINER_ROLE: app
      FPM_PORT: &php-fpm-port 9000
      FPM_USER: "${UID:-1000}"
      FPM_GROUP: "${GID:-1000}"
    depends_on:
      - redis
      - database
    networks:
      - backend-network

  &queue-service queue:
    <<: *app-service-template
    container_name: k4fntr_queue
    restart: always
    hostname: *queue-service
    depends_on:
      - app
    environment:
      CONTAINER_ROLE: queue

  &schedule-service schedule:
    <<: *app-service-template
    container_name: k4fntr_schedule
    restart: always
    hostname: *schedule-service
    depends_on:
      - app
    environment:
      CONTAINER_ROLE: scheduler

  &sportlevel-listener sportlevel_listener:
    <<: *app-service-template
    container_name: k4fntr_sl_listener
    restart: always
    hostname: *sportlevel-listener
    ports:
      - "8999:8999"
    depends_on:
      - app
    environment:
      CONTAINER_ROLE: sl_listener
As I can see, the container's ports are mapped.
I can also see that the containers are in one network:
docker network inspect fntr_backend-network
"Containers": {
"20057689b3921b3a4b1ee99f015b89b033cf072934f4cbbf63934b38b3b63203": {
"Name": "k4fntr_php-fpm",
"EndpointID": "700f507b0ae0eb830d5922f646ea459e67e2f5cc878867a6c15114ff9bcb202c",
"MacAddress": "02:42:c0:a8:80:07",
"IPv4Address": "192.168.128.7/20",
"IPv6Address": ""
},
"30b4ac098109c512065082d42f6594569cd51b69937e502686b6fc91c9b410ff": {
"Name": "k4fntr_echo",
"EndpointID": "c5e96fa17ac3f932b93bc98ffd19d9f1ba60d71deabe82c5a9c53fe9c67e8020",
"MacAddress": "02:42:c0:a8:80:05",
"IPv4Address": "192.168.128.5/20",
"IPv6Address": ""
},
"3ea5a601c4c0d803864cd420a7e4d689f2a985b27c5ba4c9c58a872e91945a0f": {
"Name": "k4fntr_app",
"EndpointID": "cc2a7dfbd0059f7483398684c9b969c2edf90178a7e38b311c9dce107a22fb61",
"MacAddress": "02:42:c0:a8:80:06",
"IPv4Address": "192.168.128.6/20",
"IPv6Address": ""
},
"a176fed5488fc02e3bb819393ada72f710c7d3435876f0326d5f55214a2e1df9": {
"Name": "k4fntr_queue",
"EndpointID": "4d85ef092a69a9f1c3362eec99c193dc3e94f09682a37f0486596ba12fa70b1e",
"MacAddress": "02:42:c0:a8:80:09",
"IPv4Address": "192.168.128.9/20",
"IPv6Address": ""
},
"a689d0b480fd0f68e0f2385f73b75788714698af5d5466fefc7d65a7dcb39dbb": {
"Name": "k4fntr_mail",
"EndpointID": "d29306a336018773e8937cabafa06774cc5ef3cef7eaa7e62a5ba2eba403bbf0",
"MacAddress": "02:42:c0:a8:80:03",
"IPv4Address": "192.168.128.3/20",
"IPv6Address": ""
},
"a772c494e567017e7315df5fe67aeb45b4ee75ba8e555c4ef671f909928caa30": {
"Name": "k4fntr_database",
"EndpointID": "7d6c43b4d97485c49bbd8ded016b298d80b52d82bed3b69447293b8d5aabb8ce",
"MacAddress": "02:42:c0:a8:80:04",
"IPv4Address": "192.168.128.4/20",
"IPv6Address": ""
},
"d2ebc1a9f62455fee874d64ab89d71e867bdbecd546968efbe75b9fea4f0043d": {
"Name": "k4fntr_schedule",
"EndpointID": "12aa000ce78b9ec8c881e88face25208c22f02e1aa8e094b4bf03e0cca393da8",
"MacAddress": "02:42:c0:a8:80:0a",
"IPv4Address": "192.168.128.10/20",
"IPv6Address": ""
},
"d70d86bb4237f6e46dc1ccd103d8cca7913eeb33bf4c9e331ce7a523cd104648": {
"Name": "k4fntr_redis",
"EndpointID": "24ae090c619ede754eda9d406725388abe953f648aa436bfe4226623a0199001",
"MacAddress": "02:42:c0:a8:80:02",
"IPv4Address": "192.168.128.2/20",
"IPv6Address": ""
},
"fc14098eeee8d87fe34e78a4117ed7cc7962cf1735dfe93c790024db0126bb38": {
"Name": "k4fntr_sl_listener",
"EndpointID": "5483d16d8d542b67aa0d8c9dfa2e2bdd2bbe7633d5324ab8551c451133614d51",
"MacAddress": "02:42:c0:a8:80:08",
"IPv4Address": "192.168.128.8/20",
"IPv6Address": ""
}
},
But when I try to ping this container, I get a connection refused error.
I can also see that the container I need to reach does not listen on port 8999; there is only one listening port, from the PHP process.
How can I solve the problem? And is it possible to reach this PHP process from another container? I need this because the container was created only for this PHP process.
So I have found the answer. In short, the problem was that my process was listening on localhost, so it could not be reached from other containers. I changed the script to listen on 0.0.0.0:8096, changed the port mapping to "8096:8096", and everything works fine.
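A minimal sketch of that fix, assuming the service layout above (only the bind address and the port mapping changed):

services:
  sportlevel_listener:
    # the process inside must bind 0.0.0.0:8096, not 127.0.0.1:8096;
    # loopback is only reachable from within the same container
    ports:
      - "8096:8096"  # host mapping; other containers on backend-network
                     # can reach k4fntr_sl_listener:8096 even without it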

Docker Cannot Resolve Hostname

I am trying to link my containerized VueJS frontend with my containerized Spring Boot API backend, with great difficulty.
Whenever I try to make an HTTP request to my API using the container name, I get the following.
OPTIONS http://api:4505/user/sign-in net::ERR_NAME_NOT_RESOLVED
Here is my docker-compose file:
version: "3"
services:
mongodb:
image: mongo
container_name: mongo
ports:
- "27017:27017"
api:
image: registry.gitlab.com/darragh.oflah/api:latest
container_name: api
ports:
- "4505:4505"
links:
- mongodb
web:
image: registry.gitlab.com/darragh.oflah/web:latest
container_name: web
ports:
- "80:8080"
links:
- api
Here is what I get when I run sudo docker network inspect tmp_default.
So it would seem that the network is set up correctly:
[
    {
        "Name": "tmp_default",
        "Id": "75ab7c89cb5a80aa7eddd7c5a3f7f4aafb911cfe96a923ad3db2219552366fd7",
        "Created": "2020-02-04T16:55:49.131485109Z",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.18.0.0/16",
                    "Gateway": "172.18.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": true,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "6ae3db08be2ed22245a173a677ae1b0f28eca878aa84e43744a320589cbda5af": {
                "Name": "mongo",
                "EndpointID": "b399bb72f28b6d47a93927712a665dcc725d27a6ba2ee432e715db00c9cbc835",
                "MacAddress": "02:42:ac:12:00:02",
                "IPv4Address": "172.18.0.2/16",
                "IPv6Address": ""
            },
            "fa7e4066e436181ce2991e048790f8de518af31fb97cf9351316ff8f41824449": {
                "Name": "api",
                "EndpointID": "bf8d071683bfa4ecbd215f3dd534d0e278702ed4377552ef242e5c65b01c3fa1",
                "MacAddress": "02:42:ac:12:00:03",
                "IPv4Address": "172.18.0.3/16",
                "IPv6Address": ""
            },
            "fb544ef5389afd74d53f45d6de968008632b65340f161527a9c7aa4214aa7674": {
                "Name": "web",
                "EndpointID": "2dc3e8a452c241916a2e9f7e25b33ee7997fe2d25ec3543e5d34e888e50d905c",
                "MacAddress": "02:42:ac:12:00:04",
                "IPv4Address": "172.18.0.4/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {
            "com.docker.compose.network": "default",
            "com.docker.compose.project": "tmp",
            "com.docker.compose.version": "1.21.2"
        }
    }
]
In the network tab, the request says it failed, which would indicate to me that the request is failing to leave the container at all.
Your compose file creates a docker network. You're running "web" in a container on that network. All containers in the docker network can access other containers via their hostnames. However, the browser is running on your host machine, and your host machine is not in the docker network. Therefore your browser won't be able to access the api container via its hostname. If you type http://api:4505/user/sign-in into the browser URL bar, it's doing the same thing, and you'll also get an error.
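Since the compose file publishes the API to the host with "4505:4505", the browser can reach it through the Docker host instead of the container name. A quick check from the host machine (localhost assumed to be where Docker runs):

curl http://localhost:4505/user/sign-in

So the frontend code should call http://localhost:4505 (or the host's real DNS name) rather than http://api:4505.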

Docker swarm network not recognizing service/container on worker node. Using Traefik

I'm trying to test out a Traefik load-balanced Docker Swarm and added a blank Apache service to the compose file.
For some reason I'm unable to place this Apache service on a worker node: I get a 502 Bad Gateway error unless it's on the manager node. Did I configure something wrong in the YML file?
networks:
  proxy:
    external: true

configs:
  traefik_toml_v2:
    file: $PWD/infra/traefik.toml

services:
  traefik:
    image: traefik:1.5-alpine
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 5s
      labels:
        - traefik.enable=true
        - traefik.docker.network=proxy
        - traefik.frontend.rule=Host:traefik.example.com
        - traefik.port=8080
        - traefik.backend.loadbalancer.sticky=true
        - traefik.frontend.passHostHeader=true
      placement:
        constraints:
          - node.role == manager
      restart_policy:
        condition: on-failure
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - $PWD/infra/acme.json:/acme.json
    networks:
      - proxy
    ports:
      - target: 80
        protocol: tcp
        published: 80
        mode: ingress
      - target: 443
        protocol: tcp
        published: 443
        mode: ingress
      - target: 8080
        protocol: tcp
        published: 8080
        mode: ingress
    configs:
      - source: traefik_toml_v2
        target: /etc/traefik/traefik.toml
        mode: 444

  server:
    image: bitnami/apache:latest
    networks:
      - proxy
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.role == worker
      restart_policy:
        condition: on-failure
      labels:
        - traefik.enable=true
        - traefik.docker.network=proxy
        - traefik.port=80
        - traefik.backend=nerdmercs
        - traefik.backend.loadbalancer.swarm=true
        - traefik.backend.loadbalancer.sticky=true
        - traefik.frontend.passHostHeader=true
        - traefik.frontend.rule=Host:www.example.com
You'll see I've enabled swarm and everything
The proxy network is an overlay network and I'm able to see it in the worker node:
ubuntu@staging-worker1:~$ sudo docker network ls
NETWORK ID     NAME              DRIVER    SCOPE
f91525416b42   bridge            bridge    local
7c3264136bcd   docker_gwbridge   bridge    local
7752e312e43f   host              host      local
epaziubbr9r1   ingress           overlay   swarm
4b50618f0eb4   none              null      local
qo4wmqsi12lc   proxy             overlay   swarm
ubuntu@staging-worker1:~$
And when I inspect that network ID
$ docker network inspect qo4wmqsi12lcvsqd1pqfq9jxj
[
    {
        "Name": "proxy",
        "Id": "qo4wmqsi12lcvsqd1pqfq9jxj",
        "Created": "2018-02-06T09:40:37.822595405Z",
        "Scope": "swarm",
        "Driver": "overlay",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "10.0.0.0/24",
                    "Gateway": "10.0.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "1860b30e97b7ea824ffc28319747b23b05c01b3fb11713fa5a2708321882bc5e": {
                "Name": "proxy_visualizer.1.dc0elaiyoe88s0mp5xn96ipw0",
                "EndpointID": "d6b70d4896ff906958c21afa443ae6c3b5b6950ea365553d8cc06104a6274276",
                "MacAddress": "02:42:0a:00:00:09",
                "IPv4Address": "10.0.0.9/24",
                "IPv6Address": ""
            },
            "3ad45d8197055f22f5ce629d896236419db71ff5661681e39c50869953892d4e": {
                "Name": "proxy_traefik.1.wvsg02fel9qricm3hs6pa78xz",
                "EndpointID": "e293f8c98795d0fdfff37be16861afe868e8d3077bbb24df4ecc4185adda1afb",
                "MacAddress": "02:42:0a:00:00:18",
                "IPv4Address": "10.0.0.24/24",
                "IPv6Address": ""
            },
            "735191796dd68da2da718ebb952b0a431ec8aa1718fe3be2880d8110862644a9": {
                "Name": "proxy_portainer.1.xkr5losjx9m5kolo8kjihznvr",
                "EndpointID": "de7ef4135e25939a2d8a10b9fd9bad42c544589684b30a9ded5acfa751f9c327",
                "MacAddress": "02:42:0a:00:00:07",
                "IPv4Address": "10.0.0.7/24",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.driver.overlay.vxlanid_list": "4102"
        },
        "Labels": {},
        "Peers": [
            {
                "Name": "be4fb35c80f8",
                "IP": "manager IP"
            },
            {
                "Name": "4281cfd9ca73",
                "IP": "worker IP"
            }
        ]
    }
]
You'll see Traefik, Portainer, and Visualizer are all present, but not the Apache container on the worker node.
Inspecting the network on the worker node:
$ sudo docker network inspect qo4wmqsi12lc
[
    {
        "Name": "proxy",
        "Id": "qo4wmqsi12lcvsqd1pqfq9jxj",
        "Created": "2018-02-06T19:53:29.104259115Z",
        "Scope": "swarm",
        "Driver": "overlay",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "10.0.0.0/24",
                    "Gateway": "10.0.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "c5725a332db5922a16b9a5e663424548a77ab44ab021e25dc124109e744b9794": {
                "Name": "example_site.1.pwqqddbhhg5tv0t3cysajj9ux",
                "EndpointID": "6866abe0ae2a64e7d04aa111adc8f2e35d876a62ad3d5190b121e055ef729182",
                "MacAddress": "02:42:0a:00:00:3c",
                "IPv4Address": "10.0.0.60/24",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.driver.overlay.vxlanid_list": "4102"
        },
        "Labels": {},
        "Peers": [
            {
                "Name": "be4fb35c80f8",
                "IP": "manager IP"
            },
            {
                "Name": "4281cfd9ca73",
                "IP": "worker IP"
            }
        ]
    }
]
It shows up in the network's container list, but the manager node's containers are not there either.
Portainer is also unable to see the Apache site when it's on the worker node.
This problem is related to this: Creating new docker-machine instance always fails validating certs using openstack driver.
Basically, the answer is:
It turns out my hosting service locked down everything other than 22, 80, and 443 on the Open Stack Security Group Rules. I had to add 2376 TCP Ingress for docker-machine's commands to work. It helps explain why docker-machine ssh worked but not docker-machine env.
You should look at https://docs.docker.com/datacenter/ucp/2.2/guides/admin/install/system-requirements/#ports-used and make sure those ports are all open.
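For swarm overlay networking specifically, the ports that page covers come down to a short list; a sketch of opening them with ufw (firewall tooling assumed, run on every node):

sudo ufw allow 2377/tcp   # swarm cluster management traffic
sudo ufw allow 7946/tcp   # node-to-node container network discovery
sudo ufw allow 7946/udp
sudo ufw allow 4789/udp   # VXLAN data path for overlay network traffic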
