Docker swarm worker nodes bad gateway - docker

I set up Traefik with Docker Swarm based on this guide. SSL only worked on a single node, so I added the certificate label to the other nodes in the swarm.
Update the certificate label on all the nodes
docker node update --label-add traefik-public.traefik-public-certificates=true $NODE_ID
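To confirm the label actually landed on every node, a quick check (a sketch; $NODE_ID is each node's ID as above):
# list the nodes, then confirm the certificates label is set on each one
docker node ls --format '{{.ID}} {{.Hostname}}'
docker node inspect $NODE_ID --format '{{ index .Spec.Labels "traefik-public.traefik-public-certificates" }}'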
Start Traefik constrained to a manager node
docker service create \
--name traefik \
--constraint=node.labels.traefik-public.traefik-public-certificates==true \
--constraint=node.role==manager \
--publish 80:80 \
--publish 443:443 \
--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock \
--mount type=volume,source=traefik-public-certificates,target=/certificates \
--network traefik-public \
--label "traefik.frontend.rule=Host:monitor.$DOMAINNAME" \
--label "traefik.enable=true" \
--label "traefik.port=8080" \
--label "traefik.tags=traefik-public" \
--label "traefik.docker.network=traefik-public" \
--label "traefik.redirectorservice.frontend.entryPoints=http" \
--label "traefik.redirectorservice.frontend.redirect.entryPoint=https" \
--label "traefik.webservice.frontend.entryPoints=https" \
--label "traefik.frontend.auth.basic.users=${HTTP_USERNAME}:${HASHED_PASSWORD}" \
traefik:v1.7 \
--docker \
--docker.swarmmode \
--docker.watch \
--docker.exposedbydefault=false \
--constraints=tag==traefik-public \
--entrypoints='Name:http Address::80' \
--entrypoints='Name:https Address::443 TLS' \
--acme \
--acme.email=$EMAIL \
--acme.storage=/certificates/acme.json \
--acme.entryPoint=https \
--acme.httpChallenge.entryPoint=http \
--acme.onhostrule=true \
--acme.acmelogging=true \
--logLevel=INFO \
--accessLog \
--api
Docker Node Inspect Worker Node
[
{
"ID": "nv15pwb6bie7nvz2yk9uiii10",
"Version": {
"Index": 39862
},
"CreatedAt": "2019-04-20T09:11:29.540161196Z",
"UpdatedAt": "2019-04-22T05:37:40.858645073Z",
"Spec": {
"Labels": {
"traefik-public.traefik-public-certificates": "true"
},
"Role": "worker",
"Availability": "active"
},
"Description": {
"Hostname": "node-1",
"Platform": {
"Architecture": "x86_64",
"OS": "linux"
},
"Resources": {
"NanoCPUs": 1000000000,
"MemoryBytes": 1040351232
},
"Engine": {
"EngineVersion": "18.09.5",
"Labels": {
"provider": "digitalocean"
},
"Plugins": [
{
"Type": "Log",
"Name": "awslogs"
},
{
"Type": "Log",
"Name": "fluentd"
},
{
"Type": "Log",
"Name": "gcplogs"
},
{
"Type": "Log",
"Name": "gelf"
},
{
"Type": "Log",
"Name": "journald"
},
{
"Type": "Log",
"Name": "json-file"
},
{
"Type": "Log",
"Name": "local"
},
{
"Type": "Log",
"Name": "logentries"
},
{
"Type": "Log",
"Name": "splunk"
},
{
"Type": "Log",
"Name": "syslog"
},
{
"Type": "Network",
"Name": "bridge"
},
{
"Type": "Network",
"Name": "host"
},
{
"Type": "Network",
"Name": "macvlan"
},
{
"Type": "Network",
"Name": "null"
},
{
"Type": "Network",
"Name": "overlay"
},
{
"Type": "Volume",
"Name": "local"
}
]
},
"TLSInfo": {
"TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUJaJpxCmObYclnve1gMoDgqpbHpYwCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTkwNDIwMDEwMDAwWhcNMzkwNDE1MDEw\nMDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABKZ3tuUh1fbvsBrRxCr/2QpK42UXKH114Y5xUNjCdoVL7sDNJnPqHhGasbXZ\ncuYSf4oFPXau1Euqyo/lHFcn0TqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBQLUasL0NneSvfzCIyelreL3Zl8GDAKBggqhkjO\nPQQDAgNJADBGAiEAuPDayJHm8fZEf1yYzS6CtrY/XuRYZK8NuNfG8Xsqs9YCIQDI\nPs6g4c65XPS7Gn931JEC/Qi7Zlu+ccMHy+Eup5SHsQ==\n-----END CERTIFICATE-----\n",
"CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
"CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEpne25SHV9u+wGtHEKv/ZCkrjZRcofXXhjnFQ2MJ2hUvuwM0mc+oeEZqxtdly5hJ/igU9dq7US6rKj+UcVyfROg=="
}
},
"Status": {
"State": "ready",
"Addr": "worker-machine-ip"
}
}
]
Docker Node Inspect Manager Node
[
{
"ID": "fw4k9vgz7y3929i731o7fll7d",
"Version": {
"Index": 39771
},
"CreatedAt": "2019-04-20T01:04:39.695763427Z",
"UpdatedAt": "2019-04-22T05:06:58.875733526Z",
"Spec": {
"Labels": {
"traefik-public.traefik-public-certificates": "true"
},
"Role": "manager",
"Availability": "active"
},
"Description": {
"Hostname": "shijie-master",
"Platform": {
"Architecture": "x86_64",
"OS": "linux"
},
"Resources": {
"NanoCPUs": 1000000000,
"MemoryBytes": 2097283072
},
"Engine": {
"EngineVersion": "18.09.4",
"Plugins": [
{
"Type": "Log",
"Name": "awslogs"
},
{
"Type": "Log",
"Name": "fluentd"
},
{
"Type": "Log",
"Name": "gcplogs"
},
{
"Type": "Log",
"Name": "gelf"
},
{
"Type": "Log",
"Name": "journald"
},
{
"Type": "Log",
"Name": "json-file"
},
{
"Type": "Log",
"Name": "local"
},
{
"Type": "Log",
"Name": "logentries"
},
{
"Type": "Log",
"Name": "splunk"
},
{
"Type": "Log",
"Name": "syslog"
},
{
"Type": "Network",
"Name": "bridge"
},
{
"Type": "Network",
"Name": "host"
},
{
"Type": "Network",
"Name": "macvlan"
},
{
"Type": "Network",
"Name": "null"
},
{
"Type": "Network",
"Name": "overlay"
},
{
"Type": "Volume",
"Name": "local"
}
]
},
"TLSInfo": {
"TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUJaJpxCmObYclnve1gMoDgqpbHpYwCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTkwNDIwMDEwMDAwWhcNMzkwNDE1MDEw\nMDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABKZ3tuUh1fbvsBrRxCr/2QpK42UXKH114Y5xUNjCdoVL7sDNJnPqHhGasbXZ\ncuYSf4oFPXau1Euqyo/lHFcn0TqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBQLUasL0NneSvfzCIyelreL3Zl8GDAKBggqhkjO\nPQQDAgNJADBGAiEAuPDayJHm8fZEf1yYzS6CtrY/XuRYZK8NuNfG8Xsqs9YCIQDI\nPs6g4c65XPS7Gn931JEC/Qi7Zlu+ccMHy+Eup5SHsQ==\n-----END CERTIFICATE-----\n",
"CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
"CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEpne25SHV9u+wGtHEKv/ZCkrjZRcofXXhjnFQ2MJ2hUvuwM0mc+oeEZqxtdly5hJ/igU9dq7US6rKj+UcVyfROg=="
}
},
"Status": {
"State": "ready",
"Addr": "manager-machine-ip"
},
"ManagerStatus": {
"Leader": true,
"Reachability": "reachable",
"Addr": "manager-machine-ip:2377"
}
}
]
Docker network inspect on traefik-public
[
{
"Name": "traefik-public",
"Id": "6655p8lsxjmhqhha3e3fbs5xz",
"Created": "2019-04-21T06:07:01.862111049Z",
"Scope": "swarm",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "10.0.12.0/24",
"Gateway": "10.0.12.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"Containers": { .. },
"ConfigOnly": false,
"Options": {
"com.docker.network.driver.overlay.vxlanid_list": "4109"
},
"Labels": {},
"Peers": [...]
}
]
Problem
Services running on the manager node work fine, but services on worker nodes return Error 502 Bad Gateway.
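A 502 from Traefik in swarm mode usually means it cannot reach the service task over the traefik-public overlay network. A hedged diagnostic sketch (assuming ufw, with <worker-machine-ip> as a placeholder): swarm overlay networking needs TCP 2377, TCP/UDP 7946, and UDP 4789 open between every node.
# open the ports swarm needs between all nodes
sudo ufw allow 2377/tcp   # cluster management
sudo ufw allow 7946/tcp   # node-to-node communication
sudo ufw allow 7946/udp
sudo ufw allow 4789/udp   # VXLAN overlay traffic
# from the manager running Traefik, check a worker is reachable on the gossip port (UDP 7946/4789 must be open too)
nc -zv <worker-machine-ip> 7946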

Related

Unable to connect redis container from my web api container in azure container instance (ACI)

I'm deploying my app in an Azure Container Instance (container group).
I have 3 Docker containers:
web_api
redis
neo4j
I'm able to access the database by using localhost:7474 as the hostname, but I can't access redis by using localhost as the hostname.
This is the same problem I face when I run the containers locally with the docker run command.
NOTE: I can't use docker-compose, as my intention is to use ACI.
azuredeploy.json
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"containerGroupName": {
"type": "string",
"defaultValue": "devCG",
"metadata": {
"description": ""
}
}
},
"variables": {
"name_web": "web-api",
"image_web": "dev.azurecr.io/web-api:89",
"name_redis": "redis",
"image_redis": "redis:5.0.9",
"name_neo4j": "neo4j",
"image_neo4j": "neo4j:3.5.6"
},
"resources": [
{
"name": "[parameters('containerGroupName')]",
"type": "Microsoft.ContainerInstance/containerGroups",
"apiVersion": "2019-12-01",
"location": "[resourceGroup().location]",
"properties": {
"containers": [
{
"name": "[variables('name_web')]",
"properties": {
"image": "[variables('image_web')]",
"resources": {
"requests": {
"cpu": 1,
"memoryInGb": 0.5
}
},
"ports": [
{
"port": 80
},
{
"port": 8080
}
]
}
},
{
"name": "[variables('name_redis')]",
"properties": {
"image": "[variables('image_redis')]",
"resources": {
"requests": {
"cpu": 0.5,
"memoryInGb": 0.2
}
}
}
},
{
"name": "[variables('name_neo4j')]",
"properties": {
"image": "[variables('image_neo4j')]",
"resources": {
"requests": {
"cpu": 0.5,
"memoryInGb": 0.2
}
},
"ports": [
{
"port": 7474
}
]
}
}
],
"imageRegistryCredentials": [
{
"server": "dev.azurecr.io",
"username": "dev",
"password": "********************"
}
],
"restartPolicy": "Always",
"osType": "Linux",
"volumes": [
{
"name": "devfs",
"azureFile": {
"shareName": "dev",
"readOnly": "false",
"storageAccountName": "devfs",
"storageAccountKey": "*****************************"
}
}
],
"ipAddress": {
"type": "Public",
"ports": [
{
"protocol": "tcp",
"port": 80
}
],
"dnsNameLabel": "dev"
}
}
}
],
"outputs": {
"containerIPv4Address": {
"type": "string",
"value": "[reference(resourceId('Microsoft.ContainerInstance/containerGroups/', parameters('containerGroupName'))).ipAddress.ip]"
}
}
}
Accessing Redis over localhost requires special tweaks in the Redis config.
Have a look at
https://github.com/docker-library/redis/issues/45
and
https://github.com/luin/ioredis/issues/763
The recommendation is to connect using the redis hostname; you can map localhost to the redis hostname.
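As a rough sketch of the kind of tweak those issues describe (bind address and protected mode), you can test it locally with docker run; in the ACI template the same arguments would go into the redis container's command property. The flags here are illustrative, not a confirmed fix for this particular setup:
# run redis with protected mode disabled and listening on all interfaces (illustration only)
docker run -d --name redis -p 6379:6379 redis:5.0.9 redis-server --protected-mode no --bind 0.0.0.0
# verify a client can reach it
redis-cli -h 127.0.0.1 -p 6379 ping   # expect PONG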

Rabbitmq in Kubernetes: Command not found

Trying to start up rabbitmq in K8s while attaching a configmap gives me the following error:
/usr/local/bin/docker-entrypoint.sh: line 367: rabbitmq-plugins: command not found
/usr/local/bin/docker-entrypoint.sh: line 405: exec: rabbitmq-server: not found
Exactly the same setup is working fine with docker-compose, so I am a bit lost. Using rabbitmq:3.8.3
Here is a snippet from my deployment:
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "rabbitmq"
}
},
"spec": {
"volumes": [
{
"name": "rabbitmq-configuration",
"configMap": {
"name": "rabbitmq-configuration",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "rabbitmq",
"image": "rabbitmq:3.8.3",
"ports": [
{
"containerPort": 5672,
"protocol": "TCP"
}
],
"env": [
{
"name": "RABBITMQ_DEFAULT_USER",
"value": "guest"
},
{
"name": "RABBITMQ_DEFAULT_PASS",
"value": "guest"
},
{
"name": "RABBITMQ_ENABLED_PLUGINS_FILE",
"value": "/opt/enabled_plugins"
}
],
"resources": {},
"volumeMounts": [
{
"name": "rabbitmq-configuration",
"mountPath": "/opt/"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {},
"schedulerName": "default-scheduler"
}
},
And here is the configuration:
{
"kind": "ConfigMap",
"apiVersion": "v1",
"metadata": {
"name": "rabbitmq-configuration",
"namespace": "e360",
"selfLink": "/api/v1/namespaces/default/configmaps/rabbitmq-configuration",
"uid": "28071976-98f6-11ea-86b2-0244a03303e1",
"resourceVersion": "1034540",
"creationTimestamp": "2020-05-18T10:55:58Z"
},
"data": {
"enabled_plugins": "[rabbitmq_management].\n"
}
}
That's because you're mounting a volume at /opt, which is the RabbitMQ home path.
So the entrypoint script cannot find any of the RabbitMQ binaries.
You can see the RabbitMQ Dockerfile here.
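A sketch of one possible fix: mount the ConfigMap under a sub-directory of /opt instead of /opt itself, and point RABBITMQ_ENABLED_PLUGINS_FILE at the new path. The deployment name and the env/volumeMount indices are assumptions based on the snippet above:
# mount the ConfigMap at /opt/rabbitmq-configuration so /opt/rabbitmq stays visible,
# and update the plugins-file env var accordingly (deployment name "rabbitmq" is assumed)
kubectl -n e360 patch deployment rabbitmq --type='json' -p='[
  {"op": "replace", "path": "/spec/template/spec/containers/0/volumeMounts/0/mountPath", "value": "/opt/rabbitmq-configuration"},
  {"op": "replace", "path": "/spec/template/spec/containers/0/env/2/value", "value": "/opt/rabbitmq-configuration/enabled_plugins"}
]'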

Image update in the external docker registry doesn't trigger deployment

I am importing the following template in the OpenShift web console to create an ImageStream, DeploymentConfig & Service.
The ImageStream is created from a Docker image available in an external Docker registry.
Everything seems to be working fine, apart from the fact that whenever the Docker image changes in the external registry, redeployment doesn't take place.
Is it possible with OpenShift and an external registry to trigger automatic deployments when the Docker image changes in the external registry?
{
"kind": "Template",
"apiVersion": "v1",
"metadata": {
"name": "test-100"
},
"objects": [
{
"kind": "ImageStream",
"apiVersion": "image.openshift.io/v1",
"metadata": {
"name": "test-100",
"creationTimestamp": null,
"labels": {
"app": "test-100"
},
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
}
},
"spec": {
"lookupPolicy": {
"local": false
},
"tags": [
{
"name": "latest",
"annotations": {
"openshift.io/imported-from": "artifactory.company.com/docker-dev-local/test/dev/test:latest"
},
"from": {
"kind": "DockerImage",
"name": "artifactory.company.com/docker-dev-local/test/dev/test:latest"
},
"generation": null,
"importPolicy": {},
"referencePolicy": {
"type": ""
}
}
]
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "apps.openshift.io/v1",
"metadata": {
"name": "test-100",
"creationTimestamp": null,
"labels": {
"app": "test-100"
},
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"test-100"
],
"from": {
"kind": "ImageStreamTag",
"name": "test-100:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"app": "test-100",
"deploymentconfig": "test-100"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "test-100",
"deploymentconfig": "test-100"
},
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
}
},
"spec": {
"containers": [
{
"name": "test-100",
"image": "artifactory.company.com/docker-dev-local/test/dev/test:latest",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
},
{
"containerPort": 8443,
"protocol": "TCP"
},
{
"containerPort": 8778,
"protocol": "TCP"
}
],
"resources": {}
}
]
}
}
}
},
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "test-100",
"creationTimestamp": null,
"labels": {
"app": "test-100"
},
"annotations": {
"openshift.io/generated-by": "OpenShiftNewApp"
}
},
"spec": {
"ports": [
{
"name": "8080-tcp",
"protocol": "TCP",
"port": 8080,
"targetPort": 8080
},
{
"name": "8443-tcp",
"protocol": "TCP",
"port": 8443,
"targetPort": 8443
},
{
"name": "8778-tcp",
"protocol": "TCP",
"port": 8778,
"targetPort": 8778
}
],
"selector": {
"app": "test-100",
"deploymentconfig": "test-100"
}
}
}
]
}
OpenShift cannot detect image changes in an external registry, so you should configure importPolicy.scheduled: true to refresh the image.
For example, you can configure the importPolicy on each image tag:
apiVersion: v1
kind: ImageStream
metadata:
  name: ruby
spec:
  tags:
  - from:
      kind: DockerImage
      name: openshift/ruby-20-centos7
    name: latest
    importPolicy:
      scheduled: true
The interval is 15 minutes by default. If you want to change it, you can adjust the config in /etc/origin/master/master-config.yaml as follows.
For example, ScheduledImageImportMinimumIntervalSeconds is the interval for ImageStream imports; refer to Image Policy Configuration for details of the other parameters.
imagePolicyConfig:
  MaxScheduledImageImportsPerMinute: 10
  ScheduledImageImportMinimumIntervalSeconds: 1800
  disableScheduledImport: false
  maxImagesBulkImportedPerRepository: 3
Further information is here: Automatically Update Red Hat Container Images on OpenShift 3.11.
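For the template in the question, roughly the same thing could be done from the CLI; the ImageStream name and image reference below are taken from the template, and the commands are a sketch rather than a verified recipe:
# mark the tag for periodic re-import from the external registry
oc tag artifactory.company.com/docker-dev-local/test/dev/test:latest test-100:latest --scheduled=true
# or trigger a one-off manual re-import
oc import-image test-100:latest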

Readiness probe failed: Get http://10.244.2.183:5000/: dial tcp 10.244.2.183:5000: connect: connection refused Back-off restarting failed container

I am trying to deploy my application with GitLab CI, pushing Docker images to an Azure container registry and from there deploying them to Azure Kubernetes Service. The whole process happens automatically through GitLab CI, but I'm facing a challenge in the deployment stage. I can see that the services and pods are in a running state and that Tiller is deployed on Kubernetes, but it throws the error shown in the title. This is the deployment YAML I took from Kubernetes:
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "review-37-in-cust-iosa7i",
"namespace": "XYZ",
"selfLink": "/apis/extensions/v1beta1/namespaces/XYZ/deployments/review-37-in-cust-iosa7i",
"uid": "9f5f7fff-9d65-11e9-8ceb-0e7a6fb80992",
"resourceVersion": "7143337",
"generation": 1,
"creationTimestamp": "2019-07-03T07:39:00Z",
"labels": {
"app": "review-37-in-cust-iosa7i",
"chart": "auto-deploy-app-0.2.9",
"heritage": "Tiller",
"release": "review-37-in-cust-iosa7i",
"tier": "web",
"track": "stable"
},
"annotations": {
"deployment.kubernetes.io/revision": "1"
}
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app": "review-37-in-cust-iosa7i",
"release": "review-37-in-cust-iosa7i",
"tier": "web",
"track": "stable"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "review-37-in-cust-iosa7i",
"release": "review-37-in-cust-iosa7i",
"tier": "web",
"track": "stable"
},
"annotations": {
"checksum/application-secrets": ""
}
},
"spec": {
"containers": [
{
"name": "auto-deploy-app",
"image": "stratuscentcrdeve.azurecr.io/XYZ/dev/37-in-customer-group-customer-form-when-admin-opens-up-the-poli:65d2e2bc554242c584d5c6480e172690659ef98b",
"ports": [
{
"name": "web",
"containerPort": 5000,
"protocol": "TCP"
}
],
"env": [
{
"name": "DATABASE_URL",
"value": "postgres://user:testing-password#review-37-in-cust-iosa7i-postgres:5432/review-37-in-cust-iosa7i"
}
],
"resources": {},
"livenessProbe": {
"httpGet": {
"path": "/",
"port": 5000,
"scheme": "HTTP"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"readinessProbe": {
"httpGet": {
"path": "/",
"port": 5000,
"scheme": "HTTP"
},
"initialDelaySeconds": 5,
"timeoutSeconds": 3,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {},
"imagePullSecrets": [
{
"name": "gitlab-registry"
}
],
"schedulerName": "default-scheduler"
}
},
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": 1,
"maxSurge": 1
}
},
"revisionHistoryLimit": 2147483647,
"progressDeadlineSeconds": 2147483647
},
"status": {
"observedGeneration": 1,
"replicas": 1,
"updatedReplicas": 1,
"unavailableReplicas": 1,
"conditions": [
{
"type": "Available",
"status": "True",
"lastUpdateTime": "2019-07-03T07:39:00Z",
"lastTransitionTime": "2019-07-03T07:39:00Z",
"reason": "MinimumReplicasAvailable",
"message": "Deployment has minimum availability."
}
]
}
}
Please comment if any additional info is required.
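The probe error means nothing answered on port 5000 inside the pod, so the first thing to confirm is that the application actually starts and listens on 0.0.0.0:5000. A hedged diagnostic sketch; the namespace and label come from the deployment above, <pod-name> is a placeholder, and wget is assumed to exist in the image:
kubectl -n XYZ get pods -l app=review-37-in-cust-iosa7i
kubectl -n XYZ describe pod -l app=review-37-in-cust-iosa7i   # probe events and restart reasons
kubectl -n XYZ logs deploy/review-37-in-cust-iosa7i           # does the app start and bind 0.0.0.0:5000?
kubectl -n XYZ exec -it <pod-name> -- wget -qO- http://127.0.0.1:5000/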

Using Marathon json to create new implementations

We've been exploring Marathon to deploy into a Docker cluster. In the application architecture, we have a PostgreSQL database which the application server needs to access.
In development, we relied on fig to create links between containers and then used the environment variables those links inject to connect to the destination (app server to PostgreSQL).
In Marathon we could not find a similar approach; we tried to use dependencies, but that did not work. Below is our Marathon.json file:
{
"id": "/project",
"groups": [
{
"id": "apps",
"apps": [
{
"id": "app",
"mem": 1024,
"env": {
"APP_HOME": "/var/lib/app",
"GIT_BRANCH": "release/2.0.0",
"SETTING_FILE": "development",
"BROKER_URL": "redis://redis_1:6379/0"
},
"dependencies": ["database", "caching", "messaging"],
"container": {
"type": "DOCKER",
"docker": {
"image": "xxx/aok:app"
}
},
"volumes": [
{
"containerPath": "/var/lib/app",
"hostPath": ".",
"mode": "RW"
}
]
},
{
"id": "celery",
"mem": 1024,
"env": {
"APP_HOME": "/var/lib/app",
"GIT_BRANCH": "release/2.0.0",
"SETTING_FILE": "development",
"BROKER_URL": "redis://redis_1:6379/0"
},
"container": {
"type": "DOCKER",
"docker": {
"image": "xxx/aok:celery"
}
},
"volumes": [
{
"containerPath": "/var/lib/app",
"hostPath": ".",
"mode": "RW"
}
]
},
{
"id": "celeryhb",
"mem": 1024,
"env": {
"APP_HOME": "/var/lib/app",
"GIT_BRANCH": "release/2.0.0",
"SETTING_FILE": "development",
"BROKER_URL": "redis://redis_1:6379/0"
},
"container": {
"type": "DOCKER",
"docker": {
"image": "xxx/aok:celeryhb"
}
},
"volumes": [
{
"containerPath": "/var/lib/app",
"hostPath": ".",
"mode": "RW"
}
]
}
]
},
{
"id": "database",
"apps": [
{
"id": "pg",
"mem": 1024,
"container": {
"type": "DOCKER",
"docker": {
"image": "mughrabi/aok:pg"
},
"volumes": [
{
"containerPath": "/var/lib/postgresql/data",
"hostPath": "/tmp/aok-postgres-data",
"mode": "RW"
}
]
}
}
]
},
{
"id": "caching",
"apps": [
{
"id": "redis",
"mem": 1024,
"container": {
"type": "DOCKER",
"docker": {
"image": "redis"
}
}
}
]
},
{
"id": "messaging",
"apps": [
{
"id": "rabbitmq",
"mem": 1024,
"container": {
"type": "DOCKER",
"docker": {
"image": "rabbitmq"
}
}
}
]
}
]
}
Can someone please advise?
Look into using a service discovery tool such as Consul (https://www.consul.io/) or etcd (https://github.com/coreos/etcd).
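For example, if each service registered itself in Consul, the app could resolve the database by name through Consul's DNS interface instead of Docker links. A sketch, assuming a local Consul agent on its default DNS port and a service registered as pg:
# query Consul's DNS interface for the pg service (returns address and port via SRV)
dig @127.0.0.1 -p 8600 pg.service.consul SRV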
