Portainer Docker Swarm: importing secrets into a compose file

I added secrets in Portainer (Docker Swarm) and am trying to import them as variables. Could anyone give an example of how I can import them into the compose file below?
version: '3.1'
services:
  mongodb-sharded:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-sharded
      - MONGODB_SHARDING_MODE=mongos
      - MONGODB_CFG_PRIMARY_HOST=mongodb-cfg-primary
      - MONGODB_CFG_REPLICA_SET_NAME=cfgreplicaset
      - MONGODB_REPLICA_SET_KEY=${MONGODB_KEY}
      - MONGODB_ROOT_PASSWORD=${MONGODB_PASSWORD}
    ports:
      - "27017:27017"

This is working with Portainer Docker Swarm secrets, running MongoDB sharded:
version: '3.7'
secrets:
  mongo-root-password:
    external: true
  mongo-key:
    external: true
services:
  mongodb-sharded:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-sharded
      - MONGODB_SHARDING_MODE=mongos
      - MONGODB_CFG_PRIMARY_HOST=mongodb-cfg-primary
      - MONGODB_CFG_REPLICA_SET_NAME=cfgreplicaset
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
    ports:
      - "27017:27017"
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-sharded-2:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-sharded-2
      - MONGODB_SHARDING_MODE=mongos
      - MONGODB_CFG_PRIMARY_HOST=mongodb-cfg-primary
      - MONGODB_CFG_REPLICA_SET_NAME=cfgreplicaset
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard0-primary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard0-primary
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_MONGOS_HOST=mongodb-sharded
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_MODE=primary
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_REPLICA_SET_NAME=shard0
    volumes:
      - 'shard0_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard0-secondary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    depends_on:
      - mongodb-shard0-primary
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard0-secondary
      - MONGODB_REPLICA_SET_MODE=secondary
      - MONGODB_PRIMARY_HOST=mongodb-shard0-primary
      - MONGODB_PRIMARY_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_REPLICA_SET_NAME=shard0
    volumes:
      - 'shard0_sec_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard1-primary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard1-primary
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_MONGOS_HOST=mongodb-sharded
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_MODE=primary
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_REPLICA_SET_NAME=shard1
    volumes:
      - 'shard1_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard1-secondary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    depends_on:
      - mongodb-shard1-primary
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard1-secondary
      - MONGODB_REPLICA_SET_MODE=secondary
      - MONGODB_PRIMARY_HOST=mongodb-shard1-primary
      - MONGODB_PRIMARY_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_REPLICA_SET_NAME=shard1
    volumes:
      - 'shard1_sec_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard2-primary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard2-primary
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_MONGOS_HOST=mongodb-sharded
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_MODE=primary
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_REPLICA_SET_NAME=shard2
    volumes:
      - 'shard2_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-shard2-secondary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    depends_on:
      - mongodb-shard2-primary
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-shard2-secondary
      - MONGODB_REPLICA_SET_MODE=secondary
      - MONGODB_PRIMARY_HOST=mongodb-shard2-primary
      - MONGODB_PRIMARY_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_SHARDING_MODE=shardsvr
      - MONGODB_REPLICA_SET_NAME=shard2
    volumes:
      - 'shard2_sec_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-cfg-primary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-cfg-primary
      - MONGODB_SHARDING_MODE=configsvr
      - MONGODB_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_MODE=primary
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_REPLICA_SET_NAME=cfgreplicaset
    volumes:
      - 'cfg_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
  mongodb-cfg-secondary:
    image: docker.io/bitnami/mongodb-sharded:4.4
    depends_on:
      - mongodb-cfg-primary
    environment:
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-cfg-secondary
      - MONGODB_REPLICA_SET_MODE=secondary
      - MONGODB_PRIMARY_HOST=mongodb-cfg-primary
      - MONGODB_PRIMARY_ROOT_PASSWORD_FILE=/run/secrets/mongo-root-password
      - MONGODB_REPLICA_SET_KEY_FILE=/run/secrets/mongo-key
      - MONGODB_REPLICA_SET_NAME=cfgreplicaset
      - MONGODB_SHARDING_MODE=configsvr
    volumes:
      - 'cfg_sec_data:/bitnami'
    secrets:
      - mongo-key
      - mongo-root-password
volumes:
  shard0_data:
    driver: local
  shard0_sec_data:
    driver: local
  shard1_data:
    driver: local
  shard1_sec_data:
    driver: local
  shard2_data:
    driver: local
  shard2_sec_data:
    driver: local
  cfg_data:
    driver: local
  cfg_sec_data:
    driver: local
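
For this stack to deploy, the two external secrets have to exist in the Swarm before the stack is created. In Portainer they can be created in the UI under Secrets; from a manager node, a minimal CLI sketch looks roughly like this (the key and password values and the stack name are placeholders):

# create the secrets referenced as "external: true" above (values are placeholders)
openssl rand -base64 32 | docker secret create mongo-key -
printf 'my-root-password' | docker secret create mongo-root-password -
# then deploy the stack ("mongodb" is just an example stack name)
docker stack deploy -c docker-compose.yml mongodb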

Docker secrets are mounted as files in the container under /run/secrets/<secret-name> (if no explicit mount point is specified). To use them, the application must be able to read the data from these files, and that's not always supported; usually only a small subset of the available variables can be supplied as a file.
The official Docker mongo image states support only for MONGO_INITDB_ROOT_USERNAME_FILE and MONGO_INITDB_ROOT_PASSWORD_FILE.
The README of the bitnami/mongodb-sharded image doesn't say whether Docker secrets are supported.
A compose file with predefined secrets for the official image would look something like this:
version: '3.1'
services:
  mongo:
    image: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME_FILE: /run/secrets/mongodb-root-username
      MONGO_INITDB_ROOT_PASSWORD_FILE: /run/secrets/mongodb-root-password
    secrets:
      - mongodb-root-username
      - mongodb-root-password
secrets:
  mongodb-root-username:
    external: true
  mongodb-root-password:
    external: true
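
If an image supports neither secrets nor *_FILE variables, a common workaround is a small entrypoint wrapper that reads the secret file into the environment variable the image expects. A rough sketch, not specific to the images above (the image name, variable name, and /docker-entrypoint.sh path are placeholders):

services:
  app:
    image: example/app:latest   # placeholder: an image without *_FILE support
    secrets:
      - app-password
    entrypoint:
      - /bin/sh
      - -c
      # read the secret into the expected variable, then hand off to the image's
      # usual entrypoint; $$ keeps docker-compose from interpolating the $
      - export APP_PASSWORD="$$(cat /run/secrets/app-password)" && exec /docker-entrypoint.sh
secrets:
  app-password:
    external: true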

Related

Traefik configuration for https external services and http for internal services

I have a working docker-compose.yaml configuration that lets me easily expose my services on my public domain (mydomain.com). Now I also want to expose some of my services on a local domain (myservice.lan), where there is no need for HTTPS.
Here is my docker-compose.yaml:
version: '3.4'
services:
  traefik:
    image: traefik:2.5
    container_name: traefik
    restart: unless-stopped
    ports:
      - 80:80
      - 443:443
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      - ${CONFIG}/traefik/acme.json:/acme.json
      - ${CONFIG}/traefik/rules.toml:/etc/traefik/rules.toml
    command:
      - --api.insecure=true
      - --api.debug=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --providers.docker.watch=true
      - --entrypoints.insecure.address=:80
      - --entrypoints.secure.address=:443
      - --certificatesresolvers.letsencrypt.acme.httpchallenge=true
      - --certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=insecure
      - --certificatesresolvers.letsencrypt.acme.email=my_mail
      - --certificatesresolvers.letsencrypt.acme.storage=acme.json
      - --certificatesresolvers.letsencrypt.acme.caServer=https://acme-v02.api.letsencrypt.org/directory
      - --certificatesresolvers.letsencrypt.acme.keyType=EC256
      - --providers.file=true
      - --providers.file.filename=/etc/traefik/rules.toml
      - --providers.file.watch=true
    labels:
      - traefik.http.middlewares.wss.headers.customrequestheaders.X-Forwarded-Proto=https
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    user: ${PUID}:${PGID}
    restart: unless-stopped
    volumes:
      - ${DATA}/grafana:/var/lib/grafana
    environment:
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_SERVER_DOMAIN=grafana.${DOMAIN}
      - GF_SERVER_ROOT_URL=https://grafana.${DOMAIN}/
      - GF_SERVER_SERVE_FROM_SUB_PATH=true
    labels:
      - traefik.enable=true
      - traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN}`)
      - traefik.http.routers.grafana.entrypoints=insecure
      - traefik.http.middlewares.https-redirect.redirectscheme.scheme=https
      - traefik.http.middlewares.https-redirect.redirectscheme.permanent=true
      - traefik.http.routers.grafana-http.middlewares=https-redirect#docker
      - traefik.http.routers.grafana-https.entrypoints=secure
      - traefik.http.routers.grafana-https.rule=Host(`grafana.${DOMAIN}`)
      - traefik.http.routers.grafana-https.tls=true
      - traefik.http.routers.grafana-https.tls.certresolver=letsencrypt
  esphome: #6052
    image: esphome/esphome
    container_name: esphome
    restart: unless-stopped
    privileged: true
    volumes:
      - ${CONFIG}/esphome:/config
      - /etc/localtime:/etc/localtime:ro
    labels:
      - traefik.enable=true
      - traefik.http.routers.esphome.rule=Host(`esphome.${DOMAIN_LOCAL}`)
      - traefik.http.routers.esphome.entrypoints=insecure
      - traefik.http.services.esphome.loadbalancer.server.port=6052
The Grafana service is exposed correctly on my ${DOMAIN} (grafana.mydomain.com), but esphome (esphome.lan) doesn't work.
Is my configuration wrong?
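
A label-only setup like the esphome service above usually fails for a .lan host for reasons outside the compose file: the name has to resolve to the Docker host on the LAN, and Traefik has to have registered the router. A hedged troubleshooting sketch (the API on port 8080 is only reachable if you publish it or query it from inside the container, and <docker-host> is a placeholder):

# does esphome.lan resolve to the machine running Traefik?
nslookup esphome.lan
# with api.insecure=true, Traefik lists the routers it registered on its internal :8080 API
curl -s http://<docker-host>:8080/api/http/routers | grep -i esphome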

docker-compose with Postgres and two Spring Boot backend applications fails with connection refused errors

version: '3.3'
services:
  # InfluxDB server
  influx-db:
    image: influxdb:1.8-alpine
    container_name: influx-db
    ports:
      - 8086:8086
    restart: always
    volumes:
      - db-data:/var/lib/influxdb
    networks:
      - local
  # PostgreSQL database for the application
  postgresdb:
    image: "postgres:12.0-alpine"
    container_name: postgresdb
    volumes:
      - db-data:/var/lib/postgresql/data
    ports:
      - 5432:5432
    environment:
      - POSTGRES_DB=postgres
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    restart: always
    networks:
      - local
  # Front-end Angular applications
  fe:
    build: './Frontend-Asset'
    ports:
      - 4201:4201
    links:
      - sm_abc_be
      - sm_um_be
    depends_on:
      - sm_abc_be
      - sm_um_be
    networks:
      - local
  um_fe:
    build: './Frontend-User'
    ports:
      - 4202:4202
    links:
      - sm_abc_be
      - sm_um_be
    depends_on:
      - sm_abc_be
      - sm_um_be
    networks:
      - local
  # Back-end Spring Boot applications
  sm_um_be:
    build: './um_be'
    ports:
      - 8081:8081
    restart: always
    volumes:
      - db-data/
    links:
      - postgresdb
    environment:
      - SPRING_DATASOURCE_URL=jdbc:postgresql://postgresdb:5432/abcd
      - SPRING_DATASOURCE_USERNAME=abc_user
      - SPRING_DATASOURCE_PASSWORD=abcpassword
      - SPRING_JPA_HIBERNATE_DDL_AUTO=update
    depends_on:
      - postgresdb
    networks:
      - local
  sm_am_be:
    build: './am_be'
    ports:
      - 8082:8082
    restart: always
    volumes:
      - db-data/
    links:
      - postgresdb
      - influx-db
    environment:
      - SPRING_DATASOURCE_URL=jdbc:postgresql://postgresdb:5432/am_uuid?currentSchema=abc
      - SPRING_DATASOURCE_USERNAME=am_db_user
      - SPRING_DATASOURCE_PASSWORD=abcpassword
      - SPRING_JPA_HIBERNATE_DDL_AUTO=update
    depends_on:
      - postgresdb
      - influx-db
    networks:
      - local
# Volumes for DB data
volumes:
  db-data:
networks:
  local:
    driver: bridge
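
One frequent cause of "connection refused" here is that depends_on only waits for the Postgres container to start, not for the database inside it to accept connections, so the Spring Boot apps can race ahead of it (restart: always eventually papers over this). A healthcheck on the database makes the failure easier to see; a sketch to merge into the postgresdb service above (the intervals are arbitrary). It is also worth double-checking that the JDBC user/database names match the POSTGRES_* variables, and that influx-db and postgresdb don't share the same db-data volume.

  postgresdb:
    image: "postgres:12.0-alpine"
    healthcheck:
      # pg_isready ships with the postgres image; -U should match POSTGRES_USER
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 10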

NiFi 1.14.0 Docker-Compose/Cluster not working

Please help: I can't get NiFi 1.14.0 to work in a cluster via docker-compose; I have a problem with the TLS certificates.
I don't know how to set up the environment variables so that TLS works. My approach was to set everything up at start-up, but I failed.
I can't find any template or reference online; all I have is https://nifi.apache.org/docs.html, and that isn't very helpful.
version: "3.3"
services:
zookeeper:
hostname: zookeeper
container_name: zookeeper
image: 'bitnami/zookeeper:latest'
volumes:
- data_share:/data_share
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_TLS_CLIENT_ENABLE=true
- ZOO_TLS_PORT_NUMBER=3181
- 'ZOO_TLS_CLIENT_KEYSTORE_FILE=/data_share/keystore.p12'
- 'ZOO_TLS_CLIENT_KEYSTORE_PASSWORD=cef4529207f19b4881b8f0f7f51e9fa9'
- 'ZOO_TLS_CLIENT_TRUSTSTORE_FILE=/data_share/truststore.p12'
- 'ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD=2b965c3a7748a3da3c6bf71377c8f5db'
networks:
netnifi:
ipv4_address: 10.40.0.10
nifi_1:
image: apache/nifi:1.14.0
tty: true
stdin_open: true
privileged: true
ports:
- 8443:8443
volumes:
- data_share:/opt/nifi/nifi-current/conf/
environment:
- NIFI_SECURITY_NEEDCLIENTAUTH=true
- NIFI_CLUSTER_PROTOCOL_IS_SECURE=true
- NIFI_SENSITIVE_PROPS_KEY=cef4529207f19b4881b8f0f7f51e9fa9
- 'NIFI_SECURITY_KEYSTORE=/opt/nifi/nifi-current/conf/keystore.p12'
- NIFI_SECURITY_KEYPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTOREPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTORETYPE=PKCS12
- 'NIFI_SECURITY_TRUSTSTORE=/opt/nifi/nifi-current/conf/truststore.p12'
- NIFI_SECURITY_TRUSTSTORETYPE=PKCS12
- NIFI_SECURITY_TRUSTSTOREPASSWD=2b965c3a7748a3da3c6bf71377c8f5db
- SINGLE_USER_CREDENTIALS_USERNAME=admin
- SINGLE_USER_CREDENTIALS_PASSWORD=ctsBtRBKHRAx69EqUghvvgEvjnaLjFEB
- NIFI_WEB_HTTPS_PORT=8443
- NIFI_CLUSTER_IS_NODE=true
- NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
- NIFI_ZK_CONNECT_STRING=zookeeper:3181
- NIFI_ELECTION_MAX_WAIT=1 min
networks:
netnifi:
ipv4_address: 10.40.0.20
nifi_2:
image: apache/nifi:1.14.0
tty: true
stdin_open: true
privileged: true
ports:
- 8444:8443
volumes:
- data_share:/home/data_share
environment:
- NIFI_SECURITY_NEEDCLIENTAUTH=true
- NIFI_CLUSTER_PROTOCOL_IS_SECURE=true
- NIFI_SENSITIVE_PROPS_KEY=cef4529207f19b4881b8f0f7f51e9fa9
- 'NIFI_SECURITY_KEYSTORE=/home/data_share/keystore.p12'
- NIFI_SECURITY_KEYPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTOREPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTORETYPE=PKCS12
- 'NIFI_SECURITY_TRUSTSTORE=/home/data_share/truststore.p12'
- NIFI_SECURITY_TRUSTSTORETYPE=PKCS12
- NIFI_SECURITY_TRUSTSTOREPASSWD=2b965c3a7748a3da3c6bf71377c8f5db
- SINGLE_USER_CREDENTIALS_USERNAME=admin
- SINGLE_USER_CREDENTIALS_PASSWORD=ctsBtRBKHRAx69EqUghvvgEvjnaLjFEB
- NIFI_WEB_HTTPS_PORT=8443
- NIFI_CLUSTER_IS_NODE=true
- NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
- NIFI_ZK_CONNECT_STRING=zookeeper:3181
- NIFI_ELECTION_MAX_WAIT=1 min
networks:
netnifi:
ipv4_address: 10.40.0.30
nifi_3:
image: apache/nifi:1.14.0
tty: true
stdin_open: true
privileged: true
ports:
- 8445:8443
volumes:
- data_share:/home/data_share
environment:
- NIFI_SECURITY_NEEDCLIENTAUTH=true
- NIFI_CLUSTER_PROTOCOL_IS_SECURE=true
- NIFI_SENSITIVE_PROPS_KEY=cef4529207f19b4881b8f0f7f51e9fa9
- 'NIFI_SECURITY_KEYSTORE=/home/data_share/keystore.p12'
- NIFI_SECURITY_KEYPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTOREPASSWD=cef4529207f19b4881b8f0f7f51e9fa9
- NIFI_SECURITY_KEYSTORETYPE=PKCS12
- 'NIFI_SECURITY_TRUSTSTORE=/home/data_share/truststore.p12'
- NIFI_SECURITY_TRUSTSTORETYPE=PKCS12
- NIFI_SECURITY_TRUSTSTOREPASSWD=2b965c3a7748a3da3c6bf71377c8f5db
- SINGLE_USER_CREDENTIALS_USERNAME=admin
- SINGLE_USER_CREDENTIALS_PASSWORD=ctsBtRBKHRAx69EqUghvvgEvjnaLjFEB
- NIFI_WEB_HTTPS_PORT=8443
- NIFI_CLUSTER_IS_NODE=true
- NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
- NIFI_ZK_CONNECT_STRING=zookeeper:3181
- NIFI_ELECTION_MAX_WAIT=1 min
networks:
netnifi:
ipv4_address: 10.40.0.40
networks:
netnifi:
driver: bridge
ipam:
config:
- subnet: 10.40.0.0/24
volumes:
data_share:
For version 1.13.2, the following docker-compose works perfectly:
# maintainer="alex"
# repo="N/A"
# version="v.0.0.1"
# description="OWASP Lesson 1"
version: "3.3"
services:
  zookeeper:
    hostname: zookeeper
    container_name: zookeeper
    image: 'bitnami/zookeeper:latest'
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
    networks:
      netnifi:
        ipv4_address: 10.40.0.10
  nifi_1:
    image: apache/nifi:1.13.2
    tty: true
    stdin_open: true
    privileged: true
    ports:
      - 8442:8080
    volumes:
      - data_share:/home/data_share
    environment:
      - NIFI_WEB_HTTP_PORT=8080
      - NIFI_CLUSTER_IS_NODE=true
      - NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
      - NIFI_ZK_CONNECT_STRING=zookeeper:2181
      - NIFI_ELECTION_MAX_WAIT=1 min
    networks:
      netnifi:
        ipv4_address: 10.40.0.20
  nifi_2:
    image: apache/nifi:1.13.2
    tty: true
    stdin_open: true
    privileged: true
    ports:
      - 8443:8080
    volumes:
      - data_share:/home/data_share
    environment:
      - NIFI_WEB_HTTP_PORT=8080
      - NIFI_CLUSTER_IS_NODE=true
      - NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
      - NIFI_ZK_CONNECT_STRING=zookeeper:2181
      - NIFI_ELECTION_MAX_WAIT=1 min
    networks:
      netnifi:
        ipv4_address: 10.40.0.30
  nifi_3:
    image: apache/nifi:1.13.2
    tty: true
    stdin_open: true
    privileged: true
    ports:
      - 8444:8080
    volumes:
      - data_share:/home/data_share
    environment:
      - NIFI_WEB_HTTP_PORT=8080
      - NIFI_CLUSTER_IS_NODE=true
      - NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
      - NIFI_ZK_CONNECT_STRING=zookeeper:2181
      - NIFI_ELECTION_MAX_WAIT=1 min
    networks:
      netnifi:
        ipv4_address: 10.40.0.40
networks:
  netnifi:
    driver: bridge
    ipam:
      config:
        - subnet: 10.40.0.0/24
volumes:
  data_share:
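
For the secured 1.14.0 cluster above, the keystore and truststore have to exist before the containers start. One way to generate them is the NiFi TLS toolkit; a rough sketch, assuming the apache/nifi-toolkit image (check tls-toolkit standalone --help for the flags that control store type and passwords so they line up with the NIFI_SECURITY_* values above):

# writes one directory per hostname containing a keystore, a truststore and a
# nifi.properties with the generated passwords ("./data_share" is a placeholder path)
docker run --rm -v "$(pwd)/data_share:/out" apache/nifi-toolkit:1.14.0 \
  tls-toolkit standalone -n 'nifi_1,nifi_2,nifi_3' -o /out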

Not able to deploy to Kubernetes cluster using Kompose

I'm currently deploying a project on a Kubernetes cluster by using Kompose (http://www.kompose.io) to convert the docker-compose configuration to Kubernetes configuration files.
This is a project for a master's class at my university, and they took care of the Kubernetes cluster, so I'm almost certain that it is configured properly. FYI, this is the version of that Kubernetes cluster:
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"16", GitVersion:"v1.16.3", GitCommit:"b3cbbae08ec52a7fc73d334838e18d17e8512749", GitTreeState:"clean", BuildDate:"2019-11-13T11:23:11Z", GoVersion:"go1.12.12", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"16", GitVersion:"v1.16.3", GitCommit:"b3cbbae08ec52a7fc73d334838e18d17e8512749", GitTreeState:"clean", BuildDate:"2019-11-13T11:13:49Z", GoVersion:"go1.12.12", Compiler:"gc", Platform:"linux/amd64"}
This is the version of Kompose:
$ kompose version
1.20.0 (f3d54d784)
The problem I have is as follows: the command kompose convert works without any problem, but when I try deploying with kompose up, it fails with the following error message:
FATA Error while deploying application: Get http://localhost:8080/api: dial tcp [::1]:8080: connect: connection refused
This is my first time using Kubernetes and Kompose. I've looked for others who have this problem, but nothing I found really helped.
This is my docker-compose file at the moment (I'm aware I shouldn't put passwords in my docker-compose file, but that's not part of the problem):
version: "3"
services:
zookeeper-container:
image: confluentinc/cp-zookeeper
environment:
- ZOOKEEPER_CLIENT_PORT=2181
kafka-container:
image: confluentinc/cp-kafka
depends_on:
- zookeeper-container
environment:
- KAFKA_BROKER_ID=1
- KAFKA_ZOOKEEPER_CONNECT=zookeeper-container:2181
- KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka-container:9092
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
route-db:
image: neo4j:3.5.6
environment:
- NEO4J_AUTH=neo4j/route
ports:
- 7687:7687
delay-request-db:
image: redis
staff-db:
image: mongo
train-db:
image: mongo
maintenance-db:
image: mysql:5.7
command: --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_ROOT_PASSWORD=maintenancedatabase
- MYSQL_DATABASE=Maintenance
station-db:
image: mysql:5.7
command: --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_ROOT_PASSWORD=stationdatabase
- MYSQL_DATABASE=Station
ticket-sale-db:
image: mysql:5.7
command: --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_ROOT_PASSWORD=ticketsaledatabase
- MYSQL_DATABASE=TicketSale
ticket-validation-db:
image: mysql:5.7
command: --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_ROOT_PASSWORD=ticketvalidationdatabase
- MYSQL_DATABASE=TicketValidation
timetable-db:
image: mysql:5.7
command: --default-authentication-plugin=mysql_native_password
environment:
- MYSQL_ROOT_PASSWORD=timetabledatabase
- MYSQL_DATABASE=Timetable
delay-service:
build: ./railway-app-delay
image: gilliswerrebrouck/railway-app-delay-service
volumes:
- ./railway-app-delay/target:/app
links:
- kafka-container
- zookeeper-container
depends_on:
- kafka-container
- zookeeper-container
maintenance-service:
build: ./railway-app-maintenance
image: gilliswerrebrouck/railway-app-maintenance-service
volumes:
- ./railway-app-maintenance/target:/app
links:
- kafka-container
- zookeeper-container
- maintenance-db
depends_on:
- kafka-container
- zookeeper-container
- maintenance-db
route-service:
build: ./railway-app-route-management
image: gilliswerrebrouck/railway-app-route-management-service
volumes:
- ./railway-app-route-management/target:/app
links:
- kafka-container
- zookeeper-container
- route-db
depends_on:
- kafka-container
- zookeeper-container
- route-db
staff-service:
build: ./railway-app-staff
image: gilliswerrebrouck/railway-app-staff-service
volumes:
- ./railway-app-staff/target:/app
links:
- kafka-container
- zookeeper-container
- staff-db
depends_on:
- kafka-container
- zookeeper-container
- staff-db
station-service:
build: ./railway-app-station
image: gilliswerrebrouck/railway-app-station-service
volumes:
- ./railway-app-station/target:/app
links:
- kafka-container
- zookeeper-container
- station-db
- delay-request-db
depends_on:
- kafka-container
- zookeeper-container
- station-db
- delay-request-db
ticket-sale-service:
build: ./railway-app-ticket-sale
image: gilliswerrebrouck/railway-app-ticket-sale-service
volumes:
- ./railway-app-ticket-sale/target:/app
links:
- kafka-container
- zookeeper-container
- ticket-sale-db
depends_on:
- kafka-container
- zookeeper-container
- ticket-sale-db
ticket-validation-service:
build: ./railway-app-ticket-validation
image: gilliswerrebrouck/railway-app-ticket-validation-service
volumes:
- ./railway-app-ticket-validation/target:/app
links:
- kafka-container
- zookeeper-container
- ticket-validation-db
depends_on:
- kafka-container
- zookeeper-container
- ticket-validation-db
timetable-service:
build: ./railway-app-timetable
image: gilliswerrebrouck/railway-app-timetable-service
volumes:
- ./railway-app-timetable/target:/app
links:
- kafka-container
- zookeeper-container
- timetable-db
- route-service
- station-service
- train-service
depends_on:
- kafka-container
- zookeeper-container
- timetable-db
- route-service
- station-service
- train-service
train-service:
build: ./railway-app-train
image: gilliswerrebrouck/railway-app-train-service
volumes:
- ./railway-app-train/target:/app
links:
- kafka-container
- zookeeper-container
- train-db
depends_on:
- kafka-container
- zookeeper-container
- train-db
apigateway:
build: ./railway-app-api-gateway
image: gilliswerrebrouck/railway-app-api-gateway-service
volumes:
- ./railway-app-api-gateway/target:/app
links:
- kafka-container
- zookeeper-container
- delay-service
- maintenance-service
- route-service
- staff-service
- station-service
- ticket-sale-service
- ticket-validation-service
- timetable-service
- train-service
depends_on:
- kafka-container
- zookeeper-container
- delay-service
- maintenance-service
- route-service
- staff-service
- station-service
- ticket-sale-service
- ticket-validation-service
- timetable-service
- train-service
ports:
- 8080:8080
frontend:
build: ./railway-app-frontend
image: gilliswerrebrouck/railway-app-frontend
volumes:
- ./railway-app-frontend/target:/app
links:
- apigateway
- route-db
depends_on:
- apigateway
- route-db
ports:
- 80:80
Does anyone have tips on how to troubleshoot or fix this issue?
UPDATE:
These are the files generated by the kompose convert command.
I've solved it by replacing all apiVersion values in the generated deployment files from v1beta2 to apps/v1 and by adding a selector to each deployment:
selector:
  matchLabels:
    app: ...
I then didn't use kompose up to deploy, since that still gives me an error, but used kubectl create -f <file(s)> instead, and this succeeded without the connection error. There are still some pods crashing, but I don't think that has anything to do with the original problem.
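
For reference, the fix described above amounts to editing each generated deployment into the apps/v1 shape, roughly like this (the service name and labels are illustrative; kompose normally labels resources with io.kompose.service):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: apigateway
  labels:
    io.kompose.service: apigateway
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: apigateway   # must match the pod template labels below
  template:
    metadata:
      labels:
        io.kompose.service: apigateway
    spec:
      containers:
        - name: apigateway
          image: gilliswerrebrouck/railway-app-api-gateway-service
          ports:
            - containerPort: 8080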

My docker-compose.yml couldn't start the MySQL 5.7 container

I'm trying to build a Docker setup for Laravel with docker-compose.yml.
I have to build a database container for MySQL 5.7, since MySQL 8 cannot be used on the server I connect to.
Here is my docker-compose.yml file:
version: "3"
services:
app:
build:
context: ./docker/php
args:
- TZ=${TZ}
ports:
- ${APP_PORT}:8000
volumes:
- ${PROJECT_PATH}:/work
- ./docker/ash:/etc/profile.d
- ./docker/php/psysh:/root/.config/psysh
- ./logs:/var/log/php
- ./docker/php/php.ini:/usr/local/etc/php/php.ini
working_dir: /work
environment:
- DB_CONNECTION=mysql
- DB_HOST=db
- DB_DATABASE=${DB_NAME}
- DB_USERNAME=${DB_USER}
- DB_PASSWORD=${DB_PASS}
- TZ=${TZ}
- MAIL_HOST=${MAIL_HOST}
- MAIL_PORT=${MAIL_PORT}
- CACHE_DRIVER=redis
- SESSION_DRIVER=redis
- QUEUE_DRIVER=redis
- REDIS_HOST=redis
web:
image: nginx:1.17-alpine
depends_on:
- app
ports:
- ${WEB_PORT}:80
volumes:
- ${PROJECT_PATH}:/work
- ./logs:/var/log/nginx
- ./docker/nginx/default.conf:/etc/nginx/conf.d/default.conf
environment:
- TZ=${TZ}
db:
image: mysql:5.7
volumes:
- db-store:/var/lib/mysql
- ./logs:/var/log/mysql
- ./docker/mysql/my.cnf:/etc/mysql/conf.d/my.cnf
environment:
- MYSQL_DATABASE=${DB_NAME}
- MYSQL_USER=${DB_USER}
- MYSQL_PASSWORD=${DB_PASS}
- MYSQL_ROOT_PASSWORD=${DB_PASS}
- TZ=${TZ}
ports:
- ${DB_PORT}:3306
db-testing:
image: mysql:5.7
volumes:
- ./docker/mysql/my.cnf:/etc/mysql/conf.d/my.cnf
tmpfs:
- /var/lib/mysql
- /var/log/mysql
environment:
- MYSQL_DATABASE=${DB_NAME}
- MYSQL_USER=${DB_USER}
- MYSQL_PASSWORD=${DB_PASS}
- MYSQL_ROOT_PASSWORD=${DB_PASS}
- TZ=${TZ}
ports:
- ${DB_TESTING_PORT}:3306
node:
image: node:12.13-alpine
tty: true
volumes:
- ${PROJECT_PATH}:/work
working_dir: /work
redis:
image: redis:5.0-alpine
volumes:
- redis-store:/data
mail:
image: mailhog/mailhog
ports:
- ${MAILHOG_PORT}:8025
volumes:
db-store:
redis-store:
When I execute docker-compose build in the terminal it completes successfully, but the db and db-testing containers end up with status "Exit 1" or "Exit 2".
Could you tell me what's wrong?
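
The container logs normally say exactly why mysqld exits; two common culprits in a layout like this are the ./docker/mysql/my.cnf mount not being a regular file and a db-store volume that already contains data from a different MySQL version. A quick troubleshooting sketch:

docker-compose ps                        # confirm which services exited and with what code
docker-compose logs --tail=50 db         # mysqld prints the reason it stopped
docker-compose logs --tail=50 db-testing
docker volume ls | grep db-store         # stale data in <project>_db-store also causes Exit 1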
