SASL/SCRAM works with confluentinc images, but fails with wurstmeister - docker

I have SASL/SCRAM config working with confluentinc kafka/zookeeper:
docker-compose.yml
# Based on: https://github.com/iwpnd/tile38-kafka-sasl
version: "2"
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:6.0.1
    hostname: zookeeper
    container_name: zookeeper
    environment:
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # NOTE(review): an advertised-listeners setting on a zookeeper container
      # looks like a copy/paste leftover from a broker service — confirm it is
      # actually read by cp-zookeeper before relying on it.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://zookeeper:2181
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
      ZOOKEEPER_SERVER_ID: 3
      # JVM flags enabling SASL auth for clients and for quorum peers;
      # folded (>-) block joins the flags with single spaces.
      KAFKA_OPTS: >-
        -Djava.security.auth.login.config=/etc/kafka/secrets/sasl/zookeeper_jaas.conf
        -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
        -Dzookeeper.authProvider.2=org.apache.zookeeper.server.auth.DigestAuthenticationProvider
        -Dquorum.auth.enableSasl=true
        -Dquorum.auth.learnerRequireSasl=true
        -Dquorum.auth.serverRequireSasl=true
        -Dquorum.auth.learner.saslLoginContext=QuorumLearner
        -Dquorum.auth.server.saslLoginContext=QuorumServer
        -Dquorum.cnxn.threads.size=20
        -DrequireClientAuthScheme=sasl
    volumes:
      - ./secrets:/etc/kafka/secrets/sasl

  # One-shot job that creates the SCRAM credentials in ZooKeeper.
  zookeeper-add-kafka-users:
    image: confluentinc/cp-kafka:6.0.1
    container_name: "zookeeper-add-kafka-users"
    depends_on:
      - zookeeper
    # NOTE(review): `cub` ships only with Confluent images — under
    # wurstmeister images this fails with "cub: command not found"
    # (see the log further down), so the SCRAM users are never created.
    command: >-
      bash -c 'echo Waiting for Zookeeper to be ready... &&
      cub zk-ready zookeeper:2181 120 &&
      kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[iterations=4096,password=password]' --entity-type users --entity-name admin &&
      kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[iterations=4096,password=password]' --entity-type users --entity-name client '
    environment:
      KAFKA_BROKER_ID: ignored
      KAFKA_ZOOKEEPER_CONNECT: ignored
      KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/sasl/kafka_server_jaas.conf
    volumes:
      - ./secrets:/etc/kafka/secrets/sasl

  broker:
    image: confluentinc/cp-kafka:6.0.1
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9091:9091"
      - "9101:9101"
      - "9092:9092"
    expose:
      - "29090"
    environment:
      # -Dzookeeper.sasl.client=true makes the broker authenticate to ZK
      # using the 'Client' section of the JAAS file below.
      KAFKA_OPTS: "-Dzookeeper.sasl.client=true -Djava.security.auth.login.config=/etc/kafka/secrets/sasl/kafka_server_jaas.conf"
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT,SASL_PLAINHOST:SASL_PLAINTEXT
      KAFKA_LISTENERS: INSIDE://:29090,OUTSIDE://:9092,SASL_PLAINHOST://:9091
      KAFKA_ADVERTISED_LISTENERS: INSIDE://broker:29090,OUTSIDE://localhost:9092,SASL_PLAINHOST://broker:9091
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9101
      KAFKA_JMX_HOSTNAME: localhost
      # FIX: the next line was misspelled ("PROTOCAL"), so the broker received
      # the unknown property 'security.inter.broker.protocal' (visible in the
      # startup log) and ignored it. It cannot simply be re-spelled either:
      # security.inter.broker.protocol must not be set together with
      # inter.broker.listener.name (already set above), so it is disabled.
      # KAFKA_SECURITY_INTER_BROKER_PROTOCAL: SASL_PLAINTEXT
      KAFKA_SASL_ENABLED_MECHANISMS: SCRAM-SHA-512
      # NOTE(review): sasl.mechanism.inter.broker.protocol expects a SASL
      # mechanism name (e.g. SCRAM-SHA-512); "PLAINTEXT" is not a mechanism.
      # It appears inert here because the INSIDE listener is PLAINTEXT —
      # confirm before reusing this value elsewhere.
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAINTEXT
    volumes:
      - ./secrets:/etc/kafka/secrets/sasl
secrets/kafka_server_jaas.conf
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="password"
user_admin="password"
user_client="password";
};
Client {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="password";
};
KafkaClient {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="client"
password="password";
};
secrets/zk_server_jaas.conf
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="admin-secret"
user_admin="admin-secret";
};
secrets/zookeeper_jaas.conf
Server {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_admin="password";
};
QuorumServer {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
QuorumLearner {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
Above config works as I expected with confluentinc/cp-zookeeper:6.0.1 image, but when I change images to wurstmeister/zookeeper and wurstmeister/kafka:2.13-2.7.1 I get below errors:
[36mbroker |[0m [Configuring] 'security.inter.broker.protocal' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'jmx.port' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'advertised.listeners' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'port' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'inter.broker.listener.name' in '/opt/kafka/config/server.properties'
[36mbroker |[0m Excluding KAFKA_OPTS from broker config
[36mbroker |[0m Excluding KAFKA_HOME from broker config
[36mbroker |[0m [Configuring] 'log.dirs' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'listeners' in '/opt/kafka/config/server.properties'
[36mbroker |[0m Excluding KAFKA_VERSION from broker config
[33mzookeeper |[0m ZooKeeper JMX enabled by default
[33mzookeeper |[0m Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,364 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,370 [myid:] - INFO [main:DatadirCleanupManager#78] - autopurge.snapRetainCount set to 3
[33mzookeeper |[0m 2021-12-04 13:17:55,370 [myid:] - INFO [main:DatadirCleanupManager#79] - autopurge.purgeInterval set to 1
[33mzookeeper |[0m 2021-12-04 13:17:55,371 [myid:] - WARN [main:QuorumPeerMain#116] - Either no config or no quorum defined in config, running in standalone mode
[33mzookeeper |[0m 2021-12-04 13:17:55,376 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[33mzookeeper |[0m 2021-12-04 13:17:55,396 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[33mzookeeper |[0m 2021-12-04 13:17:55,396 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,397 [myid:] - INFO [main:ZooKeeperServerMain#98] - Starting server
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 04:05 GMT
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:host.name=zookeeper
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:java.version=1.7.0_65
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.vendor=Oracle Corporation
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.home=/usr/lib/jvm/java-7-openjdk-amd64/jre
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.class.path=/opt/zookeeper-3.4.13/bin/../build/classes:/opt/zookeeper-3.4.13/bin/../build/lib/*.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-log4j12-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-api-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/netty-3.10.6.Final.jar:/opt/zookeeper-3.4.13/bin/../lib/log4j-1.2.17.jar:/opt/zookeeper-3.4.13/bin/../lib/jline-0.9.94.jar:/opt/zookeeper-3.4.13/bin/../lib/audience-annotations-0.5.0.jar:/opt/zookeeper-3.4.13/bin/../zookeeper-3.4.13.jar:/opt/zookeeper-3.4.13/bin/../src/java/lib/*.jar:/opt/zookeeper-3.4.13/bin/../conf:
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.io.tmpdir=/tmp
[33mzookeeper |[0m 2021-12-04 13:17:55,413 [myid:] - INFO [main:Environment#100] - Server environment:java.compiler=<NA>
[33mzookeeper |[0m 2021-12-04 13:17:55,413 [myid:] - INFO [main:Environment#100] - Server environment:os.name=Linux
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:os.arch=amd64
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:os.version=5.11.0-40-generic
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:user.name=root
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:user.home=/root
[33mzookeeper |[0m 2021-12-04 13:17:55,415 [myid:] - INFO [main:Environment#100] - Server environment:user.dir=/opt/zookeeper-3.4.13
[33mzookeeper |[0m 2021-12-04 13:17:55,422 [myid:] - INFO [main:ZooKeeperServer#836] - tickTime set to 2000
[33mzookeeper |[0m 2021-12-04 13:17:55,425 [myid:] - INFO [main:ZooKeeperServer#845] - minSessionTimeout set to -1
[33mzookeeper |[0m 2021-12-04 13:17:55,426 [myid:] - INFO [main:ZooKeeperServer#854] - maxSessionTimeout set to -1
[33mzookeeper |[0m 2021-12-04 13:17:55,443 [myid:] - INFO [main:ServerCnxnFactory#117] - Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory
[33mzookeeper |[0m 2021-12-04 13:17:55,453 [myid:] - INFO [main:NIOServerCnxnFactory#89] - binding to port 0.0.0.0/0.0.0.0:2181
[32mzookeeper-add-kafka-users |[0m Waiting for Zookeeper to be ready...
[32mzookeeper-add-kafka-users |[0m bash: line 1: cub: command not found
[36mbroker |[0m [Configuring] 'zookeeper.connect' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'sasl.mechanism.inter.broker.protocol' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'offsets.topic.replication.factor' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'listener.security.protocol.map' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'jmx.hostname' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'sasl.enabled.mechanisms' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'broker.id' in '/opt/kafka/config/server.properties'
[32mzookeeper-add-kafka-users exited with code 127
[0m[36mbroker |[0m [2021-12-04 13:17:58,599] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[36mbroker |[0m [2021-12-04 13:17:59,195] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[36mbroker |[0m [2021-12-04 13:17:59,343] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[36mbroker |[0m [2021-12-04 13:17:59,357] INFO starting (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,360] INFO Connecting to zookeeper on zookeeper:2181 (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,398] INFO [ZooKeeperClient Kafka server] Initializing a new session to zookeeper:2181. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,429] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,434] INFO Client environment:host.name=broker (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.version=1.8.0_292 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.home=/usr/lib/jvm/zulu8-ca/jre (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.class.path=/opt/kafka/bin/../libs/activation-1.1.1.jar:/opt/kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/opt/kafka/bin/../libs/argparse4j-0.7.0.jar:/opt/kafka/bin/../libs/audience-annotations-0.5.0.jar:/opt/kafka/bin/../libs/commons-cli-1.4.jar:/opt/kafka/bin/../libs/commons-lang3-3.8.1.jar:/opt/kafka/bin/../libs/connect-api-2.7.1.jar:/opt/kafka/bin/../libs/connect-basic-auth-extension-2.7.1.jar:/opt/kafka/bin/../libs/connect-file-2.7.1.jar:/opt/kafka/bin/../libs/connect-json-2.7.1.jar:/opt/kafka/bin/../libs/connect-mirror-2.7.1.jar:/opt/kafka/bin/../libs/connect-mirror-client-2.7.1.jar:/opt/kafka/bin/../libs/connect-runtime-2.7.1.jar:/opt/kafka/bin/../libs/connect-transforms-2.7.1.jar:/opt/kafka/bin/../libs/hk2-api-2.6.1.jar:/opt/kafka/bin/../libs/hk2-locator-2.6.1.jar:/opt/kafka/bin/../libs/hk2-utils-2.6.1.jar:/opt/kafka/bin/../libs/jackson-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-core-2.10.5.jar:/opt/kafka/bin/../libs/jackson-databind-2.10.5.1.jar:/opt/kafka/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/opt/kafka/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-paranamer-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/opt/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/opt/kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/opt/kafka/bin/../libs/jakarta.inject-2.6.1.jar:/opt/kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/opt/kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/opt/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/opt/kafka/bin/../libs/javassist-3.25.0-GA.jar:/opt/kafka/bin/../libs/javassist-3.26.0-GA.jar:/opt/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/opt/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/opt/kafka/bin/.
./libs/jaxb-api-2.3.0.jar:/opt/kafka/bin/../libs/jersey-client-2.31.jar:/opt/kafka/bin/../libs/jersey-common-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-core-2.31.jar:/opt/kafka/bin/../libs/jersey-hk2-2.31.jar:/opt/kafka/bin/../libs/jersey-media-jaxb-2.31.jar:/opt/kafka/bin/../libs/jersey-server-2.31.jar:/opt/kafka/bin/../libs/jetty-client-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-continuation-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-http-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-io-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-security-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-server-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-servlet-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-servlets-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-util-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-util-ajax-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jopt-simple-5.0.4.jar:/opt/kafka/bin/../libs/kafka-clients-2.7.1.jar:/opt/kafka/bin/../libs/kafka-log4j-appender-2.7.1.jar:/opt/kafka/bin/../libs/kafka-raft-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-examples-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-scala_2.13-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-test-utils-2.7.1.jar:/opt/kafka/bin/../libs/kafka-tools-2.7.1.jar:/opt/kafka/bin/../libs/kafka_2.13-2.7.1-sources.jar:/opt/kafka/bin/../libs/kafka_2.13-2.7.1.jar:/opt/kafka/bin/../libs/log4j-1.2.17.jar:/opt/kafka/bin/../libs/lz4-java-1.7.1.jar:/opt/kafka/bin/../libs/maven-artifact-3.6.3.jar:/opt/kafka/bin/../libs/metrics-core-2.2.0.jar:/opt/kafka/bin/../libs/netty-buffer-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-codec-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-handler-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-resolver-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-4.1.59.Final.jar
:/opt/kafka/bin/../libs/netty-transport-native-epoll-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-unix-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/opt/kafka/bin/../libs/paranamer-2.8.jar:/opt/kafka/bin/../libs/plexus-utils-3.2.1.jar:/opt/kafka/bin/../libs/reflections-0.9.12.jar:/opt/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/opt/kafka/bin/../libs/scala-collection-compat_2.13-2.2.0.jar:/opt/kafka/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/opt/kafka/bin/../libs/scala-library-2.13.3.jar:/opt/kafka/bin/../libs/scala-logging_2.13-3.9.2.jar:/opt/kafka/bin/../libs/scala-reflect-2.13.3.jar:/opt/kafka/bin/../libs/slf4j-api-1.7.30.jar:/opt/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/opt/kafka/bin/../libs/snappy-java-1.1.7.7.jar:/opt/kafka/bin/../libs/zookeeper-3.5.9.jar:/opt/kafka/bin/../libs/zookeeper-jute-3.5.9.jar:/opt/kafka/bin/../libs/zstd-jni-1.4.5-6.jar (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,437] INFO Client environment:java.library.path=/usr/lib/jvm/zulu8-ca/jre/lib/amd64/server:/usr/lib/jvm/zulu8-ca/jre/lib/amd64:/usr/lib/jvm/zulu8-ca/jre/../lib/amd64:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,437] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,440] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.version=5.11.0-40-generic (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.free=1014MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,447] INFO Initiating client connection, connectString=zookeeper:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$#4cc451f2 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,459] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[36mbroker |[0m [2021-12-04 13:17:59,469] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[36mbroker |[0m [2021-12-04 13:17:59,481] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,580] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[36mbroker |[0m [2021-12-04 13:17:59,582] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[36mbroker |[0m [2021-12-04 13:17:59,595] INFO Opening socket connection to server zookeeper/172.20.0.2:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,605 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxnFactory#215] - Accepted socket connection from /172.20.0.3:57480
[36mbroker |[0m [2021-12-04 13:17:59,609] INFO Socket connection established, initiating session, client: /172.20.0.3:57480, server: zookeeper/172.20.0.2:2181 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,621 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#949] - Client attempting to establish new session at /172.20.0.3:57480
[33mzookeeper |[0m 2021-12-04 13:17:59,624 [myid:] - INFO [SyncThread:0:FileTxnLog#213] - Creating new log file: log.1
[36mbroker |[0m [2021-12-04 13:17:59,642] INFO Session establishment complete on server zookeeper/172.20.0.2:2181, sessionid = 0x100474bc7f70000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,642 [myid:] - INFO [SyncThread:0:ZooKeeperServer#694] - Established session 0x100474bc7f70000 with negotiated timeout 18000 for client /172.20.0.3:57480
[36mbroker |[0m [2021-12-04 13:17:59,646] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[33mzookeeper |[0m 2021-12-04 13:17:59,657 [myid:] - ERROR [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#1063] - cnxn.saslServer is null: cnxn object did not initialize its saslServer properly.
[36mbroker |[0m [2021-12-04 13:17:59,660] ERROR SASL authentication failed using login context 'Client' with exception: {} (org.apache.zookeeper.client.ZooKeeperSaslClient)
[36mbroker |[0m javax.security.sasl.SaslException: Error in authenticating with a Zookeeper Quorum member: the quorum member's saslToken is null.
[36mbroker |[0m at org.apache.zookeeper.client.ZooKeeperSaslClient.createSaslToken(ZooKeeperSaslClient.java:312)
[36mbroker |[0m at org.apache.zookeeper.client.ZooKeeperSaslClient.respondToServer(ZooKeeperSaslClient.java:275)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxn$SendThread.readResponse(ClientCnxn.java:882)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxnSocketNIO.doIO(ClientCnxnSocketNIO.java:103)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:365)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1223)
[36mbroker |[0m [2021-12-04 13:17:59,669] ERROR [ZooKeeperClient Kafka server] Auth failed. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,672] INFO EventThread shut down for session: 0x100474bc7f70000 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,794 [myid:] - WARN [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#376] - Unable to read additional data from client sessionid 0x100474bc7f70000, likely client has closed socket
[33mzookeeper |[0m 2021-12-04 13:17:59,795 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#1056] - Closed socket connection for client /172.20.0.3:57480 which had sessionid 0x100474bc7f70000
[36mbroker |[0m [2021-12-04 13:17:59,823] ERROR Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[36mbroker |[0m org.apache.zookeeper.KeeperException$AuthFailedException: KeeperErrorCode = AuthFailed for /consumers
[36mbroker |[0m at org.apache.zookeeper.KeeperException.create(KeeperException.java:130)
[36mbroker |[0m at org.apache.zookeeper.KeeperException.create(KeeperException.java:54)
[36mbroker |[0m at kafka.zookeeper.AsyncResponse.maybeThrow(ZooKeeperClient.scala:564)
[36mbroker |[0m at kafka.zk.KafkaZkClient.createRecursive(KafkaZkClient.scala:1662)
[36mbroker |[0m at kafka.zk.KafkaZkClient.makeSurePersistentPathExists(KafkaZkClient.scala:1560)
[36mbroker |[0m at kafka.zk.KafkaZkClient.$anonfun$createTopLevelPaths$1(KafkaZkClient.scala:1552)
[36mbroker |[0m at kafka.zk.KafkaZkClient.$anonfun$createTopLevelPaths$1$adapted(KafkaZkClient.scala:1552)
[36mbroker |[0m at scala.collection.immutable.List.foreach(List.scala:333)
[36mbroker |[0m at kafka.zk.KafkaZkClient.createTopLevelPaths(KafkaZkClient.scala:1552)
[36mbroker |[0m at kafka.server.KafkaServer.initZkClient(KafkaServer.scala:467)
[36mbroker |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:233)
[36mbroker |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:44)
[36mbroker |[0m at kafka.Kafka$.main(Kafka.scala:82)
[36mbroker |[0m at kafka.Kafka.main(Kafka.scala)
[36mbroker |[0m [2021-12-04 13:17:59,825] INFO shutting down (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,836] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,845] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,849] INFO App info kafka.server for -1 unregistered (org.apache.kafka.common.utils.AppInfoParser)
[36mbroker |[0m [2021-12-04 13:17:59,854] INFO shut down completed (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,855] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)
[36mbroker |[0m [2021-12-04 13:17:59,859] INFO shutting down (kafka.server.KafkaServer)
[36mbroker exited with code 1
[0m
Any tips on how to get this working with wurstmeister images?

Related

Kafka cannot connect to zookeeper - connection refused

I have this setup that has been working fine since last year December which suddenly refused to work.
I have this docker-compose yaml file like this:
version: "3.8"
services:
  zookeeper1:
    image: debezium/zookeeper:1.8
    container_name: zookeeper1
    ports:
      # Quoted: digit-and-colon values parse as sexagesimal in YAML 1.1.
      - "2181:2181"
    networks:
      - internalnet
  kafka1:
    image: debezium/kafka:1.8
    container_name: kafka1
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper1
    environment:
      # FIX: the debezium/kafka image reads ZOOKEEPER_CONNECT, not
      # KAFKA_ZOOKEEPER_CONNECT — the startup log prints
      # "Using ZOOKEEPER_CONNECT=0.0.0.0:2181", i.e. the variable further
      # down was ignored and the broker tried 0.0.0.0:2181 (connection
      # refused). The original variable is kept for compatibility.
      - ZOOKEEPER_CONNECT=zookeeper1:2181
      - KAFKA_BROKER_ID=100
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper1:2181
      - KAFKA_ADVERTISED_HOST_NAME=kafka1
      # NOTE(review): binding LISTENER_FRED to localhost inside the container
      # makes it unreachable through the 9092 port mapping — presumably it
      # should bind 0.0.0.0 and only *advertise* localhost; confirm.
      - KAFKA_LISTENERS=LISTENER_BOB://kafka1:29092,LISTENER_FRED://localhost:9092
      - KAFKA_ADVERTISED_LISTENERS=LISTENER_BOB://kafka1:29092,LISTENER_FRED://localhost:9092
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=LISTENER_BOB:PLAINTEXT,LISTENER_FRED:PLAINTEXT
      - KAFKA_INTER_BROKER_LISTENER_NAME=LISTENER_BOB
      - KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=60000
    networks:
      - internalnet
    volumes:
      - ./kafka/kafka1/kafka_data:/kafka/data
      - ./kafka/kafka1/kafka_logs:/kafka/logs
networks:
  internalnet:
    driver: bridge
Zookeeper is running ok but kafka fails to run with the following log:
WARNING: Using default NODE_ID=1, which is valid only for non-clustered installations.
Starting in ZooKeeper mode using NODE_ID=1.
Using ZOOKEEPER_CONNECT=0.0.0.0:2181
Using configuration config/server.properties.
Using KAFKA_LISTENERS=LISTENER_BOB://kafka1:29092,LISTENER_FRED://localhost:9092 and KAFKA_ADVERTISED_LISTENERS=LISTENER_BOB://kafka1:29092,LISTENER_FRED://localhost:9092
2022-09-16 16:26:57,844 - INFO [main:Log4jControllerRegistration$#31] - Registered kafka:type=kafka.Log4jController MBean
2022-09-16 16:26:58,521 - INFO [main:X509Util#77] - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation
2022-09-16 16:26:58,667 - INFO [main:LoggingSignalHandler#72] - Registered signal handlers for TERM, INT, HUP
2022-09-16 16:26:58,674 - INFO [main:Logging#66] - starting
2022-09-16 16:26:58,678 - INFO [main:Logging#66] - Connecting to zookeeper on 0.0.0.0:2181
2022-09-16 16:26:58,719 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Initializing a new session to 0.0.0.0:2181.
2022-09-16 16:26:58,733 - INFO [main:Environment#98] - Client environment:zookeeper.version=3.6.3--6401e4ad2087061bc6b9f80dec2d69f2e3c8660a, built on 04/08/2021 16:35 GMT
2022-09-16 16:26:58,734 - INFO [main:Environment#98] - Client environment:host.name=44841d8b6caa
2022-09-16 16:26:58,734 - INFO [main:Environment#98] - Client environment:java.version=11.0.14.1
2022-09-16 16:26:58,734 - INFO [main:Environment#98] - Client environment:java.vendor=Red Hat, Inc.
2022-09-16 16:26:58,734 - INFO [main:Environment#98] - Client environment:java.home=/usr/lib/jvm/java-11-openjdk-11.0.14.1.1-5.fc34.x86_64
2022-09-16 16:26:58,735 - INFO [main:Environment#98] - Client environment:java.class.path=/kafka/libs/activation-1.1.1.jar:/kafka/libs/aopalliance-repackaged-2.6.1.jar:/kafka/libs/argparse4j-0.7.0.jar:/kafka/libs/audience-annotations-0.5.0.jar:/kafka/libs/commons-cli-1.4.jar:/kafka/libs/commons-lang3-3.8.1.jar:/kafka/libs/connect-api-3.0.0.jar:/kafka/libs/connect-basic-auth-extension-3.0.0.jar:/kafka/libs/connect-file-3.0.0.jar:/kafka/libs/connect-json-3.0.0.jar:/kafka/libs/connect-mirror-3.0.0.jar:/kafka/libs/connect-mirror-client-3.0.0.jar:/kafka/libs/connect-runtime-3.0.0.jar:/kafka/libs/connect-transforms-3.0.0.jar:/kafka/libs/hk2-api-2.6.1.jar:/kafka/libs/hk2-locator-2.6.1.jar:/kafka/libs/hk2-utils-2.6.1.jar:/kafka/libs/jackson-annotations-2.12.3.jar:/kafka/libs/jackson-core-2.12.3.jar:/kafka/libs/jackson-databind-2.12.3.jar:/kafka/libs/jackson-dataformat-csv-2.12.3.jar:/kafka/libs/jackson-datatype-jdk8-2.12.3.jar:/kafka/libs/jackson-jaxrs-base-2.12.3.jar:/kafka/libs/jackson-jaxrs-json-provider-2.12.3.jar:/kafka/libs/jackson-module-jaxb-annotations-2.12.3.jar:/kafka/libs/jackson-module-scala_2.12-2.12.3.jar:/kafka/libs/jakarta.activation-api-1.2.1.jar:/kafka/libs/jakarta.annotation-api-1.3.5.jar:/kafka/libs/jakarta.inject-2.6.1.jar:/kafka/libs/jakarta.validation-api-2.0.2.jar:/kafka/libs/jakarta.ws.rs-api-2.1.6.jar:/kafka/libs/jakarta.xml.bind-api-2.3.2.jar:/kafka/libs/javassist-3.27.0-GA.jar:/kafka/libs/javax.servlet-api-3.1.0.jar:/kafka/libs/javax.ws.rs-api-2.1.1.jar:/kafka/libs/jaxb-api-2.3.0.jar:/kafka/libs/jersey-client-2.34.jar:/kafka/libs/jersey-common-2.34.jar:/kafka/libs/jersey-container-servlet-2.34.jar:/kafka/libs/jersey-container-servlet-core-2.34.jar:/kafka/libs/jersey-hk2-2.34.jar:/kafka/libs/jersey-server-2.34.jar:/kafka/libs/jetty-client-9.4.43.v20210629.jar:/kafka/libs/jetty-continuation-9.4.43.v20210629.jar:/kafka/libs/jetty-http-9.4.43.v20210629.jar:/kafka/libs/jetty-io-9.4.43.v20210629.jar:/kafka/libs/jetty-security-9.4.43.v20210629.jar:/kaf
ka/libs/jetty-server-9.4.43.v20210629.jar:/kafka/libs/jetty-servlet-9.4.43.v20210629.jar:/kafka/libs/jetty-servlets-9.4.43.v20210629.jar:/kafka/libs/jetty-util-9.4.43.v20210629.jar:/kafka/libs/jetty-util-ajax-9.4.43.v20210629.jar:/kafka/libs/jline-3.12.1.jar:/kafka/libs/jopt-simple-5.0.4.jar:/kafka/libs/kafka-clients-3.0.0.jar:/kafka/libs/kafka-log4j-appender-3.0.0.jar:/kafka/libs/kafka-metadata-3.0.0.jar:/kafka/libs/kafka-raft-3.0.0.jar:/kafka/libs/kafka-server-common-3.0.0.jar:/kafka/libs/kafka-shell-3.0.0.jar:/kafka/libs/kafka-storage-3.0.0.jar:/kafka/libs/kafka-storage-api-3.0.0.jar:/kafka/libs/kafka-streams-3.0.0.jar:/kafka/libs/kafka-streams-examples-3.0.0.jar:/kafka/libs/kafka-streams-scala_2.12-3.0.0.jar:/kafka/libs/kafka-streams-test-utils-3.0.0.jar:/kafka/libs/kafka-tools-3.0.0.jar:/kafka/libs/kafka_2.12-3.0.0.jar:/kafka/libs/log4j-1.2.17.jar:/kafka/libs/lz4-java-1.7.1.jar:/kafka/libs/maven-artifact-3.8.1.jar:/kafka/libs/metrics-core-2.2.0.jar:/kafka/libs/metrics-core-4.1.12.1.jar:/kafka/libs/netty-buffer-4.1.62.Final.jar:/kafka/libs/netty-codec-4.1.62.Final.jar:/kafka/libs/netty-common-4.1.62.Final.jar:/kafka/libs/netty-handler-4.1.62.Final.jar:/kafka/libs/netty-resolver-4.1.62.Final.jar:/kafka/libs/netty-transport-4.1.62.Final.jar:/kafka/libs/netty-transport-native-epoll-4.1.62.Final.jar:/kafka/libs/netty-transport-native-unix-common-4.1.62.Final.jar:/kafka/libs/osgi-resource-locator-1.0.3.jar:/kafka/libs/paranamer-2.8.jar:/kafka/libs/plexus-utils-3.2.1.jar:/kafka/libs/reflections-0.9.12.jar:/kafka/libs/rocksdbjni-6.19.3.jar:/kafka/libs/scala-collection-compat_2.12-2.4.4.jar:/kafka/libs/scala-java8-compat_2.12-1.0.0.jar:/kafka/libs/scala-library-2.12.14.jar:/kafka/libs/scala-logging_2.12-3.9.3.jar:/kafka/libs/scala-reflect-2.12.14.jar:/kafka/libs/slf4j-api-1.7.30.jar:/kafka/libs/slf4j-log4j12-1.7.30.jar:/kafka/libs/snappy-java-1.1.8.1.jar:/kafka/libs/trogdor-3.0.0.jar:/kafka/libs/zookeeper-3.6.3.jar:/kafka/libs/zookeeper-jute-3.6.3.jar:/kafka/libs/zstd-j
ni-1.5.0-2.jar
2022-09-16 16:26:58,740 - INFO [main:Environment#98] - Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib
2022-09-16 16:26:58,745 - INFO [main:Environment#98] - Client environment:java.io.tmpdir=/tmp
2022-09-16 16:26:58,745 - INFO [main:Environment#98] - Client environment:java.compiler=<NA>
2022-09-16 16:26:58,745 - INFO [main:Environment#98] - Client environment:os.name=Linux
2022-09-16 16:26:58,748 - INFO [main:Environment#98] - Client environment:os.arch=amd64
2022-09-16 16:26:58,748 - INFO [main:Environment#98] - Client environment:os.version=5.10.16.3-microsoft-standard-WSL2
2022-09-16 16:26:58,748 - INFO [main:Environment#98] - Client environment:user.name=kafka
2022-09-16 16:26:58,748 - INFO [main:Environment#98] - Client environment:user.home=/kafka
2022-09-16 16:26:58,749 - INFO [main:Environment#98] - Client environment:user.dir=/kafka
2022-09-16 16:26:58,749 - INFO [main:Environment#98] - Client environment:os.memory.free=975MB
2022-09-16 16:26:58,749 - INFO [main:Environment#98] - Client environment:os.memory.max=1024MB
2022-09-16 16:26:58,749 - INFO [main:Environment#98] - Client environment:os.memory.total=1024MB
2022-09-16 16:26:58,754 - INFO [main:ZooKeeper#1006] - Initiating client connection, connectString=0.0.0.0:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$#3fc79729
2022-09-16 16:26:58,782 - INFO [main:ClientCnxnSocket#239] - jute.maxbuffer value is 4194304 Bytes
2022-09-16 16:26:58,797 - INFO [main:ClientCnxn#1736] - zookeeper.request.timeout value is 0. feature enabled=false
2022-09-16 16:26:58,807 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Waiting until connected.
2022-09-16 16:26:58,840 - INFO [main-SendThread(0.0.0.0:2181):ClientCnxn$SendThread#1181] - Opening socket connection to server 0.0.0.0/0.0.0.0:2181.
2022-09-16 16:26:58,842 - INFO [main-SendThread(0.0.0.0:2181):ClientCnxn$SendThread#1183] - SASL config status: Will not attempt to authenticate using SASL (unknown error)
2022-09-16 16:26:58,861 - WARN [main-SendThread(0.0.0.0:2181):ClientCnxn$SendThread#1300] - Session 0x0 for sever 0.0.0.0/0.0.0.0:2181, Closing socket connection. Attempting reconnect except it is a SessionExpiredException.
java.net.ConnectException: Connection refused
at java.base/sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at java.base/sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:777)
at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:344)
at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1290)
I also got the following log from the Kafka server.log:
2022-09-16 14:26:52,915 - INFO [main:Log4jControllerRegistration$#31] - Registered kafka:type=kafka.Log4jController MBean
2022-09-16 14:26:54,942 - INFO [main:Logging#66] - starting
2022-09-16 14:26:54,965 - INFO [main:Logging#66] - Connecting to zookeeper on 0.0.0.0:2181
2022-09-16 14:26:55,082 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Initializing a new session to 0.0.0.0:2181.
2022-09-16 14:26:55,341 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Waiting until connected.
2022-09-16 14:27:55,367 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closing.
2022-09-16 14:27:55,758 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closed.
2022-09-16 14:27:55,797 - ERROR [main:MarkerIgnoringBase#159] - Fatal error during KafkaServer startup. Prepare to shutdown
kafka.zookeeper.ZooKeeperClientTimeoutException: Timed out waiting for connection while in state: CONNECTING
at kafka.zookeeper.ZooKeeperClient.$anonfun$waitUntilConnected$3(ZooKeeperClient.scala:254)
at kafka.zookeeper.ZooKeeperClient.waitUntilConnected(ZooKeeperClient.scala:250)
at kafka.zookeeper.ZooKeeperClient.<init>(ZooKeeperClient.scala:108)
at kafka.zk.KafkaZkClient$.apply(KafkaZkClient.scala:1981)
at kafka.server.KafkaServer.initZkClient(KafkaServer.scala:457)
at kafka.server.KafkaServer.startup(KafkaServer.scala:196)
at kafka.Kafka$.main(Kafka.scala:109)
at kafka.Kafka.main(Kafka.scala)
2022-09-16 14:27:55,813 - INFO [main:Logging#66] - shutting down
2022-09-16 14:27:55,858 - INFO [main:Logging#66] - shut down completed
2022-09-16 14:27:55,861 - ERROR [main:MarkerIgnoringBase#143] - Exiting Kafka.
2022-09-16 14:27:55,864 - INFO [kafka-shutdown-hook:Logging#66] - shutting down
2022-09-16 14:42:16,757 - INFO [main:Log4jControllerRegistration$#31] - Registered kafka:type=kafka.Log4jController MBean
2022-09-16 14:42:18,622 - INFO [main:Logging#66] - starting
2022-09-16 14:42:18,624 - INFO [main:Logging#66] - Connecting to zookeeper on 0.0.0.0:2181
2022-09-16 14:42:18,656 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Initializing a new session to 0.0.0.0:2181.
2022-09-16 14:42:18,749 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Waiting until connected.
2022-09-16 14:43:18,769 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closing.
2022-09-16 14:43:19,784 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closed.
2022-09-16 14:43:19,796 - ERROR [main:MarkerIgnoringBase#159] - Fatal error during KafkaServer startup. Prepare to shutdown
kafka.zookeeper.ZooKeeperClientTimeoutException: Timed out waiting for connection while in state: CONNECTING
at kafka.zookeeper.ZooKeeperClient.$anonfun$waitUntilConnected$3(ZooKeeperClient.scala:254)
at kafka.zookeeper.ZooKeeperClient.waitUntilConnected(ZooKeeperClient.scala:250)
at kafka.zookeeper.ZooKeeperClient.<init>(ZooKeeperClient.scala:108)
at kafka.zk.KafkaZkClient$.apply(KafkaZkClient.scala:1981)
at kafka.server.KafkaServer.initZkClient(KafkaServer.scala:457)
at kafka.server.KafkaServer.startup(KafkaServer.scala:196)
at kafka.Kafka$.main(Kafka.scala:109)
at kafka.Kafka.main(Kafka.scala)
2022-09-16 14:43:19,809 - INFO [main:Logging#66] - shutting down
2022-09-16 14:43:19,858 - INFO [main:Logging#66] - shut down completed
2022-09-16 14:43:19,870 - ERROR [main:MarkerIgnoringBase#143] - Exiting Kafka.
2022-09-16 14:43:19,876 - INFO [kafka-shutdown-hook:Logging#66] - shutting down
2022-09-16 14:53:57,029 - INFO [main:Log4jControllerRegistration$#31] - Registered kafka:type=kafka.Log4jController MBean
2022-09-16 14:53:59,011 - INFO [main:Logging#66] - starting
2022-09-16 14:53:59,017 - INFO [main:Logging#66] - Connecting to zookeeper on 0.0.0.0:2181
2022-09-16 14:53:59,115 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Initializing a new session to 0.0.0.0:2181.
2022-09-16 14:53:59,247 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Waiting until connected.
2022-09-16 14:54:59,256 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closing.
2022-09-16 14:55:00,389 - INFO [main:Logging#66] - [ZooKeeperClient Kafka server] Closed.
2022-09-16 14:55:00,397 - ERROR [main:MarkerIgnoringBase#159] - Fatal error during KafkaServer startup. Prepare to shutdown
kafka.zookeeper.ZooKeeperClientTimeoutException: Timed out waiting for connection while in state: CONNECTING
at kafka.zookeeper.ZooKeeperClient.$anonfun$waitUntilConnected$3(ZooKeeperClient.scala:254)
at kafka.zookeeper.ZooKeeperClient.waitUntilConnected(ZooKeeperClient.scala:250)
at kafka.zookeeper.ZooKeeperClient.<init>(ZooKeeperClient.scala:108)
at kafka.zk.KafkaZkClient$.apply(KafkaZkClient.scala:1981)
at kafka.server.KafkaServer.initZkClient(KafkaServer.scala:457)
at kafka.server.KafkaServer.startup(KafkaServer.scala:196)
at kafka.Kafka$.main(Kafka.scala:109)
at kafka.Kafka.main(Kafka.scala)
2022-09-16 14:55:00,400 - INFO [main:Logging#66] - shutting down
2022-09-16 14:55:00,491 - INFO [main:Logging#66] - shut down completed
2022-09-16 14:55:00,525 - ERROR [main:MarkerIgnoringBase#143] - Exiting Kafka.
2022-09-16 14:55:00,529 - INFO [kafka-shutdown-hook:Logging#66] - shutting down
I even went as far as uninstalling Docker Desktop and re-installing it, but I still get the same issue.
Extra info: I am running Docker Desktop (with WSL 2 on an Ubuntu distro) on Windows 11.
It's possible Debezium did an update which broke your setup, so I suggest you grab the latest compose file, many of which exist at
https://github.com/debezium/debezium-examples
Look at the logs,
Using ZOOKEEPER_CONNECT=0.0.0.0:2181
It's not using KAFKA_ZOOKEEPER_CONNECT ... Remove the KAFKA_ prefix to set the appropriate value, then your logs should say something like ZOOKEEPER_CONNECT=zookeeper1:2181

Error in Kafka and Zookeeper container made by a docker-compose file

I need to expose JMX ports of Kafka and Zookeeper container to connect a monitoring tool such as Lenses or Grafana. The monitoring tool would be installed on a different server.
I made a docker-compose file to apply the containers' configurations, but it throws some errors.
Here is the Docker-compose YAML file.
Any help is appreciated.
version: '3.2'
services:
zookeeper:
container_name: zookeeper
image: wurstmeister/zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
KAFKA_JMX_OPTS: >-
-Dcom.sun.management.jmxremote=true
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
-Djava.rmi.server.hostname=156.17.42.120
-Dcom.sun.management.jmxremote.port=11992
-Dcom.sun.management.jmxremote.rmi.port=11992
JMX_PORT: 11992
ports:
- "2181:2181"
- "11992:11992"
# https://hub.docker.com/r/confluentinc/cp-kafka/
kafka:
container_name: kafka
image: wurstmeister/kafka:latest
environment:
## the >- used below yields a value which is a string and properly
## folds the multiple lines, resulting in one long string:
## https://yaml.org/spec/1.2/spec.html
KAFKA_LOG_RETENTION_MS: 10000
KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 5000
KAFKA_ADVERTISED_LISTENERS: >-
LISTENER_DOCKER_INTERNAL://kafka:19092,
LISTENER_DOCKER_EXTERNAL://156.17.42.120:9092
JMX_PORT: 11991
KAFKA_JMX_OPTS: >-
-Djava.rmi.server.hostname=156.17.42.120
-Dcom.sun.management.jmxremote.port=11991
-Dcom.sun.management.jmxremote.rmi.port=11991
-Dcom.sun.management.jmxremote=true
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
KAFKA_LISTENERS: >-
LISTENER_DOCKER_INTERNAL://kafka:19091,
LISTENER_DOCKER_EXTERNAL://156.17.42.120:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: >-
LISTENER_DOCKER_INTERNAL:PLAINTEXT,
LISTENER_DOCKER_EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_LOG4J_LOGGERS: >-
kafka.controller=INFO,
kafka.producer.async.DefaultEventHandler=INFO,
state.change.logger=INFO
ports:
- 9092:9092
- 11991:11991
depends_on:
- zookeeper
volumes:
- /var/run/docker.sock:/var/run/docker.sock
Docker-compose Log;
[33mzookeeper |[0m Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m delete.topic.enable = true
[33mzookeeper |[0m 2020-02-04 12:03:59,394 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m fetch.purgatory.purge.interval.requests = 1000
[33mzookeeper |[0m 2020-02-04 12:03:59,398 [myid:] - INFO [main:DatadirCleanupManager#78] - autopurge.snapRetainCount set to 3
[36mkafka |[0m group.initial.rebalance.delay.ms = 0
[33mzookeeper |[0m 2020-02-04 12:03:59,398 [myid:] - INFO [main:DatadirCleanupManager#79] - autopurge.purgeInterval set to 1
[36mkafka |[0m group.max.session.timeout.ms = 1800000
[33mzookeeper |[0m 2020-02-04 12:03:59,399 [myid:] - WARN [main:QuorumPeerMain#116] - Either no config or no quorum defined in config, running in standalone mode
[36mkafka |[0m group.max.size = 2147483647
[33mzookeeper |[0m 2020-02-04 12:03:59,399 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[36mkafka |[0m group.min.session.timeout.ms = 6000
[33mzookeeper |[0m 2020-02-04 12:03:59,406 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[36mkafka |[0m host.name =
[33mzookeeper |[0m 2020-02-04 12:03:59,408 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m inter.broker.listener.name = LISTENER_DOCKER_INTERNAL
[33mzookeeper |[0m 2020-02-04 12:03:59,409 [myid:] - INFO [main:ZooKeeperServerMain#98] - Starting server
[36mkafka |[0m inter.broker.protocol.version = 2.4-IV1
[33mzookeeper |[0m 2020-02-04 12:03:59,414 [myid:] - INFO [main:Environment#100] - Server environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 04:05 GMT
[36mkafka |[0m kafka.metrics.polling.interval.secs = 10
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:host.name=185df4f257aa
[36mkafka |[0m kafka.metrics.reporters = []
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.version=1.7.0_65
[36mkafka |[0m leader.imbalance.check.interval.seconds = 300
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.vendor=Oracle Corporation
[36mkafka |[0m leader.imbalance.per.broker.percentage = 10
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.home=/usr/lib/jvm/java-7-openjdk-amd64/jre
[36mkafka |[0m listener.security.protocol.map = LISTENER_DOCKER_INTERNAL:PLAINTEXT, LISTENER_DOCKER_EXTERNAL:PLAINTEXT
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.class.path=/opt/zookeeper-3.4.13/bin/../build/classes:/opt/zookeeper-3.4.13/bin/../build/lib/*.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-log4j12-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-api-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/netty-3.10.6.Final.jar:/opt/zookeeper-3.4.13/bin/../lib/log4j-1.2.17.jar:/opt/zookeeper-3.4.13/bin/../lib/jline-0.9.94.jar:/opt/zookeeper-3.4.13/bin/../lib/audience-annotations-0.5.0.jar:/opt/zookeeper-3.4.13/bin/../zookeeper-3.4.13.jar:/opt/zookeeper-3.4.13/bin/../src/java/lib/*.jar:/opt/zookeeper-3.4.13/bin/../conf:
[36mkafka |[0m listeners = LISTENER_DOCKER_INTERNAL://kafka:19091, LISTENER_DOCKER_EXTERNAL://156.17.42.120:9092
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib
[36mkafka |[0m log.cleaner.backoff.ms = 15000
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.io.tmpdir=/tmp
[36mkafka |[0m log.cleaner.dedupe.buffer.size = 134217728
[33mzookeeper |[0m 2020-02-04 12:03:59,417 [myid:] - INFO [main:Environment#100] - Server environment:java.compiler=<NA>
[36mkafka |[0m log.cleaner.delete.retention.ms = 86400000
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.name=Linux
[36mkafka |[0m log.cleaner.enable = true
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.arch=amd64
[36mkafka |[0m log.cleaner.io.buffer.load.factor = 0.9
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.version=4.18.0-147.3.1.el8_1.x86_64
[36mkafka |[0m log.cleaner.io.buffer.size = 524288
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.name=root
[36mkafka |[0m log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.home=/root
[36mkafka |[0m log.cleaner.max.compaction.lag.ms = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.dir=/opt/zookeeper-3.4.13
[36mkafka |[0m log.cleaner.min.cleanable.ratio = 0.5
[33mzookeeper |[0m 2020-02-04 12:03:59,421 [myid:] - INFO [main:ZooKeeperServer#836] - tickTime set to 2000
[36mkafka |[0m log.cleaner.min.compaction.lag.ms = 0
[33mzookeeper |[0m 2020-02-04 12:03:59,421 [myid:] - INFO [main:ZooKeeperServer#845] - minSessionTimeout set to -1
[36mkafka |[0m log.cleaner.threads = 1
[33mzookeeper |[0m 2020-02-04 12:03:59,422 [myid:] - INFO [main:ZooKeeperServer#854] - maxSessionTimeout set to -1
[36mkafka |[0m log.cleanup.policy = [delete]
[33mzookeeper |[0m 2020-02-04 12:03:59,430 [myid:] - INFO [main:ServerCnxnFactory#117] - Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory
[36mkafka |[0m log.dir = /tmp/kafka-logs
[33mzookeeper |[0m 2020-02-04 12:03:59,435 [myid:] - INFO [main:NIOServerCnxnFactory#89] - binding to port 0.0.0.0/0.0.0.0:2181
[36mkafka |[0m log.dirs = /kafka/kafka-logs-acff9186a4ad
[33mzookeeper |[0m 2020-02-04 12:04:02,603 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxnFactory#215] - Accepted socket connection from /172.18.0.3:43840
[36mkafka |[0m log.flush.interval.messages = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:04:02,608 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#949] - Client attempting to establish new session at /172.18.0.3:43840
[36mkafka |[0m log.flush.interval.ms = null
[33mzookeeper |[0m 2020-02-04 12:04:02,610 [myid:] - INFO [SyncThread:0:FileTxnLog#213] - Creating new log file: log.1
[36mkafka |[0m log.flush.offset.checkpoint.interval.ms = 60000
[33mzookeeper |[0m 2020-02-04 12:04:02,661 [myid:] - INFO [SyncThread:0:ZooKeeperServer#694] - Established session 0x1001e9c10d90000 with negotiated timeout 6000 for client /172.18.0.3:43840
[33mzookeeper |[0m 2020-02-04 12:04:02,815 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x2 zxid:0x3 txntype:-1 reqpath:n/a Error Path:/brokers Error:KeeperErrorCode = NoNode for /brokers
[36mkafka |[0m log.flush.scheduler.interval.ms = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:04:02,870 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x6 zxid:0x7 txntype:-1 reqpath:n/a Error Path:/config Error:KeeperErrorCode = NoNode for /config
[36mkafka |[0m log.flush.start.offset.checkpoint.interval.ms = 60000
[33mzookeeper |[0m 2020-02-04 12:04:02,908 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x9 zxid:0xa txntype:-1 reqpath:n/a Error Path:/admin Error:KeeperErrorCode = NoNode for /admin
[36mkafka |[0m log.index.interval.bytes = 4096
[33mzookeeper |[0m 2020-02-04 12:04:03,223 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x15 zxid:0x15 txntype:-1 reqpath:n/a Error Path:/cluster Error:KeeperErrorCode = NoNode for /cluster
[36mkafka |[0m log.index.size.max.bytes = 10485760
[33mzookeeper |[0m 2020-02-04 12:04:04,030 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#487] - Processed session termination for sessionid: 0x1001e9c10d90000
[33mzookeeper |[0m 2020-02-04 12:04:04,045 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#1056] - Closed socket connection for client /172.18.0.3:43840 which had sessionid 0x1001e9c10d90000
[36mkafka |[0m log.message.downconversion.enable = true
[33mzookeeper |[0m 2020-02-04 13:03:59,399 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[36mkafka |[0m log.message.format.version = 2.4-IV1
[33mzookeeper |[0m 2020-02-04 13:03:59,400 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[36mkafka |[0m log.message.timestamp.difference.max.ms = 9223372036854775807
[36mkafka |[0m log.message.timestamp.type = CreateTime
[36mkafka |[0m log.preallocate = false
[36mkafka |[0m log.retention.bytes = -1
[36mkafka |[0m log.retention.check.interval.ms = 5000
[36mkafka |[0m log.retention.hours = 168
[36mkafka |[0m log.retention.minutes = null
[36mkafka |[0m log.retention.ms = 10000
[36mkafka |[0m log.roll.hours = 168
[36mkafka |[0m log.roll.jitter.hours = 0
[36mkafka |[0m log.roll.jitter.ms = null
[36mkafka |[0m log.roll.ms = null
[36mkafka |[0m log.segment.bytes = 1073741824
[36mkafka |[0m log.segment.delete.delay.ms = 60000
[36mkafka |[0m max.connections = 2147483647
[36mkafka |[0m max.connections.per.ip = 2147483647
[36mkafka |[0m max.connections.per.ip.overrides =
[36mkafka |[0m max.incremental.fetch.session.cache.slots = 1000
[36mkafka |[0m message.max.bytes = 1000012
[36mkafka |[0m metric.reporters = []
[36mkafka |[0m metrics.num.samples = 2
[36mkafka |[0m metrics.recording.level = INFO
[36mkafka |[0m metrics.sample.window.ms = 30000
[36mkafka |[0m min.insync.replicas = 1
[36mkafka |[0m num.io.threads = 8
[36mkafka |[0m num.network.threads = 3
[36mkafka |[0m num.partitions = 1
[36mkafka |[0m num.recovery.threads.per.data.dir = 1
[36mkafka |[0m num.replica.alter.log.dirs.threads = null
[36mkafka |[0m num.replica.fetchers = 1
[36mkafka |[0m offset.metadata.max.bytes = 4096
[36mkafka |[0m offsets.commit.required.acks = -1
[36mkafka |[0m offsets.commit.timeout.ms = 5000
[36mkafka |[0m offsets.load.buffer.size = 5242880
[36mkafka |[0m offsets.retention.check.interval.ms = 600000
[36mkafka |[0m offsets.retention.minutes = 10080
[36mkafka |[0m offsets.topic.compression.codec = 0
[36mkafka |[0m offsets.topic.num.partitions = 50
[36mkafka |[0m offsets.topic.replication.factor = 1
[36mkafka |[0m offsets.topic.segment.bytes = 104857600
[36mkafka |[0m password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
[36mkafka |[0m password.encoder.iterations = 4096
[36mkafka |[0m password.encoder.key.length = 128
[36mkafka |[0m password.encoder.keyfactory.algorithm = null
[36mkafka |[0m password.encoder.old.secret = null
[36mkafka |[0m password.encoder.secret = null
[36mkafka |[0m port = 9092
[36mkafka |[0m principal.builder.class = null
[36mkafka |[0m producer.purgatory.purge.interval.requests = 1000
[36mkafka |[0m queued.max.request.bytes = -1
[36mkafka |[0m queued.max.requests = 500
[36mkafka |[0m quota.consumer.default = 9223372036854775807
[36mkafka |[0m quota.producer.default = 9223372036854775807
[36mkafka |[0m quota.window.num = 11
[36mkafka |[0m quota.window.size.seconds = 1
[36mkafka |[0m replica.fetch.backoff.ms = 1000
[36mkafka |[0m replica.fetch.max.bytes = 1048576
[36mkafka |[0m replica.fetch.min.bytes = 1
[36mkafka |[0m replica.fetch.response.max.bytes = 10485760
[36mkafka |[0m replica.fetch.wait.max.ms = 500
[36mkafka |[0m replica.high.watermark.checkpoint.interval.ms = 5000
[36mkafka |[0m replica.lag.time.max.ms = 10000
[36mkafka |[0m replica.selector.class = null
[36mkafka |[0m replica.socket.receive.buffer.bytes = 65536
[36mkafka |[0m replica.socket.timeout.ms = 30000
[36mkafka |[0m replication.quota.window.num = 11
[36mkafka |[0m replication.quota.window.size.seconds = 1
[36mkafka |[0m request.timeout.ms = 30000
[36mkafka |[0m reserved.broker.max.id = 1000
[36mkafka |[0m sasl.client.callback.handler.class = null
[36mkafka |[0m sasl.enabled.mechanisms = [GSSAPI]
[36mkafka |[0m sasl.jaas.config = null
[36mkafka |[0m sasl.kerberos.kinit.cmd = /usr/bin/kinit
[36mkafka |[0m sasl.kerberos.min.time.before.relogin = 60000
[36mkafka |[0m sasl.kerberos.principal.to.local.rules = [DEFAULT]
[36mkafka |[0m sasl.kerberos.service.name = null
[36mkafka |[0m sasl.kerberos.ticket.renew.jitter = 0.05
[36mkafka |[0m sasl.kerberos.ticket.renew.window.factor = 0.8
[36mkafka |[0m sasl.login.callback.handler.class = null
[36mkafka |[0m sasl.login.class = null
[36mkafka |[0m sasl.login.refresh.buffer.seconds = 300
[36mkafka |[0m sasl.login.refresh.min.period.seconds = 60
[36mkafka |[0m sasl.login.refresh.window.factor = 0.8
[36mkafka |[0m sasl.login.refresh.window.jitter = 0.05
[36mkafka |[0m sasl.mechanism.inter.broker.protocol = GSSAPI
[36mkafka |[0m sasl.server.callback.handler.class = null
[36mkafka |[0m security.inter.broker.protocol = PLAINTEXT
[36mkafka |[0m security.providers = null
[36mkafka |[0m socket.receive.buffer.bytes = 102400
[36mkafka |[0m socket.request.max.bytes = 104857600
[36mkafka |[0m socket.send.buffer.bytes = 102400
[36mkafka |[0m ssl.cipher.suites = []
[36mkafka |[0m ssl.client.auth = none
[36mkafka |[0m ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
[36mkafka |[0m ssl.endpoint.identification.algorithm = https
[36mkafka |[0m ssl.key.password = null
[36mkafka |[0m ssl.keymanager.algorithm = SunX509
[36mkafka |[0m ssl.keystore.location = null
[36mkafka |[0m ssl.keystore.password = null
[36mkafka |[0m ssl.keystore.type = JKS
[36mkafka |[0m ssl.principal.mapping.rules = DEFAULT
[36mkafka |[0m ssl.protocol = TLS
[36mkafka |[0m ssl.provider = null
[36mkafka |[0m ssl.secure.random.implementation = null
[36mkafka |[0m ssl.trustmanager.algorithm = PKIX
[36mkafka |[0m ssl.truststore.location = null
[36mkafka |[0m ssl.truststore.password = null
[36mkafka |[0m ssl.truststore.type = JKS
[36mkafka |[0m transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
[36mkafka |[0m transaction.max.timeout.ms = 900000
[36mkafka |[0m transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
[36mkafka |[0m transaction.state.log.load.buffer.size = 5242880
[36mkafka |[0m transaction.state.log.min.isr = 1
[36mkafka |[0m transaction.state.log.num.partitions = 50
[36mkafka |[0m transaction.state.log.replication.factor = 1
[36mkafka |[0m transaction.state.log.segment.bytes = 104857600
[36mkafka |[0m transactional.id.expiration.ms = 604800000
[36mkafka |[0m unclean.leader.election.enable = false
[36mkafka |[0m zookeeper.connect = zookeeper:2181
[36mkafka |[0m zookeeper.connection.timeout.ms = 6000
[36mkafka |[0m zookeeper.max.in.flight.requests = 10
[36mkafka |[0m zookeeper.session.timeout.ms = 6000
[36mkafka |[0m zookeeper.set.acl = false
[36mkafka |[0m zookeeper.sync.time.ms = 2000
[36mkafka |[0m (kafka.server.KafkaConfig)
[36mkafka |[0m [2020-02-04 12:04:03,415] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,416] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,417] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,440] INFO Log directory /kafka/kafka-logs-acff9186a4ad not found, creating it. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,448] INFO Loading logs. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,455] INFO Logs loading complete in 7 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,469] INFO Starting log cleanup with a period of 5000 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,477] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,945] INFO Awaiting socket connections on kafka:19091. (kafka.network.Acceptor)
[36mkafka |[0m [2020-02-04 12:04:03,979] INFO [SocketServer brokerId=1001] Created data-plane acceptor and processors for endpoint : EndPoint(kafka,19091,ListenerName(LISTENER_DOCKER_INTERNAL),PLAINTEXT) (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,983] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[36mkafka |[0m org.apache.kafka.common.KafkaException: Socket server failed to bind to 156.17.42.120:9092: Address not available.
[36mkafka |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:632)
[36mkafka |[0m at kafka.network.Acceptor.<init>(SocketServer.scala:508)
[36mkafka |[0m at kafka.network.SocketServer.createAcceptor(SocketServer.scala:271)
[36mkafka |[0m at kafka.network.SocketServer.$anonfun$createDataPlaneAcceptorsAndProcessors$1(SocketServer.scala:240)
[36mkafka |[0m at kafka.network.SocketServer.$anonfun$createDataPlaneAcceptorsAndProcessors$1$adapted(SocketServer.scala:238)
[36mkafka |[0m at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
[36mkafka |[0m at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
[36mkafka |[0m at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
[36mkafka |[0m at kafka.network.SocketServer.createDataPlaneAcceptorsAndProcessors(SocketServer.scala:238)
[36mkafka |[0m at kafka.network.SocketServer.startup(SocketServer.scala:121)
[36mkafka |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:263)
[36mkafka |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:44)
[36mkafka |[0m at kafka.Kafka$.main(Kafka.scala:84)
[36mkafka |[0m at kafka.Kafka.main(Kafka.scala)
[36mkafka |[0m Caused by: java.net.BindException: Address not available
[36mkafka |[0m at sun.nio.ch.Net.bind0(Native Method)
[36mkafka |[0m at sun.nio.ch.Net.bind(Net.java:433)
[36mkafka |[0m at sun.nio.ch.Net.bind(Net.java:425)
[36mkafka |[0m at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
[36mkafka |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
[36mkafka |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:67)
[36mkafka |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:628)
[36mkafka |[0m ... 13 more
[36mkafka |[0m [2020-02-04 12:04:03,986] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)
[36mkafka |[0m [2020-02-04 12:04:03,987] INFO [SocketServer brokerId=1001] Stopping socket server request processors (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,993] INFO [SocketServer brokerId=1001] Stopped socket server request processors (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,996] INFO Shutting down. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:04,028] INFO Shutdown complete. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:04,029] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient)
[36mkafka |[0m [2020-02-04 12:04:04,146] INFO Session: 0x1001e9c10d90000 closed (org.apache.zookeeper.ZooKeeper)
[36mkafka |[0m [2020-02-04 12:04:04,146] INFO EventThread shut down for session: 0x1001e9c10d90000 (org.apache.zookeeper.ClientCnxn)
[36mkafka |[0m [2020-02-04 12:04:04,148] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient)
[36mkafka |[0m [2020-02-04 12:04:04,148] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,417] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,417] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,418] INFO [SocketServer brokerId=1001] Shutting down socket server (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:04,457] INFO [SocketServer brokerId=1001] Shutdown completed (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:04,463] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)
[36mkafka |[0m [2020-02-04 12:04:04,464] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)
[36mkafka |[0m [2020-02-04 12:04:04,471] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)

Kubernetes-Kafka unable to write message on topic

I am trying to write data on kafka topic but, stuck with some errors. Below are my configuration & error details.
Kubernetes Service:
kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kafka-service NodePort 10.105.214.246 <none> 9092:30998/TCP 17m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d
zoo1 ClusterIP 10.101.3.128 <none> 2181/TCP,2888/TCP,3888/TCP 20m
Kubernetes Pods:
kubectl get pods
NAME READY STATUS RESTARTS AGE
kafka-broker0-69c97b67f-4pmw9 1/1 Running 1 1m
zookeeper-deployment-1-796f9d9bcc-cr756 1/1 Running 0 20m
Kafka Docker Process:
docker ps | grep kafka
f79cd0196083 wurstmeister/kafka#sha256:d04dafd2b308f26dbeed8454f67c321579c2818c1eff5e8f695e14a19b1d599b "start-kafka.sh" About a minute ago Up About a minute k8s_kafka_kafka-broker0-69c97b67f-4pmw9_default_a747d38a-0da6-11e9-bd84-fa163e7d3173_1
75393e9e25c1 k8s.gcr.io/pause-amd64:3.1 "/pause" About a minute ago Up About a minute k8s_POD_kafka-broker0-69c97b67f-4pmw9_default_a747d38a-0da6-11e9-bd84-fa163e7d3173_0
Topic test is created successfully in Kafka as shown below:
docker exec k8s_kafka_kafka-broker0-69c97b67f-4pmw9_default_a747d38a-0da6-11e9-bd84-fa163e7d3173_1 /opt/kafka_2.12-2.1.0/bin/kafka-topics.sh --list --zookeeper zoo1:2181
OR
docker exec k8s_kafka_kafka-broker0-69c97b67f-4pmw9_default_a747d38a-0da6-11e9-bd84-fa163e7d3173_1 /opt/kafka_2.12-2.1.0/bin/kafka-topics.sh --list --zookeeper 10.101.3.128:2181
Output of above command:
test
As the topic is available to write data to, I executed the below command with the host machine IP 10.225.36.98 or with the service IP 10.105.214.246:
kubectl exec kafka-broker0-69c97b67f-4pmw9 -c kafka -i -t --
/opt/kafka_2.12-2.1.0/bin/kafka-console-producer.sh [ --broker-list
10.225.36.98:30998 --topic test ]
>{"k":"v"}
But none of them worked for me, and they threw the below exception:
[2019-01-01 09:26:52,215] ERROR Error when sending message to topic test with key: null, value: 9 bytes with error:
(org.apache.kafka.clients.producer.internals.ErrorLoggingCallback)
org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.
>[2019-01-01 09:27:59,513] WARN [Producer clientId=console-producer]
Connection to node -1 (/10.225.36.98:30998) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient)
When tried to write on broker with hostname kafka:
kubectl exec kafka-broker0-69c97b67f-4pmw9 -c kafka -i -t -- /opt/kafka_2.12-2.1.0/bin/kafka-console-producer.sh [ --broker-list kafka:9092 --topic test ]
[2019-01-01 09:34:41,293] WARN Couldn't resolve server kafka:9092 from bootstrap.servers as DNS resolution failed for kafka
(org.apache.kafka.clients.ClientUtils)
org.apache.kafka.common.KafkaException: Failed to construct kafka producer
As the host & service IP were not working, I tried with pod IP, but get test=LEADER_NOT_AVAILABLE error.
kubectl exec kafka-broker0-69c97b67f-4pmw9 -c kafka -i -t -- /opt/kafka_2.12-2.1.0/bin/kafka-console-producer.sh [ --broker-list 172.17.0.7:9092 --topic test ]
>{"k":"v"}
[2019-01-01 09:52:30,733] WARN [Producer clientId=console-producer] Error while fetching metadata with correlation id 1 : {test=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient)
After searching Google I found a command to get the list of available brokers in Zookeeper. So I tried to run it from the container and got stuck on the below error:
bash-4.4# ./opt/zookeeper/bin/zkCli.sh -server zoo1:2181 ls /brokers/ids
Connecting to zoo1:2181
Exception from Zookeeper:
2019-01-01 09:18:05,215 [myid:] - INFO [main:Environment#100] - Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT
2019-01-01 09:18:05,219 [myid:] - INFO [main:Environment#100] - Client environment:host.name=zookeeper-deployment-1-796f9d9bcc-cr756
2019-01-01 09:18:05,220 [myid:] - INFO [main:Environment#100] - Client environment:java.version=1.8.0_151
2019-01-01 09:18:05,223 [myid:] - INFO [main:Environment#100] - Client environment:java.vendor=Oracle Corporation
2019-01-01 09:18:05,223 [myid:] - INFO [main:Environment#100] - Client environment:java.home=/usr/lib/jvm/java-1.8-openjdk/jre
2019-01-01 09:18:05,223 [myid:] - INFO [main:Environment#100] - Client environment:java.class.path=/opt/zookeeper/bin/../build/classes:/opt/zookeeper/b
in/../build/lib/*.jar:/opt/zookeeper/bin/../lib/slf4j-log4j12-1.6.1.jar:/opt/zookeeper/bin/../lib/slf4j-api-
1.6.1.jar:/opt/zookeeper/bin/../lib/netty-3.10.5.Final.jar:/opt/zookeeper/bin/../lib/log4j-
1.2.16.jar:/opt/zookeeper/bin/../lib/jline-0.9.94.jar:/opt/zookeeper/bin/../zookeeper-3.4.10.jar:/opt/zookeeper/bin/../src/java/lib/*.jar:/opt/zookeeper/bin/../conf:
2019-01-01 09:18:05,223 [myid:] - INFO [main:Environment#100] - Client environment:java.library.path=/usr/lib/jvm/java-1.8-
openjdk/jre/lib/amd64/server:/usr/lib/jvm/java-1.8-openjdk/jre/lib/amd64:/usr/lib/jvm/java-1.8-openjdk/jre/../lib/amd64:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2019-01-01 09:18:05,223 [myid:] - INFO [main:Environment#100] - Client environment:java.io.tmpdir=/tmp
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:java.compiler=<NA>
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:os.name=Linux
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:os.arch=amd64
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:os.version=3.10.0-693.11.6.el7.x86_64
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:user.name=root
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:user.home=/root
2019-01-01 09:18:05,224 [myid:] - INFO [main:Environment#100] - Client environment:user.dir=/
2019-01-01 09:18:05,225 [myid:] - INFO [main:ZooKeeper#438] - Initiating client connection, connectString=zoo1:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher#25f38edc
2019-01-01 09:18:05,259 [myid:] - INFO [main-SendThread(zoo1.default.svc.cluster.local:2181):ClientCnxn$SendThread#1032] - Opening socket connection to server zoo1.default.svc.cluster.local/10.101.3.128:2181. Will not attempt to authenticate using SASL (unknown error)
2019-01-01 09:18:35,280 [myid:] - WARN [main-SendThread(zoo1.default.svc.cluster.local:2181):ClientCnxn$SendThread#1108] - Client session timed out, have not heard from server in 30027ms for sessionid 0x0
2019-01-01 09:18:35,282 [myid:] - INFO [main-SendThread(zoo1.default.svc.cluster.local:2181):ClientCnxn$SendThread#1156] - Client session timed out, have not heard from server in 30027ms for sessionid 0x0, closing socket connection and attempting reconnect
Exception in thread "main" org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /brokers/ids
at org.apache.zookeeper.KeeperException.create(KeeperException.java:99)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1532)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1560)
at org.apache.zookeeper.ZooKeeperMain.processZKCmd(ZooKeeperMain.java:731)
at org.apache.zookeeper.ZooKeeperMain.processCmd(ZooKeeperMain.java:599)
at org.apache.zookeeper.ZooKeeperMain.run(ZooKeeperMain.java:362)
at org.apache.zookeeper.ZooKeeperMain.main(ZooKeeperMain.java:290)
I also tried to create Kafka service of type LoadBalancer type, but, No LoadBalancer IP is assigned to service.
References to resolve this issue:
https://rmoff.net/2018/08/02/kafka-listeners-explained/
https://github.com/wurstmeister/kafka-docker/wiki/Connectivity#additional-listener-information
https://github.com/kubernetes/contrib/issues/2891
https://dzone.com/articles/ultimate-guide-to-installing-kafka-docker-on-kuber
https://github.com/wurstmeister/kafka-docker/issues/85
Any help would be appreciated.
Try the following command to send data to the topic:
docker exec k8s_kafka_kafka-broker0-69c97b67f-4pmw9_default_a747d38a-0da6-11e9-bd84-fa163e7d3173_1
/opt/kafka_2.12-2.1.0/bin/kafka-console-producer.sh
--broker-list kafka-service:30998 --topic test

connect failed between containers with zookeeper and kafka

I use kafka, zookeeper, elasticsearch, 3 nodes of kafka, 3 nodes of zk
I run docker-compose.yml on my mac,
123.345.567 is the output of the below command:
ifconfig | grep 'inet 192'| awk '{ print $2}'
docker-compose.yml is
version: '2'
services:
zookeeper:
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2181
follow_to_leader_port: 2888
leader_select_port: 3888
environment:
service1: 0.0.0.0:2888:3888
service2: 123.345.567:2889:3889
service3: 123.345.567:2890:3890
clientPort: 2181
ZOO_MY_ID: 1
initLimit: 2
zookeeper2:
ports:
- "2182:2182"
- "2889:2889"
- "3889:3889"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2182
follow_to_leader_port: 2889
leader_select_port: 3889
environment:
service1: 123.345.567:2888:3888
service2: 0.0.0.0:2889:3889
service3: 123.345.567:2890:3890
clientPort: 2182
ZOO_MY_ID: 2
initLimit: 2
zookeeper3:
ports:
- "2183:2183"
- "2890:2890"
- "3890:3890"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2183
follow_to_leader_port: 2890
leader_select_port: 3890
environment:
service1: 123.345.567:2888:3888
service2: 123.345.567:2889:3889
service3: 0.0.0.0:2890:3890
clientPort: 2183
initLimit: 2
ZOO_MY_ID: 3
kafka:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9096:9096"
environment:
broker_id: 1
listeners: PLAINTEXT://123.345.567:9096
advertised_listeners: PLAINTEXT://123.345.567:9096
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
kafka2:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9097:9097"
environment:
broker_id: 2
listeners: PLAINTEXT://123.345.567:9097
advertised_listeners: PLAINTEXT://123.345.567:9097
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
kafka3:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9098:9098"
environment:
broker_id: 3
listeners: PLAINTEXT://123.345.567:9098
advertised_listeners: PLAINTEXT://123.345.567:9098
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
docker ps output
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82c0f30cac77 compose_zookeeper3 "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2183->2183/tcp, 0.0.0.0:2890->2890/tcp, 0.0.0.0:3890->3890/tcp compose_zookeeper3_1
ebd93d99d0d2 compose_kafka "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9096->9096/tcp compose_kafka_1
be56ebaee602 compose_kafka2 "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9097->9097/tcp compose_kafka2_1
8a68874e52f9 compose_kafka3 "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9098->9098/tcp compose_kafka3_1
000a686a2772 compose_zookeeper2 "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2182->2182/tcp, 0.0.0.0:2889->2889/tcp, 0.0.0.0:3889->3889/tcp compose_zookeeper2_1
d3fc5ad9b8f8 compose_zookeeper "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp compose_zookeeper_1
some configuration in kafka
broker.id=2
listeners=PLAINTEXT://123.345.567:9097
advertised.listeners=PLAINTEXT://123.345.567:9097
zookeeper.connect=123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
some configuration in zookeeper2
clientPort=2182
server.1=123.345.567:2888:3888
server.2=0.0.0.0:2889:3889
server.3=123.345.567:2890:3890
some error log
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,083 [myid:3] - INFO [QuorumPeer[myid=3]/0.0.0.0:2183:FastLeaderElection#813] - New election. My id = 3, proposed zxid=0x100000047
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,113 [myid:3] - WARN [RecvWorker:1:QuorumCnxManager$RecvWorker#1025] - Connection broken for id 1, my id = 3, error =
[32mzookeeper3_1 |[0m java.io.EOFException
[32mzookeeper3_1 |[0m at java.io.DataInputStream.readInt(DataInputStream.java:392)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager$RecvWorker.run(QuorumCnxManager.java:1010)
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,123 [myid:3] - WARN [RecvWorker:1:QuorumCnxManager$RecvWorker#1028] - Interrupting SendWorker
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,125 [myid:3] - INFO [WorkerReceiver[myid=3]:FastLeaderElection#595] - Notification: 1 (message format version), 3 (n.leader), 0x100000047 (n.zxid), 0x1 (n.round), LOOKING (n.state), 3 (n.sid), 0x1 (n.peerEpoch) LOOKING (my state)
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,126 [myid:3] - WARN [SendWorker:1:QuorumCnxManager$SendWorker#941] - Interrupted while waiting for message on queue
[32mzookeeper3_1 |[0m java.lang.InterruptedException
[32mzookeeper3_1 |[0m at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2014)
[32mzookeeper3_1 |[0m at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2088)
[32mzookeeper3_1 |[0m at java.util.concurrent.ArrayBlockingQueue.poll(ArrayBlockingQueue.java:418)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager.pollSendQueue(QuorumCnxManager.java:1094)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager.access$700(QuorumCnxManager.java:74)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager$SendWorker.run(QuorumCnxManager.java:929)
kafka
[35mkafka_1 |[0m [2018-07-15 09:54:38,137] ERROR [KafkaServer id=1] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[35mkafka_1 |[0m kafka.common.KafkaException: Socket server failed to bind to 123.345.567:9096: Cannot assign requested address.
[35mkafka_1 |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:404)
[35mkafka_1 |[0m at kafka.network.Acceptor.<init>(SocketServer.scala:308)
[35mkafka_1 |[0m at kafka.network.SocketServer$$anonfun$createAcceptorAndProcessors$1.apply(SocketServer.scala:126)
[35mkafka_1 |[0m at kafka.network.SocketServer$$anonfun$createAcceptorAndProcessors$1.apply(SocketServer.scala:122)
[35mkafka_1 |[0m at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
[35mkafka_1 |[0m at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
[35mkafka_1 |[0m at kafka.network.SocketServer.createAcceptorAndProcessors(SocketServer.scala:122)
[35mkafka_1 |[0m at kafka.network.SocketServer.startup(SocketServer.scala:84)
[35mkafka_1 |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:247)
[35mkafka_1 |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)
[35mkafka_1 |[0m at kafka.Kafka$.main(Kafka.scala:92)
[35mkafka_1 |[0m at kafka.Kafka.main(Kafka.scala)
[35mkafka_1 |[0m Caused by: java.net.BindException: Cannot assign requested address
[35mkafka_1 |[0m at sun.nio.ch.Net.bind0(Native Method)
[35mkafka_1 |[0m at sun.nio.ch.Net.bind(Net.java:433)
[35mkafka_1 |[0m at sun.nio.ch.Net.bind(Net.java:425)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:67)
[35mkafka_1 |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:400)
[35mkafka_1 |[0m ... 11 more
[35mkafka_1 |[0m [2018-07-15 09:54:38,144] INFO [KafkaServer id=1] shutting down (kafka.server.KafkaServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,147] INFO [SocketServer brokerId=1] Stopping socket server request processors (kafka.network.SocketServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,149] INFO [SocketServer brokerId=1] Stopped socket server request processors (kafka.network.SocketServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,160] INFO Shutting down. (kafka.log.LogManager)
[35mkafka_1 |[0m [2018-07-15 09:54:38,208] INFO Shutdown complete. (kafka.log.LogManager)
I wonder whether these containers actually need the IP 123.345.567.
Maybe they need some internal IP to communicate between containers?
Or do I need some command to check or change the Docker network settings?
Thanks for any suggestion!
First of all, 123.345.567 has only three dotted groups instead of four (and 345 and 567 exceed the 255 octet limit), so it isn't even a valid fake IP
Docker compose sets up a DNS network for you using the compose service names. You will never need the external address of your Mac inside a container.
For a fully working Compose file, look at Confluent's. Start with one Zookeeper and Broker, and get that working (Kafka won't run faster on a single machine with multiple containers because they all share one disk)
I also suggest not adding a random chroot to only one of your Zookeepers (/kafka_test)

Spring Boot & Kafka, Producer thrown exception with key='null'

I'm trying to use Spring Boot with Kafka and ZooKeeper with Docker :
docker-compose.yml:
version: '2'
services:
zookeeper:
image: wurstmeister/zookeeper
restart: always
ports:
- "2181:2181"
kafka:
image: wurstmeister/kafka
restart: always
ports:
- "9092:9092"
environment:
KAFKA_ADVERTISED_HOST_NAME: localhost
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
docker ps output:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
980e6b09f4e3 wurstmeister/kafka "start-kafka.sh" 29 minutes ago Up 29 minutes 0.0.0.0:9092->9092/tcp samplespringkafkaproducerconsumermaster_kafka_1
64519d4808aa wurstmeister/zookeeper "/bin/sh -c '/usr/sb…" 2 hours ago Up 29 minutes 22/tcp, 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp samplespringkafkaproducerconsumermaster_zookeeper_1
docker-compose up output log:
kafka_1 | [2018-01-12 13:14:49,545] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,546] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,546] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,547] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,547] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,548] INFO Client environment:os.version=4.9.60-linuxkit-aufs (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,548] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,549] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,549] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,552] INFO Initiating client connection, connectString=zookeeper:2181 sessionTimeout=6000 watcher=org.I0Itec.zkclient.ZkClient#1534f01b (org.apache.zookeeper.ZooKeeper)
kafka_1 | [2018-01-12 13:14:49,574] INFO Waiting for keeper state SyncConnected (org.I0Itec.zkclient.ZkClient)
kafka_1 | [2018-01-12 13:14:49,578] INFO Opening socket connection to server samplespringkafkaproducerconsumermaster_zookeeper_1.samplespringkafkaproducerconsumermaster_default/192.168.32.2:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
zookeeper_1 | 2018-01-12 13:14:49,591 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxnFactory#192] - Accepted socket connection from /192.168.32.3:51466
kafka_1 | [2018-01-12 13:14:49,593] INFO Socket connection established to samplespringkafkaproducerconsumermaster_zookeeper_1.samplespringkafkaproducerconsumermaster_default/192.168.32.2:2181, initiating session (org.apache.zookeeper.ClientCnxn)
zookeeper_1 | 2018-01-12 13:14:49,600 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#928] - Client attempting to establish new session at /192.168.32.3:51466
zookeeper_1 | 2018-01-12 13:14:49,603 [myid:] - INFO [SyncThread:0:FileTxnLog#203] - Creating new log file: log.fd
zookeeper_1 | 2018-01-12 13:14:49,613 [myid:] - INFO [SyncThread:0:ZooKeeperServer#673] - Established session 0x160ea8232b00000 with negotiated timeout 6000 for client /192.168.32.3:51466
kafka_1 | [2018-01-12 13:14:49,616] INFO Session establishment complete on server samplespringkafkaproducerconsumermaster_zookeeper_1.samplespringkafkaproducerconsumermaster_default/192.168.32.2:2181, sessionid = 0x160ea8232b00000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
kafka_1 | [2018-01-12 13:14:49,619] INFO zookeeper state changed (SyncConnected) (org.I0Itec.zkclient.ZkClient)
kafka_1 | [2018-01-12 13:14:49,992] INFO Cluster ID = Fgy9ybPPQQ-QdLINzHpmVA (kafka.server.KafkaServer)
kafka_1 | [2018-01-12 13:14:50,003] WARN No meta.properties file under dir /kafka/kafka-logs-980e6b09f4e3/meta.properties (kafka.server.BrokerMetadataCheckpoint)
kafka_1 | [2018-01-12 13:14:50,065] INFO [ThrottledRequestReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
kafka_1 | [2018-01-12 13:14:50,065] INFO [ThrottledRequestReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
kafka_1 | [2018-01-12 13:14:50,067] INFO [ThrottledRequestReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
kafka_1 | [2018-01-12 13:14:50,167] INFO Log directory '/kafka/kafka-logs-980e6b09f4e3' not found, creating it. (kafka.log.LogManager)
kafka_1 | [2018-01-12 13:14:50,183] INFO Loading logs. (kafka.log.LogManager)
kafka_1 | [2018-01-12 13:14:50,199] INFO Logs loading complete in 15 ms. (kafka.log.LogManager)
kafka_1 | [2018-01-12 13:14:50,283] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
kafka_1 | [2018-01-12 13:14:50,291] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
kafka_1 | [2018-01-12 13:14:50,633] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
kafka_1 | [2018-01-12 13:14:50,639] INFO [SocketServer brokerId=1005] Started 1 acceptor threads (kafka.network.SocketServer)
kafka_1 | [2018-01-12 13:14:50,673] INFO [ExpirationReaper-1005-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,674] INFO [ExpirationReaper-1005-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,675] INFO [ExpirationReaper-1005-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,691] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
kafka_1 | [2018-01-12 13:14:50,753] INFO [ExpirationReaper-1005-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,757] INFO [ExpirationReaper-1005-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,762] INFO [ExpirationReaper-1005-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
kafka_1 | [2018-01-12 13:14:50,777] INFO Creating /controller (is it secure? false) (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2018-01-12 13:14:50,791] INFO [GroupCoordinator 1005]: Starting up. (kafka.coordinator.group.GroupCoordinator)
kafka_1 | [2018-01-12 13:14:50,791] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2018-01-12 13:14:50,793] INFO [GroupCoordinator 1005]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
kafka_1 | [2018-01-12 13:14:50,798] INFO [GroupMetadataManager brokerId=1005] Removed 0 expired offsets in 5 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
kafka_1 | [2018-01-12 13:14:50,811] INFO [ProducerId Manager 1005]: Acquired new producerId block (brokerId:1005,blockStartProducerId:5000,blockEndProducerId:5999) by writing to Zk with path version 6 (kafka.coordinator.transaction.ProducerIdManager)
kafka_1 | [2018-01-12 13:14:50,848] INFO [TransactionCoordinator id=1005] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
kafka_1 | [2018-01-12 13:14:50,850] INFO [Transaction Marker Channel Manager 1005]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
kafka_1 | [2018-01-12 13:14:50,850] INFO [TransactionCoordinator id=1005] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
kafka_1 | [2018-01-12 13:14:50,949] INFO Creating /brokers/ids/1005 (is it secure? false) (kafka.utils.ZKCheckedEphemeral)
zookeeper_1 | 2018-01-12 13:14:50,952 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#649] - Got user-level KeeperException when processing sessionid:0x160ea8232b00000 type:create cxid:0x70 zxid:0x102 txntype:-1 reqpath:n/a Error Path:/brokers Error:KeeperErrorCode = NodeExists for /brokers
zookeeper_1 | 2018-01-12 13:14:50,952 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#649] - Got user-level KeeperException when processing sessionid:0x160ea8232b00000 type:create cxid:0x71 zxid:0x103 txntype:-1 reqpath:n/a Error Path:/brokers/ids Error:KeeperErrorCode = NodeExists for /brokers/ids
kafka_1 | [2018-01-12 13:14:50,957] INFO Result of znode creation is: OK (kafka.utils.ZKCheckedEphemeral)
kafka_1 | [2018-01-12 13:14:50,959] INFO Registered broker 1005 at path /brokers/ids/1005 with addresses: EndPoint(localhost,9092,ListenerName(PLAINTEXT),PLAINTEXT) (kafka.utils.ZkUtils)
kafka_1 | [2018-01-12 13:14:50,961] WARN No meta.properties file under dir /kafka/kafka-logs-980e6b09f4e3/meta.properties (kafka.server.BrokerMetadataCheckpoint)
kafka_1 | [2018-01-12 13:14:50,992] INFO Kafka version : 1.0.0 (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2018-01-12 13:14:50,993] INFO Kafka commitId : aaa7af6d4a11b29d (org.apache.kafka.common.utils.AppInfoParser)
kafka_1 | [2018-01-12 13:14:51,004] INFO [KafkaServer id=1005] started (kafka.server.KafkaServer)
zookeeper_1 | 2018-01-12 13:14:51,263 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#649] - Got user-level KeeperException when processing sessionid:0x160ea8232b00000 type:delete cxid:0xe3 zxid:0x105 txntype:-1 reqpath:n/a Error Path:/admin/preferred_replica_election Error:KeeperErrorCode = NoNode for /admin/preferred_replica_election
kafka_1 | [2018-01-12 13:24:50,793] INFO [GroupMetadataManager brokerId=1005] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
kafka_1 | [2018-01-12 13:34:50,795] INFO [GroupMetadataManager brokerId=1005] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
Kafka maven dependency in Producer and Consumer:
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>1.5.9.RELEASE</version>
<relativePath/>
</parent>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
application.properties in Producer:
spring.kafka.producer.bootstrap-servers=0.0.0.0:9092
spring.kafka.consumer.topic=kafka_topic
server.port=8080
application.properties in Consumer:
spring.kafka.consumer.bootstrap-servers=0.0.0.0:9092
spring.kafka.consumer.group-id=WorkUnitApp
spring.kafka.consumer.topic=kafka_topic
server.port=8081
Consumer:
#Component
public class Consumer {
private static final Logger LOGGER = LoggerFactory.getLogger(Consumer.class);
#KafkaListener(topics = "${spring.kafka.consumer.topic}")
public void receive(ConsumerRecord<?, ?> consumerRecord) {
LOGGER.info("received payload='{}'", consumerRecord.toString());
}
}
Producer:
#Component
public class Producer {
private static final Logger LOGGER = LoggerFactory.getLogger(Producer.class);
#Autowired
private KafkaTemplate<String, String> kafkaTemplate;
public void send(String topic, String payload) {
LOGGER.info("sending payload='{}' to topic='{}'", payload, topic);
kafkaTemplate.send(topic, payload);
}
}
ConsumerConfig log:
2018-01-12 15:25:48.220 INFO 20919 --- [ main] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [0.0.0.0:9092]
check.crcs = true
client.id = consumer-1
connections.max.idle.ms = 540000
enable.auto.commit = true
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = WorkUnitApp
heartbeat.interval.ms = 3000
interceptor.classes = null
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.ms = 50
request.timeout.ms = 305000
retry.backoff.ms = 100
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
ProducerConfig log:
2018-01-12 15:26:27.956 INFO 20924 --- [nio-8080-exec-1] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = 1
batch.size = 16384
block.on.buffer.full = false
bootstrap.servers = [0.0.0.0:9092]
buffer.memory = 33554432
client.id = producer-1
compression.type = none
connections.max.idle.ms = 540000
interceptor.classes = null
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.fetch.timeout.ms = 60000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 0
retry.backoff.ms = 100
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
timeout.ms = 30000
value.serializer = class org.apache.kafka.common.serialization.StringSerializer
When i try to send a message I get an exception:
producer.send("kafka_topic", "test")
exception log:
2018-01-12 15:26:27.975 INFO 20924 --- [nio-8080-exec-1] o.a.kafka.common.utils.AppInfoParser : Kafka version : 0.10.1.1
2018-01-12 15:26:27.975 INFO 20924 --- [nio-8080-exec-1] o.a.kafka.common.utils.AppInfoParser : Kafka commitId : f10ef2720b03b247
2018-01-12 15:26:58.152 ERROR 20924 --- [ad | producer-1] o.s.k.support.LoggingProducerListener : Exception thrown when sending a message with key='null' and payload='test' to topic kafka_topic:
org.apache.kafka.common.errors.TimeoutException: Expiring 1 record(s) for kafka_topic-0 due to 30033 ms has passed since batch creation plus linger time
How to fix it ?
The problem is not with sending the key as null; rather, the connection to the broker may not be established.
Try using a local Kafka installation.
If you are on a Mac, note that Docker for Mac has some networking
limitations:
https://docs.docker.com/docker-for-mac/networking/#known-limitations-use-cases-and-workarounds
I ran into the same issue. The issue was with my docker-compose file. I'm not 100% sure, but I think KAFKA_ADVERTISED_HOST_NAME and KAFKA_ADVERTISED_LISTENERS both need to reference localhost. My working compose file:
version: '2'
networks:
sb_net:
driver: bridge
services:
zookeeper:
image: confluentinc/cp-zookeeper:latest
hostname: zookeeper
networks:
- sb_net
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka:
image: confluentinc/cp-kafka:latest
depends_on:
- zookeeper
networks:
- sb_net
ports:
- "9092:9092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_HOST_NAME: localhost
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
My mistake — it was an oversight; you need to add a link:
links:
- zookeeper:zookeeper
full docker-compose.yml:
version: '2'
services:
zookeeper:
image: wurstmeister/zookeeper
container_name: zookeeper
restart: always
ports:
- 2181:2181
kafka:
image: wurstmeister/kafka
container_name: kafka
restart: always
ports:
- 9092:9092
depends_on:
- zookeeper
links:
- zookeeper:zookeeper
environment:
KAFKA_ADVERTISED_HOST_NAME: localhost
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
I got the same problem: Kafka only allows 127.0.0.1/localhost by default.
My solution:
Add this line in the Kafka server.properties file, and restart the Kafka service:
listeners=PLAINTEXT://192.168.31.72:9092

Resources