I use Kafka, ZooKeeper, and Elasticsearch — 3 nodes of Kafka and 3 nodes of ZooKeeper.
I run docker-compose.yml on my Mac.
123.345.567 (a placeholder) is the output of the command below:
ifconfig | grep 'inet 192'| awk '{ print $2}'
docker-compose.yml is
version: '2'
services:
zookeeper:
ports:
- "2181:2181"
- "2888:2888"
- "3888:3888"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2181
follow_to_leader_port: 2888
leader_select_port: 3888
environment:
service1: 0.0.0.0:2888:3888
service2: 123.345.567:2889:3889
service3: 123.345.567:2890:3890
clientPort: 2181
ZOO_MY_ID: 1
initLimit: 2
zookeeper2:
ports:
- "2182:2182"
- "2889:2889"
- "3889:3889"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2182
follow_to_leader_port: 2889
leader_select_port: 3889
environment:
service1: 123.345.567:2888:3888
service2: 0.0.0.0:2889:3889
service3: 123.345.567:2890:3890
clientPort: 2182
ZOO_MY_ID: 2
initLimit: 2
zookeeper3:
ports:
- "2183:2183"
- "2890:2890"
- "3890:3890"
build:
context: /docker/zookeeper
dockerfile: Dockerfile
args:
clientPort: 2183
follow_to_leader_port: 2890
leader_select_port: 3890
environment:
service1: 123.345.567:2888:3888
service2: 123.345.567:2889:3889
service3: 0.0.0.0:2890:3890
clientPort: 2183
initLimit: 2
ZOO_MY_ID: 3
kafka:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9096:9096"
environment:
broker_id: 1
listeners: PLAINTEXT://123.345.567:9096
advertised_listeners: PLAINTEXT://123.345.567:9096
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
kafka2:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9097:9097"
environment:
broker_id: 2
listeners: PLAINTEXT://123.345.567:9097
advertised_listeners: PLAINTEXT://123.345.567:9097
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
kafka3:
build:
context: /docker/kafka
dockerfile: Dockerfile
ports:
- "9098:9098"
environment:
broker_id: 3
listeners: PLAINTEXT://123.345.567:9098
advertised_listeners: PLAINTEXT://123.345.567:9098
zookeeper_connect: 123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
docker ps output
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
82c0f30cac77 compose_zookeeper3 "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2183->2183/tcp, 0.0.0.0:2890->2890/tcp, 0.0.0.0:3890->3890/tcp compose_zookeeper3_1
ebd93d99d0d2 compose_kafka "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9096->9096/tcp compose_kafka_1
be56ebaee602 compose_kafka2 "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9097->9097/tcp compose_kafka2_1
8a68874e52f9 compose_kafka3 "/opt/startkafka.sh" About an hour ago Up 40 minutes 0.0.0.0:9098->9098/tcp compose_kafka3_1
000a686a2772 compose_zookeeper2 "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2182->2182/tcp, 0.0.0.0:2889->2889/tcp, 0.0.0.0:3889->3889/tcp compose_zookeeper2_1
d3fc5ad9b8f8 compose_zookeeper "/opt/startzookeeper…" About an hour ago Up 40 minutes 0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp compose_zookeeper_1
some configuration in kafka
broker.id=2
listeners=PLAINTEXT://123.345.567:9097
advertised.listeners=PLAINTEXT://123.345.567:9097
zookeeper.connect=123.345.567:2181,123.345.567:2182,123.345.567:2183/kafka_test
some configuration in zookeeper2
clientPort=2182
server.1=123.345.567:2888:3888
server.2=0.0.0.0:2889:3889
server.3=123.345.567:2890:3890
some error log
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,083 [myid:3] - INFO [QuorumPeer[myid=3]/0.0.0.0:2183:FastLeaderElection#813] - New election. My id = 3, proposed zxid=0x100000047
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,113 [myid:3] - WARN [RecvWorker:1:QuorumCnxManager$RecvWorker#1025] - Connection broken for id 1, my id = 3, error =
[32mzookeeper3_1 |[0m java.io.EOFException
[32mzookeeper3_1 |[0m at java.io.DataInputStream.readInt(DataInputStream.java:392)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager$RecvWorker.run(QuorumCnxManager.java:1010)
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,123 [myid:3] - WARN [RecvWorker:1:QuorumCnxManager$RecvWorker#1028] - Interrupting SendWorker
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,125 [myid:3] - INFO [WorkerReceiver[myid=3]:FastLeaderElection#595] - Notification: 1 (message format version), 3 (n.leader), 0x100000047 (n.zxid), 0x1 (n.round), LOOKING (n.state), 3 (n.sid), 0x1 (n.peerEpoch) LOOKING (my state)
[32mzookeeper3_1 |[0m 2018-07-15 09:54:19,126 [myid:3] - WARN [SendWorker:1:QuorumCnxManager$SendWorker#941] - Interrupted while waiting for message on queue
[32mzookeeper3_1 |[0m java.lang.InterruptedException
[32mzookeeper3_1 |[0m at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2014)
[32mzookeeper3_1 |[0m at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2088)
[32mzookeeper3_1 |[0m at java.util.concurrent.ArrayBlockingQueue.poll(ArrayBlockingQueue.java:418)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager.pollSendQueue(QuorumCnxManager.java:1094)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager.access$700(QuorumCnxManager.java:74)
[32mzookeeper3_1 |[0m at org.apache.zookeeper.server.quorum.QuorumCnxManager$SendWorker.run(QuorumCnxManager.java:929)
kafka
[35mkafka_1 |[0m [2018-07-15 09:54:38,137] ERROR [KafkaServer id=1] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[35mkafka_1 |[0m kafka.common.KafkaException: Socket server failed to bind to 123.345.567:9096: Cannot assign requested address.
[35mkafka_1 |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:404)
[35mkafka_1 |[0m at kafka.network.Acceptor.<init>(SocketServer.scala:308)
[35mkafka_1 |[0m at kafka.network.SocketServer$$anonfun$createAcceptorAndProcessors$1.apply(SocketServer.scala:126)
[35mkafka_1 |[0m at kafka.network.SocketServer$$anonfun$createAcceptorAndProcessors$1.apply(SocketServer.scala:122)
[35mkafka_1 |[0m at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
[35mkafka_1 |[0m at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
[35mkafka_1 |[0m at kafka.network.SocketServer.createAcceptorAndProcessors(SocketServer.scala:122)
[35mkafka_1 |[0m at kafka.network.SocketServer.startup(SocketServer.scala:84)
[35mkafka_1 |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:247)
[35mkafka_1 |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)
[35mkafka_1 |[0m at kafka.Kafka$.main(Kafka.scala:92)
[35mkafka_1 |[0m at kafka.Kafka.main(Kafka.scala)
[35mkafka_1 |[0m Caused by: java.net.BindException: Cannot assign requested address
[35mkafka_1 |[0m at sun.nio.ch.Net.bind0(Native Method)
[35mkafka_1 |[0m at sun.nio.ch.Net.bind(Net.java:433)
[35mkafka_1 |[0m at sun.nio.ch.Net.bind(Net.java:425)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
[35mkafka_1 |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:67)
[35mkafka_1 |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:400)
[35mkafka_1 |[0m ... 11 more
[35mkafka_1 |[0m [2018-07-15 09:54:38,144] INFO [KafkaServer id=1] shutting down (kafka.server.KafkaServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,147] INFO [SocketServer brokerId=1] Stopping socket server request processors (kafka.network.SocketServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,149] INFO [SocketServer brokerId=1] Stopped socket server request processors (kafka.network.SocketServer)
[35mkafka_1 |[0m [2018-07-15 09:54:38,160] INFO Shutting down. (kafka.log.LogManager)
[35mkafka_1 |[0m [2018-07-15 09:54:38,208] INFO Shutdown complete. (kafka.log.LogManager)
I wonder whether these containers actually need the IP 123.345.567.
Maybe they need some internal IP to communicate between containers?
Or do I need some command to check or change the Docker network settings?
Thanks for any suggestions!
First of all, 123.345.567 isn't four numbers, so that's not a valid fake IP
Docker compose sets up a DNS network for you using the compose service names. You will never need the external address of your Mac inside a container.
For a fully working Compose file, look at Confluent's. Start with one Zookeeper and Broker, and get that working (Kafka won't run faster on a single machine with multiple containers because they all share one disk)
I also suggest not adding a random chroot to only one of your Zookeepers (/kafka_test)
Related
We have been facing an issue while trying to spin up a local containerized confluent kafka ecosystem using docker-compose tool.
Host OS (Virtual Machine connecting through RDP)
$ sudo lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 18.04.6 LTS
Release: 18.04
Codename: bionic
$ uname -a
Linux IS*******1 4.15.0-189-generic #200-Ubuntu SMP Wed Jun 22 19:53:37 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
ghosh.sayak#ISRXDLKFD001:~$
Docker Engine
$ sudo docker version
Client: Docker Engine - Community
Version: 20.10.20
API version: 1.41
Go version: go1.18.7
Git commit: 9fdeb9c
Built: Tue Oct 18 18:20:19 2022
OS/Arch: linux/amd64
Context: default
Experimental: true
Server: Docker Engine - Community
Engine:
Version: 20.10.20
API version: 1.41 (minimum version 1.12)
Go version: go1.18.7
Git commit: 03df974
Built: Tue Oct 18 18:18:11 2022
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.6.16
GitCommit: 31aa4358a36870b21a992d3ad2bef29e1d693bec
runc:
Version: 1.1.4
GitCommit: v1.1.4-0-g5fd4c4d
docker-init:
Version: 0.19.0
GitCommit: de40ad0
docker-compose.yaml (Compose created network "kafka_default" using default bridge driver)
---
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:7.3.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
broker:
image: confluentinc/cp-server:7.3.0
hostname: broker
container_name: broker
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9101:9101"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9101
KAFKA_JMX_HOSTNAME: localhost
KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:7.3.0
hostname: schema-registry
container_name: schema-registry
depends_on:
- broker
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
connect:
image: cnfldemos/cp-server-connect-datagen:0.6.0-7.3.0
hostname: connect
container_name: connect
depends_on:
- broker
- schema-registry
ports:
- "8083:8083"
environment:
CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
CONNECT_REST_ADVERTISED_HOST_NAME: connect
CONNECT_REST_PORT: 8083
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMAS_ENABLE: "false"
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_ZOOKEEPER_CONNECT: 'zookeeper:2181'
CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-6.2.0.jar
CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
CONNECT_PLUGIN_PATH: "/usr/share/java/ConnectPlugin"
CONNECT_LOG4J_ROOT_LOGLEVEL: INFO
CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
CONNECT_TOPIC_CREATION_ENABLE: 'false'
volumes:
- ./connect-plugins:/usr/share/java/ConnectPlugin
control-center:
image: confluentinc/cp-enterprise-control-center:7.3.0
hostname: control-center
container_name: control-center
depends_on:
- broker
- schema-registry
- connect
ports:
- "9021:9021"
environment:
CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083'
CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
CONTROL_CENTER_REPLICATION_FACTOR: 1
CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
CONFLUENT_METRICS_TOPIC_REPLICATION: 1
PORT: 9021
Now, while executing the command $ sudo docker-compose up -d, we observed that only the "zookeeper" container is getting Up, and the "broker" i.e. Kafka container is Exited with Status 1 with the following error -
$ sudo docker container logs -t --details broker
2023-02-08T14:07:30.660123686Z ===> User
2023-02-08T14:07:30.662518281Z uid=1000(appuser) gid=1000(appuser) groups=1000(appuser)
2023-02-08T14:07:30.666677333Z ===> Configuring ...
2023-02-08T14:07:34.144645597Z ===> Running preflight checks ...
2023-02-08T14:07:34.147953062Z ===> Check if /var/lib/kafka/data is writable ...
2023-02-08T14:07:34.564183498Z ===> Check if Zookeeper is healthy ...
2023-02-08T14:07:35.861623527Z [2023-02-08 14:07:35,856] INFO Client environment:zookeeper.version=3.6.3--6401e4ad2087061bc6b9f80dec2d69f2e3c8660a, built on 04/08/2021 16:35 GMT (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861652177Z [2023-02-08 14:07:35,857] INFO Client environment:host.name=broker (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861683838Z [2023-02-08 14:07:35,857] INFO Client environment:java.version=11.0.16.1 (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861689083Z [2023-02-08 14:07:35,857] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861694000Z [2023-02-08 14:07:35,857] INFO Client environment:java.home=/usr/lib/jvm/zulu11-ca (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861703288Z [2023-02-08 14:07:35,857] INFO Client environment:java.class.path=/usr/share/java/cp-base-new/slf4j-api-1.7.36.jar:/usr/share/java/cp-base-new/disk-usage-agent-7.3.0.jar:/usr/share/java/cp-base-new/paranamer-2.8.jar:/usr/share/java/cp-base-new/jmx_prometheus_javaagent-0.14.0.jar:/usr/share/java/cp-base-new/jackson-annotations-2.13.2.jar:/usr/share/java/cp-base-new/metrics-core-2.2.0.jar:/usr/share/java/cp-base-new/jolokia-core-1.7.1.jar:/usr/share/java/cp-base-new/slf4j-reload4j-1.7.36.jar:/usr/share/java/cp-base-new/re2j-1.6.jar:/usr/share/java/cp-base-new/snakeyaml-1.30.jar:/usr/share/java/cp-base-new/utility-belt-7.3.0.jar:/usr/share/java/cp-base-new/zookeeper-jute-3.6.3.jar:/usr/share/java/cp-base-new/audience-annotations-0.5.0.jar:/usr/share/java/cp-base-new/scala-collection-compat_2.13-2.6.0.jar:/usr/share/java/cp-base-new/kafka-metadata-7.3.0-ccs.jar:/usr/share/java/cp-base-new/argparse4j-0.7.0.jar:/usr/share/java/cp-base-new/kafka-storage-api-7.3.0-ccs.jar:/usr/share/java/cp-base-new/jopt-simple-5.0.4.jar:/usr/share/java/cp-base-new/common-utils-7.3.0.jar:/usr/share/java/cp-base-new/jackson-dataformat-yaml-2.13.2.jar:/usr/share/java/cp-base-new/logredactor-1.0.10.jar:/usr/share/java/cp-base-new/zookeeper-3.6.3.jar:/usr/share/java/cp-base-new/zstd-jni-1.5.2-1.jar:/usr/share/java/cp-base-new/kafka-raft-7.3.0-ccs.jar:/usr/share/java/cp-base-new/kafka-server-common-7.3.0-ccs.jar:/usr/share/java/cp-base-new/scala-logging_2.13-3.9.4.jar:/usr/share/java/cp-base-new/jose4j-0.7.9.jar:/usr/share/java/cp-base-new/snappy-java-1.1.8.4.jar:/usr/share/java/cp-base-new/scala-reflect-2.13.5.jar:/usr/share/java/cp-base-new/scala-library-2.13.5.jar:/usr/share/java/cp-base-new/scala-java8-compat_2.13-1.0.2.jar:/usr/share/java/cp-base-new/jackson-core-2.13.2.jar:/usr/share/java/cp-base-new/minimal-json-0.9.5.jar:/usr/share/java/cp-base-new/kafka-clients-7.3.0-ccs.jar:/usr/share/java/cp-base-new/jackson-dataformat-csv-2.13.2.jar:/usr/share/java/cp-b
ase-new/jackson-datatype-jdk8-2.13.2.jar:/usr/share/java/cp-base-new/kafka-storage-7.3.0-ccs.jar:/usr/share/java/cp-base-new/lz4-java-1.8.0.jar:/usr/share/java/cp-base-new/jolokia-jvm-1.7.1.jar:/usr/share/java/cp-base-new/jackson-module-scala_2.13-2.13.2.jar:/usr/share/java/cp-base-new/kafka_2.13-7.3.0-ccs.jar:/usr/share/java/cp-base-new/jackson-databind-2.13.2.2.jar:/usr/share/java/cp-base-new/logredactor-metrics-1.0.10.jar:/usr/share/java/cp-base-new/gson-2.9.0.jar:/usr/share/java/cp-base-new/json-simple-1.1.1.jar:/usr/share/java/cp-base-new/reload4j-1.2.19.jar:/usr/share/java/cp-base-new/metrics-core-4.1.12.1.jar:/usr/share/java/cp-base-new/commons-cli-1.4.jar (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861725300Z [2023-02-08 14:07:35,857] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861730555Z [2023-02-08 14:07:35,857] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861734946Z [2023-02-08 14:07:35,857] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861740934Z [2023-02-08 14:07:35,857] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861745386Z [2023-02-08 14:07:35,857] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861749485Z [2023-02-08 14:07:35,857] INFO Client environment:os.version=4.15.0-189-generic (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.861754326Z [2023-02-08 14:07:35,857] INFO Client environment:user.name=appuser (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.862778083Z [2023-02-08 14:07:35,857] INFO Client environment:user.home=/home/appuser (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.862781895Z [2023-02-08 14:07:35,857] INFO Client environment:user.dir=/home/appuser (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.862785144Z [2023-02-08 14:07:35,857] INFO Client environment:os.memory.free=242MB (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.862788600Z [2023-02-08 14:07:35,857] INFO Client environment:os.memory.max=4006MB (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.862792192Z [2023-02-08 14:07:35,857] INFO Client environment:os.memory.total=252MB (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.875374527Z [2023-02-08 14:07:35,869] INFO Initiating client connection, connectString=zookeeper:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher#3c0a50da (org.apache.zookeeper.ZooKeeper)
2023-02-08T14:07:35.875395839Z [2023-02-08 14:07:35,873] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
2023-02-08T14:07:35.898794157Z [2023-02-08 14:07:35,894] INFO jute.maxbuffer value is 1048575 Bytes (org.apache.zookeeper.ClientCnxnSocket)
2023-02-08T14:07:35.910713716Z [2023-02-08 14:07:35,905] INFO zookeeper.request.timeout value is 0. feature enabled=false (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:36.094475506Z [2023-02-08 14:07:36,087] INFO Opening socket connection to server zookeeper/172.19.0.2:2181. (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:36.094498351Z [2023-02-08 14:07:36,090] INFO SASL config status: Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:36.114680668Z [2023-02-08 14:07:36,111] ERROR Unable to open socket to zookeeper/172.19.0.2:2181 (org.apache.zookeeper.ClientCnxnSocketNIO)
2023-02-08T14:07:36.121566533Z [2023-02-08 14:07:36,112] WARN Session 0x0 for sever zookeeper/172.19.0.2:2181, Closing socket connection. Attempting reconnect except it is a SessionExpiredException. (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:36.121606559Z java.net.ConnectException: Connection refused
2023-02-08T14:07:36.121610572Z at java.base/sun.nio.ch.Net.connect0(Native Method)
2023-02-08T14:07:36.121614024Z at java.base/sun.nio.ch.Net.connect(Net.java:483)
2023-02-08T14:07:36.121617448Z at java.base/sun.nio.ch.Net.connect(Net.java:472)
2023-02-08T14:07:36.121620610Z at java.base/sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:692)
2023-02-08T14:07:36.121623850Z at org.apache.zookeeper.ClientCnxnSocketNIO.registerAndConnect(ClientCnxnSocketNIO.java:260)
2023-02-08T14:07:36.121627034Z at org.apache.zookeeper.ClientCnxnSocketNIO.connect(ClientCnxnSocketNIO.java:270)
2023-02-08T14:07:36.121630306Z at org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1177)
2023-02-08T14:07:36.121633562Z at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1210)
2023-02-08T14:07:37.230453807Z [2023-02-08 14:07:37,224] INFO Opening socket connection to server zookeeper/172.19.0.2:2181. (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:37.230488726Z [2023-02-08 14:07:37,224] INFO SASL config status: Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
2023-02-08T14:07:37.230492954Z [2023-02-08 14:07:37,224] ERROR Unable to open socket to zookeeper/172.19.0.2:2181 (org.apache.zookeeper.ClientCnxnSocketNIO)
2023-02-08T14:07:37.230496368Z [2023-02-08 14:07:37,225] WARN Session 0x0 for sever zookeeper/172.19.0.2:2181, Closing socket connection. Attempting reconnect except it is a SessionExpiredException. (org.apache.zookeeper.ClientCnxn)
To debug the connectivity, I have done a couple of preflight checks like -
Created a netshoot container within same "kafka_default" network using sudo docker run -it --net container:zookeeper nicolaka/netshoot
Some useful information -
zookeeper# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.19.0.2 zookeeper
zookeeper# cat /etc/resolv.conf
search corp.***.com
nameserver 127.0.0.11
options ndots:0
Tried nslookup, netcat, ping, nmap, telnet from within the container and the results are following -
zookeeper# nslookup zookeeper
Server: 127.0.0.11
Address: 127.0.0.11#53
Non-authoritative answer:
Name: zookeeper
Address: 172.19.0.2
zookeeper# nc -v -l -p 2181
nc: Address in use
zookeeper# ping -c 2 zookeeper
PING zookeeper (172.19.0.2) 56(84) bytes of data.
64 bytes from zookeeper (172.19.0.2): icmp_seq=1 ttl=64 time=0.054 ms
64 bytes from zookeeper (172.19.0.2): icmp_seq=2 ttl=64 time=0.049 ms
--- zookeeper ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1015ms
rtt min/avg/max/mdev = 0.049/0.051/0.054/0.002 ms
zookeeper# nmap -p0- -v -A -T4 zookeeper
Starting Nmap 7.93 ( https://nmap.org ) at 2023-02-08 15:43 UTC
NSE: Loaded 155 scripts for scanning.
NSE: Script Pre-scanning.
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating SYN Stealth Scan at 15:43
Scanning zookeeper (172.19.0.2) [65536 ports]
Completed SYN Stealth Scan at 15:43, 2.96s elapsed (65536 total ports)
Initiating Service scan at 15:43
Initiating OS detection (try #1) against zookeeper (172.19.0.2)
Retrying OS detection (try #2) against zookeeper (172.19.0.2)
NSE: Script scanning 172.19.0.2.
Initiating NSE at 15:43
Completed NSE at 15:43, 0.01s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Nmap scan report for zookeeper (172.19.0.2)
Host is up (0.000060s latency).
Not shown: 65533 closed tcp ports (reset)
PORT STATE SERVICE VERSION
2181/tcp filtered eforward
8080/tcp filtered http-proxy
39671/tcp filtered unknown
Too many fingerprints match this host to give specific OS details
Network Distance: 0 hops
NSE: Script Post-scanning.
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Initiating NSE at 15:43
Completed NSE at 15:43, 0.00s elapsed
Read data files from: /usr/bin/../share/nmap
OS and Service detection performed. Please report any incorrect results at https://nmap.org/submit/ .
Nmap done: 1 IP address (1 host up) scanned in 5.55 seconds
Raw packets sent: 65551 (2.885MB) | Rcvd: 131094 (5.508MB)
zookeeper# telnet localhost 2181
Connected to localhost
zookeeper# telnet zookeeper 2181
telnet: can't connect to remote host (172.19.0.2): Connection refused
BUT, WHEN, tried from Host Machine, it succeeded -
ghosh.sayak#IS********1:~/Kafka$ telnet localhost 2181
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
^]
telnet> q
Connection closed.
Also, here is the current status of the container -
ghosh.sayak#IS********1:~/Kafka$ sudo docker-compose ps
[sudo] password for ghosh.sayak:
Name Command State Ports
-------------------------------------------------------------------------------------------------------------------
broker /etc/confluent/docker/run Exit 1
connect /etc/confluent/docker/run Exit 1
control-center /etc/confluent/docker/run Exit 1
schema-registry /etc/confluent/docker/run Exit 1
zookeeper /etc/confluent/docker/run Up 0.0.0.0:2181->2181/tcp,:::2181->2181/tcp, 2888/tcp, 3888/tcp
NOTE: Docker Engine has been freshly installed by following this official documentation.
Stuck with this issue! Any help will be much appreciated!
Thanks in advance!
I have SASL/SCRAM config working with confluentinc kafka/zookeeper:
docker-compose.yml
# Based on: https://github.com/iwpnd/tile38-kafka-sasl
version: "2"
services:
zookeeper:
image: confluentinc/cp-zookeeper:6.0.1
hostname: zookeeper
container_name: zookeeper
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://zookeeper:2181
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_SERVER_ID: 3
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/sasl/zookeeper_jaas.conf \
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider \
-Dzookeeper.authProvider.2=org.apache.zookeeper.server.auth.DigestAuthenticationProvider \
-Dquorum.auth.enableSasl=true \
-Dquorum.auth.learnerRequireSasl=true \
-Dquorum.auth.serverRequireSasl=true \
-Dquorum.auth.learner.saslLoginContext=QuorumLearner \
-Dquorum.auth.server.saslLoginContext=QuorumServer \
-Dquorum.cnxn.threads.size=20 \
-DrequireClientAuthScheme=sasl"
volumes:
- ./secrets:/etc/kafka/secrets/sasl
zookeeper-add-kafka-users:
image: confluentinc/cp-kafka:6.0.1
container_name: "zookeeper-add-kafka-users"
depends_on:
- zookeeper
command: "bash -c 'echo Waiting for Zookeeper to be ready... && \
cub zk-ready zookeeper:2181 120 && \
kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[iterations=4096,password=password]' --entity-type users --entity-name admin && \
kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-512=[iterations=4096,password=password]' --entity-type users --entity-name client '"
environment:
KAFKA_BROKER_ID: ignored
KAFKA_ZOOKEEPER_CONNECT: ignored
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/sasl/kafka_server_jaas.conf
volumes:
- ./secrets:/etc/kafka/secrets/sasl
broker:
image: confluentinc/cp-kafka:6.0.1
hostname: broker
container_name: broker
depends_on:
- zookeeper
ports:
- "9091:9091"
- "9101:9101"
- "9092:9092"
expose:
- "29090"
environment:
KAFKA_OPTS: "-Dzookeeper.sasl.client=true -Djava.security.auth.login.config=/etc/kafka/secrets/sasl/kafka_server_jaas.conf"
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT,SASL_PLAINHOST:SASL_PLAINTEXT
KAFKA_LISTENERS: INSIDE://:29090,OUTSIDE://:9092,SASL_PLAINHOST://:9091
KAFKA_ADVERTISED_LISTENERS: INSIDE://broker:29090,OUTSIDE://localhost:9092,SASL_PLAINHOST://broker:9091
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9101
KAFKA_JMX_HOSTNAME: localhost
KAFKA_SECURITY_INTER_BROKER_PROTOCAL: SASL_PLAINTEXT
KAFKA_SASL_ENABLED_MECHANISMS: SCRAM-SHA-512
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAINTEXT
volumes:
- ./secrets:/etc/kafka/secrets/sasl
secrets/kafka_server_jaas.conf
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="password"
user_admin="password"
user_client="password";
};
Client {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="password";
};
KafkaClient {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="client"
password="password";
};
secrets/zk_server_jaas.conf
Server {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="admin-secret"
user_admin="admin-secret";
};
secrets/zookeeper_jaas.conf (zk_server_jaas.conf)
Server {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_admin="password";
};
QuorumServer {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
QuorumLearner {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
The above config works as expected with the confluentinc/cp-zookeeper:6.0.1 image, but when I change the images to wurstmeister/zookeeper and wurstmeister/kafka:2.13-2.7.1 I get the errors below:
[36mbroker |[0m [Configuring] 'security.inter.broker.protocal' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'jmx.port' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'advertised.listeners' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'port' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'inter.broker.listener.name' in '/opt/kafka/config/server.properties'
[36mbroker |[0m Excluding KAFKA_OPTS from broker config
[36mbroker |[0m Excluding KAFKA_HOME from broker config
[36mbroker |[0m [Configuring] 'log.dirs' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'listeners' in '/opt/kafka/config/server.properties'
[36mbroker |[0m Excluding KAFKA_VERSION from broker config
[33mzookeeper |[0m ZooKeeper JMX enabled by default
[33mzookeeper |[0m Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,364 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,370 [myid:] - INFO [main:DatadirCleanupManager#78] - autopurge.snapRetainCount set to 3
[33mzookeeper |[0m 2021-12-04 13:17:55,370 [myid:] - INFO [main:DatadirCleanupManager#79] - autopurge.purgeInterval set to 1
[33mzookeeper |[0m 2021-12-04 13:17:55,371 [myid:] - WARN [main:QuorumPeerMain#116] - Either no config or no quorum defined in config, running in standalone mode
[33mzookeeper |[0m 2021-12-04 13:17:55,376 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[33mzookeeper |[0m 2021-12-04 13:17:55,396 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[33mzookeeper |[0m 2021-12-04 13:17:55,396 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[33mzookeeper |[0m 2021-12-04 13:17:55,397 [myid:] - INFO [main:ZooKeeperServerMain#98] - Starting server
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 04:05 GMT
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:host.name=zookeeper
[33mzookeeper |[0m 2021-12-04 13:17:55,409 [myid:] - INFO [main:Environment#100] - Server environment:java.version=1.7.0_65
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.vendor=Oracle Corporation
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.home=/usr/lib/jvm/java-7-openjdk-amd64/jre
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.class.path=/opt/zookeeper-3.4.13/bin/../build/classes:/opt/zookeeper-3.4.13/bin/../build/lib/*.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-log4j12-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-api-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/netty-3.10.6.Final.jar:/opt/zookeeper-3.4.13/bin/../lib/log4j-1.2.17.jar:/opt/zookeeper-3.4.13/bin/../lib/jline-0.9.94.jar:/opt/zookeeper-3.4.13/bin/../lib/audience-annotations-0.5.0.jar:/opt/zookeeper-3.4.13/bin/../zookeeper-3.4.13.jar:/opt/zookeeper-3.4.13/bin/../src/java/lib/*.jar:/opt/zookeeper-3.4.13/bin/../conf:
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib
[33mzookeeper |[0m 2021-12-04 13:17:55,410 [myid:] - INFO [main:Environment#100] - Server environment:java.io.tmpdir=/tmp
[33mzookeeper |[0m 2021-12-04 13:17:55,413 [myid:] - INFO [main:Environment#100] - Server environment:java.compiler=<NA>
[33mzookeeper |[0m 2021-12-04 13:17:55,413 [myid:] - INFO [main:Environment#100] - Server environment:os.name=Linux
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:os.arch=amd64
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:os.version=5.11.0-40-generic
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:user.name=root
[33mzookeeper |[0m 2021-12-04 13:17:55,414 [myid:] - INFO [main:Environment#100] - Server environment:user.home=/root
[33mzookeeper |[0m 2021-12-04 13:17:55,415 [myid:] - INFO [main:Environment#100] - Server environment:user.dir=/opt/zookeeper-3.4.13
[33mzookeeper |[0m 2021-12-04 13:17:55,422 [myid:] - INFO [main:ZooKeeperServer#836] - tickTime set to 2000
[33mzookeeper |[0m 2021-12-04 13:17:55,425 [myid:] - INFO [main:ZooKeeperServer#845] - minSessionTimeout set to -1
[33mzookeeper |[0m 2021-12-04 13:17:55,426 [myid:] - INFO [main:ZooKeeperServer#854] - maxSessionTimeout set to -1
[33mzookeeper |[0m 2021-12-04 13:17:55,443 [myid:] - INFO [main:ServerCnxnFactory#117] - Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory
[33mzookeeper |[0m 2021-12-04 13:17:55,453 [myid:] - INFO [main:NIOServerCnxnFactory#89] - binding to port 0.0.0.0/0.0.0.0:2181
[32mzookeeper-add-kafka-users |[0m Waiting for Zookeeper to be ready...
[32mzookeeper-add-kafka-users |[0m bash: line 1: cub: command not found
[36mbroker |[0m [Configuring] 'zookeeper.connect' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'sasl.mechanism.inter.broker.protocol' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'offsets.topic.replication.factor' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'listener.security.protocol.map' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'jmx.hostname' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'sasl.enabled.mechanisms' in '/opt/kafka/config/server.properties'
[36mbroker |[0m [Configuring] 'broker.id' in '/opt/kafka/config/server.properties'
[32mzookeeper-add-kafka-users exited with code 127
[0m[36mbroker |[0m [2021-12-04 13:17:58,599] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[36mbroker |[0m [2021-12-04 13:17:59,195] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[36mbroker |[0m [2021-12-04 13:17:59,343] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[36mbroker |[0m [2021-12-04 13:17:59,357] INFO starting (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,360] INFO Connecting to zookeeper on zookeeper:2181 (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,398] INFO [ZooKeeperClient Kafka server] Initializing a new session to zookeeper:2181. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,429] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,434] INFO Client environment:host.name=broker (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.version=1.8.0_292 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.home=/usr/lib/jvm/zulu8-ca/jre (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,435] INFO Client environment:java.class.path=/opt/kafka/bin/../libs/activation-1.1.1.jar:/opt/kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/opt/kafka/bin/../libs/argparse4j-0.7.0.jar:/opt/kafka/bin/../libs/audience-annotations-0.5.0.jar:/opt/kafka/bin/../libs/commons-cli-1.4.jar:/opt/kafka/bin/../libs/commons-lang3-3.8.1.jar:/opt/kafka/bin/../libs/connect-api-2.7.1.jar:/opt/kafka/bin/../libs/connect-basic-auth-extension-2.7.1.jar:/opt/kafka/bin/../libs/connect-file-2.7.1.jar:/opt/kafka/bin/../libs/connect-json-2.7.1.jar:/opt/kafka/bin/../libs/connect-mirror-2.7.1.jar:/opt/kafka/bin/../libs/connect-mirror-client-2.7.1.jar:/opt/kafka/bin/../libs/connect-runtime-2.7.1.jar:/opt/kafka/bin/../libs/connect-transforms-2.7.1.jar:/opt/kafka/bin/../libs/hk2-api-2.6.1.jar:/opt/kafka/bin/../libs/hk2-locator-2.6.1.jar:/opt/kafka/bin/../libs/hk2-utils-2.6.1.jar:/opt/kafka/bin/../libs/jackson-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-core-2.10.5.jar:/opt/kafka/bin/../libs/jackson-databind-2.10.5.1.jar:/opt/kafka/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/opt/kafka/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/opt/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-paranamer-2.10.5.jar:/opt/kafka/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/opt/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/opt/kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/opt/kafka/bin/../libs/jakarta.inject-2.6.1.jar:/opt/kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/opt/kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/opt/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/opt/kafka/bin/../libs/javassist-3.25.0-GA.jar:/opt/kafka/bin/../libs/javassist-3.26.0-GA.jar:/opt/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/opt/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/opt/kafka/bin/.
./libs/jaxb-api-2.3.0.jar:/opt/kafka/bin/../libs/jersey-client-2.31.jar:/opt/kafka/bin/../libs/jersey-common-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-2.31.jar:/opt/kafka/bin/../libs/jersey-container-servlet-core-2.31.jar:/opt/kafka/bin/../libs/jersey-hk2-2.31.jar:/opt/kafka/bin/../libs/jersey-media-jaxb-2.31.jar:/opt/kafka/bin/../libs/jersey-server-2.31.jar:/opt/kafka/bin/../libs/jetty-client-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-continuation-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-http-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-io-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-security-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-server-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-servlet-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-servlets-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-util-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jetty-util-ajax-9.4.38.v20210224.jar:/opt/kafka/bin/../libs/jopt-simple-5.0.4.jar:/opt/kafka/bin/../libs/kafka-clients-2.7.1.jar:/opt/kafka/bin/../libs/kafka-log4j-appender-2.7.1.jar:/opt/kafka/bin/../libs/kafka-raft-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-examples-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-scala_2.13-2.7.1.jar:/opt/kafka/bin/../libs/kafka-streams-test-utils-2.7.1.jar:/opt/kafka/bin/../libs/kafka-tools-2.7.1.jar:/opt/kafka/bin/../libs/kafka_2.13-2.7.1-sources.jar:/opt/kafka/bin/../libs/kafka_2.13-2.7.1.jar:/opt/kafka/bin/../libs/log4j-1.2.17.jar:/opt/kafka/bin/../libs/lz4-java-1.7.1.jar:/opt/kafka/bin/../libs/maven-artifact-3.6.3.jar:/opt/kafka/bin/../libs/metrics-core-2.2.0.jar:/opt/kafka/bin/../libs/netty-buffer-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-codec-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-handler-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-resolver-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-4.1.59.Final.jar
:/opt/kafka/bin/../libs/netty-transport-native-epoll-4.1.59.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-unix-common-4.1.59.Final.jar:/opt/kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/opt/kafka/bin/../libs/paranamer-2.8.jar:/opt/kafka/bin/../libs/plexus-utils-3.2.1.jar:/opt/kafka/bin/../libs/reflections-0.9.12.jar:/opt/kafka/bin/../libs/rocksdbjni-5.18.4.jar:/opt/kafka/bin/../libs/scala-collection-compat_2.13-2.2.0.jar:/opt/kafka/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/opt/kafka/bin/../libs/scala-library-2.13.3.jar:/opt/kafka/bin/../libs/scala-logging_2.13-3.9.2.jar:/opt/kafka/bin/../libs/scala-reflect-2.13.3.jar:/opt/kafka/bin/../libs/slf4j-api-1.7.30.jar:/opt/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/opt/kafka/bin/../libs/snappy-java-1.1.7.7.jar:/opt/kafka/bin/../libs/zookeeper-3.5.9.jar:/opt/kafka/bin/../libs/zookeeper-jute-3.5.9.jar:/opt/kafka/bin/../libs/zstd-jni-1.4.5-6.jar (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,437] INFO Client environment:java.library.path=/usr/lib/jvm/zulu8-ca/jre/lib/amd64/server:/usr/lib/jvm/zulu8-ca/jre/lib/amd64:/usr/lib/jvm/zulu8-ca/jre/../lib/amd64:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,437] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,440] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,441] INFO Client environment:os.version=5.11.0-40-generic (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,442] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.free=1014MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,443] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,447] INFO Initiating client connection, connectString=zookeeper:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$#4cc451f2 (org.apache.zookeeper.ZooKeeper)
[36mbroker |[0m [2021-12-04 13:17:59,459] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[36mbroker |[0m [2021-12-04 13:17:59,469] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[36mbroker |[0m [2021-12-04 13:17:59,481] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,580] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[36mbroker |[0m [2021-12-04 13:17:59,582] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[36mbroker |[0m [2021-12-04 13:17:59,595] INFO Opening socket connection to server zookeeper/172.20.0.2:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,605 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxnFactory#215] - Accepted socket connection from /172.20.0.3:57480
[36mbroker |[0m [2021-12-04 13:17:59,609] INFO Socket connection established, initiating session, client: /172.20.0.3:57480, server: zookeeper/172.20.0.2:2181 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,621 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#949] - Client attempting to establish new session at /172.20.0.3:57480
[33mzookeeper |[0m 2021-12-04 13:17:59,624 [myid:] - INFO [SyncThread:0:FileTxnLog#213] - Creating new log file: log.1
[36mbroker |[0m [2021-12-04 13:17:59,642] INFO Session establishment complete on server zookeeper/172.20.0.2:2181, sessionid = 0x100474bc7f70000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,642 [myid:] - INFO [SyncThread:0:ZooKeeperServer#694] - Established session 0x100474bc7f70000 with negotiated timeout 18000 for client /172.20.0.3:57480
[36mbroker |[0m [2021-12-04 13:17:59,646] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[33mzookeeper |[0m 2021-12-04 13:17:59,657 [myid:] - ERROR [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#1063] - cnxn.saslServer is null: cnxn object did not initialize its saslServer properly.
[36mbroker |[0m [2021-12-04 13:17:59,660] ERROR SASL authentication failed using login context 'Client' with exception: {} (org.apache.zookeeper.client.ZooKeeperSaslClient)
[36mbroker |[0m javax.security.sasl.SaslException: Error in authenticating with a Zookeeper Quorum member: the quorum member's saslToken is null.
[36mbroker |[0m at org.apache.zookeeper.client.ZooKeeperSaslClient.createSaslToken(ZooKeeperSaslClient.java:312)
[36mbroker |[0m at org.apache.zookeeper.client.ZooKeeperSaslClient.respondToServer(ZooKeeperSaslClient.java:275)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxn$SendThread.readResponse(ClientCnxn.java:882)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxnSocketNIO.doIO(ClientCnxnSocketNIO.java:103)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:365)
[36mbroker |[0m at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1223)
[36mbroker |[0m [2021-12-04 13:17:59,669] ERROR [ZooKeeperClient Kafka server] Auth failed. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,672] INFO EventThread shut down for session: 0x100474bc7f70000 (org.apache.zookeeper.ClientCnxn)
[33mzookeeper |[0m 2021-12-04 13:17:59,794 [myid:] - WARN [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#376] - Unable to read additional data from client sessionid 0x100474bc7f70000, likely client has closed socket
[33mzookeeper |[0m 2021-12-04 13:17:59,795 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#1056] - Closed socket connection for client /172.20.0.3:57480 which had sessionid 0x100474bc7f70000
[36mbroker |[0m [2021-12-04 13:17:59,823] ERROR Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[36mbroker |[0m org.apache.zookeeper.KeeperException$AuthFailedException: KeeperErrorCode = AuthFailed for /consumers
[36mbroker |[0m at org.apache.zookeeper.KeeperException.create(KeeperException.java:130)
[36mbroker |[0m at org.apache.zookeeper.KeeperException.create(KeeperException.java:54)
[36mbroker |[0m at kafka.zookeeper.AsyncResponse.maybeThrow(ZooKeeperClient.scala:564)
[36mbroker |[0m at kafka.zk.KafkaZkClient.createRecursive(KafkaZkClient.scala:1662)
[36mbroker |[0m at kafka.zk.KafkaZkClient.makeSurePersistentPathExists(KafkaZkClient.scala:1560)
[36mbroker |[0m at kafka.zk.KafkaZkClient.$anonfun$createTopLevelPaths$1(KafkaZkClient.scala:1552)
[36mbroker |[0m at kafka.zk.KafkaZkClient.$anonfun$createTopLevelPaths$1$adapted(KafkaZkClient.scala:1552)
[36mbroker |[0m at scala.collection.immutable.List.foreach(List.scala:333)
[36mbroker |[0m at kafka.zk.KafkaZkClient.createTopLevelPaths(KafkaZkClient.scala:1552)
[36mbroker |[0m at kafka.server.KafkaServer.initZkClient(KafkaServer.scala:467)
[36mbroker |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:233)
[36mbroker |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:44)
[36mbroker |[0m at kafka.Kafka$.main(Kafka.scala:82)
[36mbroker |[0m at kafka.Kafka.main(Kafka.scala)
[36mbroker |[0m [2021-12-04 13:17:59,825] INFO shutting down (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,836] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,845] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient)
[36mbroker |[0m [2021-12-04 13:17:59,849] INFO App info kafka.server for -1 unregistered (org.apache.kafka.common.utils.AppInfoParser)
[36mbroker |[0m [2021-12-04 13:17:59,854] INFO shut down completed (kafka.server.KafkaServer)
[36mbroker |[0m [2021-12-04 13:17:59,855] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)
[36mbroker |[0m [2021-12-04 13:17:59,859] INFO shutting down (kafka.server.KafkaServer)
[36mbroker exited with code 1
[0m
Any tips how to get it work with wurstmeister images?
I have the following docker-compose file which is a copy of the docker-compose from the docker apache flink site. The only difference is that I am using the Mac m1 version.
version: "2.2"
services:
  jobmanager:
    image: arm64v8/flink:alpine
    ports:
      - "8081:8081"
    # NOTE(review): the original command copied the usage synopsis from the
    # Flink docs verbatim, including the optional-argument brackets
    # ("[--job-id <job id>] [--fromSavepoint ...] [job arguments]"). Those
    # brackets are not syntax Docker understands — they would be passed to
    # the entrypoint as literal tokens. Supply only real arguments; append
    # --job-id / --fromSavepoint values here if you actually need them.
    command: standalone-job --job-classname com.job.ClassName
    volumes:
      - ~/sg_flink/artifacts:/opt/flink/usrlib
    networks:
      - flink-network
    environment:
      # Block scalar: each line becomes one line of flink-conf.yaml overrides.
      - |
        FLINK_PROPERTIES=
        jobmanager.rpc.address: jobmanager
        parallelism.default: 2
  taskmanager:
    image: arm64v8/flink:alpine
    depends_on:
      - jobmanager
    command: taskmanager
    scale: 1
    volumes:
      - ~/sg_flink/artifacts:/opt/flink/usrlib
    networks:
      - flink-network
    environment:
      - |
        FLINK_PROPERTIES=
        jobmanager.rpc.address: jobmanager
        taskmanager.numberOfTaskSlots: 2
        parallelism.default: 2
networks:
  flink-network:
The error is a "connection refused":
taskmanager_1 | 2021-11-03 17:43:02,724 INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Could not resolve ResourceManager address akka.tcp://flink#9cf35ea13c8b:6123/user/resourcemanager, retrying in 10000 ms: Could not connect to rpc endpoint under address akka.tcp://flink#9cf35ea13c8b:6123/user/resourcemanager..
taskmanager_1 | 2021-11-03 17:43:12,753 WARN akka.remote.transport.netty.NettyTransport - Remote connection to [null] failed with java.net.ConnectException: Connection refused: 9cf35ea13c8b/172.20.0.3:6123
taskmanager_1 | 2021-11-03 17:43:12,756 WARN akka.remote.ReliableDeliverySupervisor - Association with remote system [akka.tcp://flink#9cf35ea13c8b:6123] has failed, address is now gated for [50] ms. Reason: [Association failed with [akka.tcp://flink#9cf35ea13c8b:6123]] Caused by: [Connection refused: 9cf35ea13c8b/172.20.0.3:6123]
taskmanager_1 | 2021-11-03 17:43:12,758 INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Could not resolve ResourceManager address akka.tcp://flink#9cf35ea13c8b:6123/user/resourcemanager, retrying in 10000 ms: Could not connect to rpc endpoint under address akka.tcp://flink#9cf35ea13c8b:6123/user/resourcemanager..
docker ps output looks like this
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4416f88f60c2 arm64v8/flink:alpine "/docker-entrypoint.…" 44 seconds ago Up 44 seconds 6123/tcp, 8081/tcp sg_flink_taskmanager_1
c211940acf41 arm64v8/flink:alpine "/docker-entrypoint.…" 45 seconds ago Up 44 seconds 6123/tcp, 0.0.0.0:8081->8081/tcp sg_flink_jobmanager_1
```
I have met this trouble in the same condition, and I fixed it using docker-compose links.
taskmanager:
links:
- jobmanager
I'm working on Mac with Docker Desktop. I'm trying to run wurstmeister/kafka from docker compose and connect a producer to it.
This is my docker-compose.yml:
version: '3.8'
services:
  zookeeper:
    container_name: zookeeper
    image: zookeeper:3.7.0
    ports:
      - "2181:2181"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zookeeper:2888:3888;2181
    restart: on-failure
  kafka:
    container_name: kafka
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
    environment:
      # KAFKA_LISTENERS is what the broker BINDS, not what it advertises.
      # The original bound EXTERNAL to localhost:9092; inside the container
      # that socket only accepts loopback traffic, so Docker's published-port
      # proxy could not reach it — which is why the host producer saw the
      # connection accepted by the proxy and then immediately dropped (EOF).
      # Bind both listeners to all interfaces:
      KAFKA_LISTENERS: INTERNAL://0.0.0.0:19092,EXTERNAL://0.0.0.0:9092
      # Advertised addresses are what clients connect back to:
      # other containers use kafka:19092, host producers use localhost:9092.
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:19092,EXTERNAL://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_BROKER_ID: 1
    restart: on-failure
    depends_on:
      - zookeeper
Then I have a producer connecting to localhost:9092 and sending a simple message. The producer works fine - tested with another kafka image confluentinc/cp-kafka:6.2.0.
When I try to use producer with wurstmeister/kafka I'm getting a lot of this errors:
22:07:13.421 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Initialize connection to node localhost:9092 (id: -1 rack: null) for sending metadata request
22:07:13.421 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Initiating connection to node localhost:9092 (id: -1 rack: null) using address localhost/127.0.0.1
22:07:13.421 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.common.network.Selector - [Producer clientId=simple-producer] Created socket with SO_RCVBUF = 326640, SO_SNDBUF = 146988, SO_TIMEOUT = 0 to node -1
22:07:13.421 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Completed connection to node -1. Fetching API versions.
22:07:13.421 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Initiating API versions fetch from node -1.
22:07:13.422 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Sending API_VERSIONS request with header RequestHeader(apiKey=API_VERSIONS, apiVersion=3, clientId=simple-producer, correlationId=20) and timeout 30000 to node -1: {client_software_name=apache-kafka-java,client_software_version=2.7.0,_tagged_fields={}}
22:07:13.423 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.common.network.Selector - [Producer clientId=simple-producer] Connection with localhost/127.0.0.1 disconnected
java.io.EOFException: null
at org.apache.kafka.common.network.NetworkReceive.readFrom(NetworkReceive.java:97)
at org.apache.kafka.common.network.KafkaChannel.receive(KafkaChannel.java:447)
at org.apache.kafka.common.network.KafkaChannel.read(KafkaChannel.java:397)
at org.apache.kafka.common.network.Selector.attemptRead(Selector.java:674)
at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:576)
at org.apache.kafka.common.network.Selector.poll(Selector.java:481)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:561)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:325)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:240)
at java.base/java.lang.Thread.run(Thread.java:829)
22:07:13.424 [kafka-producer-network-thread | simple-producer] DEBUG org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Node -1 disconnected.
22:07:13.424 [kafka-producer-network-thread | simple-producer] WARN org.apache.kafka.clients.NetworkClient - [Producer clientId=simple-producer] Bootstrap broker localhost:9092 (id: -1 rack: null) disconnected
Why does this happen? What is the cause of this error? And how can I make it work?
EDIT: added kafka container logs
Kafka container last logs below and no new logs added when I try to connect producer:
[2021-09-09 21:07:28,227] INFO Kafka version: 2.7.0 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-09 21:07:28,230] INFO Kafka commitId: 448719dc99a19793 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-09 21:07:28,231] INFO Kafka startTimeMs: 1631221648211 (org.apache.kafka.common.utils.AppInfoParser)
[2021-09-09 21:07:28,238] INFO [KafkaServer id=1] started (kafka.server.KafkaServer)
[2021-09-09 21:07:28,365] INFO [broker-1-to-controller-send-thread]: Recorded new controller, from now on will use broker 1 (kafka.server.BrokerToControllerRequestThread)
I need to expose JMX ports of Kafka and Zookeeper container to connect a monitoring tool such as Lenses or Grafana. The monitoring tool would be installed on a different server.
I made a docker-compose file to apply the container's configurations but it throws some error.
Here is the Docker-compose YAML file.
Any help is appreciated.
version: '3.2'
services:
  zookeeper:
    container_name: zookeeper
    image: wurstmeister/zookeeper:latest
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      # ">-" folding is fine for JVM flags: the lines are joined with single
      # spaces, which is exactly how java expects -D options separated.
      KAFKA_JMX_OPTS: >-
        -Dcom.sun.management.jmxremote=true
        -Dcom.sun.management.jmxremote.authenticate=false
        -Dcom.sun.management.jmxremote.ssl=false
        -Djava.rmi.server.hostname=156.17.42.120
        -Dcom.sun.management.jmxremote.port=11992
        -Dcom.sun.management.jmxremote.rmi.port=11992
      JMX_PORT: 11992
    ports:
      - "2181:2181"
      - "11992:11992"
  # https://hub.docker.com/r/confluentinc/cp-kafka/
  kafka:
    container_name: kafka
    image: wurstmeister/kafka:latest
    environment:
      KAFKA_LOG_RETENTION_MS: 10000
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 5000
      # Listener lists must NOT be written with ">-" + a trailing comma:
      # folding joins the lines with a space, so Kafka received
      # "...:19092, LISTENER_DOCKER_EXTERNAL://..." — the leading space makes
      # the second listener name invalid. (Visible in the startup log:
      # "listeners = LISTENER_DOCKER_INTERNAL://kafka:19091, LISTENER_...".)
      # Also fixed here:
      #  * the internal listener port was 19091 in KAFKA_LISTENERS but 19092
      #    in KAFKA_ADVERTISED_LISTENERS — clients were told a port the
      #    broker never bound; both now use 19092;
      #  * listeners now bind 0.0.0.0 instead of a hostname/host IP, so the
      #    sockets accept connections arriving via Docker's port publishing.
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka:19092,LISTENER_DOCKER_EXTERNAL://156.17.42.120:9092
      KAFKA_LISTENERS: LISTENER_DOCKER_INTERNAL://0.0.0.0:19092,LISTENER_DOCKER_EXTERNAL://0.0.0.0:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      JMX_PORT: 11991
      KAFKA_JMX_OPTS: >-
        -Djava.rmi.server.hostname=156.17.42.120
        -Dcom.sun.management.jmxremote.port=11991
        -Dcom.sun.management.jmxremote.rmi.port=11991
        -Dcom.sun.management.jmxremote=true
        -Dcom.sun.management.jmxremote.authenticate=false
        -Dcom.sun.management.jmxremote.ssl=false
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_LOG4J_LOGGERS: kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO
    ports:
      - "9092:9092"
      - "11991:11991"
    depends_on:
      - zookeeper
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
Docker-compose Log;
[33mzookeeper |[0m Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m delete.topic.enable = true
[33mzookeeper |[0m 2020-02-04 12:03:59,394 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m fetch.purgatory.purge.interval.requests = 1000
[33mzookeeper |[0m 2020-02-04 12:03:59,398 [myid:] - INFO [main:DatadirCleanupManager#78] - autopurge.snapRetainCount set to 3
[36mkafka |[0m group.initial.rebalance.delay.ms = 0
[33mzookeeper |[0m 2020-02-04 12:03:59,398 [myid:] - INFO [main:DatadirCleanupManager#79] - autopurge.purgeInterval set to 1
[36mkafka |[0m group.max.session.timeout.ms = 1800000
[33mzookeeper |[0m 2020-02-04 12:03:59,399 [myid:] - WARN [main:QuorumPeerMain#116] - Either no config or no quorum defined in config, running in standalone mode
[36mkafka |[0m group.max.size = 2147483647
[33mzookeeper |[0m 2020-02-04 12:03:59,399 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[36mkafka |[0m group.min.session.timeout.ms = 6000
[33mzookeeper |[0m 2020-02-04 12:03:59,406 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[36mkafka |[0m host.name =
[33mzookeeper |[0m 2020-02-04 12:03:59,408 [myid:] - INFO [main:QuorumPeerConfig#136] - Reading configuration from: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
[36mkafka |[0m inter.broker.listener.name = LISTENER_DOCKER_INTERNAL
[33mzookeeper |[0m 2020-02-04 12:03:59,409 [myid:] - INFO [main:ZooKeeperServerMain#98] - Starting server
[36mkafka |[0m inter.broker.protocol.version = 2.4-IV1
[33mzookeeper |[0m 2020-02-04 12:03:59,414 [myid:] - INFO [main:Environment#100] - Server environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 04:05 GMT
[36mkafka |[0m kafka.metrics.polling.interval.secs = 10
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:host.name=185df4f257aa
[36mkafka |[0m kafka.metrics.reporters = []
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.version=1.7.0_65
[36mkafka |[0m leader.imbalance.check.interval.seconds = 300
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.vendor=Oracle Corporation
[36mkafka |[0m leader.imbalance.per.broker.percentage = 10
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.home=/usr/lib/jvm/java-7-openjdk-amd64/jre
[36mkafka |[0m listener.security.protocol.map = LISTENER_DOCKER_INTERNAL:PLAINTEXT, LISTENER_DOCKER_EXTERNAL:PLAINTEXT
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.class.path=/opt/zookeeper-3.4.13/bin/../build/classes:/opt/zookeeper-3.4.13/bin/../build/lib/*.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-log4j12-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/slf4j-api-1.7.25.jar:/opt/zookeeper-3.4.13/bin/../lib/netty-3.10.6.Final.jar:/opt/zookeeper-3.4.13/bin/../lib/log4j-1.2.17.jar:/opt/zookeeper-3.4.13/bin/../lib/jline-0.9.94.jar:/opt/zookeeper-3.4.13/bin/../lib/audience-annotations-0.5.0.jar:/opt/zookeeper-3.4.13/bin/../zookeeper-3.4.13.jar:/opt/zookeeper-3.4.13/bin/../src/java/lib/*.jar:/opt/zookeeper-3.4.13/bin/../conf:
[36mkafka |[0m listeners = LISTENER_DOCKER_INTERNAL://kafka:19091, LISTENER_DOCKER_EXTERNAL://156.17.42.120:9092
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib
[36mkafka |[0m log.cleaner.backoff.ms = 15000
[33mzookeeper |[0m 2020-02-04 12:03:59,415 [myid:] - INFO [main:Environment#100] - Server environment:java.io.tmpdir=/tmp
[36mkafka |[0m log.cleaner.dedupe.buffer.size = 134217728
[33mzookeeper |[0m 2020-02-04 12:03:59,417 [myid:] - INFO [main:Environment#100] - Server environment:java.compiler=<NA>
[36mkafka |[0m log.cleaner.delete.retention.ms = 86400000
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.name=Linux
[36mkafka |[0m log.cleaner.enable = true
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.arch=amd64
[36mkafka |[0m log.cleaner.io.buffer.load.factor = 0.9
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:os.version=4.18.0-147.3.1.el8_1.x86_64
[36mkafka |[0m log.cleaner.io.buffer.size = 524288
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.name=root
[36mkafka |[0m log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.home=/root
[36mkafka |[0m log.cleaner.max.compaction.lag.ms = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:03:59,418 [myid:] - INFO [main:Environment#100] - Server environment:user.dir=/opt/zookeeper-3.4.13
[36mkafka |[0m log.cleaner.min.cleanable.ratio = 0.5
[33mzookeeper |[0m 2020-02-04 12:03:59,421 [myid:] - INFO [main:ZooKeeperServer#836] - tickTime set to 2000
[36mkafka |[0m log.cleaner.min.compaction.lag.ms = 0
[33mzookeeper |[0m 2020-02-04 12:03:59,421 [myid:] - INFO [main:ZooKeeperServer#845] - minSessionTimeout set to -1
[36mkafka |[0m log.cleaner.threads = 1
[33mzookeeper |[0m 2020-02-04 12:03:59,422 [myid:] - INFO [main:ZooKeeperServer#854] - maxSessionTimeout set to -1
[36mkafka |[0m log.cleanup.policy = [delete]
[33mzookeeper |[0m 2020-02-04 12:03:59,430 [myid:] - INFO [main:ServerCnxnFactory#117] - Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory
[36mkafka |[0m log.dir = /tmp/kafka-logs
[33mzookeeper |[0m 2020-02-04 12:03:59,435 [myid:] - INFO [main:NIOServerCnxnFactory#89] - binding to port 0.0.0.0/0.0.0.0:2181
[36mkafka |[0m log.dirs = /kafka/kafka-logs-acff9186a4ad
[33mzookeeper |[0m 2020-02-04 12:04:02,603 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxnFactory#215] - Accepted socket connection from /172.18.0.3:43840
[36mkafka |[0m log.flush.interval.messages = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:04:02,608 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:ZooKeeperServer#949] - Client attempting to establish new session at /172.18.0.3:43840
[36mkafka |[0m log.flush.interval.ms = null
[33mzookeeper |[0m 2020-02-04 12:04:02,610 [myid:] - INFO [SyncThread:0:FileTxnLog#213] - Creating new log file: log.1
[36mkafka |[0m log.flush.offset.checkpoint.interval.ms = 60000
[33mzookeeper |[0m 2020-02-04 12:04:02,661 [myid:] - INFO [SyncThread:0:ZooKeeperServer#694] - Established session 0x1001e9c10d90000 with negotiated timeout 6000 for client /172.18.0.3:43840
[33mzookeeper |[0m 2020-02-04 12:04:02,815 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x2 zxid:0x3 txntype:-1 reqpath:n/a Error Path:/brokers Error:KeeperErrorCode = NoNode for /brokers
[36mkafka |[0m log.flush.scheduler.interval.ms = 9223372036854775807
[33mzookeeper |[0m 2020-02-04 12:04:02,870 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x6 zxid:0x7 txntype:-1 reqpath:n/a Error Path:/config Error:KeeperErrorCode = NoNode for /config
[36mkafka |[0m log.flush.start.offset.checkpoint.interval.ms = 60000
[33mzookeeper |[0m 2020-02-04 12:04:02,908 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x9 zxid:0xa txntype:-1 reqpath:n/a Error Path:/admin Error:KeeperErrorCode = NoNode for /admin
[36mkafka |[0m log.index.interval.bytes = 4096
[33mzookeeper |[0m 2020-02-04 12:04:03,223 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#653] - Got user-level KeeperException when processing sessionid:0x1001e9c10d90000 type:create cxid:0x15 zxid:0x15 txntype:-1 reqpath:n/a Error Path:/cluster Error:KeeperErrorCode = NoNode for /cluster
[36mkafka |[0m log.index.size.max.bytes = 10485760
[33mzookeeper |[0m 2020-02-04 12:04:04,030 [myid:] - INFO [ProcessThread(sid:0 cport:2181)::PrepRequestProcessor#487] - Processed session termination for sessionid: 0x1001e9c10d90000
[33mzookeeper |[0m 2020-02-04 12:04:04,045 [myid:] - INFO [NIOServerCxn.Factory:0.0.0.0/0.0.0.0:2181:NIOServerCnxn#1056] - Closed socket connection for client /172.18.0.3:43840 which had sessionid 0x1001e9c10d90000
[36mkafka |[0m log.message.downconversion.enable = true
[33mzookeeper |[0m 2020-02-04 13:03:59,399 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#138] - Purge task started.
[36mkafka |[0m log.message.format.version = 2.4-IV1
[33mzookeeper |[0m 2020-02-04 13:03:59,400 [myid:] - INFO [PurgeTask:DatadirCleanupManager$PurgeTask#144] - Purge task completed.
[36mkafka |[0m log.message.timestamp.difference.max.ms = 9223372036854775807
[36mkafka |[0m log.message.timestamp.type = CreateTime
[36mkafka |[0m log.preallocate = false
[36mkafka |[0m log.retention.bytes = -1
[36mkafka |[0m log.retention.check.interval.ms = 5000
[36mkafka |[0m log.retention.hours = 168
[36mkafka |[0m log.retention.minutes = null
[36mkafka |[0m log.retention.ms = 10000
[36mkafka |[0m log.roll.hours = 168
[36mkafka |[0m log.roll.jitter.hours = 0
[36mkafka |[0m log.roll.jitter.ms = null
[36mkafka |[0m log.roll.ms = null
[36mkafka |[0m log.segment.bytes = 1073741824
[36mkafka |[0m log.segment.delete.delay.ms = 60000
[36mkafka |[0m max.connections = 2147483647
[36mkafka |[0m max.connections.per.ip = 2147483647
[36mkafka |[0m max.connections.per.ip.overrides =
[36mkafka |[0m max.incremental.fetch.session.cache.slots = 1000
[36mkafka |[0m message.max.bytes = 1000012
[36mkafka |[0m metric.reporters = []
[36mkafka |[0m metrics.num.samples = 2
[36mkafka |[0m metrics.recording.level = INFO
[36mkafka |[0m metrics.sample.window.ms = 30000
[36mkafka |[0m min.insync.replicas = 1
[36mkafka |[0m num.io.threads = 8
[36mkafka |[0m num.network.threads = 3
[36mkafka |[0m num.partitions = 1
[36mkafka |[0m num.recovery.threads.per.data.dir = 1
[36mkafka |[0m num.replica.alter.log.dirs.threads = null
[36mkafka |[0m num.replica.fetchers = 1
[36mkafka |[0m offset.metadata.max.bytes = 4096
[36mkafka |[0m offsets.commit.required.acks = -1
[36mkafka |[0m offsets.commit.timeout.ms = 5000
[36mkafka |[0m offsets.load.buffer.size = 5242880
[36mkafka |[0m offsets.retention.check.interval.ms = 600000
[36mkafka |[0m offsets.retention.minutes = 10080
[36mkafka |[0m offsets.topic.compression.codec = 0
[36mkafka |[0m offsets.topic.num.partitions = 50
[36mkafka |[0m offsets.topic.replication.factor = 1
[36mkafka |[0m offsets.topic.segment.bytes = 104857600
[36mkafka |[0m password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
[36mkafka |[0m password.encoder.iterations = 4096
[36mkafka |[0m password.encoder.key.length = 128
[36mkafka |[0m password.encoder.keyfactory.algorithm = null
[36mkafka |[0m password.encoder.old.secret = null
[36mkafka |[0m password.encoder.secret = null
[36mkafka |[0m port = 9092
[36mkafka |[0m principal.builder.class = null
[36mkafka |[0m producer.purgatory.purge.interval.requests = 1000
[36mkafka |[0m queued.max.request.bytes = -1
[36mkafka |[0m queued.max.requests = 500
[36mkafka |[0m quota.consumer.default = 9223372036854775807
[36mkafka |[0m quota.producer.default = 9223372036854775807
[36mkafka |[0m quota.window.num = 11
[36mkafka |[0m quota.window.size.seconds = 1
[36mkafka |[0m replica.fetch.backoff.ms = 1000
[36mkafka |[0m replica.fetch.max.bytes = 1048576
[36mkafka |[0m replica.fetch.min.bytes = 1
[36mkafka |[0m replica.fetch.response.max.bytes = 10485760
[36mkafka |[0m replica.fetch.wait.max.ms = 500
[36mkafka |[0m replica.high.watermark.checkpoint.interval.ms = 5000
[36mkafka |[0m replica.lag.time.max.ms = 10000
[36mkafka |[0m replica.selector.class = null
[36mkafka |[0m replica.socket.receive.buffer.bytes = 65536
[36mkafka |[0m replica.socket.timeout.ms = 30000
[36mkafka |[0m replication.quota.window.num = 11
[36mkafka |[0m replication.quota.window.size.seconds = 1
[36mkafka |[0m request.timeout.ms = 30000
[36mkafka |[0m reserved.broker.max.id = 1000
[36mkafka |[0m sasl.client.callback.handler.class = null
[36mkafka |[0m sasl.enabled.mechanisms = [GSSAPI]
[36mkafka |[0m sasl.jaas.config = null
[36mkafka |[0m sasl.kerberos.kinit.cmd = /usr/bin/kinit
[36mkafka |[0m sasl.kerberos.min.time.before.relogin = 60000
[36mkafka |[0m sasl.kerberos.principal.to.local.rules = [DEFAULT]
[36mkafka |[0m sasl.kerberos.service.name = null
[36mkafka |[0m sasl.kerberos.ticket.renew.jitter = 0.05
[36mkafka |[0m sasl.kerberos.ticket.renew.window.factor = 0.8
[36mkafka |[0m sasl.login.callback.handler.class = null
[36mkafka |[0m sasl.login.class = null
[36mkafka |[0m sasl.login.refresh.buffer.seconds = 300
[36mkafka |[0m sasl.login.refresh.min.period.seconds = 60
[36mkafka |[0m sasl.login.refresh.window.factor = 0.8
[36mkafka |[0m sasl.login.refresh.window.jitter = 0.05
[36mkafka |[0m sasl.mechanism.inter.broker.protocol = GSSAPI
[36mkafka |[0m sasl.server.callback.handler.class = null
[36mkafka |[0m security.inter.broker.protocol = PLAINTEXT
[36mkafka |[0m security.providers = null
[36mkafka |[0m socket.receive.buffer.bytes = 102400
[36mkafka |[0m socket.request.max.bytes = 104857600
[36mkafka |[0m socket.send.buffer.bytes = 102400
[36mkafka |[0m ssl.cipher.suites = []
[36mkafka |[0m ssl.client.auth = none
[36mkafka |[0m ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
[36mkafka |[0m ssl.endpoint.identification.algorithm = https
[36mkafka |[0m ssl.key.password = null
[36mkafka |[0m ssl.keymanager.algorithm = SunX509
[36mkafka |[0m ssl.keystore.location = null
[36mkafka |[0m ssl.keystore.password = null
[36mkafka |[0m ssl.keystore.type = JKS
[36mkafka |[0m ssl.principal.mapping.rules = DEFAULT
[36mkafka |[0m ssl.protocol = TLS
[36mkafka |[0m ssl.provider = null
[36mkafka |[0m ssl.secure.random.implementation = null
[36mkafka |[0m ssl.trustmanager.algorithm = PKIX
[36mkafka |[0m ssl.truststore.location = null
[36mkafka |[0m ssl.truststore.password = null
[36mkafka |[0m ssl.truststore.type = JKS
[36mkafka |[0m transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
[36mkafka |[0m transaction.max.timeout.ms = 900000
[36mkafka |[0m transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
[36mkafka |[0m transaction.state.log.load.buffer.size = 5242880
[36mkafka |[0m transaction.state.log.min.isr = 1
[36mkafka |[0m transaction.state.log.num.partitions = 50
[36mkafka |[0m transaction.state.log.replication.factor = 1
[36mkafka |[0m transaction.state.log.segment.bytes = 104857600
[36mkafka |[0m transactional.id.expiration.ms = 604800000
[36mkafka |[0m unclean.leader.election.enable = false
[36mkafka |[0m zookeeper.connect = zookeeper:2181
[36mkafka |[0m zookeeper.connection.timeout.ms = 6000
[36mkafka |[0m zookeeper.max.in.flight.requests = 10
[36mkafka |[0m zookeeper.session.timeout.ms = 6000
[36mkafka |[0m zookeeper.set.acl = false
[36mkafka |[0m zookeeper.sync.time.ms = 2000
[36mkafka |[0m (kafka.server.KafkaConfig)
[36mkafka |[0m [2020-02-04 12:04:03,415] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,416] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,417] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:03,440] INFO Log directory /kafka/kafka-logs-acff9186a4ad not found, creating it. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,448] INFO Loading logs. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,455] INFO Logs loading complete in 7 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,469] INFO Starting log cleanup with a period of 5000 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,477] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:03,945] INFO Awaiting socket connections on kafka:19091. (kafka.network.Acceptor)
[36mkafka |[0m [2020-02-04 12:04:03,979] INFO [SocketServer brokerId=1001] Created data-plane acceptor and processors for endpoint : EndPoint(kafka,19091,ListenerName(LISTENER_DOCKER_INTERNAL),PLAINTEXT) (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,983] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
[36mkafka |[0m org.apache.kafka.common.KafkaException: Socket server failed to bind to 156.17.42.120:9092: Address not available.
[36mkafka |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:632)
[36mkafka |[0m at kafka.network.Acceptor.<init>(SocketServer.scala:508)
[36mkafka |[0m at kafka.network.SocketServer.createAcceptor(SocketServer.scala:271)
[36mkafka |[0m at kafka.network.SocketServer.$anonfun$createDataPlaneAcceptorsAndProcessors$1(SocketServer.scala:240)
[36mkafka |[0m at kafka.network.SocketServer.$anonfun$createDataPlaneAcceptorsAndProcessors$1$adapted(SocketServer.scala:238)
[36mkafka |[0m at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
[36mkafka |[0m at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
[36mkafka |[0m at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
[36mkafka |[0m at kafka.network.SocketServer.createDataPlaneAcceptorsAndProcessors(SocketServer.scala:238)
[36mkafka |[0m at kafka.network.SocketServer.startup(SocketServer.scala:121)
[36mkafka |[0m at kafka.server.KafkaServer.startup(KafkaServer.scala:263)
[36mkafka |[0m at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:44)
[36mkafka |[0m at kafka.Kafka$.main(Kafka.scala:84)
[36mkafka |[0m at kafka.Kafka.main(Kafka.scala)
[36mkafka |[0m Caused by: java.net.BindException: Address not available
[36mkafka |[0m at sun.nio.ch.Net.bind0(Native Method)
[36mkafka |[0m at sun.nio.ch.Net.bind(Net.java:433)
[36mkafka |[0m at sun.nio.ch.Net.bind(Net.java:425)
[36mkafka |[0m at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
[36mkafka |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
[36mkafka |[0m at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:67)
[36mkafka |[0m at kafka.network.Acceptor.openServerSocket(SocketServer.scala:628)
[36mkafka |[0m ... 13 more
[36mkafka |[0m [2020-02-04 12:04:03,986] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)
[36mkafka |[0m [2020-02-04 12:04:03,987] INFO [SocketServer brokerId=1001] Stopping socket server request processors (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,993] INFO [SocketServer brokerId=1001] Stopped socket server request processors (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:03,996] INFO Shutting down. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:04,028] INFO Shutdown complete. (kafka.log.LogManager)
[36mkafka |[0m [2020-02-04 12:04:04,029] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient)
[36mkafka |[0m [2020-02-04 12:04:04,146] INFO Session: 0x1001e9c10d90000 closed (org.apache.zookeeper.ZooKeeper)
[36mkafka |[0m [2020-02-04 12:04:04,146] INFO EventThread shut down for session: 0x1001e9c10d90000 (org.apache.zookeeper.ClientCnxn)
[36mkafka |[0m [2020-02-04 12:04:04,148] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient)
[36mkafka |[0m [2020-02-04 12:04:04,148] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,416] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,417] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,417] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[36mkafka |[0m [2020-02-04 12:04:04,418] INFO [SocketServer brokerId=1001] Shutting down socket server (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:04,457] INFO [SocketServer brokerId=1001] Shutdown completed (kafka.network.SocketServer)
[36mkafka |[0m [2020-02-04 12:04:04,463] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)
[36mkafka |[0m [2020-02-04 12:04:04,464] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)
[36mkafka |[0m [2020-02-04 12:04:04,471] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)