output
zookeeper_1 | [2020-07-08 17:56:34,373] INFO Server environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,374] INFO Server environment:java.class.path=/usr/bin/../share/java/kafka/httpmime-4.5.11.jar:/usr/bin/../share/java/kafka/jakarta.activation-api-1.2.1.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.3.jar:/usr/bin/../share/java/kafka/netty-transport-4.1.45.Final.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/scala-collection-compat_2.12-2.1.3.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/netty-resolver-4.1.45.Final.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/httpclient-4.5.11.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/netty-common-4.1.45.Final.jar:/usr/bin/../share/java/kafka/netty-transport-native-epoll-4.1.45.Final.jar:/usr/bin/../share/java/kafka/lz4-java-1.7.1.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.10.2.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/commons-cli-1.4.jar:/usr/bin/../share/java/kafka/reflections-0.9.12.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/avro-1.9.2.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/connect-mirror-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/zookeeper-3.5.7.jar:/usr/bin/../share/java/kafka/netty-codec-4.1.45.Final.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.10.2.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/connect-file-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/netty-transport-native-unix-common-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-javadoc.jar:/usr/bin/../share/java/kafka/netty-buffer-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.1.jar:/usr/bin/../share/java/kafka/kafka-clients-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/scala-java8-compat_2.12-0.9.0.jar:/usr/bin/../share/java/kafka/connect-transforms-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/connect-api-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.10.2.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-sources.jar:/usr/bin/../share/java/kafka/jackson-core-2.10.2.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/connect-runtime-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.30.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/connect-mirror-client-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.10.2.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-test-sources.jar:/usr/bin/../share/java/kafka/httpcore-4.4.13.jar:/usr/bin/../share/java/kafka/commons-compress-1.19.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.12-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/netty-handler-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/scala-reflect-2.12.10.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-test.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/scala-library-2.12.10.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.12-2.10.2.jar:/usr/bin/../share/java/kafka/javassist-3.26.0-GA.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.10.2.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/zookeeper-jute-3.5.7.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.4-7.jar:/usr/bin/../share/java/kafka/jackson-databind-2.10.2.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.10.2.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.10.2.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/kafka-tools-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/kafka-streams-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.30.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/scala-logging_2.12-3.9.2.jar:/usr/bin/../share/java/kafka/jakarta.xml.bind-api-2.3.2.jar:/usr/bin/../share/java/kafka/connect-json-5.5.0-ccs.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,374] INFO Server environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,375] INFO Server environment:java.io.tmpdir=/tmp (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,375] INFO Server environment:java.compiler=<NA> (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,375] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,375] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,376] INFO Server environment:os.version=4.9.0-12-amd64 (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,376] INFO Server environment:user.name=root (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,376] INFO Server environment:user.home=/root (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,376] INFO Server environment:user.dir=/ (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,376] INFO Server environment:os.memory.free=500MB (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,377] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,377] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,378] INFO minSessionTimeout set to 4000 (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,378] INFO maxSessionTimeout set to 40000 (org.apache.zookeeper.server.ZooKeeperServer) | |
zookeeper_1 | [2020-07-08 17:56:34,389] INFO Created server with tickTime 2000 minSessionTimeout 4000 maxSessionTimeout 40000 datadir /var/lib/zookeeper/log/version-2 snapdir /var/lib/zookeeper/data/version-2 (org.apache.zookeeper.server.ZooKeeperServer) | |
kafka_1 | ===> Check if Zookeeper is healthy ... | |
zookeeper_1 | [2020-07-08 17:56:34,531] INFO Logging initialized @2744ms to org.eclipse.jetty.util.log.Slf4jLog (org.eclipse.jetty.util.log) | |
zookeeper_1 | [2020-07-08 17:56:35,318] WARN o.e.j.s.ServletContextHandler@74650e52{/,null,UNAVAILABLE} contextPath ends with /* (org.eclipse.jetty.server.handler.ContextHandler) | |
zookeeper_1 | [2020-07-08 17:56:35,336] WARN Empty contextPath (org.eclipse.jetty.server.handler.ContextHandler) | |
zookeeper_1 | [2020-07-08 17:56:35,442] INFO jetty-9.4.24.v20191120; built: 2019-11-20T21:37:49.771Z; git: 363d5f2df3a8a28de40604320230664b9c793c16; jvm 1.8.0_212-b04 (org.eclipse.jetty.server.Server) | |
zookeeper_1 | [2020-07-08 17:56:35,727] INFO DefaultSessionIdManager workerName=node0 (org.eclipse.jetty.server.session) | |
zookeeper_1 | [2020-07-08 17:56:35,748] INFO No SessionScavenger set, using defaults (org.eclipse.jetty.server.session) | |
zookeeper_1 | [2020-07-08 17:56:35,751] INFO node0 Scavenging every 600000ms (org.eclipse.jetty.server.session) | |
zookeeper_1 | [2020-07-08 17:56:35,888] INFO Started o.e.j.s.ServletContextHandler@74650e52{/,null,AVAILABLE} (org.eclipse.jetty.server.handler.ContextHandler) | |
zookeeper_1 | [2020-07-08 17:56:35,973] INFO Started ServerConnector@31dc339b{HTTP/1.1,[http/1.1]}{0.0.0.0:8080} (org.eclipse.jetty.server.AbstractConnector) | |
zookeeper_1 | [2020-07-08 17:56:35,982] INFO Started @4193ms (org.eclipse.jetty.server.Server) | |
zookeeper_1 | [2020-07-08 17:56:35,982] INFO Started AdminServer on address 0.0.0.0, port 8080 and command URL /commands (org.apache.zookeeper.server.admin.JettyAdminServer) | |
zookeeper_1 | [2020-07-08 17:56:36,006] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) | |
zookeeper_1 | [2020-07-08 17:56:36,013] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 1 selector thread(s), 2 worker threads, and 64 kB direct buffers. (org.apache.zookeeper.server.NIOServerCnxnFactory) | |
zookeeper_1 | [2020-07-08 17:56:36,028] INFO binding to port 0.0.0.0/0.0.0.0:32181 (org.apache.zookeeper.server.NIOServerCnxnFactory) | |
zookeeper_1 | [2020-07-08 17:56:36,101] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) | |
zookeeper_1 | [2020-07-08 17:56:36,113] INFO Reading snapshot /var/lib/zookeeper/data/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileSnap) | |
zookeeper_1 | [2020-07-08 17:56:36,213] INFO Snapshotting: 0x25 to /var/lib/zookeeper/data/version-2/snapshot.25 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) | |
zookeeper_1 | [2020-07-08 17:56:36,305] INFO Using checkIntervalMs=60000 maxPerMinute=10000 (org.apache.zookeeper.server.ContainerManager) | |
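(For context: the ZooKeeper startup above, with the client port bound to 32181, the admin server on 8080, and a roughly 512 MB heap, is what a compose service along these lines would produce. This is a minimal sketch assuming the confluentinc/cp-zookeeper image; the image tag and the heap setting are assumptions inferred from the log, not taken from the original compose file.)

  zookeeper:
    image: confluentinc/cp-zookeeper:5.5.0    # assumed tag; the log only shows CP 5.5 jars
    environment:
      ZOOKEEPER_CLIENT_PORT: 32181            # matches "binding to port 0.0.0.0/0.0.0.0:32181"
      ZOOKEEPER_TICK_TIME: 2000               # matches "Created server with tickTime 2000"
      KAFKA_HEAP_OPTS: "-Xmx512M -Xms512M"    # assumed; matches os.memory.max=512MB above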
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.5.7-f0fdd52973d373ffd9c86b81d99842dc2c7f660e, built on 02/10/2020 11:30 GMT | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=7b9b53326453 | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212 | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc. | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA> | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64 | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.9.0-12-amd64 | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=root | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/root | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/ | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.free=53MB | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.max=897MB | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.memory.total=56MB | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=zookeeper:32181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@cc34f4d | |
kafka_1 | [main] INFO org.apache.zookeeper.common.X509Util - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation | |
kafka_1 | [main] INFO org.apache.zookeeper.ClientCnxnSocket - jute.maxbuffer value is 4194304 Bytes | |
kafka_1 | [main] INFO org.apache.zookeeper.ClientCnxn - zookeeper.request.timeout value is 0. feature enabled= | |
kafka_1 | [main-SendThread(zookeeper:32181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server zookeeper/172.22.0.3:32181. Will not attempt to authenticate using SASL (unknown error) | |
kafka_1 | [main-SendThread(zookeeper:32181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established, initiating session, client: /172.22.0.6:33966, server: zookeeper/172.22.0.3:32181 | |
zookeeper_1 | [2020-07-08 17:56:37,062] INFO Creating new log file: log.26 (org.apache.zookeeper.server.persistence.FileTxnLog) | |
kafka_1 | [main-SendThread(zookeeper:32181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server zookeeper/172.22.0.3:32181, sessionid = 0x10007da654d0000, negotiated timeout = 40000 | |
kafka_1 | [main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x10007da654d0000 closed | |
kafka_1 | [main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x10007da654d0000 | |
kafka_1 | ===> Launching ... | |
kafka_1 | ===> Launching kafka ... | |
cloudera_1 | * Started Hadoop namenode: | |
cloudera_1 | starting secondarynamenode, logging to /var/log/hadoop-hdfs/hadoop-hdfs-secondarynamenode-7fee93de0168.out | |
kafka_1 | [2020-07-08 17:56:39,210] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) | |
kafka_1 | [2020-07-08 17:56:40,856] INFO KafkaConfig values: | |
kafka_1 | advertised.host.name = null | |
kafka_1 | advertised.listeners = PLAINTEXT://kafka:29092 | |
kafka_1 | advertised.port = null | |
kafka_1 | alter.config.policy.class.name = null | |
kafka_1 | alter.log.dirs.replication.quota.window.num = 11 | |
kafka_1 | alter.log.dirs.replication.quota.window.size.seconds = 1 | |
kafka_1 | authorizer.class.name = | |
kafka_1 | auto.create.topics.enable = true | |
kafka_1 | auto.leader.rebalance.enable = true | |
kafka_1 | background.threads = 10 | |
kafka_1 | broker.id = 1 | |
kafka_1 | broker.id.generation.enable = true | |
kafka_1 | broker.rack = null | |
kafka_1 | client.quota.callback.class = null | |
kafka_1 | compression.type = producer | |
kafka_1 | connection.failed.authentication.delay.ms = 100 | |
kafka_1 | connections.max.idle.ms = 600000 | |
kafka_1 | connections.max.reauth.ms = 0 | |
kafka_1 | control.plane.listener.name = null | |
kafka_1 | controlled.shutdown.enable = true | |
kafka_1 | controlled.shutdown.max.retries = 3 | |
kafka_1 | controlled.shutdown.retry.backoff.ms = 5000 | |
kafka_1 | controller.socket.timeout.ms = 30000 | |
kafka_1 | create.topic.policy.class.name = null | |
kafka_1 | default.replication.factor = 1 | |
kafka_1 | delegation.token.expiry.check.interval.ms = 3600000 | |
kafka_1 | delegation.token.expiry.time.ms = 86400000 | |
kafka_1 | delegation.token.master.key = null | |
kafka_1 | delegation.token.max.lifetime.ms = 604800000 | |
kafka_1 | delete.records.purgatory.purge.interval.requests = 1 | |
kafka_1 | delete.topic.enable = true | |
kafka_1 | fetch.max.bytes = 57671680 | |
kafka_1 | fetch.purgatory.purge.interval.requests = 1000 | |
kafka_1 | group.initial.rebalance.delay.ms = 3000 | |
kafka_1 | group.max.session.timeout.ms = 1800000 | |
kafka_1 | group.max.size = 2147483647 | |
kafka_1 | group.min.session.timeout.ms = 6000 | |
kafka_1 | host.name = | |
kafka_1 | inter.broker.listener.name = null | |
kafka_1 | inter.broker.protocol.version = 2.5-IV0 | |
kafka_1 | kafka.metrics.polling.interval.secs = 10 | |
kafka_1 | kafka.metrics.reporters = [] | |
kafka_1 | leader.imbalance.check.interval.seconds = 300 | |
kafka_1 | leader.imbalance.per.broker.percentage = 10 | |
kafka_1 | listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL | |
kafka_1 | listeners = PLAINTEXT://0.0.0.0:29092 | |
kafka_1 | log.cleaner.backoff.ms = 15000 | |
kafka_1 | log.cleaner.dedupe.buffer.size = 134217728 | |
kafka_1 | log.cleaner.delete.retention.ms = 86400000 | |
kafka_1 | log.cleaner.enable = true | |
kafka_1 | log.cleaner.io.buffer.load.factor = 0.9 | |
kafka_1 | log.cleaner.io.buffer.size = 524288 | |
kafka_1 | log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 | |
kafka_1 | log.cleaner.max.compaction.lag.ms = 9223372036854775807 | |
kafka_1 | log.cleaner.min.cleanable.ratio = 0.5 | |
kafka_1 | log.cleaner.min.compaction.lag.ms = 0 | |
kafka_1 | log.cleaner.threads = 1 | |
kafka_1 | log.cleanup.policy = [delete] | |
kafka_1 | log.dir = /tmp/kafka-logs | |
kafka_1 | log.dirs = /var/lib/kafka/data | |
kafka_1 | log.flush.interval.messages = 9223372036854775807 | |
kafka_1 | log.flush.interval.ms = null | |
kafka_1 | log.flush.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.flush.scheduler.interval.ms = 9223372036854775807 | |
kafka_1 | log.flush.start.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.index.interval.bytes = 4096 | |
kafka_1 | log.index.size.max.bytes = 10485760 | |
kafka_1 | log.message.downconversion.enable = true | |
kafka_1 | log.message.format.version = 2.5-IV0 | |
kafka_1 | log.message.timestamp.difference.max.ms = 9223372036854775807 | |
kafka_1 | log.message.timestamp.type = CreateTime | |
kafka_1 | log.preallocate = false | |
kafka_1 | log.retention.bytes = -1 | |
kafka_1 | log.retention.check.interval.ms = 300000 | |
kafka_1 | log.retention.hours = 168 | |
kafka_1 | log.retention.minutes = null | |
kafka_1 | log.retention.ms = null | |
kafka_1 | log.roll.hours = 168 | |
kafka_1 | log.roll.jitter.hours = 0 | |
kafka_1 | log.roll.jitter.ms = null | |
kafka_1 | log.roll.ms = null | |
kafka_1 | log.segment.bytes = 1073741824 | |
kafka_1 | log.segment.delete.delay.ms = 60000 | |
kafka_1 | max.connections = 2147483647 | |
kafka_1 | max.connections.per.ip = 2147483647 | |
kafka_1 | max.connections.per.ip.overrides = | |
kafka_1 | max.incremental.fetch.session.cache.slots = 1000 | |
kafka_1 | message.max.bytes = 1048588 | |
kafka_1 | metric.reporters = [] | |
kafka_1 | metrics.num.samples = 2 | |
kafka_1 | metrics.recording.level = INFO | |
kafka_1 | metrics.sample.window.ms = 30000 | |
kafka_1 | min.insync.replicas = 1 | |
kafka_1 | num.io.threads = 8 | |
kafka_1 | num.network.threads = 3 | |
kafka_1 | num.partitions = 1 | |
kafka_1 | num.recovery.threads.per.data.dir = 1 | |
kafka_1 | num.replica.alter.log.dirs.threads = null | |
kafka_1 | num.replica.fetchers = 1 | |
kafka_1 | offset.metadata.max.bytes = 4096 | |
kafka_1 | offsets.commit.required.acks = -1 | |
kafka_1 | offsets.commit.timeout.ms = 5000 | |
kafka_1 | offsets.load.buffer.size = 5242880 | |
kafka_1 | offsets.retention.check.interval.ms = 600000 | |
kafka_1 | offsets.retention.minutes = 10080 | |
kafka_1 | offsets.topic.compression.codec = 0 | |
kafka_1 | offsets.topic.num.partitions = 50 | |
kafka_1 | offsets.topic.replication.factor = 1 | |
kafka_1 | offsets.topic.segment.bytes = 104857600 | |
kafka_1 | password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding | |
kafka_1 | password.encoder.iterations = 4096 | |
kafka_1 | password.encoder.key.length = 128 | |
kafka_1 | password.encoder.keyfactory.algorithm = null | |
kafka_1 | password.encoder.old.secret = null | |
kafka_1 | password.encoder.secret = null | |
kafka_1 | port = 9092 | |
kafka_1 | principal.builder.class = null | |
kafka_1 | producer.purgatory.purge.interval.requests = 1000 | |
kafka_1 | queued.max.request.bytes = -1 | |
kafka_1 | queued.max.requests = 500 | |
kafka_1 | quota.consumer.default = 9223372036854775807 | |
kafka_1 | quota.producer.default = 9223372036854775807 | |
kafka_1 | quota.window.num = 11 | |
kafka_1 | quota.window.size.seconds = 1 | |
kafka_1 | replica.fetch.backoff.ms = 1000 | |
kafka_1 | replica.fetch.max.bytes = 1048576 | |
kafka_1 | replica.fetch.min.bytes = 1 | |
kafka_1 | replica.fetch.response.max.bytes = 10485760 | |
kafka_1 | replica.fetch.wait.max.ms = 500 | |
kafka_1 | replica.high.watermark.checkpoint.interval.ms = 5000 | |
kafka_1 | replica.lag.time.max.ms = 30000 | |
kafka_1 | replica.selector.class = null | |
kafka_1 | replica.socket.receive.buffer.bytes = 65536 | |
kafka_1 | replica.socket.timeout.ms = 30000 | |
kafka_1 | replication.quota.window.num = 11 | |
kafka_1 | replication.quota.window.size.seconds = 1 | |
kafka_1 | request.timeout.ms = 30000 | |
kafka_1 | reserved.broker.max.id = 1000 | |
kafka_1 | sasl.client.callback.handler.class = null | |
kafka_1 | sasl.enabled.mechanisms = [GSSAPI] | |
kafka_1 | sasl.jaas.config = null | |
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit | |
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000 | |
kafka_1 | sasl.kerberos.principal.to.local.rules = [DEFAULT] | |
kafka_1 | sasl.kerberos.service.name = null | |
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05 | |
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8 | |
kafka_1 | sasl.login.callback.handler.class = null | |
kafka_1 | sasl.login.class = null | |
kafka_1 | sasl.login.refresh.buffer.seconds = 300 | |
kafka_1 | sasl.login.refresh.min.period.seconds = 60 | |
kafka_1 | sasl.login.refresh.window.factor = 0.8 | |
kafka_1 | sasl.login.refresh.window.jitter = 0.05 | |
kafka_1 | sasl.mechanism.inter.broker.protocol = GSSAPI | |
kafka_1 | sasl.server.callback.handler.class = null | |
kafka_1 | security.inter.broker.protocol = PLAINTEXT | |
kafka_1 | security.providers = null | |
kafka_1 | socket.receive.buffer.bytes = 102400 | |
kafka_1 | socket.request.max.bytes = 104857600 | |
kafka_1 | socket.send.buffer.bytes = 102400 | |
kafka_1 | ssl.cipher.suites = [] | |
kafka_1 | ssl.client.auth = none | |
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] | |
kafka_1 | ssl.endpoint.identification.algorithm = https | |
kafka_1 | ssl.key.password = null | |
kafka_1 | ssl.keymanager.algorithm = SunX509 | |
kafka_1 | ssl.keystore.location = null | |
kafka_1 | ssl.keystore.password = null | |
kafka_1 | ssl.keystore.type = JKS | |
kafka_1 | ssl.principal.mapping.rules = DEFAULT | |
kafka_1 | ssl.protocol = TLS | |
kafka_1 | ssl.provider = null | |
kafka_1 | ssl.secure.random.implementation = null | |
kafka_1 | ssl.trustmanager.algorithm = PKIX | |
kafka_1 | ssl.truststore.location = null | |
kafka_1 | ssl.truststore.password = null | |
kafka_1 | ssl.truststore.type = JKS | |
kafka_1 | transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 | |
kafka_1 | transaction.max.timeout.ms = 900000 | |
kafka_1 | transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 | |
kafka_1 | transaction.state.log.load.buffer.size = 5242880 | |
kafka_1 | transaction.state.log.min.isr = 2 | |
kafka_1 | transaction.state.log.num.partitions = 50 | |
kafka_1 | transaction.state.log.replication.factor = 3 | |
kafka_1 | transaction.state.log.segment.bytes = 104857600 | |
kafka_1 | transactional.id.expiration.ms = 604800000 | |
kafka_1 | unclean.leader.election.enable = false | |
kafka_1 | zookeeper.clientCnxnSocket = null | |
kafka_1 | zookeeper.connect = zookeeper:32181 | |
kafka_1 | zookeeper.connection.timeout.ms = null | |
kafka_1 | zookeeper.max.in.flight.requests = 10 | |
kafka_1 | zookeeper.session.timeout.ms = 18000 | |
kafka_1 | zookeeper.set.acl = false | |
kafka_1 | zookeeper.ssl.cipher.suites = null | |
kafka_1 | zookeeper.ssl.client.enable = false | |
kafka_1 | zookeeper.ssl.crl.enable = false | |
kafka_1 | zookeeper.ssl.enabled.protocols = null | |
kafka_1 | zookeeper.ssl.endpoint.identification.algorithm = HTTPS | |
kafka_1 | zookeeper.ssl.keystore.location = null | |
kafka_1 | zookeeper.ssl.keystore.password = null | |
kafka_1 | zookeeper.ssl.keystore.type = null | |
kafka_1 | zookeeper.ssl.ocsp.enable = false | |
kafka_1 | zookeeper.ssl.protocol = TLSv1.2 | |
kafka_1 | zookeeper.ssl.truststore.location = null | |
kafka_1 | zookeeper.ssl.truststore.password = null | |
kafka_1 | zookeeper.ssl.truststore.type = null | |
kafka_1 | zookeeper.sync.time.ms = 2000 | |
kafka_1 | (kafka.server.KafkaConfig) | |
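(The KafkaConfig values above correspond to a broker service roughly like the sketch below, assuming the confluentinc/cp-kafka image, whose startup script maps KAFKA_*-prefixed environment variables onto server.properties keys. Only values that differ from Kafka defaults in the dump are shown; the image tag is an assumption.)

  kafka:
    image: confluentinc/cp-kafka:5.5.0                      # assumed tag (CP 5.5 jars in the classpath)
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1                                    # broker.id = 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181              # zookeeper.connect = zookeeper:32181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:29092            # listeners
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092   # advertised.listeners
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1             # offsets.topic.replication.factor = 1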
kafka_1 | [2020-07-08 17:56:40,978] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) | |
kafka_1 | [2020-07-08 17:56:41,151] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig) | |
kafka_1 | [2020-07-08 17:56:41,178] WARN Please note that the support metrics collection feature ("Metrics") of Proactive Support is enabled. With Metrics enabled, this broker is configured to collect and report certain broker and cluster metadata ("Metadata") about your use of the Confluent Platform (including without limitation, your remote internet protocol address) to Confluent, Inc. ("Confluent") or its parent, subsidiaries, affiliates or service providers every 24hours. This Metadata may be transferred to any country in which Confluent maintains facilities. For a more in depth discussion of how Confluent processes such information, please read our Privacy Policy located at http://www.confluent.io/privacy. By proceeding with `confluent.support.metrics.enable=true`, you agree to all such collection, transfer, storage and use of Metadata by Confluent. You can turn the Metrics feature off by setting `confluent.support.metrics.enable=false` in the broker configuration and restarting the broker. See the Confluent Platform documentation for further information. (io.confluent.support.metrics.SupportedServerStartable) | |
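(As the warning itself says, this reporting is controlled by confluent.support.metrics.enable. In the cp-kafka image that property can be set from the compose file through the same KAFKA_ prefix mapping as above; a one-line sketch, with the variable name derived from that mapping rather than copied from the original file:)

    environment:
      KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: "false"   # becomes confluent.support.metrics.enable=false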
kafka_1 | [2020-07-08 17:56:41,193] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) | |
kafka_1 | [2020-07-08 17:56:41,196] INFO starting (kafka.server.KafkaServer) | |
kafka_1 | [2020-07-08 17:56:41,206] INFO Connecting to zookeeper on zookeeper:32181 (kafka.server.KafkaServer) | |
kafka_1 | [2020-07-08 17:56:41,271] INFO [ZooKeeperClient Kafka server] Initializing a new session to zookeeper:32181. (kafka.zookeeper.ZooKeeperClient) | |
kafka_1 | [2020-07-08 17:56:41,285] INFO Client environment:zookeeper.version=3.5.7-f0fdd52973d373ffd9c86b81d99842dc2c7f660e, built on 02/10/2020 11:30 GMT (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,287] INFO Client environment:host.name=7b9b53326453 (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,288] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,288] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,289] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,290] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/httpmime-4.5.11.jar:/usr/bin/../share/java/kafka/jakarta.activation-api-1.2.1.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.3.jar:/usr/bin/../share/java/kafka/netty-transport-4.1.45.Final.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/scala-collection-compat_2.12-2.1.3.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/netty-resolver-4.1.45.Final.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/httpclient-4.5.11.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/netty-common-4.1.45.Final.jar:/usr/bin/../share/java/kafka/netty-transport-native-epoll-4.1.45.Final.jar:/usr/bin/../share/java/kafka/lz4-java-1.7.1.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.10.2.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/commons-cli-1.4.jar:/usr/bin/../share/java/kafka/reflections-0.9.12.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/avro-1.9.2.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/connect-mirror-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/zookeeper-3.5.7.jar:/usr/bin/../share/java/kafka/netty-codec-4.1.45.Final.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.10.2.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/connect-file-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/netty-transport-native-unix-common-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-javadoc.jar:/usr/bin/../share/java/kafka/netty-buffer-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.1.jar:/usr/bin/../share/java/kafka/kafka-clients-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/scala-java8-compat_2.12-0.9.0.jar:/usr/bin/../share/java/kafka/connect-transforms-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/connect-api-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.10.2.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-sources.jar:/usr/bin/../share/java/kafka/jackson-core-2.10.2.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/connect-runtime-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.30.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/connect-mirror-client-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.10.2.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-test-sources.jar:/usr/bin/../share/java/kafka/httpcore-4.4.13.jar:/usr/bin/../share/java/kafka/commons-compress-1.19.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.12-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/netty-handler-4.1.45.Final.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/scala-reflect-2.12.10.jar:/usr/bin/../share/java/kafka/kafka_2.12-5.5.0-ccs-test.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/scala-library-2.12.10.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.12-2.10.2.jar:/usr/bin/../share/java/kafka/javassist-3.26.0-GA.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.10.2.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/zookeeper-jute-3.5.7.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.4-7.jar:/usr/bin/../share/java/kafka/jackson-databind-2.10.2.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.10.2.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.10.2.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/kafka-tools-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/kafka-streams-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.30.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.5.0-ccs.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.24.v20191120.jar:/usr/bin/../share/java/kafka/scala-logging_2.12-3.9.2.jar:/usr/bin/../share/java/kafka/jakarta.xml.bind-api-2.3.2.jar:/usr/bin/../share/java/kafka/connect-json-5.5.0-ccs.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,290] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,291] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,304] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,305] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,305] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,305] INFO Client environment:os.version=4.9.0-12-amd64 (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,306] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,306] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,306] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,307] INFO Client environment:os.memory.free=980MB (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,310] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,310] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,318] INFO Initiating client connection, connectString=zookeeper:32181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@6356695f (org.apache.zookeeper.ZooKeeper) | |
kafka_1 | [2020-07-08 17:56:41,335] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) | |
kafka_1 | [2020-07-08 17:56:41,356] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) | |
kafka_1 | [2020-07-08 17:56:41,363] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) | |
kafka_1 | [2020-07-08 17:56:41,421] INFO Opening socket connection to server zookeeper/172.22.0.3:32181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) | |
kafka_1 | [2020-07-08 17:56:41,435] INFO Socket connection established, initiating session, client: /172.22.0.6:33978, server: zookeeper/172.22.0.3:32181 (org.apache.zookeeper.ClientCnxn) | |
kafka_1 | [2020-07-08 17:56:41,450] INFO Session establishment complete on server zookeeper/172.22.0.3:32181, sessionid = 0x10007da654d0001, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) | |
kafka_1 | [2020-07-08 17:56:41,463] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient) | |
kafka_1 | [2020-07-08 17:56:42,386] INFO Cluster ID = epSKzw_IS8iIYBfj3Q708A (kafka.server.KafkaServer) | |
kafka_1 | [2020-07-08 17:56:42,800] INFO KafkaConfig values: | |
kafka_1 | advertised.host.name = null | |
kafka_1 | advertised.listeners = PLAINTEXT://kafka:29092 | |
kafka_1 | advertised.port = null | |
kafka_1 | alter.config.policy.class.name = null | |
kafka_1 | alter.log.dirs.replication.quota.window.num = 11 | |
kafka_1 | alter.log.dirs.replication.quota.window.size.seconds = 1 | |
kafka_1 | authorizer.class.name = | |
kafka_1 | auto.create.topics.enable = true | |
kafka_1 | auto.leader.rebalance.enable = true | |
kafka_1 | background.threads = 10 | |
kafka_1 | broker.id = 1 | |
kafka_1 | broker.id.generation.enable = true | |
kafka_1 | broker.rack = null | |
kafka_1 | client.quota.callback.class = null | |
kafka_1 | compression.type = producer | |
kafka_1 | connection.failed.authentication.delay.ms = 100 | |
kafka_1 | connections.max.idle.ms = 600000 | |
kafka_1 | connections.max.reauth.ms = 0 | |
kafka_1 | control.plane.listener.name = null | |
kafka_1 | controlled.shutdown.enable = true | |
kafka_1 | controlled.shutdown.max.retries = 3 | |
kafka_1 | controlled.shutdown.retry.backoff.ms = 5000 | |
kafka_1 | controller.socket.timeout.ms = 30000 | |
kafka_1 | create.topic.policy.class.name = null | |
kafka_1 | default.replication.factor = 1 | |
kafka_1 | delegation.token.expiry.check.interval.ms = 3600000 | |
kafka_1 | delegation.token.expiry.time.ms = 86400000 | |
kafka_1 | delegation.token.master.key = null | |
kafka_1 | delegation.token.max.lifetime.ms = 604800000 | |
kafka_1 | delete.records.purgatory.purge.interval.requests = 1 | |
kafka_1 | delete.topic.enable = true | |
kafka_1 | fetch.max.bytes = 57671680 | |
kafka_1 | fetch.purgatory.purge.interval.requests = 1000 | |
kafka_1 | group.initial.rebalance.delay.ms = 3000 | |
kafka_1 | group.max.session.timeout.ms = 1800000 | |
kafka_1 | group.max.size = 2147483647 | |
kafka_1 | group.min.session.timeout.ms = 6000 | |
kafka_1 | host.name = | |
kafka_1 | inter.broker.listener.name = null | |
kafka_1 | inter.broker.protocol.version = 2.5-IV0 | |
kafka_1 | kafka.metrics.polling.interval.secs = 10 | |
kafka_1 | kafka.metrics.reporters = [] | |
kafka_1 | leader.imbalance.check.interval.seconds = 300 | |
kafka_1 | leader.imbalance.per.broker.percentage = 10 | |
kafka_1 | listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL | |
kafka_1 | listeners = PLAINTEXT://0.0.0.0:29092 | |
kafka_1 | log.cleaner.backoff.ms = 15000 | |
kafka_1 | log.cleaner.dedupe.buffer.size = 134217728 | |
kafka_1 | log.cleaner.delete.retention.ms = 86400000 | |
kafka_1 | log.cleaner.enable = true | |
kafka_1 | log.cleaner.io.buffer.load.factor = 0.9 | |
kafka_1 | log.cleaner.io.buffer.size = 524288 | |
kafka_1 | log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 | |
kafka_1 | log.cleaner.max.compaction.lag.ms = 9223372036854775807 | |
kafka_1 | log.cleaner.min.cleanable.ratio = 0.5 | |
kafka_1 | log.cleaner.min.compaction.lag.ms = 0 | |
kafka_1 | log.cleaner.threads = 1 | |
kafka_1 | log.cleanup.policy = [delete] | |
kafka_1 | log.dir = /tmp/kafka-logs | |
kafka_1 | log.dirs = /var/lib/kafka/data | |
kafka_1 | log.flush.interval.messages = 9223372036854775807 | |
kafka_1 | log.flush.interval.ms = null | |
kafka_1 | log.flush.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.flush.scheduler.interval.ms = 9223372036854775807 | |
kafka_1 | log.flush.start.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.index.interval.bytes = 4096 | |
kafka_1 | log.index.size.max.bytes = 10485760 | |
kafka_1 | log.message.downconversion.enable = true | |
kafka_1 | log.message.format.version = 2.5-IV0 | |
kafka_1 | log.message.timestamp.difference.max.ms = 9223372036854775807 | |
kafka_1 | log.message.timestamp.type = CreateTime | |
kafka_1 | log.preallocate = false | |
kafka_1 | log.retention.bytes = -1 | |
kafka_1 | log.retention.check.interval.ms = 300000 | |
kafka_1 | log.retention.hours = 168 | |
kafka_1 | log.retention.minutes = null | |
kafka_1 | log.retention.ms = null | |
kafka_1 | log.roll.hours = 168 | |
kafka_1 | log.roll.jitter.hours = 0 | |
kafka_1 | log.roll.jitter.ms = null | |
kafka_1 | log.roll.ms = null | |
kafka_1 | log.segment.bytes = 1073741824 | |
kafka_1 | log.segment.delete.delay.ms = 60000 | |
kafka_1 | max.connections = 2147483647 | |
kafka_1 | max.connections.per.ip = 2147483647 | |
kafka_1 | max.connections.per.ip.overrides = | |
kafka_1 | max.incremental.fetch.session.cache.slots = 1000 | |
kafka_1 | message.max.bytes = 1048588 | |
kafka_1 | metric.reporters = [] | |
kafka_1 | metrics.num.samples = 2 | |
kafka_1 | metrics.recording.level = INFO | |
kafka_1 | metrics.sample.window.ms = 30000 | |
kafka_1 | min.insync.replicas = 1 | |
kafka_1 | num.io.threads = 8 | |
kafka_1 | num.network.threads = 3 | |
kafka_1 | num.partitions = 1 | |
kafka_1 | num.recovery.threads.per.data.dir = 1 | |
kafka_1 | num.replica.alter.log.dirs.threads = null | |
kafka_1 | num.replica.fetchers = 1 | |
kafka_1 | offset.metadata.max.bytes = 4096 | |
kafka_1 | offsets.commit.required.acks = -1 | |
kafka_1 | offsets.commit.timeout.ms = 5000 | |
kafka_1 | offsets.load.buffer.size = 5242880 | |
kafka_1 | offsets.retention.check.interval.ms = 600000 | |
kafka_1 | offsets.retention.minutes = 10080 | |
kafka_1 | offsets.topic.compression.codec = 0 | |
kafka_1 | offsets.topic.num.partitions = 50 | |
kafka_1 | offsets.topic.replication.factor = 1 | |
kafka_1 | offsets.topic.segment.bytes = 104857600 | |
kafka_1 | password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding | |
kafka_1 | password.encoder.iterations = 4096 | |
kafka_1 | password.encoder.key.length = 128 | |
kafka_1 | password.encoder.keyfactory.algorithm = null | |
kafka_1 | password.encoder.old.secret = null | |
kafka_1 | password.encoder.secret = null | |
kafka_1 | port = 9092 | |
kafka_1 | principal.builder.class = null | |
kafka_1 | producer.purgatory.purge.interval.requests = 1000 | |
kafka_1 | queued.max.request.bytes = -1 | |
kafka_1 | queued.max.requests = 500 | |
kafka_1 | quota.consumer.default = 9223372036854775807 | |
kafka_1 | quota.producer.default = 9223372036854775807 | |
kafka_1 | quota.window.num = 11 | |
kafka_1 | quota.window.size.seconds = 1 | |
kafka_1 | replica.fetch.backoff.ms = 1000 | |
kafka_1 | replica.fetch.max.bytes = 1048576 | |
kafka_1 | replica.fetch.min.bytes = 1 | |
kafka_1 | replica.fetch.response.max.bytes = 10485760 | |
kafka_1 | replica.fetch.wait.max.ms = 500 | |
kafka_1 | replica.high.watermark.checkpoint.interval.ms = 5000 | |
kafka_1 | replica.lag.time.max.ms = 30000 | |
kafka_1 | replica.selector.class = null | |
kafka_1 | replica.socket.receive.buffer.bytes = 65536 | |
kafka_1 | replica.socket.timeout.ms = 30000 | |
kafka_1 | replication.quota.window.num = 11 | |
kafka_1 | replication.quota.window.size.seconds = 1 | |
kafka_1 | request.timeout.ms = 30000 | |
kafka_1 | reserved.broker.max.id = 1000 | |
kafka_1 | sasl.client.callback.handler.class = null | |
kafka_1 | sasl.enabled.mechanisms = [GSSAPI] | |
kafka_1 | sasl.jaas.config = null | |
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit | |
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000 | |
kafka_1 | sasl.kerberos.principal.to.local.rules = [DEFAULT] | |
kafka_1 | sasl.kerberos.service.name = null | |
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05 | |
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8 | |
kafka_1 | sasl.login.callback.handler.class = null | |
kafka_1 | sasl.login.class = null | |
kafka_1 | sasl.login.refresh.buffer.seconds = 300 | |
kafka_1 | sasl.login.refresh.min.period.seconds = 60 | |
kafka_1 | sasl.login.refresh.window.factor = 0.8 | |
kafka_1 | sasl.login.refresh.window.jitter = 0.05 | |
kafka_1 | sasl.mechanism.inter.broker.protocol = GSSAPI | |
kafka_1 | sasl.server.callback.handler.class = null | |
kafka_1 | security.inter.broker.protocol = PLAINTEXT | |
kafka_1 | security.providers = null | |
kafka_1 | socket.receive.buffer.bytes = 102400 | |
kafka_1 | socket.request.max.bytes = 104857600 | |
kafka_1 | socket.send.buffer.bytes = 102400 | |
kafka_1 | ssl.cipher.suites = [] | |
kafka_1 | ssl.client.auth = none | |
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] | |
kafka_1 | ssl.endpoint.identification.algorithm = https | |
kafka_1 | ssl.key.password = null | |
kafka_1 | ssl.keymanager.algorithm = SunX509 | |
kafka_1 | ssl.keystore.location = null | |
kafka_1 | ssl.keystore.password = null | |
kafka_1 | ssl.keystore.type = JKS | |
kafka_1 | ssl.principal.mapping.rules = DEFAULT | |
kafka_1 | ssl.protocol = TLS | |
kafka_1 | ssl.provider = null | |
kafka_1 | ssl.secure.random.implementation = null | |
kafka_1 | ssl.trustmanager.algorithm = PKIX | |
kafka_1 | ssl.truststore.location = null | |
kafka_1 | ssl.truststore.password = null | |
kafka_1 | ssl.truststore.type = JKS | |
kafka_1 | transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 | |
kafka_1 | transaction.max.timeout.ms = 900000 | |
kafka_1 | transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 | |
kafka_1 | transaction.state.log.load.buffer.size = 5242880 | |
kafka_1 | transaction.state.log.min.isr = 2 | |
kafka_1 | transaction.state.log.num.partitions = 50 | |
kafka_1 | transaction.state.log.replication.factor = 3 | |
kafka_1 | transaction.state.log.segment.bytes = 104857600 | |
kafka_1 | transactional.id.expiration.ms = 604800000 | |
kafka_1 | unclean.leader.election.enable = false | |
kafka_1 | zookeeper.clientCnxnSocket = null | |
kafka_1 | zookeeper.connect = zookeeper:32181 | |
kafka_1 | zookeeper.connection.timeout.ms = null | |
kafka_1 | zookeeper.max.in.flight.requests = 10 | |
kafka_1 | zookeeper.session.timeout.ms = 18000 | |
kafka_1 | zookeeper.set.acl = false | |
kafka_1 | zookeeper.ssl.cipher.suites = null | |
kafka_1 | zookeeper.ssl.client.enable = false | |
kafka_1 | zookeeper.ssl.crl.enable = false | |
kafka_1 | zookeeper.ssl.enabled.protocols = null | |
kafka_1 | zookeeper.ssl.endpoint.identification.algorithm = HTTPS | |
kafka_1 | zookeeper.ssl.keystore.location = null | |
kafka_1 | zookeeper.ssl.keystore.password = null | |
kafka_1 | zookeeper.ssl.keystore.type = null | |
kafka_1 | zookeeper.ssl.ocsp.enable = false | |
kafka_1 | zookeeper.ssl.protocol = TLSv1.2 | |
kafka_1 | zookeeper.ssl.truststore.location = null | |
kafka_1 | zookeeper.ssl.truststore.password = null | |
kafka_1 | zookeeper.ssl.truststore.type = null | |
kafka_1 | zookeeper.sync.time.ms = 2000 | |
kafka_1 | (kafka.server.KafkaConfig) | |
kafka_1 | [2020-07-08 17:56:42,892] INFO KafkaConfig values: | |
kafka_1 | advertised.host.name = null | |
kafka_1 | advertised.listeners = PLAINTEXT://kafka:29092 | |
kafka_1 | advertised.port = null | |
kafka_1 | alter.config.policy.class.name = null | |
kafka_1 | alter.log.dirs.replication.quota.window.num = 11 | |
kafka_1 | alter.log.dirs.replication.quota.window.size.seconds = 1 | |
kafka_1 | authorizer.class.name = | |
kafka_1 | auto.create.topics.enable = true | |
kafka_1 | auto.leader.rebalance.enable = true | |
kafka_1 | background.threads = 10 | |
kafka_1 | broker.id = 1 | |
kafka_1 | broker.id.generation.enable = true | |
kafka_1 | broker.rack = null | |
kafka_1 | client.quota.callback.class = null | |
kafka_1 | compression.type = producer | |
kafka_1 | connection.failed.authentication.delay.ms = 100 | |
kafka_1 | connections.max.idle.ms = 600000 | |
kafka_1 | connections.max.reauth.ms = 0 | |
kafka_1 | control.plane.listener.name = null | |
kafka_1 | controlled.shutdown.enable = true | |
kafka_1 | controlled.shutdown.max.retries = 3 | |
kafka_1 | controlled.shutdown.retry.backoff.ms = 5000 | |
kafka_1 | controller.socket.timeout.ms = 30000 | |
kafka_1 | create.topic.policy.class.name = null | |
kafka_1 | default.replication.factor = 1 | |
kafka_1 | delegation.token.expiry.check.interval.ms = 3600000 | |
kafka_1 | delegation.token.expiry.time.ms = 86400000 | |
kafka_1 | delegation.token.master.key = null | |
kafka_1 | delegation.token.max.lifetime.ms = 604800000 | |
kafka_1 | delete.records.purgatory.purge.interval.requests = 1 | |
kafka_1 | delete.topic.enable = true | |
kafka_1 | fetch.max.bytes = 57671680 | |
kafka_1 | fetch.purgatory.purge.interval.requests = 1000 | |
kafka_1 | group.initial.rebalance.delay.ms = 3000 | |
kafka_1 | group.max.session.timeout.ms = 1800000 | |
kafka_1 | group.max.size = 2147483647 | |
kafka_1 | group.min.session.timeout.ms = 6000 | |
kafka_1 | host.name = | |
kafka_1 | inter.broker.listener.name = null | |
kafka_1 | inter.broker.protocol.version = 2.5-IV0 | |
kafka_1 | kafka.metrics.polling.interval.secs = 10 | |
kafka_1 | kafka.metrics.reporters = [] | |
kafka_1 | leader.imbalance.check.interval.seconds = 300 | |
kafka_1 | leader.imbalance.per.broker.percentage = 10 | |
kafka_1 | listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL | |
kafka_1 | listeners = PLAINTEXT://0.0.0.0:29092 | |
kafka_1 | log.cleaner.backoff.ms = 15000 | |
kafka_1 | log.cleaner.dedupe.buffer.size = 134217728 | |
kafka_1 | log.cleaner.delete.retention.ms = 86400000 | |
kafka_1 | log.cleaner.enable = true | |
kafka_1 | log.cleaner.io.buffer.load.factor = 0.9 | |
kafka_1 | log.cleaner.io.buffer.size = 524288 | |
kafka_1 | log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 | |
kafka_1 | log.cleaner.max.compaction.lag.ms = 9223372036854775807 | |
kafka_1 | log.cleaner.min.cleanable.ratio = 0.5 | |
kafka_1 | log.cleaner.min.compaction.lag.ms = 0 | |
kafka_1 | log.cleaner.threads = 1 | |
kafka_1 | log.cleanup.policy = [delete] | |
kafka_1 | log.dir = /tmp/kafka-logs | |
kafka_1 | log.dirs = /var/lib/kafka/data | |
kafka_1 | log.flush.interval.messages = 9223372036854775807 | |
kafka_1 | log.flush.interval.ms = null | |
kafka_1 | log.flush.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.flush.scheduler.interval.ms = 9223372036854775807 | |
kafka_1 | log.flush.start.offset.checkpoint.interval.ms = 60000 | |
kafka_1 | log.index.interval.bytes = 4096 | |
kafka_1 | log.index.size.max.bytes = 10485760 | |
kafka_1 | log.message.downconversion.enable = true | |
kafka_1 | log.message.format.version = 2.5-IV0 | |
kafka_1 | log.message.timestamp.difference.max.ms = 9223372036854775807 | |
kafka_1 | log.message.timestamp.type = CreateTime | |
kafka_1 | log.preallocate = false | |
kafka_1 | log.retention.bytes = -1 | |
kafka_1 | log.retention.check.interval.ms = 300000 | |
kafka_1 | log.retention.hours = 168 | |
kafka_1 | log.retention.minutes = null | |
kafka_1 | log.retention.ms = null | |
kafka_1 | log.roll.hours = 168 | |
kafka_1 | log.roll.jitter.hours = 0 | |
kafka_1 | log.roll.jitter.ms = null | |
kafka_1 | log.roll.ms = null | |
kafka_1 | log.segment.bytes = 1073741824 | |
kafka_1 | log.segment.delete.delay.ms = 60000 | |
kafka_1 | max.connections = 2147483647 | |
kafka_1 | max.connections.per.ip = 2147483647 | |
kafka_1 | max.connections.per.ip.overrides = | |
kafka_1 | max.incremental.fetch.session.cache.slots = 1000 | |
kafka_1 | message.max.bytes = 1048588 | |
kafka_1 | metric.reporters = [] | |
kafka_1 | metrics.num.samples = 2 | |
kafka_1 | metrics.recording.level = INFO | |
kafka_1 | metrics.sample.window.ms = 30000 | |
kafka_1 | min.insync.replicas = 1 | |
kafka_1 | num.io.threads = 8 | |
kafka_1 | num.network.threads = 3 | |
kafka_1 | num.partitions = 1 | |
kafka_1 | num.recovery.threads.per.data.dir = 1 | |
kafka_1 | num.replica.alter.log.dirs.threads = null | |
kafka_1 | num.replica.fetchers = 1 | |
kafka_1 | offset.metadata.max.bytes = 4096 | |
kafka_1 | offsets.commit.required.acks = -1 | |
kafka_1 | offsets.commit.timeout.ms = 5000 | |
kafka_1 | offsets.load.buffer.size = 5242880 | |
kafka_1 | offsets.retention.check.interval.ms = 600000 | |
kafka_1 | offsets.retention.minutes = 10080 | |
kafka_1 | offsets.topic.compression.codec = 0 | |
kafka_1 | offsets.topic.num.partitions = 50 | |
kafka_1 | offsets.topic.replication.factor = 1 | |
kafka_1 | offsets.topic.segment.bytes = 104857600 | |
kafka_1 | password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding | |
kafka_1 | password.encoder.iterations = 4096 | |
kafka_1 | password.encoder.key.length = 128 | |
kafka_1 | password.encoder.keyfactory.algorithm = null | |
kafka_1 | password.encoder.old.secret = null | |
kafka_1 | password.encoder.secret = null | |
kafka_1 | port = 9092 | |
kafka_1 | principal.builder.class = null | |
kafka_1 | producer.purgatory.purge.interval.requests = 1000 | |
kafka_1 | queued.max.request.bytes = -1 | |
kafka_1 | queued.max.requests = 500 | |
kafka_1 | quota.consumer.default = 9223372036854775807 | |
kafka_1 | quota.producer.default = 9223372036854775807 | |
kafka_1 | quota.window.num = 11 | |
kafka_1 | quota.window.size.seconds = 1 | |
kafka_1 | replica.fetch.backoff.ms = 1000 | |
kafka_1 | replica.fetch.max.bytes = 1048576 | |
kafka_1 | replica.fetch.min.bytes = 1 | |
kafka_1 | replica.fetch.response.max.bytes = 10485760 | |
kafka_1 | replica.fetch.wait.max.ms = 500 | |
kafka_1 | replica.high.watermark.checkpoint.interval.ms = 5000 | |
kafka_1 | replica.lag.time.max.ms = 30000 | |
kafka_1 | replica.selector.class = null | |
kafka_1 | replica.socket.receive.buffer.bytes = 65536 | |
kafka_1 | replica.socket.timeout.ms = 30000 | |
kafka_1 | replication.quota.window.num = 11 | |
kafka_1 | replication.quota.window.size.seconds = 1 | |
kafka_1 | request.timeout.ms = 30000 | |
kafka_1 | reserved.broker.max.id = 1000 | |
kafka_1 | sasl.client.callback.handler.class = null | |
kafka_1 | sasl.enabled.mechanisms = [GSSAPI] | |
kafka_1 | sasl.jaas.config = null | |
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit | |
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000 | |
kafka_1 | sasl.kerberos.principal.to.local.rules = [DEFAULT] | |
kafka_1 | sasl.kerberos.service.name = null | |
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05 | |
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8 | |
kafka_1 | sasl.login.callback.handler.class = null | |
kafka_1 | sasl.login.class = null | |
kafka_1 | sasl.login.refresh.buffer.seconds = 300 | |
kafka_1 | sasl.login.refresh.min.period.seconds = 60 | |
kafka_1 | sasl.login.refresh.window.factor = 0.8 | |
kafka_1 | sasl.login.refresh.window.jitter = 0.05 | |
kafka_1 | sasl.mechanism.inter.broker.protocol = GSSAPI | |
kafka_1 | sasl.server.callback.handler.class = null | |
kafka_1 | security.inter.broker.protocol = PLAINTEXT | |
kafka_1 | security.providers = null | |
kafka_1 | socket.receive.buffer.bytes = 102400 | |
kafka_1 | socket.request.max.bytes = 104857600 | |
kafka_1 | socket.send.buffer.bytes = 102400 | |
kafka_1 | ssl.cipher.suites = [] | |
kafka_1 | ssl.client.auth = none | |
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] | |
kafka_1 | ssl.endpoint.identification.algorithm = https | |
kafka_1 | ssl.key.password = null | |
kafka_1 | ssl.keymanager.algorithm = SunX509 | |
kafka_1 | ssl.keystore.location = null | |
kafka_1 | ssl.keystore.password = null | |
kafka_1 | ssl.keystore.type = JKS | |
kafka_1 | ssl.principal.mapping.rules = DEFAULT | |
kafka_1 | ssl.protocol = TLS | |
kafka_1 | ssl.provider = null | |
kafka_1 | ssl.secure.random.implementation = null | |
kafka_1 | ssl.trustmanager.algorithm = PKIX | |
kafka_1 | ssl.truststore.location = null | |
kafka_1 | ssl.truststore.password = null | |
kafka_1 | ssl.truststore.type = JKS | |
kafka_1 | transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 | |
kafka_1 | transaction.max.timeout.ms = 900000 | |
kafka_1 | transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 | |
kafka_1 | transaction.state.log.load.buffer.size = 5242880 | |
kafka_1 | transaction.state.log.min.isr = 2 | |
kafka_1 | transaction.state.log.num.partitions = 50 | |
kafka_1 | transaction.state.log.replication.factor = 3 | |
kafka_1 | transaction.state.log.segment.bytes = 104857600 | |
kafka_1 | transactional.id.expiration.ms = 604800000 | |
kafka_1 | unclean.leader.election.enable = false | |
kafka_1 | zookeeper.clientCnxnSocket = null | |
kafka_1 | zookeeper.connect = zookeeper:32181 | |
kafka_1 | zookeeper.connection.timeout.ms = null | |
kafka_1 | zookeeper.max.in.flight.requests = 10 | |
kafka_1 | zookeeper.session.timeout.ms = 18000 | |
kafka_1 | zookeeper.set.acl = false | |
kafka_1 | zookeeper.ssl.cipher.suites = null | |
kafka_1 | zookeeper.ssl.client.enable = false | |
kafka_1 | zookeeper.ssl.crl.enable = false | |
kafka_1 | zookeeper.ssl.enabled.protocols = null | |
kafka_1 | zookeeper.ssl.endpoint.identification.algorithm = HTTPS | |
kafka_1 | zookeeper.ssl.keystore.location = null | |
kafka_1 | zookeeper.ssl.keystore.password = null | |
kafka_1 | zookeeper.ssl.keystore.type = null | |
kafka_1 | zookeeper.ssl.ocsp.enable = false | |
kafka_1 | zookeeper.ssl.protocol = TLSv1.2 | |
kafka_1 | zookeeper.ssl.truststore.location = null | |
kafka_1 | zookeeper.ssl.truststore.password = null | |
kafka_1 | zookeeper.ssl.truststore.type = null | |
kafka_1 | zookeeper.sync.time.ms = 2000 | |
kafka_1 | (kafka.server.KafkaConfig) | |
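The block above is the broker's effective server configuration as logged by KafkaConfig at startup. For reference, the same settings can be read back at runtime through Kafka's Java AdminClient; this is a minimal sketch, assuming the kafka:29092 listener this broker registers later in the log and broker.id = 1 as shown by the [KafkaServer id=1] lines.

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.Config;
    import org.apache.kafka.common.config.ConfigResource;

    public class DescribeBrokerConfig {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // kafka:29092 is the listener this broker advertises in the log below;
            // adjust for your own network setup.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:29092");
            try (Admin admin = Admin.create(props)) {
                // Broker id "1", matching the [KafkaServer id=1] lines in this log.
                ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");
                Config config = admin.describeConfigs(Collections.singleton(broker))
                                     .all().get().get(broker);
                // Each printed entry (e.g. min.insync.replicas = 1) should match the dump above.
                config.entries().forEach(e -> System.out.println(e.name() + " = " + e.value()));
            }
        }
    }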
kafka_1 | [2020-07-08 17:56:43,076] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) | |
kafka_1 | [2020-07-08 17:56:43,076] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) | |
kafka_1 | [2020-07-08 17:56:43,080] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) | |
kafka_1 | [2020-07-08 17:56:43,215] INFO Loading logs. (kafka.log.LogManager) | |
kafka_1 | [2020-07-08 17:56:43,551] INFO [Log partition=__confluent.support.metrics-0, dir=/var/lib/kafka/data] Loading producer state till offset 1 with message format version 2 (kafka.log.Log) | |
kafka_1 | [2020-07-08 17:56:43,596] INFO [ProducerStateManager partition=__confluent.support.metrics-0] Loading producer state from snapshot file '/var/lib/kafka/data/__confluent.support.metrics-0/00000000000000000001.snapshot' (kafka.log.ProducerStateManager) | |
kafka_1 | [2020-07-08 17:56:43,662] INFO [Log partition=__confluent.support.metrics-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 1 in 349 ms (kafka.log.Log) | |
kafka_1 | [2020-07-08 17:56:43,747] INFO Logs loading complete in 527 ms. (kafka.log.LogManager) | |
kafka_1 | [2020-07-08 17:56:43,820] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) | |
kafka_1 | [2020-07-08 17:56:43,823] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) | |
kafka_1 | [2020-07-08 17:56:43,845] INFO Starting the log cleaner (kafka.log.LogCleaner) | |
kafka_1 | [2020-07-08 17:56:44,088] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) | |
kafka_1 | [2020-07-08 17:56:45,619] INFO Awaiting socket connections on 0.0.0.0:29092. (kafka.network.Acceptor) | |
kafka_1 | [2020-07-08 17:56:45,840] INFO [SocketServer brokerId=1] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,29092,ListenerName(PLAINTEXT),PLAINTEXT) (kafka.network.SocketServer) | |
kafka_1 | [2020-07-08 17:56:45,843] INFO [SocketServer brokerId=1] Started 1 acceptor threads for data-plane (kafka.network.SocketServer) | |
kafka_1 | [2020-07-08 17:56:45,915] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:45,925] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:45,925] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:45,934] INFO [ExpirationReaper-1-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:46,004] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) | |
kafka_1 | [2020-07-08 17:56:46,168] INFO Creating /brokers/ids/1 (is it secure? false) (kafka.zk.KafkaZkClient) | |
kafka_1 | [2020-07-08 17:56:46,243] INFO Stat of the created znode at /brokers/ids/1 is: 54,54,1594231006213,1594231006213,1,0,0,72066228621737985,182,0,54 | |
kafka_1 | (kafka.zk.KafkaZkClient) | |
kafka_1 | [2020-07-08 17:56:46,249] INFO Registered broker 1 at path /brokers/ids/1 with addresses: ArrayBuffer(EndPoint(kafka,29092,ListenerName(PLAINTEXT),PLAINTEXT)), czxid (broker epoch): 54 (kafka.zk.KafkaZkClient) | |
kafka_1 | [2020-07-08 17:56:46,543] INFO [ControllerEventThread controllerId=1] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) | |
kafka_1 | [2020-07-08 17:56:46,621] INFO [ExpirationReaper-1-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:46,623] INFO [ExpirationReaper-1-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:46,661] INFO [ExpirationReaper-1-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:46,683] INFO [Controller id=1] 1 successfully elected as the controller. Epoch incremented to 2 and epoch zk version is now 2 (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,690] INFO [Controller id=1] Registering handlers (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,717] INFO [Controller id=1] Deleting log dir event notifications (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,769] INFO [Controller id=1] Deleting isr change notifications (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,785] INFO [Controller id=1] Initializing controller context (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,905] INFO [Controller id=1] Initialized broker epochs cache: Map(1 -> 54) (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,961] INFO [GroupCoordinator 1]: Starting up. (kafka.coordinator.group.GroupCoordinator) | |
kafka_1 | [2020-07-08 17:56:46,967] DEBUG [Controller id=1] Register BrokerModifications handler for Set(1) (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:46,997] INFO [GroupCoordinator 1]: Startup complete. (kafka.coordinator.group.GroupCoordinator) | |
kafka_1 | [2020-07-08 17:56:47,081] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 79 milliseconds. (kafka.coordinator.group.GroupMetadataManager) | |
kafka_1 | [2020-07-08 17:56:47,096] DEBUG [Channel manager on controller 1]: Controller 1 trying to connect to broker 1 (kafka.controller.ControllerChannelManager) | |
kafka_1 | [2020-07-08 17:56:47,120] INFO [ProducerId Manager 1]: Acquired new producerId block (brokerId:1,blockStartProducerId:1000,blockEndProducerId:1999) by writing to Zk with path version 2 (kafka.coordinator.transaction.ProducerIdManager) | |
cloudera_1 | * Started Hadoop secondarynamenode: | |
cloudera_1 | Start Components | |
cloudera_1 | Press Ctrl+P and Ctrl+Q to background this process. | |
cloudera_1 | Use exec command to open a new bash instance for this instance (Eg. "docker exec -i -t CONTAINER_ID bash"). Container ID can be obtained using "docker ps" command. | |
cloudera_1 | Start Terminal | |
cloudera_1 | Press Ctrl+C to stop instance. | |
kafka_1 | [2020-07-08 17:56:47,228] INFO [Controller id=1] Currently active brokers in the cluster: Set(1) (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,229] INFO [Controller id=1] Currently shutting brokers in the cluster: Set() (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,232] INFO [Controller id=1] Current list of topics in the cluster: Set(__confluent.support.metrics) (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,235] INFO [RequestSendThread controllerId=1] Starting (kafka.controller.RequestSendThread) | |
kafka_1 | [2020-07-08 17:56:47,242] INFO [Controller id=1] Fetching topic deletions in progress (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,267] INFO [Controller id=1] List of topics to be deleted: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,270] INFO [Controller id=1] List of topics ineligible for deletion: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,272] INFO [Controller id=1] Initializing topic deletion manager (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,270] INFO [TransactionCoordinator id=1] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) | |
kafka_1 | [2020-07-08 17:56:47,288] INFO [Topic Deletion Manager 1] Initializing manager with initial deletions: Set(), initial ineligible deletions: Set() (kafka.controller.TopicDeletionManager) | |
kafka_1 | [2020-07-08 17:56:47,296] INFO [Controller id=1] Sending update metadata request (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:47,303] INFO [TransactionCoordinator id=1] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) | |
kafka_1 | [2020-07-08 17:56:47,304] INFO [Transaction Marker Channel Manager 1]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) | |
kafka_1 | [2020-07-08 17:56:47,454] INFO [ReplicaStateMachine controllerId=1] Initializing replica state (kafka.controller.ZkReplicaStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,515] INFO [ReplicaStateMachine controllerId=1] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,525] INFO [RequestSendThread controllerId=1] Controller 1 connected to kafka:29092 (id: 1 rack: null) for sending state change requests (kafka.controller.RequestSendThread) | |
kafka_1 | [2020-07-08 17:56:47,618] INFO [ExpirationReaper-1-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) | |
kafka_1 | [2020-07-08 17:56:47,691] TRACE [Controller id=1 epoch=2] Changed state of replica 1 for partition __confluent.support.metrics-0 from OnlineReplica to OnlineReplica (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:47,756] TRACE [Controller id=1 epoch=2] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__confluent.support.metrics', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) to broker 1 for partition __confluent.support.metrics-0 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:47,785] TRACE [Controller id=1 epoch=2] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='__confluent.support.metrics', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) to brokers Set(1) for partition __confluent.support.metrics-0 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:47,805] INFO [ReplicaStateMachine controllerId=1] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,807] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) | |
kafka_1 | [2020-07-08 17:56:47,815] DEBUG [ReplicaStateMachine controllerId=1] Started replica state machine with initial state -> Map([Topic=__confluent.support.metrics,Partition=0,Replica=1] -> OnlineReplica) (kafka.controller.ZkReplicaStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,839] INFO [PartitionStateMachine controllerId=1] Initializing partition state (kafka.controller.ZkPartitionStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,847] INFO [PartitionStateMachine controllerId=1] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,891] DEBUG [PartitionStateMachine controllerId=1] Started partition state machine with initial state -> Map(__confluent.support.metrics-0 -> OnlinePartition) (kafka.controller.ZkPartitionStateMachine) | |
kafka_1 | [2020-07-08 17:56:47,916] INFO [Controller id=1] Ready to serve as the new controller with epoch 2 (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,012] INFO [Controller id=1] Partitions undergoing preferred replica election: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,181] INFO [SocketServer brokerId=1] Started data-plane processors for 1 acceptors (kafka.network.SocketServer) | |
kafka_1 | [2020-07-08 17:56:48,192] INFO [Controller id=1] Partitions that completed preferred replica election: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,238] INFO [Controller id=1] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,245] INFO [Controller id=1] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,247] INFO [Controller id=1] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,260] INFO Kafka version: 5.5.0-ccs (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:48,262] INFO Kafka commitId: 606822a624024828 (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:48,264] INFO Kafka startTimeMs: 1594231008205 (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:48,267] INFO [KafkaServer id=1] started (kafka.server.KafkaServer) | |
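At this point the broker is fully started. A quick way to confirm it is reachable from a client is to describe the cluster; a sketch against the same kafka:29092 listener follows. The cluster id it returns should match the one the metrics producer logs further down (epSKzw_IS8iIYBfj3Q708A).

    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.DescribeClusterResult;

    public class ClusterCheck {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:29092");
            try (Admin admin = Admin.create(props)) {
                DescribeClusterResult cluster = admin.describeCluster();
                // Should print the same cluster id the metrics producer reports below.
                System.out.println("cluster id: " + cluster.clusterId().get());
                System.out.println("brokers:    " + cluster.nodes().get());
            }
        }
    }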
kafka_1 | [2020-07-08 17:56:48,315] INFO Waiting until monitored service is ready for metrics collection (io.confluent.support.metrics.BaseMetricsReporter) | |
kafka_1 | [2020-07-08 17:56:48,319] INFO Monitored service is now ready (io.confluent.support.metrics.BaseMetricsReporter) | |
kafka_1 | [2020-07-08 17:56:48,328] INFO Attempting to collect and submit metrics (io.confluent.support.metrics.BaseMetricsReporter) | |
kafka_1 | [2020-07-08 17:56:48,392] INFO [Controller id=1] Starting the controller scheduler (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:48,702] TRACE [Controller id=1 epoch=2] Received response {error_code=0,_tagged_fields={}} for request UPDATE_METADATA with correlation id 0 sent to broker kafka:29092 (id: 1 rack: null) (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:48,742] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__confluent.support.metrics', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 1 epoch 2 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:48,905] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 1 epoch 2 starting the become-leader transition for partition __confluent.support.metrics-0 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:48,910] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(__confluent.support.metrics-0) (kafka.server.ReplicaFetcherManager) | |
kafka_1 | [2020-07-08 17:56:49,025] INFO [Partition __confluent.support.metrics-0 broker=1] Log loaded for partition __confluent.support.metrics-0 with initial high watermark 1 (kafka.cluster.Partition) | |
kafka_1 | [2020-07-08 17:56:49,045] INFO [Partition __confluent.support.metrics-0 broker=1] __confluent.support.metrics-0 starts at leader epoch 0 from offset 1 with high watermark 1. Previous leader epoch was -1. (kafka.cluster.Partition) | |
kafka_1 | [2020-07-08 17:56:49,111] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 1 epoch 2 with correlation id 1 for partition __confluent.support.metrics-0 (last update controller epoch 1) (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:49,126] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 1 epoch 2 for the become-leader transition for partition __confluent.support.metrics-0 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:49,241] WARN The replication factor of topic __confluent.support.metrics is 1, which is less than the desired replication factor of 3. If you happen to add more brokers to this cluster, then it is important to increase the replication factor of the topic to eventually 3 to ensure reliable and durable metrics collection. (io.confluent.support.metrics.common.kafka.KafkaUtilities) | |
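The WARN above is expected on this single-broker stack: the __confluent.support.metrics topic was created with replication factor 1. If brokers were later added, one way to raise the factor is a partition reassignment; the sketch below uses the AdminClient reassignment API available in this Kafka version and assumes hypothetical broker ids 2 and 3 alongside broker 1 (only broker 1 exists in this log).

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Optional;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewPartitionReassignment;
    import org.apache.kafka.common.TopicPartition;

    public class RaiseReplicationFactor {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:29092");
            try (Admin admin = Admin.create(props)) {
                TopicPartition tp = new TopicPartition("__confluent.support.metrics", 0);
                // Target replica list [1, 2, 3] assumes brokers 2 and 3 have been added;
                // in the log above only broker 1 is registered.
                NewPartitionReassignment target =
                    new NewPartitionReassignment(Arrays.asList(1, 2, 3));
                admin.alterPartitionReassignments(
                    Collections.singletonMap(tp, Optional.of(target))).all().get();
            }
        }
    }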
kafka_1 | [2020-07-08 17:56:49,250] TRACE [Controller id=1 epoch=2] Received response {error_code=0,partition_errors=[{topic_name=__confluent.support.metrics,partition_index=0,error_code=0,_tagged_fields={}}],_tagged_fields={}} for request LEADER_AND_ISR with correlation id 1 sent to broker kafka:29092 (id: 1 rack: null) (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:49,310] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__confluent.support.metrics', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition __confluent.support.metrics-0 in response to UpdateMetadata request sent by controller 1 epoch 2 with correlation id 2 (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:49,320] TRACE [Controller id=1 epoch=2] Received response {error_code=0,_tagged_fields={}} for request UPDATE_METADATA with correlation id 2 sent to broker kafka:29092 (id: 1 rack: null) (state.change.logger) | |
kafka_1 | [2020-07-08 17:56:49,369] INFO ProducerConfig values: | |
kafka_1 | acks = 1 | |
kafka_1 | batch.size = 16384 | |
kafka_1 | bootstrap.servers = [PLAINTEXT://kafka:29092] | |
kafka_1 | buffer.memory = 33554432 | |
kafka_1 | client.dns.lookup = default | |
kafka_1 | client.id = producer-1 | |
kafka_1 | compression.type = none | |
kafka_1 | connections.max.idle.ms = 540000 | |
kafka_1 | delivery.timeout.ms = 120000 | |
kafka_1 | enable.idempotence = false | |
kafka_1 | interceptor.classes = [] | |
kafka_1 | key.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer | |
kafka_1 | linger.ms = 0 | |
kafka_1 | max.block.ms = 10000 | |
kafka_1 | max.in.flight.requests.per.connection = 5 | |
kafka_1 | max.request.size = 1048576 | |
kafka_1 | metadata.max.age.ms = 300000 | |
kafka_1 | metadata.max.idle.ms = 300000 | |
kafka_1 | metric.reporters = [] | |
kafka_1 | metrics.num.samples = 2 | |
kafka_1 | metrics.recording.level = INFO | |
kafka_1 | metrics.sample.window.ms = 30000 | |
kafka_1 | partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner | |
kafka_1 | receive.buffer.bytes = 32768 | |
kafka_1 | reconnect.backoff.max.ms = 1000 | |
kafka_1 | reconnect.backoff.ms = 50 | |
kafka_1 | request.timeout.ms = 30000 | |
kafka_1 | retries = 2147483647 | |
kafka_1 | retry.backoff.ms = 100 | |
kafka_1 | sasl.client.callback.handler.class = null | |
kafka_1 | sasl.jaas.config = null | |
kafka_1 | sasl.kerberos.kinit.cmd = /usr/bin/kinit | |
kafka_1 | sasl.kerberos.min.time.before.relogin = 60000 | |
kafka_1 | sasl.kerberos.service.name = null | |
kafka_1 | sasl.kerberos.ticket.renew.jitter = 0.05 | |
kafka_1 | sasl.kerberos.ticket.renew.window.factor = 0.8 | |
kafka_1 | sasl.login.callback.handler.class = null | |
kafka_1 | sasl.login.class = null | |
kafka_1 | sasl.login.refresh.buffer.seconds = 300 | |
kafka_1 | sasl.login.refresh.min.period.seconds = 60 | |
kafka_1 | sasl.login.refresh.window.factor = 0.8 | |
kafka_1 | sasl.login.refresh.window.jitter = 0.05 | |
kafka_1 | sasl.mechanism = GSSAPI | |
kafka_1 | security.protocol = PLAINTEXT | |
kafka_1 | security.providers = null | |
kafka_1 | send.buffer.bytes = 131072 | |
kafka_1 | ssl.cipher.suites = null | |
kafka_1 | ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] | |
kafka_1 | ssl.endpoint.identification.algorithm = https | |
kafka_1 | ssl.key.password = null | |
kafka_1 | ssl.keymanager.algorithm = SunX509 | |
kafka_1 | ssl.keystore.location = null | |
kafka_1 | ssl.keystore.password = null | |
kafka_1 | ssl.keystore.type = JKS | |
kafka_1 | ssl.protocol = TLS | |
kafka_1 | ssl.provider = null | |
kafka_1 | ssl.secure.random.implementation = null | |
kafka_1 | ssl.trustmanager.algorithm = PKIX | |
kafka_1 | ssl.truststore.location = null | |
kafka_1 | ssl.truststore.password = null | |
kafka_1 | ssl.truststore.type = JKS | |
kafka_1 | transaction.timeout.ms = 60000 | |
kafka_1 | transactional.id = null | |
kafka_1 | value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer | |
kafka_1 | (org.apache.kafka.clients.producer.ProducerConfig) | |
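This ProducerConfig dump comes from the broker's internal metrics reporter, which writes to the __confluent.support.metrics topic. A producer configured the same way (acks = 1, ByteArraySerializer for key and value, bootstrap server kafka:29092) can be reproduced with roughly the sketch below; the payload is illustrative, not a real metrics record.

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.ByteArraySerializer;

    public class MetricsLikeProducer {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Values mirror the ProducerConfig dump above.
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:29092");
            props.put(ProducerConfig.ACKS_CONFIG, "1");
            props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
            try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
                // Payload is a placeholder; the real reporter serializes a metrics record.
                producer.send(new ProducerRecord<>("__confluent.support.metrics",
                        "hello".getBytes()));
                producer.flush();
            }
        }
    }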
kafka_1 | [2020-07-08 17:56:49,501] INFO Kafka version: 5.5.0-ccs (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:49,509] INFO Kafka commitId: 606822a624024828 (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:49,509] INFO Kafka startTimeMs: 1594231009489 (org.apache.kafka.common.utils.AppInfoParser) | |
kafka_1 | [2020-07-08 17:56:49,693] INFO [Producer clientId=producer-1] Cluster ID: epSKzw_IS8iIYBfj3Q708A (org.apache.kafka.clients.Metadata) | |
kafka_1 | [2020-07-08 17:56:50,205] INFO [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms. (org.apache.kafka.clients.producer.KafkaProducer) | |
kafka_1 | [2020-07-08 17:56:50,228] INFO Successfully submitted metrics to Kafka topic __confluent.support.metrics (io.confluent.support.metrics.submitters.KafkaSubmitter) | |
kafka_1 | [2020-07-08 17:56:52,581] INFO Successfully submitted metrics to Confluent via secure endpoint (io.confluent.support.metrics.submitters.ConfluentSubmitter) | |
kafka_1 | [2020-07-08 17:56:53,398] INFO [Controller id=1] Processing automatic preferred replica leader election (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:53,400] TRACE [Controller id=1] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:53,425] DEBUG [Controller id=1] Topics not in preferred replica for broker 1 Map() (kafka.controller.KafkaController) | |
kafka_1 | [2020-07-08 17:56:53,427] TRACE [Controller id=1] Leader imbalance ratio for broker 1 is 0.0 (kafka.controller.KafkaController) |