diff --git a/commands.txt b/commands.txt index 9e0c4bf..817af68 100644 --- a/commands.txt +++ b/commands.txt @@ -177,6 +177,33 @@ USE PYTHON CLIENT $ cd /usr/share/gocode/src/github.com/google/gnxi/gnmi_cli_py $ python py_gnmicli.py -n -m get -t localhost -p 10161 -x /topology -u foo -pass bar +== KAFKA +$ cd /root/OFC_SC472/kafka + +(INSTALL) +$ pip3 install kafka-python +$ wget https://ftp.cixug.es/apache/kafka/2.8.0/kafka_2.13-2.8.0.tgz +$ tar -xzf kafka_2.13-2.8.0.tgz +(RUN) +$ cd kafka_2.13-2.8.0 +$ bin/zookeeper-server-start.sh config/zookeeper.properties +(In new window) +$ cd /root/OFC_SC472/kafka/kafka_2.13-2.8.0 +$ bin/kafka-server-start.sh config/server.properties + +CREATE TOPIC +(In new window) +$ cd /root/OFC_SC472/kafka/kafka_2.13-2.8.0 +$ bin/kafka-topics.sh --create --topic my-topic --bootstrap-server localhost:9092 + +(In new window) +$ cd /root/OFC_SC472/kafka +$ python3 sub.py + +(In new window) +$ cd /root/OFC_SC472/kafka +$ python3 pub.py + == APPENDIX: CONFD $ cd /root/OFC_SC472/netconf $ unzip confd-basic-6.4.linux.x86_64.zip diff --git a/kafka/kafka_2.13-2.8.0.tgz b/kafka/kafka_2.13-2.8.0.tgz new file mode 100644 index 0000000..29538c5 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0.tgz differ diff --git a/kafka/kafka_2.13-2.8.0/LICENSE b/kafka/kafka_2.13-2.8.0/LICENSE new file mode 100644 index 0000000..4d8e2c7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/LICENSE @@ -0,0 +1,320 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------------------------------------------------------------------- +This project bundles some components that are also licensed under the Apache +License Version 2.0: + +audience-annotations-0.5.0 +commons-cli-1.4 +commons-lang3-3.8.1 +jackson-annotations-2.10.5 +jackson-core-2.10.5 +jackson-databind-2.10.5.1 +jackson-dataformat-csv-2.10.5 +jackson-datatype-jdk8-2.10.5 +jackson-jaxrs-base-2.10.5 +jackson-jaxrs-json-provider-2.10.5 +jackson-module-jaxb-annotations-2.10.5 +jackson-module-paranamer-2.10.5 +jackson-module-scala_2.13-2.10.5 +jakarta.validation-api-2.0.2 +javassist-3.27.0-GA +jetty-client-9.4.38.v20210224 +jetty-continuation-9.4.38.v20210224 +jetty-http-9.4.38.v20210224 +jetty-io-9.4.38.v20210224 +jetty-security-9.4.38.v20210224 +jetty-server-9.4.38.v20210224 +jetty-servlet-9.4.38.v20210224 +jetty-servlets-9.4.38.v20210224 +jetty-util-9.4.38.v20210224 +jetty-util-ajax-9.4.38.v20210224 +jersey-common-2.31 +jersey-server-2.31 +log4j-1.2.17 +lz4-java-1.7.1 +maven-artifact-3.6.3 +metrics-core-2.2.0 +netty-buffer-4.1.59.Final +netty-codec-4.1.59.Final +netty-common-4.1.59.Final +netty-handler-4.1.59.Final +netty-resolver-4.1.59.Final +netty-transport-4.1.59.Final +netty-transport-native-epoll-4.1.59.Final +netty-transport-native-epoll-4.1.59.Final +netty-transport-native-unix-common-4.1.59.Final +plexus-utils-3.2.1 +rocksdbjni-5.18.4 +scala-collection-compat_2.13-2.3.0 +scala-library-2.13.5 +scala-logging_2.13-3.9.2 +scala-reflect-2.13.5 +scala-java8-compat_2.13-0.9.1 +snappy-java-1.1.8.1 +zookeeper-3.5.9 +zookeeper-jute-3.5.9 + +=============================================================================== +This product bundles various third-party components under other open source +licenses. This section summarizes those components and their licenses. +See licenses/ for text of these licenses. 
+ +--------------------------------------- +Eclipse Distribution License - v 1.0 +see: licenses/eclipse-distribution-license-1.0 + +jakarta.activation-api-1.2.1 +jakarta.xml.bind-api-2.3.2 + +--------------------------------------- +Eclipse Public License - v 2.0 +see: licenses/eclipse-public-license-2.0 + +jakarta.annotation-api-1.3.5 +jakarta.ws.rs-api-2.1.6 +javax.ws.rs-api-2.1.1 +hk2-api-2.6.1 +hk2-locator-2.6.1 +hk2-utils-2.6.1 +osgi-resource-locator-1.0.3 +aopalliance-repackaged-2.6.1 +jakarta.inject-2.6.1 +jersey-container-servlet-2.31 +jersey-container-servlet-core-2.31 +jersey-client-2.31 +jersey-hk2-2.31 +jersey-media-jaxb-2.31 + +--------------------------------------- +CDDL 1.1 + GPLv2 with classpath exception +see: licenses/CDDL+GPL-1.1 + +javax.servlet-api-3.1.0 +jaxb-api-2.3.0 +activation-1.1.1 + +--------------------------------------- +MIT License + +argparse4j-0.7.0, see: licenses/argparse-MIT +jopt-simple-5.0.4, see: licenses/jopt-simple-MIT +slf4j-api-1.7.30, see: licenses/slf4j-MIT +slf4j-log4j12-1.7.30, see: licenses/slf4j-MIT + +--------------------------------------- +BSD 2-Clause + +zstd-jni-1.4.9-1, see: licenses/zstd-jni-BSD-2-clause + +--------------------------------------- +BSD 3-Clause + +paranamer-2.8, see: licenses/paranamer-BSD-3-clause + +--------------------------------------- +Do What The F*ck You Want To Public License +see: licenses/DWTFYWTPL + +reflections-0.9.12 \ No newline at end of file diff --git a/kafka/kafka_2.13-2.8.0/NOTICE b/kafka/kafka_2.13-2.8.0/NOTICE new file mode 100644 index 0000000..674c942 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/NOTICE @@ -0,0 +1,19 @@ +Apache Kafka +Copyright 2021 The Apache Software Foundation. + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +This distribution has a binary dependency on jersey, which is available under the CDDL +License. The source code of jersey can be found at https://github.com/jersey/jersey/. 
+ +The streams-scala (streams/streams-scala) module was donated by Lightbend and the original code was copyrighted by them: +Copyright (C) 2018 Lightbend Inc. +Copyright (C) 2017-2018 Alexis Seigneurin. + +This project contains the following code copied from Apache Hadoop: +clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. + +This project contains the following code copied from Apache Hive: +streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java diff --git a/kafka/kafka_2.13-2.8.0/bin/connect-distributed.sh b/kafka/kafka_2.13-2.8.0/bin/connect-distributed.sh new file mode 100755 index 0000000..b8088ad --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/connect-distributed.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] connect-distributed.properties" + exit 1 +fi + +base_dir=$(dirname $0) + +if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +fi + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G" +fi + +EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'} + +COMMAND=$1 +case $COMMAND in + -daemon) + EXTRA_ARGS="-daemon "$EXTRA_ARGS + shift + ;; + *) + ;; +esac + +exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/connect-mirror-maker.sh b/kafka/kafka_2.13-2.8.0/bin/connect-mirror-maker.sh new file mode 100755 index 0000000..8e2b2e1 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/connect-mirror-maker.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] mm2.properties" + exit 1 +fi + +base_dir=$(dirname $0) + +if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +fi + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G" +fi + +EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'} + +COMMAND=$1 +case $COMMAND in + -daemon) + EXTRA_ARGS="-daemon "$EXTRA_ARGS + shift + ;; + *) + ;; +esac + +exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/connect-standalone.sh b/kafka/kafka_2.13-2.8.0/bin/connect-standalone.sh new file mode 100755 index 0000000..441069f --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/connect-standalone.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] connect-standalone.properties" + exit 1 +fi + +base_dir=$(dirname $0) + +if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties" +fi + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G" +fi + +EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'} + +COMMAND=$1 +case $COMMAND in + -daemon) + EXTRA_ARGS="-daemon "$EXTRA_ARGS + shift + ;; + *) + ;; +esac + +exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-acls.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-acls.sh new file mode 100755 index 0000000..8fa6554 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-acls.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-broker-api-versions.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-broker-api-versions.sh new file mode 100755 index 0000000..4f560a0 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-broker-api-versions.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-cluster.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-cluster.sh new file mode 100755 index 0000000..574007e --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-cluster.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.tools.ClusterTool "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-configs.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-configs.sh new file mode 100755 index 0000000..2f9eb8c --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-configs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-console-consumer.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-console-consumer.sh new file mode 100755 index 0000000..dbaac2b --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-console-consumer.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi + +exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-console-producer.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-console-producer.sh new file mode 100755 index 0000000..e5187b8 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-console-producer.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-groups.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-groups.sh new file mode 100755 index 0000000..feb063d --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-groups.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-perf-test.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-perf-test.sh new file mode 100755 index 0000000..77cda72 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-consumer-perf-test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-delegation-tokens.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-delegation-tokens.sh new file mode 100755 index 0000000..49cb276 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-delegation-tokens.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.admin.DelegationTokenCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-delete-records.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-delete-records.sh new file mode 100755 index 0000000..8726f91 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-delete-records.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-dump-log.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-dump-log.sh new file mode 100755 index 0000000..a97ea7d --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-dump-log.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-features.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-features.sh new file mode 100755 index 0000000..9dd9f16 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-features.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.FeatureCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-leader-election.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-leader-election.sh new file mode 100755 index 0000000..88baef3 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-leader-election.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-log-dirs.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-log-dirs.sh new file mode 100755 index 0000000..dc16edc --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-log-dirs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-metadata-shell.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-metadata-shell.sh new file mode 100755 index 0000000..289f0c1 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-metadata-shell.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.shell.MetadataShell "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-mirror-maker.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-mirror-maker.sh new file mode 100755 index 0000000..981f271 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-mirror-maker.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-preferred-replica-election.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-preferred-replica-election.sh new file mode 100755 index 0000000..638a92a --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-preferred-replica-election.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.admin.PreferredReplicaLeaderElectionCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-producer-perf-test.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-producer-perf-test.sh new file mode 100755 index 0000000..73a6288 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-producer-perf-test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-reassign-partitions.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-reassign-partitions.sh new file mode 100755 index 0000000..4c7f1bc --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-reassign-partitions.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-replica-verification.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-replica-verification.sh new file mode 100755 index 0000000..4960836 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-replica-verification.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-run-class.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-run-class.sh new file mode 100755 index 0000000..3889be7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-run-class.sh @@ -0,0 +1,331 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]" + exit 1 +fi + +# CYGWIN == 1 if Cygwin is detected, else 0. +if [[ $(uname -a) =~ "CYGWIN" ]]; then + CYGWIN=1 +else + CYGWIN=0 +fi + +if [ -z "$INCLUDE_TEST_JARS" ]; then + INCLUDE_TEST_JARS=false +fi + +# Exclude jars not necessary for running commands. +regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$" +should_include_file() { + if [ "$INCLUDE_TEST_JARS" = true ]; then + return 0 + fi + file=$1 + if [ -z "$(echo "$file" | egrep "$regex")" ] ; then + return 0 + else + return 1 + fi +} + +base_dir=$(dirname $0)/.. 
+ +if [ -z "$SCALA_VERSION" ]; then + SCALA_VERSION=2.13.5 + if [[ -f "$base_dir/gradle.properties" ]]; then + SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2` + fi +fi + +if [ -z "$SCALA_BINARY_VERSION" ]; then + SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.') +fi + +# run ./gradlew copyDependantLibs to get all dependant jars in a local dir +shopt -s nullglob +if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then + for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*; + do + CLASSPATH="$CLASSPATH:$dir/*" + done +fi + +for file in "$base_dir"/examples/build/libs/kafka-examples*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then + clients_lib_dir=$(dirname $0)/../clients/build/libs + streams_lib_dir=$(dirname $0)/../streams/build/libs + streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION} +else + clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs + streams_lib_dir=$clients_lib_dir + streams_dependant_clients_lib_dir=$streams_lib_dir +fi + + +for file in "$clients_lib_dir"/kafka-clients*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +for file in "$streams_lib_dir"/kafka-streams*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then + for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar; + do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi + done +else + VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'` + SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number + for file in 
"$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar; + do + if should_include_file "$file"; then + CLASSPATH="$file":"$CLASSPATH" + fi + done + if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then + CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH" + CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH" + fi + if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then + CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH" + CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH" + fi +fi + +for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar; +do + CLASSPATH="$CLASSPATH":"$file" +done + +for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar; +do + CLASSPATH="$CLASSPATH":"$file" +done + +for file in "$base_dir"/shell/build/libs/kafka-shell*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +for dir in "$base_dir"/shell/build/dependant-libs-${SCALA_VERSION}*; +do + CLASSPATH="$CLASSPATH:$dir/*" +done + +for file in "$base_dir"/tools/build/libs/kafka-tools*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*; +do + CLASSPATH="$CLASSPATH:$dir/*" +done + +for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension" +do + for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar; + do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi + done + if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then + CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*" + fi +done + +# classpath addition for release +for file in "$base_dir"/libs/*; +do + if should_include_file 
"$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done + +for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar; +do + if should_include_file "$file"; then + CLASSPATH="$CLASSPATH":"$file" + fi +done +shopt -u nullglob + +if [ -z "$CLASSPATH" ] ; then + echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'" + exit 1 +fi + +# JMX settings +if [ -z "$KAFKA_JMX_OPTS" ]; then + KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi + +# JMX port to use +if [ $JMX_PORT ]; then + KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT " +fi + +# Log directory to use +if [ "x$LOG_DIR" = "x" ]; then + LOG_DIR="$base_dir/logs" +fi + +# Log4j settings +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + # Log to console. This is a tool. + LOG4J_DIR="$base_dir/config/tools-log4j.properties" + # If Cygwin is detected, LOG4J_DIR is converted to Windows format. + (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}") + KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}" +else + # create logs directory + if [ ! -d "$LOG_DIR" ]; then + mkdir -p "$LOG_DIR" + fi +fi + +# If Cygwin is detected, LOG_DIR is converted to Windows format. 
+(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}") +KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS" + +# Generic jvm settings you want to add +if [ -z "$KAFKA_OPTS" ]; then + KAFKA_OPTS="" +fi + +# Set Debug options if enabled +if [ "x$KAFKA_DEBUG" != "x" ]; then + + # Use default ports + DEFAULT_JAVA_DEBUG_PORT="5005" + + if [ -z "$JAVA_DEBUG_PORT" ]; then + JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT" + fi + + # Use the defaults if JAVA_DEBUG_OPTS was not set + DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT" + if [ -z "$JAVA_DEBUG_OPTS" ]; then + JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS" + fi + + echo "Enabling Java debug options: $JAVA_DEBUG_OPTS" + KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS" +fi + +# Which java to use +if [ -z "$JAVA_HOME" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi + +# Memory options +if [ -z "$KAFKA_HEAP_OPTS" ]; then + KAFKA_HEAP_OPTS="-Xmx256M" +fi + +# JVM performance options +# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported +if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then + KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true" +fi + +while [ $# -gt 0 ]; do + COMMAND=$1 + case $COMMAND in + -name) + DAEMON_NAME=$2 + CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out + shift 2 + ;; + -loggc) + if [ -z "$KAFKA_GC_LOG_OPTS" ]; then + GC_LOG_ENABLED="true" + fi + shift + ;; + -daemon) + DAEMON_MODE="true" + shift + ;; + *) + break + ;; + esac +done + +# GC options +GC_FILE_SUFFIX='-gc.log' +GC_LOG_FILE_NAME='' +if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then + GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX + + # The first segment of the version number, which is '1' for releases before Java 9 + # it then becomes '9', '10', ... 
+ # Some examples of the first line of `java --version`: + # 8 -> java version "1.8.0_152" + # 9.0.4 -> java version "9.0.4" + # 10 -> java version "10" 2018-03-20 + # 10.0.1 -> java version "10.0.1" 2018-04-17 + # We need to match to the end of the line to prevent sed from printing the characters that do not match + JAVA_MAJOR_VERSION=$("$JAVA" -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p') + if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then + KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=100M" + else + KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M" + fi +fi + +# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank) +# Syntax used on the right side is native Bash string manipulation; for more details see +# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal" +CLASSPATH=${CLASSPATH#:} + +# If Cygwin is detected, classpath is converted to Windows format. 
+(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}") + +# Launch mode +if [ "x$DAEMON_MODE" = "xtrue" ]; then + nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null & +else + exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" +fi diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-server-start.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-server-start.sh new file mode 100755 index 0000000..5a53126 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-server-start.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] server.properties [--override property=value]*" + exit 1 +fi +base_dir=$(dirname $0) + +if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" +fi + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" +fi + +EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'} + +COMMAND=$1 +case $COMMAND in + -daemon) + EXTRA_ARGS="-daemon "$EXTRA_ARGS + shift + ;; + *) + ;; +esac + +exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-server-stop.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-server-stop.sh new file mode 100755 index 0000000..437189f --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-server-stop.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+SIGNAL=${SIGNAL:-TERM} + +OSNAME=$(uname -s) +if [[ "$OSNAME" == "OS/390" ]]; then + if [ -z $JOBNAME ]; then + JOBNAME="KAFKSTRT" + fi + PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}') +elif [[ "$OSNAME" == "OS400" ]]; then + PIDS=$(ps -Af | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $2}') +else + PIDS=$(ps ax | grep ' kafka\.Kafka ' | grep java | grep -v grep | awk '{print $1}') +fi + +if [ -z "$PIDS" ]; then + echo "No kafka server to stop" + exit 1 +else + kill -s $SIGNAL $PIDS +fi diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-storage.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-storage.sh new file mode 100755 index 0000000..eef9342 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-storage.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.tools.StorageTool "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-streams-application-reset.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-streams-application-reset.sh new file mode 100755 index 0000000..3363732 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-streams-application-reset.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi + +exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-topics.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-topics.sh new file mode 100755 index 0000000..ad6a2d4 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-topics.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-consumer.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-consumer.sh new file mode 100755 index 0000000..852847d --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-consumer.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-producer.sh b/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-producer.sh new file mode 100755 index 0000000..b59bae7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/kafka-verifiable-producer.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M" +fi +exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/trogdor.sh b/kafka/kafka_2.13-2.8.0/bin/trogdor.sh new file mode 100755 index 0000000..3324c4e --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/trogdor.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +usage() { + cat <nul 2>&1 + IF NOT ERRORLEVEL 1 ( + rem 32-bit OS + set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M + ) ELSE ( + rem 64-bit OS + set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G + ) +) +"%~dp0kafka-run-class.bat" kafka.Kafka %* +EndLocal diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/kafka-server-stop.bat b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-server-stop.bat new file mode 100644 index 0000000..676577c --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-server-stop.bat @@ -0,0 +1,18 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. +rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. 
+ +wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete +rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/kafka-streams-application-reset.bat b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-streams-application-reset.bat new file mode 100644 index 0000000..1cfb6f5 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-streams-application-reset.bat @@ -0,0 +1,23 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. +rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. + +SetLocal +IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( + set KAFKA_HEAP_OPTS=-Xmx512M +) + +"%~dp0kafka-run-class.bat" kafka.tools.StreamsResetter %* +EndLocal diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/kafka-topics.bat b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-topics.bat new file mode 100644 index 0000000..677b09d --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/kafka-topics.bat @@ -0,0 +1,17 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. 
+rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. + +"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %* diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-start.bat b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-start.bat new file mode 100644 index 0000000..f201a58 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-start.bat @@ -0,0 +1,30 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. +rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. 
+ +IF [%1] EQU [] ( + echo USAGE: %0 zookeeper.properties + EXIT /B 1 +) + +SetLocal +IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] ( + set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties +) +IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( + set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M +) +"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %* +EndLocal diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-stop.bat b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-stop.bat new file mode 100644 index 0000000..8b57dd8 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-server-stop.bat @@ -0,0 +1,17 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. +rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. + +wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete diff --git a/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-shell.bat b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-shell.bat new file mode 100644 index 0000000..f1c86c4 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/windows/zookeeper-shell.bat @@ -0,0 +1,22 @@ +@echo off +rem Licensed to the Apache Software Foundation (ASF) under one or more +rem contributor license agreements. 
See the NOTICE file distributed with +rem this work for additional information regarding copyright ownership. +rem The ASF licenses this file to You under the Apache License, Version 2.0 +rem (the "License"); you may not use this file except in compliance with +rem the License. You may obtain a copy of the License at +rem +rem http://www.apache.org/licenses/LICENSE-2.0 +rem +rem Unless required by applicable law or agreed to in writing, software +rem distributed under the License is distributed on an "AS IS" BASIS, +rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +rem See the License for the specific language governing permissions and +rem limitations under the License. + +IF [%1] EQU [] ( + echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...] + EXIT /B 1 +) + +"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %* diff --git a/kafka/kafka_2.13-2.8.0/bin/zookeeper-security-migration.sh b/kafka/kafka_2.13-2.8.0/bin/zookeeper-security-migration.sh new file mode 100755 index 0000000..722bde7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/zookeeper-security-migration.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-start.sh b/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-start.sh new file mode 100755 index 0000000..bd9c114 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-start.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 [-daemon] zookeeper.properties" + exit 1 +fi +base_dir=$(dirname $0) + +if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" +fi + +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M" +fi + +EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'} + +COMMAND=$1 +case $COMMAND in + -daemon) + EXTRA_ARGS="-daemon "$EXTRA_ARGS + shift + ;; + *) + ;; +esac + +exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@" diff --git a/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-stop.sh b/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-stop.sh new file mode 100755 index 0000000..11665f3 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/zookeeper-server-stop.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+SIGNAL=${SIGNAL:-TERM} + +OSNAME=$(uname -s) +if [[ "$OSNAME" == "OS/390" ]]; then + if [ -z $JOBNAME ]; then + JOBNAME="ZKEESTRT" + fi + PIDS=$(ps -A -o pid,jobname,comm | grep -i $JOBNAME | grep java | grep -v grep | awk '{print $1}') +elif [[ "$OSNAME" == "OS400" ]]; then + PIDS=$(ps -Af | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $2}') +else + PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}') +fi + +if [ -z "$PIDS" ]; then + echo "No zookeeper server to stop" + exit 1 +else + kill -s $SIGNAL $PIDS +fi diff --git a/kafka/kafka_2.13-2.8.0/bin/zookeeper-shell.sh b/kafka/kafka_2.13-2.8.0/bin/zookeeper-shell.sh new file mode 100755 index 0000000..2f1d0f2 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/bin/zookeeper-shell.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if [ $# -lt 1 ]; +then + echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]" + exit 1 +fi + +exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@" diff --git a/kafka/kafka_2.13-2.8.0/config/connect-console-sink.properties b/kafka/kafka_2.13-2.8.0/config/connect-console-sink.properties new file mode 100644 index 0000000..e240a8f --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-console-sink.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name=local-console-sink +connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector +tasks.max=1 +topics=connect-test \ No newline at end of file diff --git a/kafka/kafka_2.13-2.8.0/config/connect-console-source.properties b/kafka/kafka_2.13-2.8.0/config/connect-console-source.properties new file mode 100644 index 0000000..d0e2069 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-console-source.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name=local-console-source +connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector +tasks.max=1 +topic=connect-test \ No newline at end of file diff --git a/kafka/kafka_2.13-2.8.0/config/connect-distributed.properties b/kafka/kafka_2.13-2.8.0/config/connect-distributed.properties new file mode 100644 index 0000000..72db145 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-distributed.properties @@ -0,0 +1,86 @@ +## +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +## + +# This file contains some of the configurations for the Kafka Connect distributed worker. 
This file is intended +# to be used with the examples, and some settings may differ from those used in a production system, especially +# the `bootstrap.servers` and those specifying replication factors. + +# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. +bootstrap.servers=localhost:9092 + +# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs +group.id=connect-cluster + +# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will +# need to configure these based on the format they want their data in when loaded from or stored into Kafka +key.converter=org.apache.kafka.connect.json.JsonConverter +value.converter=org.apache.kafka.connect.json.JsonConverter +# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply +# it to +key.converter.schemas.enable=true +value.converter.schemas.enable=true + +# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted. +# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. +# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +offset.storage.topic=connect-offsets +offset.storage.replication.factor=1 +#offset.storage.partitions=25 + +# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, +# and compacted topic. 
Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. +# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +config.storage.topic=connect-configs +config.storage.replication.factor=1 + +# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted. +# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create +# the topic before starting Kafka Connect if a specific topic configuration is needed. +# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value. +# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able +# to run this example on a single-broker cluster and so here we instead set the replication factor to 1. +status.storage.topic=connect-status +status.storage.replication.factor=1 +#status.storage.partitions=5 + +# Flush much faster than normal, which is useful for testing/debugging +offset.flush.interval.ms=10000 + +# These are provided to inform the user about the presence of the REST host and port configs +# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests. +#rest.host.name= +#rest.port=8083 + +# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers. 
+#rest.advertised.host.name= +#rest.advertised.port= + +# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins +# (connectors, converters, transformations). The list should consist of top level directories that include +# any combination of: +# a) directories immediately containing jars with plugins and their dependencies +# b) uber-jars with plugins and their dependencies +# c) directories immediately containing the package directory structure of classes of plugins and their dependencies +# Examples: +# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, +#plugin.path= diff --git a/kafka/kafka_2.13-2.8.0/config/connect-file-sink.properties b/kafka/kafka_2.13-2.8.0/config/connect-file-sink.properties new file mode 100644 index 0000000..594ccc6 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-file-sink.properties @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name=local-file-sink +connector.class=FileStreamSink +tasks.max=1 +file=test.sink.txt +topics=connect-test \ No newline at end of file diff --git a/kafka/kafka_2.13-2.8.0/config/connect-file-source.properties b/kafka/kafka_2.13-2.8.0/config/connect-file-source.properties new file mode 100644 index 0000000..599cf4c --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-file-source.properties @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name=local-file-source +connector.class=FileStreamSource +tasks.max=1 +file=test.txt +topic=connect-test \ No newline at end of file diff --git a/kafka/kafka_2.13-2.8.0/config/connect-log4j.properties b/kafka/kafka_2.13-2.8.0/config/connect-log4j.properties new file mode 100644 index 0000000..f695e37 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-log4j.properties @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +log4j.rootLogger=INFO, stdout, connectAppender + +# Send the logs to the console. +# +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + +# Send the logs to a file, rolling the file at midnight local time. For example, the `File` option specifies the +# location of the log files (e.g. ${kafka.logs.dir}/connect.log), and at midnight local time the file is closed +# and copied in the same directory but with a filename that ends in the `DatePattern` option. +# +log4j.appender.connectAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.connectAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.connectAppender.File=${kafka.logs.dir}/connect.log +log4j.appender.connectAppender.layout=org.apache.log4j.PatternLayout + +# The `%X{connector.context}` parameter in the layout includes connector-specific and task-specific information +# in the log message, where appropriate. This makes it easier to identify those log messages that apply to a +# specific connector. Simply add this parameter to the log layout configuration below to include the contextual information. 
+#
+connect.log.pattern=[%d] %p %m (%c:%L)%n
+#connect.log.pattern=[%d] %p %X{connector.context}%m (%c:%L)%n
+
+log4j.appender.stdout.layout.ConversionPattern=${connect.log.pattern}
+log4j.appender.connectAppender.layout.ConversionPattern=${connect.log.pattern}
+
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.reflections=ERROR diff --git a/kafka/kafka_2.13-2.8.0/config/connect-mirror-maker.properties b/kafka/kafka_2.13-2.8.0/config/connect-mirror-maker.properties new file mode 100644 index 0000000..40afda5 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-mirror-maker.properties @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see org.apache.kafka.clients.consumer.ConsumerConfig for more details

+# Sample MirrorMaker 2.0 top-level configuration file
+# Run with ./bin/connect-mirror-maker.sh connect-mirror-maker.properties

+# specify any number of cluster aliases
+clusters = A, B

+# connection information for each cluster
+# This is a comma separated host:port pairs for each cluster
+# for e.g. 
"A_host1:9092, A_host2:9092, A_host3:9092" +A.bootstrap.servers = A_host1:9092, A_host2:9092, A_host3:9092 +B.bootstrap.servers = B_host1:9092, B_host2:9092, B_host3:9092 + +# enable and configure individual replication flows +A->B.enabled = true + +# regex which defines which topics gets replicated. For eg "foo-.*" +A->B.topics = .* + +B->A.enabled = true +B->A.topics = .* + +# Setting replication factor of newly created remote topics +replication.factor=1 + +############################# Internal Topic Settings ############################# +# The replication factor for mm2 internal topics "heartbeats", "B.checkpoints.internal" and +# "mm2-offset-syncs.B.internal" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +checkpoints.topic.replication.factor=1 +heartbeats.topic.replication.factor=1 +offset-syncs.topic.replication.factor=1 + +# The replication factor for connect internal topics "mm2-configs.B.internal", "mm2-offsets.B.internal" and +# "mm2-status.B.internal" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offset.storage.replication.factor=1 +status.storage.replication.factor=1 +config.storage.replication.factor=1 + +# customize as needed +# replication.policy.separator = _ +# sync.topic.acls.enabled = false +# emit.heartbeats.interval.seconds = 5 diff --git a/kafka/kafka_2.13-2.8.0/config/connect-standalone.properties b/kafka/kafka_2.13-2.8.0/config/connect-standalone.properties new file mode 100644 index 0000000..a340a3b --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/connect-standalone.properties @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These are defaults. This file just demonstrates how to override some settings. +bootstrap.servers=localhost:9092 + +# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will +# need to configure these based on the format they want their data in when loaded from or stored into Kafka +key.converter=org.apache.kafka.connect.json.JsonConverter +value.converter=org.apache.kafka.connect.json.JsonConverter +# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply +# it to +key.converter.schemas.enable=true +value.converter.schemas.enable=true + +offset.storage.file.filename=/tmp/connect.offsets +# Flush much faster than normal, which is useful for testing/debugging +offset.flush.interval.ms=10000 + +# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins +# (connectors, converters, transformations). The list should consist of top level directories that include +# any combination of: +# a) directories immediately containing jars with plugins and their dependencies +# b) uber-jars with plugins and their dependencies +# c) directories immediately containing the package directory structure of classes of plugins and their dependencies +# Note: symlinks will be followed to discover dependencies or plugins. 
+# Examples: +# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors, +#plugin.path= diff --git a/kafka/kafka_2.13-2.8.0/config/consumer.properties b/kafka/kafka_2.13-2.8.0/config/consumer.properties new file mode 100644 index 0000000..01bb12e --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/consumer.properties @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.consumer.ConsumerConfig for more details + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... 
+bootstrap.servers=localhost:9092 + +# consumer group id +group.id=test-consumer-group + +# What to do when there is no initial offset in Kafka or if the current +# offset does not exist any more on the server: latest, earliest, none +#auto.offset.reset= diff --git a/kafka/kafka_2.13-2.8.0/config/kraft/README.md b/kafka/kafka_2.13-2.8.0/config/kraft/README.md new file mode 100644 index 0000000..466dbe0 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/kraft/README.md @@ -0,0 +1,178 @@ +KRaft (aka KIP-500) mode Early Access Release +========================================================= + +# Introduction +It is now possible to run Apache Kafka without Apache ZooKeeper! We call this the [Kafka Raft metadata mode](https://cwiki.apache.org/confluence/display/KAFKA/KIP-500%3A+Replace+ZooKeeper+with+a+Self-Managed+Metadata+Quorum), typically shortened to `KRaft mode`. +`KRaft` is intended to be pronounced like `craft` (as in `craftsmanship`). It is currently *EARLY ACCESS AND SHOULD NOT BE USED IN PRODUCTION*, but it +is available for testing in the Kafka 2.8 release. + +When the Kafka cluster is in KRaft mode, it does not store its metadata in ZooKeeper. In fact, you do not have to run ZooKeeper at all, because it stores its metadata in a KRaft quorum of controller nodes. + +KRaft mode has many benefits -- some obvious, and some not so obvious. Clearly, it is nice to manage and configure one service rather than two services. In addition, you can now run a single process Kafka cluster. +Most important of all, KRaft mode is more scalable. We expect to be able to [support many more topics and partitions](https://www.confluent.io/kafka-summit-san-francisco-2019/kafka-needs-no-keeper/) in this mode. + +# Quickstart + +## Warning +KRaft mode in Kafka 2.8 is provided for testing only, *NOT* for production. We do not yet support upgrading existing ZooKeeper-based Kafka clusters into this mode. 
In fact, when Kafka 3.0 is released, +it will not be possible to upgrade your KRaft clusters from 2.8 to 3.0. There may be bugs, including serious ones. You should *assume that your data could be lost at any time* if you try the early access release of KRaft mode. + +## Generate a cluster ID +The first step is to generate an ID for your new cluster, using the kafka-storage tool: + +~~~~ +$ ./bin/kafka-storage.sh random-uuid +xtzWWN4bTjitpL3kfd9s5g +~~~~ + +## Format Storage Directories +The next step is to format your storage directories. If you are running in single-node mode, you can do this with one command: + +~~~~ +$ ./bin/kafka-storage.sh format -t <uuid> -c ./config/kraft/server.properties +Formatting /tmp/kraft-combined-logs +~~~~ + +If you are using multiple nodes, then you should run the format command on each node. Be sure to use the same cluster ID for each one. + +## Start the Kafka Server +Finally, you are ready to start the Kafka server on each node. + +~~~~ +$ ./bin/kafka-server-start.sh ./config/kraft/server.properties +[2021-02-26 15:37:11,071] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) +[2021-02-26 15:37:11,294] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) +[2021-02-26 15:37:11,466] INFO [Log partition=@metadata-0, dir=/tmp/kraft-combined-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-02-26 15:37:11,509] INFO [raft-expiration-reaper]: Starting (kafka.raft.TimingWheelExpirationService$ExpiredOperationReaper) +[2021-02-26 15:37:11,640] INFO [RaftManager nodeId=1] Completed transition to Unattached(epoch=0, voters=[1], electionTimeoutMs=9037) (org.apache.kafka.raft.QuorumState) +... 
+~~~~ + +Just like with a ZooKeeper based broker, you can connect to port 9092 (or whatever port you configured) to perform administrative operations or produce or consume data. + +~~~~ +$ ./bin/kafka-topics.sh --create --topic foo --partitions 1 --replication-factor 1 --bootstrap-server localhost:9092 +Created topic foo. +~~~~ + +# Deployment + +## Controller Servers +In KRaft mode, only a small group of specially selected servers can act as controllers (unlike the ZooKeeper-based mode, where any server can become the +Controller). The specially selected controller servers will participate in the metadata quorum. Each controller server is either active, or a hot +standby for the current active controller server. + +You will typically select 3 or 5 servers for this role, depending on factors like cost and the number of concurrent failures your system should withstand +without availability impact. Just like with ZooKeeper, you must keep a majority of the controllers alive in order to maintain availability. So if you have 3 +controllers, you can tolerate 1 failure; with 5 controllers, you can tolerate 2 failures. + +## Process Roles +Each Kafka server now has a new configuration key called `process.roles` which can have the following values: + +* If `process.roles` is set to `broker`, the server acts as a broker in KRaft mode. +* If `process.roles` is set to `controller`, the server acts as a controller in KRaft mode. +* If `process.roles` is set to `broker,controller`, the server acts as both a broker and a controller in KRaft mode. +* If `process.roles` is not set at all then we are assumed to be in ZooKeeper mode. As mentioned earlier, you can't currently transition back and forth between ZooKeeper mode and KRaft mode without reformatting. + +Nodes that act as both brokers and controllers are referred to as "combined" nodes. Combined nodes are simpler to operate for simple use cases and allow you to avoid +some fixed memory overheads associated with JVMs. 
The key disadvantage is that the controller will be less isolated from the rest of the system. For example, if activity on the broker causes an out of +memory condition, the controller part of the server is not isolated from that OOM condition. + +## Quorum Voters +All nodes in the system must set the `controller.quorum.voters` configuration. This identifies the quorum controller servers that should be used. All the controllers must be enumerated. +This is similar to how, when using ZooKeeper, the `zookeeper.connect` configuration must contain all the ZooKeeper servers. Unlike with the ZooKeeper config, however, `controller.quorum.voters` +also has IDs for each node. The format is id1@host1:port1,id2@host2:port2, etc. + +So if you have 10 brokers and 3 controllers named controller1, controller2, controller3, you might have the following configuration on controller1: +``` +process.roles=controller +node.id=1 +listeners=CONTROLLER://controller1.example.com:9093 +controller.quorum.voters=1@controller1.example.com:9093,2@controller2.example.com:9093,3@controller3.example.com:9093 +``` + +Each broker and each controller must set `controller.quorum.voters`. Note that the node ID supplied in the `controller.quorum.voters` configuration must match that supplied to the server. +So on controller1, node.id must be set to 1, and so forth. Note that there is no requirement for controller IDs to start at 0 or 1. However, the easiest and least confusing way to allocate +node IDs is probably just to give each server a numeric ID, starting from 0. + +Note that clients never need to configure `controller.quorum.voters`; only servers do. + +## Kafka Storage Tool +As described above in the QuickStart section, you must use the `kafka-storage.sh` tool to generate a cluster ID for your new cluster, and then run the format command on each node before starting the node. + +This is different from how Kafka has operated in the past. 
Previously, Kafka would format blank storage directories automatically, and also generate a new cluster UUID automatically. One reason for the change +is that auto-formatting can sometimes obscure an error condition. For example, under UNIX, if a data directory can't be mounted, it may show up as blank. In this case, auto-formatting would be the wrong thing to do. + +This is particularly important for the metadata log maintained by the controller servers. If two controllers out of three controllers were able to start with blank logs, a leader might be able to be elected with +nothing in the log, which would cause all metadata to be lost. + +# Missing Features +We do not yet support generating or loading KIP-630 metadata snapshots. This means that after a while, the time required to restart a broker will become very large. This is a known issue and we are working on +completing snapshots for the next release. + +We also don't support any kind of upgrade right now, either to or from KRaft mode. This is another important gap that we are working on. + +Finally, the following Kafka features have not yet been fully implemented: + +* Support for certain security features: configuring an Authorizer, setting up SCRAM, delegation tokens, and so forth +* Support for transactions and exactly-once semantics +* Support for adding partitions to existing topics +* Support for partition reassignment +* Support for some configurations, like enabling unclean leader election by default or dynamically changing broker endpoints +* Support for KIP-112 "JBOD" modes +* Support for KIP-631 controller metrics + +We've tried to make it clear when a feature is not supported in the early access release, but you may encounter some rough edges. We will cover these feature gaps incrementally in the `trunk` branch. + +# Debugging +If you encounter an issue, you might want to take a look at the metadata log. 
+ +## kafka-dump-log +One way to view the metadata log is with kafka-dump-log.sh tool, like so: + +~~~~ +$ ./bin/kafka-dump-log.sh --cluster-metadata-decoder --skip-record-metadata --files /tmp/kraft-combined-logs/\@metadata-0/*.log +Dumping /tmp/kraft-combined-logs/@metadata-0/00000000000000000000.log +Starting offset: 0 +baseOffset: 0 lastOffset: 0 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: true position: 0 CreateTime: 1614382631640 size: 89 magic: 2 compresscodec: NONE crc: 1438115474 isvalid: true + +baseOffset: 1 lastOffset: 1 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 89 CreateTime: 1614382632329 size: 137 magic: 2 compresscodec: NONE crc: 1095855865 isvalid: true + payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"P3UFsWoNR-erL9PK98YLsA","brokerEpoch":0,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} +baseOffset: 2 lastOffset: 2 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 226 CreateTime: 1614382632453 size: 83 magic: 2 compresscodec: NONE crc: 455187130 isvalid: true + payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} +baseOffset: 3 lastOffset: 3 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 1 isTransactional: false isControl: false position: 309 CreateTime: 1614382634484 size: 83 magic: 2 compresscodec: NONE crc: 4055692847 isvalid: true + payload: {"type":"FENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":0}} +baseOffset: 4 lastOffset: 4 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false 
isControl: true position: 392 CreateTime: 1614382671857 size: 89 magic: 2 compresscodec: NONE crc: 1318571838 isvalid: true + +baseOffset: 5 lastOffset: 5 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 481 CreateTime: 1614382672440 size: 137 magic: 2 compresscodec: NONE crc: 841144615 isvalid: true + payload: {"type":"REGISTER_BROKER_RECORD","version":0,"data":{"brokerId":1,"incarnationId":"RXRJu7cnScKRZOnWQGs86g","brokerEpoch":4,"endPoints":[{"name":"PLAINTEXT","host":"localhost","port":9092,"securityProtocol":0}],"features":[],"rack":null}} +baseOffset: 6 lastOffset: 6 count: 1 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 618 CreateTime: 1614382672544 size: 83 magic: 2 compresscodec: NONE crc: 4155905922 isvalid: true + payload: {"type":"UNFENCE_BROKER_RECORD","version":0,"data":{"id":1,"epoch":4}} +baseOffset: 7 lastOffset: 8 count: 2 baseSequence: -1 lastSequence: -1 producerId: -1 producerEpoch: -1 partitionLeaderEpoch: 2 isTransactional: false isControl: false position: 701 CreateTime: 1614382712158 size: 159 magic: 2 compresscodec: NONE crc: 3726758683 isvalid: true + payload: {"type":"TOPIC_RECORD","version":0,"data":{"name":"foo","topicId":"5zoAlv-xEh9xRANKXt1Lbg"}} + payload: {"type":"PARTITION_RECORD","version":0,"data":{"partitionId":0,"topicId":"5zoAlv-xEh9xRANKXt1Lbg","replicas":[1],"isr":[1],"removingReplicas":null,"addingReplicas":null,"leader":1,"leaderEpoch":0,"partitionEpoch":0}} +~~~~ + +## The Metadata Shell +Another tool for examining the metadata logs is the Kafka metadata shell. Just like the ZooKeeper shell, this allows you to inspect the metadata of the cluster. 
+ +~~~~ +$ ./bin/kafka-metadata-shell.sh --snapshot /tmp/kraft-combined-logs/\@metadata-0/00000000000000000000.log +>> ls / +brokers local metadataQuorum topicIds topics +>> ls /topics +foo +>> cat /topics/foo/0/data +{ + "partitionId" : 0, + "topicId" : "5zoAlv-xEh9xRANKXt1Lbg", + "replicas" : [ 1 ], + "isr" : [ 1 ], + "removingReplicas" : null, + "addingReplicas" : null, + "leader" : 1, + "leaderEpoch" : 0, + "partitionEpoch" : 0 +} +>> exit +~~~~ diff --git a/kafka/kafka_2.13-2.8.0/config/kraft/broker.properties b/kafka/kafka_2.13-2.8.0/config/kraft/broker.properties new file mode 100644 index 0000000..1b71803 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/kraft/broker.properties @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. 
Setting this puts us in KRaft mode +process.roles=broker + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://localhost:9092 +inter.broker.listener.name=PLAINTEXT + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +advertised.listeners=PLAINTEXT://localhost:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-broker-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/kafka/kafka_2.13-2.8.0/config/kraft/controller.properties b/kafka/kafka_2.13-2.8.0/config/kraft/controller.properties new file mode 100644 index 0000000..30fe3e7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/kraft/controller.properties @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. 
It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://:9093 + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=PLAINTEXT + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/raft-controller-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. 
+num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. 
The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/kafka/kafka_2.13-2.8.0/config/kraft/server.properties b/kafka/kafka_2.13-2.8.0/config/kraft/server.properties new file mode 100644 index 0000000..8e6406c --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/kraft/server.properties @@ -0,0 +1,128 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id=1 + +# The connect string for the controller quorum +controller.quorum.voters=1@localhost:9093 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=PLAINTEXT://:9092,CONTROLLER://:9093 +inter.broker.listener.name=PLAINTEXT + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +advertised.listeners=PLAINTEXT://localhost:9092 + +# Listener, host name, and port for the controller to advertise to the brokers. If +# this server is a controller, this listener must be configured. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details +listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kraft-combined-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. 
+offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. 
When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 diff --git a/kafka/kafka_2.13-2.8.0/config/log4j.properties b/kafka/kafka_2.13-2.8.0/config/log4j.properties new file mode 100644 index 0000000..4cbce9d --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/log4j.properties @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Unspecified loggers and loggers with additivity=true output to server.log and stdout +# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise +log4j.rootLogger=INFO, stdout, kafkaAppender + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log +log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log +log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log +log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log +log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH 
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log +log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender +log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH +log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log +log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout +log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + +# Change the line below to adjust ZK client logging +log4j.logger.org.apache.zookeeper=INFO + +# Change the two lines below to adjust the general broker logging level (output to server.log and stdout) +log4j.logger.kafka=INFO +log4j.logger.org.apache.kafka=INFO + +# Change to DEBUG or TRACE to enable request logging +log4j.logger.kafka.request.logger=WARN, requestAppender +log4j.additivity.kafka.request.logger=false + +# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output +# related to the handling of requests +#log4j.logger.kafka.network.Processor=TRACE, requestAppender +#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender +#log4j.additivity.kafka.server.KafkaApis=false +log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender +log4j.additivity.kafka.network.RequestChannel$=false + +log4j.logger.kafka.controller=TRACE, controllerAppender +log4j.additivity.kafka.controller=false + +log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender +log4j.additivity.kafka.log.LogCleaner=false + +log4j.logger.state.change.logger=INFO, stateChangeAppender +log4j.additivity.state.change.logger=false + +# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses +log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender +log4j.additivity.kafka.authorizer.logger=false + diff --git 
a/kafka/kafka_2.13-2.8.0/config/producer.properties b/kafka/kafka_2.13-2.8.0/config/producer.properties new file mode 100644 index 0000000..4786b98 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/producer.properties @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see org.apache.kafka.clients.producer.ProducerConfig for more details + +############################# Producer Basics ############################# + +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... 
+bootstrap.servers=localhost:9092 + +# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd +compression.type=none + +# name of the partitioner class for partitioning events; default partition spreads data randomly +#partitioner.class= + +# the maximum amount of time the client will wait for the response of a request +#request.timeout.ms= + +# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for +#max.block.ms= + +# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together +#linger.ms= + +# the maximum size of a request in bytes +#max.request.size= + +# the default batch size in bytes when batching multiple records sent to a partition +#batch.size= + +# the total bytes of memory the producer can use to buffer records waiting to be sent to the server +#buffer.memory= diff --git a/kafka/kafka_2.13-2.8.0/config/server.properties b/kafka/kafka_2.13-2.8.0/config/server.properties new file mode 100644 index 0000000..b1cf5c4 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/server.properties @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# see kafka.server.KafkaConfig for additional details and defaults + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +broker.id=0 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. It will get the value returned from +# java.net.InetAddress.getCanonicalHostName() if not configured. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +#listeners=PLAINTEXT://:9092 + +# Hostname and port the broker will advertise to producers and consumers. If not set, +# it uses the value for "listeners" if configured. Otherwise, it will use the value +# returned from java.net.InetAddress.getCanonicalHostName(). +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kafka-logs + +# The default number of log partitions per topic. 
More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
+ +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zookeeper.connection.timeout.ms=18000 + + +############################# Group Coordinator Settings ############################# + +# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. 
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. +# The default value for this is 3 seconds. +# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. +# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. +group.initial.rebalance.delay.ms=0 diff --git a/kafka/kafka_2.13-2.8.0/config/tools-log4j.properties b/kafka/kafka_2.13-2.8.0/config/tools-log4j.properties new file mode 100644 index 0000000..b19e343 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/tools-log4j.properties @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +log4j.rootLogger=WARN, stderr + +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n +log4j.appender.stderr.Target=System.err diff --git a/kafka/kafka_2.13-2.8.0/config/trogdor.conf b/kafka/kafka_2.13-2.8.0/config/trogdor.conf new file mode 100644 index 0000000..320cbe7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/trogdor.conf @@ -0,0 +1,25 @@ +{ + "_comment": [ + "Licensed to the Apache Software Foundation (ASF) under one or more", + "contributor license agreements. See the NOTICE file distributed with", + "this work for additional information regarding copyright ownership.", + "The ASF licenses this file to You under the Apache License, Version 2.0", + "(the \"License\"); you may not use this file except in compliance with", + "the License. You may obtain a copy of the License at", + "", + "http://www.apache.org/licenses/LICENSE-2.0", + "", + "Unless required by applicable law or agreed to in writing, software", + "distributed under the License is distributed on an \"AS IS\" BASIS,", + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "See the License for the specific language governing permissions and", + "limitations under the License." + ], + "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": { + "node0": { + "hostname": "localhost", + "trogdor.agent.port": 8888, + "trogdor.coordinator.port": 8889 + } + } +} diff --git a/kafka/kafka_2.13-2.8.0/config/zookeeper.properties b/kafka/kafka_2.13-2.8.0/config/zookeeper.properties new file mode 100644 index 0000000..90f4332 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/config/zookeeper.properties @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# the directory where the snapshot is stored. +dataDir=/tmp/zookeeper +# the port at which the clients will connect +clientPort=2181 +# disable the per-ip limit on the number of connections since this is a non-production config +maxClientCnxns=0 +# Disable the adminserver by default to avoid port conflicts. +# Set the port to something non-conflicting if choosing to enable this +admin.enableServer=false +# admin.serverPort=8080 diff --git a/kafka/kafka_2.13-2.8.0/libs/activation-1.1.1.jar b/kafka/kafka_2.13-2.8.0/libs/activation-1.1.1.jar new file mode 100644 index 0000000..1b703ab Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/activation-1.1.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/aopalliance-repackaged-2.6.1.jar b/kafka/kafka_2.13-2.8.0/libs/aopalliance-repackaged-2.6.1.jar new file mode 100644 index 0000000..35502f0 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/aopalliance-repackaged-2.6.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/argparse4j-0.7.0.jar b/kafka/kafka_2.13-2.8.0/libs/argparse4j-0.7.0.jar new file mode 100644 index 0000000..b1865dd Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/argparse4j-0.7.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/audience-annotations-0.5.0.jar b/kafka/kafka_2.13-2.8.0/libs/audience-annotations-0.5.0.jar new file mode 100644 index 0000000..52491a7 Binary files /dev/null and 
b/kafka/kafka_2.13-2.8.0/libs/audience-annotations-0.5.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/commons-cli-1.4.jar b/kafka/kafka_2.13-2.8.0/libs/commons-cli-1.4.jar new file mode 100644 index 0000000..22deb30 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/commons-cli-1.4.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/commons-lang3-3.8.1.jar b/kafka/kafka_2.13-2.8.0/libs/commons-lang3-3.8.1.jar new file mode 100644 index 0000000..2c65ce6 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/commons-lang3-3.8.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-api-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-api-2.8.0.jar new file mode 100644 index 0000000..7c5ba43 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-api-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-basic-auth-extension-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-basic-auth-extension-2.8.0.jar new file mode 100644 index 0000000..86fcdd7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-basic-auth-extension-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-file-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-file-2.8.0.jar new file mode 100644 index 0000000..ba1b1b1 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-file-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-json-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-json-2.8.0.jar new file mode 100644 index 0000000..b96a4b0 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-json-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-mirror-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-mirror-2.8.0.jar new file mode 100644 index 0000000..9947af9 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-mirror-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-mirror-client-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-mirror-client-2.8.0.jar new file mode 
100644 index 0000000..4099b02 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-mirror-client-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-runtime-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-runtime-2.8.0.jar new file mode 100644 index 0000000..ce0fb81 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-runtime-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/connect-transforms-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/connect-transforms-2.8.0.jar new file mode 100644 index 0000000..b72a124 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/connect-transforms-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/hk2-api-2.6.1.jar b/kafka/kafka_2.13-2.8.0/libs/hk2-api-2.6.1.jar new file mode 100644 index 0000000..03d6eb0 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/hk2-api-2.6.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/hk2-locator-2.6.1.jar b/kafka/kafka_2.13-2.8.0/libs/hk2-locator-2.6.1.jar new file mode 100644 index 0000000..0906bd1 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/hk2-locator-2.6.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/hk2-utils-2.6.1.jar b/kafka/kafka_2.13-2.8.0/libs/hk2-utils-2.6.1.jar new file mode 100644 index 0000000..768bc48 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/hk2-utils-2.6.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-annotations-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-annotations-2.10.5.jar new file mode 100644 index 0000000..74a8b1b Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-annotations-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-core-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-core-2.10.5.jar new file mode 100644 index 0000000..ed18173 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-core-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-databind-2.10.5.1.jar 
b/kafka/kafka_2.13-2.8.0/libs/jackson-databind-2.10.5.1.jar new file mode 100644 index 0000000..3246c3d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-databind-2.10.5.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-dataformat-csv-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-dataformat-csv-2.10.5.jar new file mode 100644 index 0000000..df564d9 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-dataformat-csv-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-datatype-jdk8-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-datatype-jdk8-2.10.5.jar new file mode 100644 index 0000000..32d0200 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-datatype-jdk8-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-base-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-base-2.10.5.jar new file mode 100644 index 0000000..74d74f5 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-base-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-json-provider-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-json-provider-2.10.5.jar new file mode 100644 index 0000000..9d6dde1 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-jaxrs-json-provider-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-module-jaxb-annotations-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-module-jaxb-annotations-2.10.5.jar new file mode 100644 index 0000000..ea23eb7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-module-jaxb-annotations-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-module-paranamer-2.10.5.jar b/kafka/kafka_2.13-2.8.0/libs/jackson-module-paranamer-2.10.5.jar new file mode 100644 index 0000000..63efc6a Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-module-paranamer-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jackson-module-scala_2.13-2.10.5.jar 
b/kafka/kafka_2.13-2.8.0/libs/jackson-module-scala_2.13-2.10.5.jar new file mode 100644 index 0000000..5132663 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jackson-module-scala_2.13-2.10.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.activation-api-1.2.1.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.activation-api-1.2.1.jar new file mode 100644 index 0000000..bbfb52f Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.activation-api-1.2.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.annotation-api-1.3.5.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.annotation-api-1.3.5.jar new file mode 100644 index 0000000..606d992 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.annotation-api-1.3.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.inject-2.6.1.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.inject-2.6.1.jar new file mode 100644 index 0000000..cee6acd Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.inject-2.6.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.validation-api-2.0.2.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.validation-api-2.0.2.jar new file mode 100644 index 0000000..d68c9f7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.validation-api-2.0.2.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.ws.rs-api-2.1.6.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.ws.rs-api-2.1.6.jar new file mode 100644 index 0000000..4850659 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.ws.rs-api-2.1.6.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jakarta.xml.bind-api-2.3.2.jar b/kafka/kafka_2.13-2.8.0/libs/jakarta.xml.bind-api-2.3.2.jar new file mode 100644 index 0000000..b16236d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jakarta.xml.bind-api-2.3.2.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/javassist-3.27.0-GA.jar b/kafka/kafka_2.13-2.8.0/libs/javassist-3.27.0-GA.jar new file mode 100644 index 0000000..092e59b 
Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/javassist-3.27.0-GA.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/javax.servlet-api-3.1.0.jar b/kafka/kafka_2.13-2.8.0/libs/javax.servlet-api-3.1.0.jar new file mode 100644 index 0000000..6b14c3d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/javax.servlet-api-3.1.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/javax.ws.rs-api-2.1.1.jar b/kafka/kafka_2.13-2.8.0/libs/javax.ws.rs-api-2.1.1.jar new file mode 100644 index 0000000..3eabbf0 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/javax.ws.rs-api-2.1.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jaxb-api-2.3.0.jar b/kafka/kafka_2.13-2.8.0/libs/jaxb-api-2.3.0.jar new file mode 100644 index 0000000..0817c08 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jaxb-api-2.3.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-client-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-client-2.31.jar new file mode 100644 index 0000000..6f4cbe1 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-client-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-common-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-common-2.31.jar new file mode 100644 index 0000000..22a1638 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-common-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-2.31.jar new file mode 100644 index 0000000..5395f6d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-core-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-core-2.31.jar new file mode 100644 index 0000000..9e0d13f Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-container-servlet-core-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-hk2-2.31.jar 
b/kafka/kafka_2.13-2.8.0/libs/jersey-hk2-2.31.jar new file mode 100644 index 0000000..1c8faea Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-hk2-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-media-jaxb-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-media-jaxb-2.31.jar new file mode 100644 index 0000000..b30472a Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-media-jaxb-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jersey-server-2.31.jar b/kafka/kafka_2.13-2.8.0/libs/jersey-server-2.31.jar new file mode 100644 index 0000000..8093610 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jersey-server-2.31.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-client-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-client-9.4.39.v20210325.jar new file mode 100644 index 0000000..8f34564 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-client-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-continuation-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-continuation-9.4.39.v20210325.jar new file mode 100644 index 0000000..628c66a Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-continuation-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-http-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-http-9.4.39.v20210325.jar new file mode 100644 index 0000000..0e4b3a6 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-http-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-io-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-io-9.4.39.v20210325.jar new file mode 100644 index 0000000..410024d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-io-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-security-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-security-9.4.39.v20210325.jar new file mode 100644 index 0000000..959f426 
Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-security-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-server-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-server-9.4.39.v20210325.jar new file mode 100644 index 0000000..93ef391 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-server-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-servlet-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-servlet-9.4.39.v20210325.jar new file mode 100644 index 0000000..87712d8 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-servlet-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-servlets-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-servlets-9.4.39.v20210325.jar new file mode 100644 index 0000000..6fb55c2 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-servlets-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-util-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-util-9.4.39.v20210325.jar new file mode 100644 index 0000000..a55699d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-util-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jetty-util-ajax-9.4.39.v20210325.jar b/kafka/kafka_2.13-2.8.0/libs/jetty-util-ajax-9.4.39.v20210325.jar new file mode 100644 index 0000000..edfdee3 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jetty-util-ajax-9.4.39.v20210325.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jline-3.12.1.jar b/kafka/kafka_2.13-2.8.0/libs/jline-3.12.1.jar new file mode 100644 index 0000000..fcb8d4d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jline-3.12.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/jopt-simple-5.0.4.jar b/kafka/kafka_2.13-2.8.0/libs/jopt-simple-5.0.4.jar new file mode 100644 index 0000000..317b2b0 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/jopt-simple-5.0.4.jar differ diff 
--git a/kafka/kafka_2.13-2.8.0/libs/kafka-clients-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-clients-2.8.0.jar new file mode 100644 index 0000000..375b2f8 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-clients-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-log4j-appender-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-log4j-appender-2.8.0.jar new file mode 100644 index 0000000..83ca256 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-log4j-appender-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-metadata-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-metadata-2.8.0.jar new file mode 100644 index 0000000..ab64f11 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-metadata-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-raft-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-raft-2.8.0.jar new file mode 100644 index 0000000..93b2c24 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-raft-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-shell-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-shell-2.8.0.jar new file mode 100644 index 0000000..4f1271f Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-shell-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-streams-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-2.8.0.jar new file mode 100644 index 0000000..fc135de Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-streams-examples-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-examples-2.8.0.jar new file mode 100644 index 0000000..7bc6250 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-examples-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-streams-scala_2.13-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-scala_2.13-2.8.0.jar new file mode 100644 index 0000000..49b1bd1 Binary files /dev/null and 
b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-scala_2.13-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-streams-test-utils-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-test-utils-2.8.0.jar new file mode 100644 index 0000000..50e0135 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-streams-test-utils-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka-tools-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka-tools-2.8.0.jar new file mode 100644 index 0000000..ef49b42 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka-tools-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar new file mode 100644 index 0000000..31705d7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar.asc b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar.asc new file mode 100644 index 0000000..fb9b148 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-javadoc.jar.asc @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: BCPG v1.64 + +iQIcBAABCgAGBQJgdv10AAoJEAFsz8oOL9Vh/HQP/ixN0ulA7WE41KuceF60xJ7+ +kiM3r4pJaJkcUuJRn8Q2U9w5vym0AMlRe7OlbHtRztpsHnUj2fyNvw/lIcWvvHey +G9UWCrjfReq7+Lcb7XsCRJZ1KCnlyKz6r2Jdh5E7nNlH9Fxe1pZv/1M4SZVXeud9 +Rq5byoQsVWGdNinmPvwckFbdmcmyiSGWbUlCVe9MYx+NfxcUisREGDkPdqaOeN+Y +HCEdmY0vpeMgkIpJOEDJHGwrJmp5sWT50Qor5J4FFtBlVPFyEeWVxyL9/025WyZe +wFzAVBmUXYLdhuF1Tq2fmfYTJWasbnu34PHsduq3t0TVAFzOyLDbgoRUnLWRA2Ws +eJgRRn2gKzB40vNpLTk79PjPEe37BKto2HxK/hhZebgl4TlhwR+atn54mfnuL73W ++eCoNikh2Xww1d83Fyv43dUggIr6H+3yipIRqoch4gf5Q40CFArbTDY5QBjWwiTW +2Ee8QuqxK5J21+ThwH+O/3ThcHZPWQvSqKz5AdpgF0JR2/Tzp/or93fthuBAJ+cM +op4IrfCt2uJlUNHzZL5x+WVxwVyhoC7HGJ9tq90aeN1F8d+2wT7gWRVtfASDnbE2 +Se8G3rfJEx2YD2Q0d1oxRR8na0mtfxT3gu4jcxhjlfRaBEBoGZ20nhnaIh2ZT7xN +9FBtpv1yg9q9PW0pordO +=heAh +-----END PGP SIGNATURE----- diff --git 
a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar new file mode 100644 index 0000000..75d51d1 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar.asc b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar.asc new file mode 100644 index 0000000..6abc5e3 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-sources.jar.asc @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: BCPG v1.64 + +iQIcBAABCgAGBQJgdv10AAoJEAFsz8oOL9VhPeUP/0ZRAJo+0KZ4JycjaR1ea09S +FKXedSNGRMtU/ttwOuTMeICqvPDqxS5OvJD60Kp1yahODUIHvSb30WfjnQEdfM+5 +UiqOYz809NYVbwdLx951dm/c4++aQ+/cnucJLjl6R8Wj0hxOSU/Whb383tl7RBx8 +GVScgAEN+q0qC+jjNi6dm5TKjBxJqFbMcSlc34yk8Wno0d8y1ls1dBvNFmEtIZ1I ++gMUCuRKHrooudis5zPKXLrjCL8VZxqDAuovuRDLbVJDawbMB8sHYXE9phfOvoYi +27SDc7gpQJZ6Xwy2Ns1QaX8DowZgEx4xgZSzO1zHJfTvUoIxIFrg76L9CxbChr+P +eoWY0/UVpIiHRjLwkiVoHSCvgTKohL4kv74fID3xELdvClU4AMNAy9jVVakhLcgq +D/qK4t7+4VlWj/CZOoiN8ZrT/A2l3RcBp6OuXLcLQrEfuL1b2d/SkP9N3VGB3qfV +OUomFztr/CLxR8CNHvMWG2GbbVY/zDVaYPGMtHaorszLV/0+Fpz7+W0WRj4ZwU72 +fxIDKwM/nUrctJZS/D8zXveHtPypDvxsJVIwCGqULhtoYvKBmSaMFtVJ8eq8yO7A +BSbC1/nRGj8aaVItfKPjFoQtjOByzkuqQnG7WgwGIdiwOjOJz0kSkNvRAuRDntUa +j5ACmAY/hiwWEcmEqh7t +=zTA7 +-----END PGP SIGNATURE----- diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar new file mode 100644 index 0000000..d9bfb26 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar.asc b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar.asc new file mode 100644 index 0000000..cb14701 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test-sources.jar.asc @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: BCPG v1.64 
+ +iQIcBAABCgAGBQJgdv1zAAoJEAFsz8oOL9Vh3gIP/00PY5XCwsUFDR9hYnqdNAbZ +5xQWW5hCpnIWNbpAFPY5qf+oW40IuhsjcoJFKk6PNnAwptekx1I/2avz3PD3F613 +mj4b1fPx1LPFtS7+IUEHdb+KuzS4pIQ6zMyRwbISjmJFyPUsxULsx6nrFix53201 ++EPSlxdrygGIb1nlGzSaBe+4UybdsODo1HagXV6jBIsMntFkW7Y5wZAZh+QFQnOm +wPZPFBiwZ1Ly2R1eTs2uWvPfOhoG7Vgdoc1LWIJfWXnlLqE+eSdn7/TMxQ9KYKkZ +1UjWzLE5YEj6rVj4TnxpqNKq8hMGSX6ZDFEXlu95UJuqSZt+o4Z4PxYXY8SNo3U+ +uYpfftnkL26DVyqQS3BjLILSsnICtsuGLnlVdJJr3NzWQRCFVvsuWKyKRixlruk9 +KUDvZN4aR2FJW5N0Xy3cCS6b5m0WoiH14PiA0rgAq1KYD0QMPgFm/FhZkovTslcG +wfZYHtTULWO3NImkPSfRYUX+2RyEkHhfELfnLmQ8P560ULFW8Ckj3OoBk/Im5Faw +rO//gVonTKOuyh9EiGjHVx+VaihbIjf8GBZ7b9q/yZZqTybAashDpqnFD+vAo+jy +YwvxM6fF2CekJpD0Sk+eOcJNB9Ujwz973bDGuqSYhXG330xa1JbJ1hH73p10fsvR +g6OUBQRbTDCXmODUTcsf +=gek7 +-----END PGP SIGNATURE----- diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar new file mode 100644 index 0000000..7a7e9f7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar.asc b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar.asc new file mode 100644 index 0000000..85b5dfe --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0-test.jar.asc @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: BCPG v1.64 + +iQIcBAABCgAGBQJgdv1zAAoJEAFsz8oOL9VhGp0QAKRfapWQIdqIRBWZUEzHFlbJ +Z+x0pLGiRJMDyoKj4pDqMyHWaTkW1Oav+XXQXlXHrZVClTiAnwyvYtpVQ42NbDu1 ++EyNuJ20phLVRMRhlMYYNaOVsI309a13RmmRxKWy4L147SZr9v+ay3g+jkjWU/GO +SjnQpT4VrmZjBvbfNYBlP7+yXevoIajpaozau1pIfROzsTdAXDW4pYbDZq7R/htq +fyxrqM2X3pY+WvMm2NpJ+zG3oZtAfSfI4/57PQse9dqWFzxzoGBDW/S9EJdqBWMT +M+6q36N+vYDQ8Y3Wlhe3lZfpsbQ8s2+JbloxO+c5qLyTJpLFnj0lMuc5AqQ4f8pd +B2uVQ3J/dlhj9JL9KA2nL0l1kQ40itUrzSEEsyhkeM/JExpG3GOjUrh0i1fAqaAA +kl04d9cfRNTYnOLL3d1D6M1rcQA9ydBk3MbvAV+3pQLSjKpkGuYDQeH4IcBBKW0F +XvIZEp+xw0eFLoT3DqPYkZJ/ry0r7+JFGJ+zVRU/djs8msspx5nRsEhB+6hvD3sj 
+SFGwAyJXmi8PMDrJTBqIewNgFC0OXoN4RQVxN0mQKaKVy98gqD2JaRQOjALJvDQQ +kJN2lyZ6OYLfsD8vHm2lYVU3WtRFMugWj/gWs2qJ6ikrDIkLbsFav83pS59gRao/ +vCnwpEMCseMe6XiLH2x5 +=0naQ +-----END PGP SIGNATURE----- diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar new file mode 100644 index 0000000..158454d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar.asc b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar.asc new file mode 100644 index 0000000..1ea0782 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/libs/kafka_2.13-2.8.0.jar.asc @@ -0,0 +1,17 @@ +-----BEGIN PGP SIGNATURE----- +Version: BCPG v1.64 + +iQIcBAABCgAGBQJgdv10AAoJEAFsz8oOL9VhscQP/2Fac2u6DezA+EToziIjIBMh +9gUfnWBBsY//tAaxosYnKp/VX2eRUd2XZ3ChKj1mk1qSosQ8jUOpu8fHgDV45t1T +uWsV1KsTTHu+fgy8zdQ/G4XGQYPdFalc9v8kvHI//ZlHWXcSTvR/B8m4pKyLg/mu +1xQuaLycrEt5dpXIxq+FYG+GnA+lJIrQlq11ZDTWJMSg1qnGVAXLX333HLIIlC5V +YoG7HFCImqX7aHe6sapKGlXYSeMLgO+wnLFXL9cjvxv+2wd+G4DSAz8s05mYiPjf +Bqx2Pp34ah3Oqh8lh4RL9aRqGWAmHrbFQkKQmo4sx8RW1qXp6qaMwSASPAcJQC5e +8gpwGwwo781l1HeKZO633ASrQs9RbQ4rgs4BciT+fhCu63ytjN1vRLz7vLbh9D5t +ZkXsJw33BU6h639sFPKu8eMyYe6ArqQ/5QsLMA+kE88b0wH5qCi57zf40iQQ+HAc +uULfefE+PnEIJrALvOcY9QDJVGxyvS6SOINZ0Fl6sLnfkxLMG/CV5B0UVWWWntNA +ei9F8b7utP8PzSxTnxCr3yWGnE2qVz2hrJeYkXdadZS3Tq6Qt2lVMZ+n0hf1szVn +0DmY27c/cADLIeviJHQzmB2ifVRCZP5dBGn1WqUWSg8oZfh5z+nYktuvdbZIROOQ +DvZIiIsAYS7eJHuJBYQs +=oHk2 +-----END PGP SIGNATURE----- diff --git a/kafka/kafka_2.13-2.8.0/libs/log4j-1.2.17.jar b/kafka/kafka_2.13-2.8.0/libs/log4j-1.2.17.jar new file mode 100644 index 0000000..1d425cf Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/log4j-1.2.17.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/lz4-java-1.7.1.jar b/kafka/kafka_2.13-2.8.0/libs/lz4-java-1.7.1.jar new file mode 100644 index 0000000..95f57ca Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/lz4-java-1.7.1.jar differ diff 
--git a/kafka/kafka_2.13-2.8.0/libs/maven-artifact-3.6.3.jar b/kafka/kafka_2.13-2.8.0/libs/maven-artifact-3.6.3.jar new file mode 100644 index 0000000..42c8111 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/maven-artifact-3.6.3.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/metrics-core-2.2.0.jar b/kafka/kafka_2.13-2.8.0/libs/metrics-core-2.2.0.jar new file mode 100644 index 0000000..0f6d1cb Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/metrics-core-2.2.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-buffer-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-buffer-4.1.62.Final.jar new file mode 100644 index 0000000..2f02134 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-buffer-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-codec-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-codec-4.1.62.Final.jar new file mode 100644 index 0000000..d637c6b Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-codec-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-common-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-common-4.1.62.Final.jar new file mode 100644 index 0000000..b9c80a8 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-common-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-handler-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-handler-4.1.62.Final.jar new file mode 100644 index 0000000..c28e942 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-handler-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-resolver-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-resolver-4.1.62.Final.jar new file mode 100644 index 0000000..45e3662 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-resolver-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-transport-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-transport-4.1.62.Final.jar new file mode 
100644 index 0000000..e277f71 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-transport-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-epoll-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-epoll-4.1.62.Final.jar new file mode 100644 index 0000000..0d1ea38 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-epoll-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-unix-common-4.1.62.Final.jar b/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-unix-common-4.1.62.Final.jar new file mode 100644 index 0000000..66e66cc Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/netty-transport-native-unix-common-4.1.62.Final.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/osgi-resource-locator-1.0.3.jar b/kafka/kafka_2.13-2.8.0/libs/osgi-resource-locator-1.0.3.jar new file mode 100644 index 0000000..0f3c386 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/osgi-resource-locator-1.0.3.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/paranamer-2.8.jar b/kafka/kafka_2.13-2.8.0/libs/paranamer-2.8.jar new file mode 100644 index 0000000..0bf659b Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/paranamer-2.8.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/plexus-utils-3.2.1.jar b/kafka/kafka_2.13-2.8.0/libs/plexus-utils-3.2.1.jar new file mode 100644 index 0000000..d749dd7 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/plexus-utils-3.2.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/reflections-0.9.12.jar b/kafka/kafka_2.13-2.8.0/libs/reflections-0.9.12.jar new file mode 100644 index 0000000..0f176b9 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/reflections-0.9.12.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/rocksdbjni-5.18.4.jar b/kafka/kafka_2.13-2.8.0/libs/rocksdbjni-5.18.4.jar new file mode 100644 index 0000000..86ddb6e Binary files /dev/null and 
b/kafka/kafka_2.13-2.8.0/libs/rocksdbjni-5.18.4.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/scala-collection-compat_2.13-2.3.0.jar b/kafka/kafka_2.13-2.8.0/libs/scala-collection-compat_2.13-2.3.0.jar new file mode 100644 index 0000000..cf5a1ea Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/scala-collection-compat_2.13-2.3.0.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/scala-java8-compat_2.13-0.9.1.jar b/kafka/kafka_2.13-2.8.0/libs/scala-java8-compat_2.13-0.9.1.jar new file mode 100644 index 0000000..5063339 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/scala-java8-compat_2.13-0.9.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/scala-library-2.13.5.jar b/kafka/kafka_2.13-2.8.0/libs/scala-library-2.13.5.jar new file mode 100644 index 0000000..1eb317d Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/scala-library-2.13.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/scala-logging_2.13-3.9.2.jar b/kafka/kafka_2.13-2.8.0/libs/scala-logging_2.13-3.9.2.jar new file mode 100644 index 0000000..1e44651 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/scala-logging_2.13-3.9.2.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/scala-reflect-2.13.5.jar b/kafka/kafka_2.13-2.8.0/libs/scala-reflect-2.13.5.jar new file mode 100644 index 0000000..ebd9e24 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/scala-reflect-2.13.5.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/slf4j-api-1.7.30.jar b/kafka/kafka_2.13-2.8.0/libs/slf4j-api-1.7.30.jar new file mode 100644 index 0000000..29ac26f Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/slf4j-api-1.7.30.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/slf4j-log4j12-1.7.30.jar b/kafka/kafka_2.13-2.8.0/libs/slf4j-log4j12-1.7.30.jar new file mode 100644 index 0000000..c6bc8b2 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/slf4j-log4j12-1.7.30.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/snappy-java-1.1.8.1.jar 
b/kafka/kafka_2.13-2.8.0/libs/snappy-java-1.1.8.1.jar new file mode 100644 index 0000000..896967f Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/snappy-java-1.1.8.1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/zookeeper-3.5.9.jar b/kafka/kafka_2.13-2.8.0/libs/zookeeper-3.5.9.jar new file mode 100644 index 0000000..f5eceaa Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/zookeeper-3.5.9.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/zookeeper-jute-3.5.9.jar b/kafka/kafka_2.13-2.8.0/libs/zookeeper-jute-3.5.9.jar new file mode 100644 index 0000000..3c9c184 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/zookeeper-jute-3.5.9.jar differ diff --git a/kafka/kafka_2.13-2.8.0/libs/zstd-jni-1.4.9-1.jar b/kafka/kafka_2.13-2.8.0/libs/zstd-jni-1.4.9-1.jar new file mode 100644 index 0000000..5e88a7b Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/libs/zstd-jni-1.4.9-1.jar differ diff --git a/kafka/kafka_2.13-2.8.0/licenses/CDDL+GPL-1.1 b/kafka/kafka_2.13-2.8.0/licenses/CDDL+GPL-1.1 new file mode 100644 index 0000000..4b156e6 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/CDDL+GPL-1.1 @@ -0,0 +1,760 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. 
"Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. 
You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. 
+ + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. 
In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE + TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER + FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR + LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE + POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT + APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH + PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH + LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR + LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION + AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined + in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software documentation" + as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent + with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 + (June 1995), all U.S. Government End Users acquire Covered Software + with only those rights set forth herein. This U.S. Government Rights + clause is in lieu of, and supersedes, any other FAR, DFAR, or other + clause or provision that addresses Government rights in computer + software under this License. 
+ +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. 
+ +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. + + + + The GNU General Public License (GPL) Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +51 Franklin Street, Fifth Floor +Boston, MA 02110-1335 +USA + +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to +share and change it. By contrast, the GNU General Public License is +intended to guarantee your freedom to share and change free software--to +make sure the software is free for all its users. This General Public +License applies to most of the Free Software Foundation's software and +to any other program whose authors commit to using it. (Some other Free +Software Foundation software is covered by the GNU Library General +Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. +Our General Public Licenses are designed to make sure that you have the +freedom to distribute copies of free software (and charge for this +service if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs; and that you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone +to deny you these rights or to ask you to surrender the rights. 
These +restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis +or for a fee, you must give the recipients all the rights that you have. +You must make sure that they, too, receive or can get the source code. +And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + +Finally, any free program is threatened constantly by software patents. +We wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program +proprietary. To prevent this, we have made it clear that any patent must +be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and +modification follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a +notice placed by the copyright holder saying it may be distributed under +the terms of this General Public License. The "Program", below, refers +to any such program or work, and a "work based on the Program" means +either the Program or any derivative work under copyright law: that is +to say, a work containing the Program or a portion of it, either +verbatim or with modifications and/or translated into another language. 
+(Hereinafter, translation is included without limitation in the term +"modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of running +the Program is not restricted, and the output from the Program is +covered only if its contents constitute a work based on the Program +(independent of having been made by running the Program). Whether that +is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously +and appropriately publish on each copy an appropriate copyright notice +and disclaimer of warranty; keep intact all the notices that refer to +this License and to the absence of any warranty; and give any other +recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of +it, thus forming a work based on the Program, and copy and distribute +such modifications or work under the terms of Section 1 above, provided +that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any part + thereof, to be licensed as a whole at no charge to all third parties + under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a notice + that there is no warranty (or else, saying that you provide a + warranty) and that users may redistribute the program under these + conditions, and telling the user how to view a copy of this License. + (Exception: if the Program itself is interactive but does not + normally print such an announcement, your work based on the Program + is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, and +can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based on +the Program, the distribution of the whole must be on the terms of this +License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of a +storage or distribution medium does not bring the other work under the +scope of this License. + +3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections 1 + and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your cost + of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to + distribute corresponding source code. (This alternative is allowed + only for noncommercial distribution and only if you received the + program in object code or executable form with such an offer, in + accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source code +means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to control +compilation and installation of the executable. However, as a special +exception, the source code distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies the +executable. 
+ +If distribution of executable or object code is made by offering access +to copy from a designated place, then offering equivalent access to copy +the source code from the same place counts as distribution of the source +code, even though third parties are not compelled to copy the source +along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt otherwise +to copy, modify, sublicense or distribute the Program is void, and will +automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will +not have their licenses terminated so long as such parties remain in +full compliance. + +5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and all +its terms and conditions for copying, distributing or modifying the +Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further restrictions +on the recipients' exercise of the rights granted herein. You are not +responsible for enforcing compliance by third parties to this License. + +7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot distribute +so as to satisfy simultaneously your obligations under this License and +any other pertinent obligations, then as a consequence you may not +distribute the Program at all. For example, if a patent license would +not permit royalty-free redistribution of the Program by all those who +receive copies directly or indirectly through you, then the only way you +could satisfy both it and this License would be to refrain entirely from +distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is implemented +by public license practices. Many people have made generous +contributions to the wide range of software distributed through that +system in reliance on consistent application of that system; it is up to +the author/donor to decide if he or she is willing to distribute +software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be +a consequence of the rest of this License. + +8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License may +add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among countries +not thus excluded. In such case, this License incorporates the +limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new +versions of the General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Program does not specify a version +number of this License, you may choose any version ever published by the +Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the +author to ask for permission. For software which is copyrighted by the +Free Software Foundation, write to the Free Software Foundation; we +sometimes make exceptions for this. Our decision will be guided by the +two goals of preserving the free status of all derivatives of our free +software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, +EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE +ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH +YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL +NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR +DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM +(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED +INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF +THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR +OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively convey +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. 
+ Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type + `show w'. This is free software, and you are welcome to redistribute + it under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the +appropriate parts of the General Public License. Of course, the commands +you use may be called something other than `show w' and `show c'; they +could even be mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + program `Gnomovision' (which makes passes at compilers) written by + James Hacker. + + signature of Ty Coon, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program +into proprietary programs. 
If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications +with the library. If this is what you want to do, use the GNU Library +General Public License instead of this License. + +# + +Certain source files distributed by Oracle America, Inc. and/or its +affiliates are subject to the following clarification and special +exception to the GPLv2, based on the GNU Project exception for its +Classpath libraries, known as the GNU Classpath Exception, but only +where Oracle has expressly included in the particular source file's +header the words "Oracle designates this particular file as subject to +the "Classpath" exception as provided by Oracle in the LICENSE file +that accompanied this code." + +You should also note that Oracle includes multiple, independent +programs in this software package. Some of those programs are provided +under licenses deemed incompatible with the GPLv2 by the Free Software +Foundation and others. For example, the package includes programs +licensed under the Apache License, Version 2.0. Such programs are +licensed to you under their original licenses. + +Oracle facilitates your further distribution of this package by adding +the Classpath Exception to the necessary parts of its GPLv2 code, which +permits you to use that code in combination with other independent +modules not licensed under the GPLv2. However, note that this would +not permit you to commingle code under an incompatible license with +Oracle's GPLv2 licensed code by, for example, cutting and pasting such +code into a file also containing Oracle's GPLv2 licensed code and then +distributing the result. 
Additionally, if you were to remove the +Classpath Exception from any of the files to which it applies and +distribute the result, you would likely be required to license some or +all of the other code in that distribution under the GPLv2 as well, and +since the GPLv2 is incompatible with the license terms of some items +included in the distribution by Oracle, removing the Classpath +Exception could therefore effectively compromise your ability to +further distribute the package. + +Proceed with caution and we recommend that you obtain the advice of a +lawyer skilled in open source matters before removing the Classpath +Exception or making modifications to this package which may +subsequently be redistributed and/or involve the use of third party +software. + +CLASSPATH EXCEPTION +Linking this library statically or dynamically with other modules is +making a combined work based on this library. Thus, the terms and +conditions of the GNU General Public License version 2 cover the whole +combination. + +As a special exception, the copyright holders of this library give you +permission to link this library with independent modules to produce an +executable, regardless of the license terms of these independent +modules, and to copy and distribute the resulting executable under +terms of your choice, provided that you also meet, for each linked +independent module, the terms and conditions of the license of that +module. An independent module is a module which is not derived from or +based on this library. If you modify this library, you may extend this +exception to your version of the library, but you are not obligated to +do so. If you do not wish to do so, delete this exception statement +from your version. 
+ diff --git a/kafka/kafka_2.13-2.8.0/licenses/DWTFYWTPL b/kafka/kafka_2.13-2.8.0/licenses/DWTFYWTPL new file mode 100644 index 0000000..5a8e332 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/DWTFYWTPL @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/kafka/kafka_2.13-2.8.0/licenses/argparse-MIT b/kafka/kafka_2.13-2.8.0/licenses/argparse-MIT new file mode 100644 index 0000000..773b0df --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/argparse-MIT @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2011-2017 Tatsuhiro Tsujikawa + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ diff --git a/kafka/kafka_2.13-2.8.0/licenses/eclipse-distribution-license-1.0 b/kafka/kafka_2.13-2.8.0/licenses/eclipse-distribution-license-1.0 new file mode 100644 index 0000000..5f06513 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/eclipse-distribution-license-1.0 @@ -0,0 +1,13 @@ +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/kafka/kafka_2.13-2.8.0/licenses/eclipse-public-license-2.0 b/kafka/kafka_2.13-2.8.0/licenses/eclipse-public-license-2.0 new file mode 100644 index 0000000..c9f1425 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/eclipse-public-license-2.0 @@ -0,0 +1,87 @@ +Eclipse Public License - v 2.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE (“AGREEMENT”). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. +1. DEFINITIONS + +“Contribution” means: + + a) in the case of the initial Contributor, the initial content Distributed under this Agreement, and + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from and are Distributed by that particular Contributor. A Contribution “originates” from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include changes or additions to the Program that are not Modified Works. + +“Contributor” means any person or entity that Distributes the Program. + +“Licensed Patents” mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +“Program” means the Contributions Distributed in accordance with this Agreement. 
+ +“Recipient” means anyone who receives the Program under this Agreement or any Secondary License (as applicable), including Contributors. + +“Derivative Works” shall mean any work, whether in Source Code or other form, that is based on (or derived from) the Program and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. + +“Modified Works” shall mean any work in Source Code or other form that results from an addition to, deletion from, or modification of the contents of the Program, including, for purposes of clarity any new file in Source Code form that contains any contents of the Program. Modified Works shall not include works that contain only declarations, interfaces, types, classes, structures, or files of the Program solely in each case in order to link to, bind by name, or subclass the Program or Modified Works thereof. + +“Distribute” means the acts of a) distributing or b) making available in any manner that enables the transfer of a copy. + +“Source Code” means the form of a Program preferred for making modifications, including but not limited to software source code, documentation source, and configuration files. + +“Secondary License” means either the GNU General Public License, Version 2.0, or any later versions of that license, including any exceptions or additional permissions as identified by the initial Contributor. +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, Distribute and sublicense the Contribution of such Contributor, if any, and such Derivative Works. 
+ b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in Source Code or other form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to Distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + e) Notwithstanding the terms of any Secondary License, no Contributor makes additional grants to any Recipient (other than those set forth in this Agreement) as a result of such Recipient's receipt of the Program under the terms of a Secondary License (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in accordance with section 3.2, and the Contributor must accompany the Program with a statement that the Source Code for the Program is available under this Agreement, and informs Recipients how to obtain it in a reasonable manner on or through a medium customarily used for software exchange; and + b) the Contributor may Distribute the Program under a license different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + ii) effectively excludes on behalf of all other Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + iii) does not attempt to limit or alter the recipients' rights in the Source Code under section 3.2; and + iv) requires any subsequent distribution of the Program by any party to be under a license that satisfies the requirements of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the Program (i) is combined with other material in a separate file or files made available under a Secondary License, and (ii) the initial Contributor attached to the Source Code the notice described in Exhibit A of this Agreement, then the Program may be made available under the terms of such Secondary Licenses, and + b) a copy of this Agreement must be included with each copy of the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, trademark, attribution notices, disclaimers of warranty, or limitations of liability (‘notices’) contained within the Program from any copy of the Program which they Distribute, provided that Contributors may add their own appropriate notices. +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor (“Commercial Contributor”) hereby agrees to defend and indemnify every other Contributor (“Indemnified Contributor”) against any losses, damages and costs (collectively “Losses”) arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. 
If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. 
The Program (including Contributions) may always be Distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to Distribute the Program (including its Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. Nothing in this Agreement is intended to be enforceable by any entity that is not a Contributor or Recipient. No third-party beneficiary rights are created under this Agreement. +Exhibit A – Form of Secondary Licenses Notice + +“This Source Code may also be made available under the following Secondary Licenses when the conditions for such availability set forth in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), version(s), and exceptions or additional permissions here}.” + + Simply including a copy of this Agreement, including this Exhibit A is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. + + You may add additional accurate notices of copyright ownership. + diff --git a/kafka/kafka_2.13-2.8.0/licenses/jopt-simple-MIT b/kafka/kafka_2.13-2.8.0/licenses/jopt-simple-MIT new file mode 100644 index 0000000..54b2732 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/jopt-simple-MIT @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2016 Paul R. Holser, Jr. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ diff --git a/kafka/kafka_2.13-2.8.0/licenses/paranamer-BSD-3-clause b/kafka/kafka_2.13-2.8.0/licenses/paranamer-BSD-3-clause new file mode 100644 index 0000000..9eab879 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/paranamer-BSD-3-clause @@ -0,0 +1,29 @@ +[ ParaNamer used to be 'Pubic Domain', but since it includes a small piece of ASM it is now the same license as that: BSD ] + + Portions copyright (c) 2006-2018 Paul Hammant & ThoughtWorks Inc + Portions copyright (c) 2000-2007 INRIA, France Telecom + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. diff --git a/kafka/kafka_2.13-2.8.0/licenses/slf4j-MIT b/kafka/kafka_2.13-2.8.0/licenses/slf4j-MIT new file mode 100644 index 0000000..315bd49 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/slf4j-MIT @@ -0,0 +1,24 @@ +Copyright (c) 2004-2017 QOS.ch +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + diff --git a/kafka/kafka_2.13-2.8.0/licenses/zstd-jni-BSD-2-clause b/kafka/kafka_2.13-2.8.0/licenses/zstd-jni-BSD-2-clause new file mode 100644 index 0000000..66abb8a --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/licenses/zstd-jni-BSD-2-clause @@ -0,0 +1,26 @@ +Zstd-jni: JNI bindings to Zstd Library + +Copyright (c) 2015-present, Luben Karavelov/ All rights reserved. + +BSD License + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/kafka/kafka_2.13-2.8.0/logs/controller.log b/kafka/kafka_2.13-2.8.0/logs/controller.log new file mode 100644 index 0000000..64a2b81 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/controller.log @@ -0,0 +1,138 @@ +[2021-05-10 04:16:22,996] DEBUG preRegister called. Server=com.sun.jmx.mbeanserver.JmxMBeanServer@5479e3f, name=log4j:logger=kafka.controller (kafka.controller) +[2021-05-10 04:16:55,254] INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:16:55,320] INFO [Controller id=0] 0 successfully elected as the controller. 
Epoch incremented to 1 and epoch zk version is now 1 (kafka.controller.KafkaController) +[2021-05-10 04:16:55,324] INFO [Controller id=0] Creating FeatureZNode at path: /feature with contents: FeatureZNode(Enabled,Features{}) (kafka.controller.KafkaController) +[2021-05-10 04:16:55,432] INFO [Controller id=0] Registering handlers (kafka.controller.KafkaController) +[2021-05-10 04:16:55,440] INFO [Controller id=0] Deleting log dir event notifications (kafka.controller.KafkaController) +[2021-05-10 04:16:55,449] INFO [Controller id=0] Deleting isr change notifications (kafka.controller.KafkaController) +[2021-05-10 04:16:55,451] INFO [Controller id=0] Initializing controller context (kafka.controller.KafkaController) +[2021-05-10 04:16:55,484] INFO [Controller id=0] Initialized broker epochs cache: HashMap(0 -> 25) (kafka.controller.KafkaController) +[2021-05-10 04:16:55,497] DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:16:55,516] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) +[2021-05-10 04:16:55,596] INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:16:55,597] INFO [Controller id=0] Currently shutting brokers in the cluster: HashSet() (kafka.controller.KafkaController) +[2021-05-10 04:16:55,603] INFO [RequestSendThread controllerId=0] Starting (kafka.controller.RequestSendThread) +[2021-05-10 04:16:55,612] INFO [Controller id=0] Current list of topics in the cluster: HashSet() (kafka.controller.KafkaController) +[2021-05-10 04:16:55,612] INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) +[2021-05-10 04:16:55,630] INFO [Controller id=0] List of topics to be deleted: (kafka.controller.KafkaController) +[2021-05-10 04:16:55,631] INFO [Controller id=0] List of topics ineligible for deletion: 
(kafka.controller.KafkaController) +[2021-05-10 04:16:55,631] INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) +[2021-05-10 04:16:55,636] INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: HashSet() (kafka.controller.TopicDeletionManager) +[2021-05-10 04:16:55,636] INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) +[2021-05-10 04:16:55,681] INFO [ReplicaStateMachine controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:16:55,682] INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:16:55,684] INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:16:55,684] DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> HashMap() (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:16:55,684] INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:16:55,685] INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:16:55,694] INFO [RequestSendThread controllerId=0] Controller 0 connected to osboxes:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) +[2021-05-10 04:16:55,707] DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> HashMap() (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:16:55,707] INFO [Controller id=0] Ready to serve as the new controller with epoch 1 (kafka.controller.KafkaController) +[2021-05-10 04:16:55,722] INFO [Controller id=0] Partitions undergoing preferred replica election: 
(kafka.controller.KafkaController) +[2021-05-10 04:16:55,723] INFO [Controller id=0] Partitions that completed preferred replica election: (kafka.controller.KafkaController) +[2021-05-10 04:16:55,723] INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) +[2021-05-10 04:16:55,723] INFO [Controller id=0] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) +[2021-05-10 04:16:55,724] INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) +[2021-05-10 04:16:55,789] INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) +[2021-05-10 04:17:00,791] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:17:00,791] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:18:51,276] INFO [Controller id=0] New topics: [Set(my-topic)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(my-topic,Some(i1eUlnOpRKmfLlxxBdsCjQ),Map(my-topic-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) +[2021-05-10 04:18:51,284] INFO [Controller id=0] New partition creation callback for my-topic-0 (kafka.controller.KafkaController) +[2021-05-10 04:22:00,801] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:22:00,802] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:22:00,803] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 Map() (kafka.controller.KafkaController) +[2021-05-10 04:22:00,803] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 
(kafka.controller.KafkaController) +[2021-05-10 04:22:18,012] INFO [Controller id=0] New topics: [Set(json-topic)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(json-topic,Some(bMFOKtaKTD283FnN_rOQBQ),Map(json-topic-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) +[2021-05-10 04:22:18,012] INFO [Controller id=0] New partition creation callback for json-topic-0 (kafka.controller.KafkaController) +[2021-05-10 04:27:00,804] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:27:00,804] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:27:00,804] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 Map() (kafka.controller.KafkaController) +[2021-05-10 04:27:00,804] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:28:57,477] INFO [Controller id=0] New topics: [Set(__consumer_offsets)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(__consumer_offsets,Some(G84NQKQ7SaqyyQcAbMweoA),HashMap(__consumer_offsets-22 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-30 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-25 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-35 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-37 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-38 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-13 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-8 -> ReplicaAssignment(replicas=0, addingReplicas=, 
removingReplicas=), __consumer_offsets-21 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-4 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-27 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-7 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-9 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-46 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-41 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-33 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-23 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-49 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-47 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-16 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-28 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-31 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-36 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-42 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-3 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-18 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-15 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-24 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-17 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-48 -> 
ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-19 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-11 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-2 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-43 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-6 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-14 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-20 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-44 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-39 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-12 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-45 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-1 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-5 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-26 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-29 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-34 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-10 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-32 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-40 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) +[2021-05-10 04:28:57,477] INFO [Controller id=0] New 
partition creation callback for __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-37,__consumer_offsets-38,__consumer_offsets-13,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.controller.KafkaController) +[2021-05-10 04:32:00,804] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:32:00,805] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:32:00,808] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) +[2021-05-10 04:32:00,808] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:37:00,809] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:37:00,810] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:37:00,815] 
DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) +[2021-05-10 04:37:00,815] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:42:00,816] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:42:00,816] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:42:00,817] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) +[2021-05-10 04:42:00,817] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:47:00,823] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:47:00,823] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:47:00,824] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) +[2021-05-10 04:47:00,824] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:47:24,083] DEBUG preRegister called. 
Server=com.sun.jmx.mbeanserver.JmxMBeanServer@5479e3f, name=log4j:logger=kafka.controller (kafka.controller) +[2021-05-10 04:47:24,434] INFO [Controller id=0] Shutting down broker 0 (kafka.controller.KafkaController) +[2021-05-10 04:47:24,434] DEBUG [Controller id=0] All shutting down brokers: 0 (kafka.controller.KafkaController) +[2021-05-10 04:47:24,434] DEBUG [Controller id=0] Live brokers: (kafka.controller.KafkaController) +[2021-05-10 04:47:24,437] TRACE [Controller id=0] All leaders = __consumer_offsets-13 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-46 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-9 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-42 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-21 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-17 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-30 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-26 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-5 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-38 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),my-topic-0 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-1 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-34 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-16 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-45 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-12 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-41 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-24 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-20 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-49 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-0 -> 
(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-29 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-25 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-8 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-37 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-4 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-33 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-15 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-48 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-11 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-44 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-23 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-19 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-32 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-28 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-7 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-40 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-3 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),json-topic-0 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-36 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-47 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-14 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-43 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-10 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-22 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-18 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-31 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-27 -> 
(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-39 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-6 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-35 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-2 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.KafkaController) +[2021-05-10 04:47:26,237] INFO [ControllerEventThread controllerId=0] Shutting down (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:47:26,237] INFO [ControllerEventThread controllerId=0] Shutdown completed (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:47:26,237] INFO [ControllerEventThread controllerId=0] Stopped (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:47:26,238] DEBUG [Controller id=0] Resigning (kafka.controller.KafkaController) +[2021-05-10 04:47:26,238] DEBUG [Controller id=0] Unregister BrokerModifications handler for Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:47:26,239] INFO [PartitionStateMachine controllerId=0] Stopped partition state machine (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:47:26,240] INFO [ReplicaStateMachine controllerId=0] Stopped replica state machine (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:47:26,240] INFO [RequestSendThread controllerId=0] Shutting down (kafka.controller.RequestSendThread) +[2021-05-10 04:47:26,240] INFO [RequestSendThread controllerId=0] Shutdown completed (kafka.controller.RequestSendThread) +[2021-05-10 04:47:26,240] INFO [RequestSendThread controllerId=0] Stopped (kafka.controller.RequestSendThread) +[2021-05-10 04:47:26,245] INFO [Controller id=0] Resigned (kafka.controller.KafkaController) +[2021-05-10 04:47:38,716] DEBUG preRegister called. 
Server=com.sun.jmx.mbeanserver.JmxMBeanServer@5479e3f, name=log4j:logger=kafka.controller (kafka.controller) +[2021-05-10 04:47:41,693] INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:47:41,736] INFO [Controller id=0] 0 successfully elected as the controller. Epoch incremented to 2 and epoch zk version is now 2 (kafka.controller.KafkaController) +[2021-05-10 04:47:41,739] INFO [Controller id=0] Registering handlers (kafka.controller.KafkaController) +[2021-05-10 04:47:41,744] INFO [Controller id=0] Deleting log dir event notifications (kafka.controller.KafkaController) +[2021-05-10 04:47:41,746] INFO [Controller id=0] Deleting isr change notifications (kafka.controller.KafkaController) +[2021-05-10 04:47:41,751] INFO [Controller id=0] Initializing controller context (kafka.controller.KafkaController) +[2021-05-10 04:47:41,784] INFO [Controller id=0] Initialized broker epochs cache: HashMap(0 -> 163) (kafka.controller.KafkaController) +[2021-05-10 04:47:41,815] DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:47:41,908] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) +[2021-05-10 04:47:41,990] INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:47:41,991] INFO [Controller id=0] Currently shutting brokers in the cluster: HashSet() (kafka.controller.KafkaController) +[2021-05-10 04:47:41,991] INFO [Controller id=0] Current list of topics in the cluster: HashSet(my-topic, json-topic, __consumer_offsets) (kafka.controller.KafkaController) +[2021-05-10 04:47:41,991] INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) +[2021-05-10 04:47:41,991] INFO [RequestSendThread controllerId=0] Starting 
(kafka.controller.RequestSendThread) +[2021-05-10 04:47:42,002] INFO [Controller id=0] List of topics to be deleted: (kafka.controller.KafkaController) +[2021-05-10 04:47:42,006] INFO [Controller id=0] List of topics ineligible for deletion: (kafka.controller.KafkaController) +[2021-05-10 04:47:42,006] INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) +[2021-05-10 04:47:42,007] INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: HashSet() (kafka.controller.TopicDeletionManager) +[2021-05-10 04:47:42,027] INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) +[2021-05-10 04:47:42,076] INFO [ReplicaStateMachine controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:47:42,084] INFO [RequestSendThread controllerId=0] Controller 0 connected to osboxes:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) +[2021-05-10 04:47:42,088] INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:47:42,158] INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:47:42,159] DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> HashMap([Topic=__consumer_offsets,Partition=40,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=27,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=49,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=47,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=3,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=18,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=44,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=8,Replica=0] -> 
OnlineReplica, [Topic=__consumer_offsets,Partition=34,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=25,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=14,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=24,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=36,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=42,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=45,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=11,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=32,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=12,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=30,Replica=0] -> OnlineReplica, [Topic=json-topic,Partition=0,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=9,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=39,Replica=0] -> OnlineReplica, [Topic=my-topic,Partition=0,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=38,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=23,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=19,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=17,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=41,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=37,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=48,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=29,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=10,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=46,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=1,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=16,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=5,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=15,Replica=0] -> OnlineReplica, 
[Topic=__consumer_offsets,Partition=4,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=6,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=7,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=43,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=0,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=20,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=31,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=28,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=26,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=2,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=33,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=22,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=21,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=13,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=35,Replica=0] -> OnlineReplica) (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:47:42,160] INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:47:42,171] INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:47:42,176] DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> HashMap(__consumer_offsets-13 -> OnlinePartition, __consumer_offsets-46 -> OnlinePartition, __consumer_offsets-9 -> OnlinePartition, __consumer_offsets-42 -> OnlinePartition, __consumer_offsets-21 -> OnlinePartition, __consumer_offsets-17 -> OnlinePartition, __consumer_offsets-30 -> OnlinePartition, __consumer_offsets-26 -> OnlinePartition, __consumer_offsets-5 -> OnlinePartition, __consumer_offsets-38 -> OnlinePartition, my-topic-0 -> OnlinePartition, __consumer_offsets-1 -> OnlinePartition, 
__consumer_offsets-34 -> OnlinePartition, __consumer_offsets-16 -> OnlinePartition, __consumer_offsets-45 -> OnlinePartition, __consumer_offsets-12 -> OnlinePartition, __consumer_offsets-41 -> OnlinePartition, __consumer_offsets-24 -> OnlinePartition, __consumer_offsets-20 -> OnlinePartition, __consumer_offsets-49 -> OnlinePartition, __consumer_offsets-0 -> OnlinePartition, __consumer_offsets-29 -> OnlinePartition, __consumer_offsets-25 -> OnlinePartition, __consumer_offsets-8 -> OnlinePartition, __consumer_offsets-37 -> OnlinePartition, __consumer_offsets-4 -> OnlinePartition, __consumer_offsets-33 -> OnlinePartition, __consumer_offsets-15 -> OnlinePartition, __consumer_offsets-48 -> OnlinePartition, __consumer_offsets-11 -> OnlinePartition, __consumer_offsets-44 -> OnlinePartition, __consumer_offsets-23 -> OnlinePartition, __consumer_offsets-19 -> OnlinePartition, __consumer_offsets-32 -> OnlinePartition, __consumer_offsets-28 -> OnlinePartition, __consumer_offsets-7 -> OnlinePartition, __consumer_offsets-40 -> OnlinePartition, __consumer_offsets-3 -> OnlinePartition, json-topic-0 -> OnlinePartition, __consumer_offsets-36 -> OnlinePartition, __consumer_offsets-47 -> OnlinePartition, __consumer_offsets-14 -> OnlinePartition, __consumer_offsets-43 -> OnlinePartition, __consumer_offsets-10 -> OnlinePartition, __consumer_offsets-22 -> OnlinePartition, __consumer_offsets-18 -> OnlinePartition, __consumer_offsets-31 -> OnlinePartition, __consumer_offsets-27 -> OnlinePartition, __consumer_offsets-39 -> OnlinePartition, __consumer_offsets-6 -> OnlinePartition, __consumer_offsets-35 -> OnlinePartition, __consumer_offsets-2 -> OnlinePartition) (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:47:42,176] INFO [Controller id=0] Ready to serve as the new controller with epoch 2 (kafka.controller.KafkaController) +[2021-05-10 04:47:42,182] INFO [Controller id=0] Partitions undergoing preferred replica election: (kafka.controller.KafkaController) +[2021-05-10 
04:47:42,182] INFO [Controller id=0] Partitions that completed preferred replica election: (kafka.controller.KafkaController) +[2021-05-10 04:47:42,182] INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) +[2021-05-10 04:47:42,182] INFO [Controller id=0] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) +[2021-05-10 04:47:42,191] INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) +[2021-05-10 04:47:42,211] INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) +[2021-05-10 04:47:47,212] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2021-05-10 04:47:47,213] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2021-05-10 04:47:47,216] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) +[2021-05-10 04:47:47,217] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2021-05-10 04:48:06,610] INFO [Controller id=0] Shutting down broker 0 (kafka.controller.KafkaController) +[2021-05-10 04:48:06,611] DEBUG [Controller id=0] All shutting down brokers: 0 (kafka.controller.KafkaController) +[2021-05-10 04:48:06,611] DEBUG [Controller id=0] Live brokers: (kafka.controller.KafkaController) +[2021-05-10 04:48:06,614] TRACE [Controller id=0] All leaders = __consumer_offsets-13 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-46 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-9 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-42 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-21 -> 
(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-17 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-30 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-26 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-5 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-38 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),my-topic-0 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-1 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-34 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-16 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-45 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-12 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-41 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-24 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-20 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-49 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-0 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-29 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-25 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-8 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-37 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-4 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-33 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-15 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-48 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-11 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-44 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-23 -> 
(Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-19 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-32 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-28 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-7 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-40 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-3 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),json-topic-0 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-36 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-47 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-14 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-43 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-10 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-22 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-18 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-31 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-27 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-39 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-6 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-35 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1),__consumer_offsets-2 -> (Leader:0,ISR:0,LeaderEpoch:0,ControllerEpoch:1) (kafka.controller.KafkaController) +[2021-05-10 04:48:07,851] INFO [ControllerEventThread controllerId=0] Shutting down (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:48:07,851] INFO [ControllerEventThread controllerId=0] Stopped (kafka.controller.ControllerEventManager$ControllerEventThread) +[2021-05-10 04:48:07,852] INFO [ControllerEventThread controllerId=0] Shutdown completed (kafka.controller.ControllerEventManager$ControllerEventThread) 
+[2021-05-10 04:48:07,853] DEBUG [Controller id=0] Resigning (kafka.controller.KafkaController) +[2021-05-10 04:48:07,853] DEBUG [Controller id=0] Unregister BrokerModifications handler for Set(0) (kafka.controller.KafkaController) +[2021-05-10 04:48:07,854] INFO [PartitionStateMachine controllerId=0] Stopped partition state machine (kafka.controller.ZkPartitionStateMachine) +[2021-05-10 04:48:07,855] INFO [ReplicaStateMachine controllerId=0] Stopped replica state machine (kafka.controller.ZkReplicaStateMachine) +[2021-05-10 04:48:07,856] INFO [RequestSendThread controllerId=0] Shutting down (kafka.controller.RequestSendThread) +[2021-05-10 04:48:07,857] INFO [RequestSendThread controllerId=0] Shutdown completed (kafka.controller.RequestSendThread) +[2021-05-10 04:48:07,856] INFO [RequestSendThread controllerId=0] Stopped (kafka.controller.RequestSendThread) +[2021-05-10 04:48:07,858] INFO [Controller id=0] Resigned (kafka.controller.KafkaController) diff --git a/kafka/kafka_2.13-2.8.0/logs/kafka-authorizer.log b/kafka/kafka_2.13-2.8.0/logs/kafka-authorizer.log new file mode 100644 index 0000000..e69de29 diff --git a/kafka/kafka_2.13-2.8.0/logs/kafka-request.log b/kafka/kafka_2.13-2.8.0/logs/kafka-request.log new file mode 100644 index 0000000..e69de29 diff --git a/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log b/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log new file mode 100644 index 0000000..aa9b58b --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log @@ -0,0 +1,111 @@ +[2021-05-10T04:47:34.087-0400][gc,heap] Heap region size: 1M +[2021-05-10T04:47:34.092-0400][gc ] Using G1 +[2021-05-10T04:47:34.092-0400][gc,heap,coops] Heap address: 0x00000000c0000000, size: 1024 MB, Compressed Oops mode: 32-bit +[2021-05-10T04:47:34.092-0400][gc,cds ] Mark closed archive regions in map: [0x00000000fff00000, 0x00000000fff69ff8] +[2021-05-10T04:47:34.092-0400][gc,cds ] Mark open archive regions in map: [0x00000000ffe00000, 0x00000000ffe33ff8] 
+[2021-05-10T04:47:39.644-0400][gc,start ] GC(0) Pause Young (Normal) (GCLocker Initiated GC) +[2021-05-10T04:47:39.644-0400][gc,task ] GC(0) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:39.652-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:47:39.652-0400][gc,phases ] GC(0) Evacuate Collection Set: 7.8ms +[2021-05-10T04:47:39.652-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.4ms +[2021-05-10T04:47:39.652-0400][gc,phases ] GC(0) Other: 0.1ms +[2021-05-10T04:47:39.652-0400][gc,heap ] GC(0) Eden regions: 51->0(44) +[2021-05-10T04:47:39.652-0400][gc,heap ] GC(0) Survivor regions: 0->7(7) +[2021-05-10T04:47:39.652-0400][gc,heap ] GC(0) Old regions: 2->2 +[2021-05-10T04:47:39.652-0400][gc,heap ] GC(0) Humongous regions: 0->0 +[2021-05-10T04:47:39.652-0400][gc,metaspace ] GC(0) Metaspace: 15569K->15569K(1062912K) +[2021-05-10T04:47:39.653-0400][gc ] GC(0) Pause Young (Normal) (GCLocker Initiated GC) 51M->7M(1024M) 8.452ms +[2021-05-10T04:47:39.653-0400][gc,cpu ] GC(0) User=0.02s Sys=0.00s Real=0.01s +[2021-05-10T04:47:39.969-0400][gc,start ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2021-05-10T04:47:39.969-0400][gc,task ] GC(1) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:39.985-0400][gc,phases ] GC(1) Pre Evacuate Collection Set: 0.1ms +[2021-05-10T04:47:39.985-0400][gc,phases ] GC(1) Evacuate Collection Set: 14.8ms +[2021-05-10T04:47:39.985-0400][gc,phases ] GC(1) Post Evacuate Collection Set: 0.3ms +[2021-05-10T04:47:39.985-0400][gc,phases ] GC(1) Other: 0.2ms +[2021-05-10T04:47:39.985-0400][gc,heap ] GC(1) Eden regions: 19->0(50) +[2021-05-10T04:47:39.985-0400][gc,heap ] GC(1) Survivor regions: 7->1(7) +[2021-05-10T04:47:39.985-0400][gc,heap ] GC(1) Old regions: 2->9 +[2021-05-10T04:47:39.985-0400][gc,heap ] GC(1) Humongous regions: 0->0 +[2021-05-10T04:47:39.985-0400][gc,metaspace ] GC(1) Metaspace: 20315K->20315K(1067008K) +[2021-05-10T04:47:39.985-0400][gc ] GC(1) Pause Young (Concurrent 
Start) (Metadata GC Threshold) 26M->7M(1024M) 15.458ms +[2021-05-10T04:47:39.985-0400][gc,cpu ] GC(1) User=0.01s Sys=0.00s Real=0.02s +[2021-05-10T04:47:39.985-0400][gc ] GC(2) Concurrent Cycle +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Clear Claimed Marks +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Clear Claimed Marks 0.037ms +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Scan Root Regions +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Scan Root Regions 0.534ms +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Mark (5.900s) +[2021-05-10T04:47:39.985-0400][gc,marking ] GC(2) Concurrent Mark From Roots +[2021-05-10T04:47:39.985-0400][gc,task ] GC(2) Using 1 workers of 1 for marking +[2021-05-10T04:47:40.014-0400][gc,marking ] GC(2) Concurrent Mark From Roots 28.335ms +[2021-05-10T04:47:40.014-0400][gc,marking ] GC(2) Concurrent Preclean +[2021-05-10T04:47:40.014-0400][gc,marking ] GC(2) Concurrent Preclean 0.039ms +[2021-05-10T04:47:40.014-0400][gc,marking ] GC(2) Concurrent Mark (5.900s, 5.929s) 28.407ms +[2021-05-10T04:47:40.015-0400][gc,start ] GC(2) Pause Remark +[2021-05-10T04:47:40.016-0400][gc,stringtable] GC(2) Cleaned string and symbol table, strings: 5109 processed, 0 removed, symbols: 50600 processed, 42 removed +[2021-05-10T04:47:40.017-0400][gc ] GC(2) Pause Remark 10M->10M(1024M) 1.912ms +[2021-05-10T04:47:40.017-0400][gc,cpu ] GC(2) User=0.01s Sys=0.00s Real=0.00s +[2021-05-10T04:47:40.017-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets +[2021-05-10T04:47:40.027-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets 10.778ms +[2021-05-10T04:47:40.031-0400][gc,start ] GC(2) Pause Cleanup +[2021-05-10T04:47:40.031-0400][gc ] GC(2) Pause Cleanup 10M->10M(1024M) 0.168ms +[2021-05-10T04:47:40.031-0400][gc,cpu ] GC(2) User=0.01s Sys=0.00s Real=0.00s +[2021-05-10T04:47:40.031-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark 
+[2021-05-10T04:47:40.037-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark 6.409ms +[2021-05-10T04:47:40.037-0400][gc ] GC(2) Concurrent Cycle 52.595ms +[2021-05-10T04:47:41.317-0400][gc,start ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:47:41.317-0400][gc,task ] GC(3) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:41.328-0400][gc,phases ] GC(3) Pre Evacuate Collection Set: 0.1ms +[2021-05-10T04:47:41.328-0400][gc,phases ] GC(3) Evacuate Collection Set: 10.2ms +[2021-05-10T04:47:41.328-0400][gc,phases ] GC(3) Post Evacuate Collection Set: 0.4ms +[2021-05-10T04:47:41.328-0400][gc,phases ] GC(3) Other: 0.4ms +[2021-05-10T04:47:41.328-0400][gc,heap ] GC(3) Eden regions: 50->0(46) +[2021-05-10T04:47:41.328-0400][gc,heap ] GC(3) Survivor regions: 1->5(7) +[2021-05-10T04:47:41.328-0400][gc,heap ] GC(3) Old regions: 9->9 +[2021-05-10T04:47:41.328-0400][gc,heap ] GC(3) Humongous regions: 129->129 +[2021-05-10T04:47:41.328-0400][gc,metaspace ] GC(3) Metaspace: 28893K->28893K(1075200K) +[2021-05-10T04:47:41.328-0400][gc ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) 186M->140M(1024M) 11.127ms +[2021-05-10T04:47:41.328-0400][gc,cpu ] GC(3) User=0.00s Sys=0.00s Real=0.01s +[2021-05-10T04:47:41.957-0400][gc,start ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2021-05-10T04:47:41.957-0400][gc,task ] GC(4) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:41.983-0400][gc,mmu ] GC(4) MMU target violated: 21.0ms (20.0ms/21.0ms) +[2021-05-10T04:47:41.983-0400][gc,phases ] GC(4) Pre Evacuate Collection Set: 0.2ms +[2021-05-10T04:47:41.983-0400][gc,phases ] GC(4) Evacuate Collection Set: 24.5ms +[2021-05-10T04:47:41.983-0400][gc,phases ] GC(4) Post Evacuate Collection Set: 0.4ms +[2021-05-10T04:47:41.983-0400][gc,phases ] GC(4) Other: 0.3ms +[2021-05-10T04:47:41.983-0400][gc,heap ] GC(4) Eden regions: 27->0(44) +[2021-05-10T04:47:41.983-0400][gc,heap ] GC(4) Survivor regions: 5->7(7) 
+[2021-05-10T04:47:41.983-0400][gc,heap ] GC(4) Old regions: 9->9 +[2021-05-10T04:47:41.983-0400][gc,heap ] GC(4) Humongous regions: 129->129 +[2021-05-10T04:47:41.983-0400][gc,metaspace ] GC(4) Metaspace: 34546K->34546K(1081344K) +[2021-05-10T04:47:41.983-0400][gc ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) 167M->142M(1024M) 25.323ms +[2021-05-10T04:47:41.983-0400][gc,cpu ] GC(4) User=0.02s Sys=0.01s Real=0.03s +[2021-05-10T04:47:41.983-0400][gc ] GC(5) Concurrent Cycle +[2021-05-10T04:47:41.983-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks +[2021-05-10T04:47:41.983-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks 0.136ms +[2021-05-10T04:47:41.983-0400][gc,marking ] GC(5) Concurrent Scan Root Regions +[2021-05-10T04:47:41.986-0400][gc,marking ] GC(5) Concurrent Scan Root Regions 2.704ms +[2021-05-10T04:47:41.986-0400][gc,marking ] GC(5) Concurrent Mark (7.901s) +[2021-05-10T04:47:41.986-0400][gc,marking ] GC(5) Concurrent Mark From Roots +[2021-05-10T04:47:41.986-0400][gc,task ] GC(5) Using 1 workers of 1 for marking +[2021-05-10T04:47:42.042-0400][gc,marking ] GC(5) Concurrent Mark From Roots 56.721ms +[2021-05-10T04:47:42.042-0400][gc,marking ] GC(5) Concurrent Preclean +[2021-05-10T04:47:42.043-0400][gc,marking ] GC(5) Concurrent Preclean 0.100ms +[2021-05-10T04:47:42.043-0400][gc,marking ] GC(5) Concurrent Mark (7.901s, 7.958s) 56.880ms +[2021-05-10T04:47:42.057-0400][gc,start ] GC(5) Pause Remark +[2021-05-10T04:47:42.062-0400][gc,stringtable] GC(5) Cleaned string and symbol table, strings: 10857 processed, 11 removed, symbols: 83076 processed, 20 removed +[2021-05-10T04:47:42.063-0400][gc ] GC(5) Pause Remark 145M->145M(1024M) 6.008ms +[2021-05-10T04:47:42.063-0400][gc,cpu ] GC(5) User=0.01s Sys=0.00s Real=0.01s +[2021-05-10T04:47:42.063-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets +[2021-05-10T04:47:42.076-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets 13.239ms 
+[2021-05-10T04:47:42.118-0400][gc,start ] GC(5) Pause Cleanup +[2021-05-10T04:47:42.119-0400][gc ] GC(5) Pause Cleanup 146M->146M(1024M) 0.241ms +[2021-05-10T04:47:42.119-0400][gc,cpu ] GC(5) User=0.00s Sys=0.00s Real=0.00s +[2021-05-10T04:47:42.119-0400][gc,marking ] GC(5) Concurrent Cleanup for Next Mark +[2021-05-10T04:47:42.122-0400][gc,marking ] GC(5) Concurrent Cleanup for Next Mark 3.731ms +[2021-05-10T04:47:42.122-0400][gc ] GC(5) Concurrent Cycle 139.648ms +[2021-05-10T04:48:11.195-0400][gc,heap,exit ] Heap +[2021-05-10T04:48:11.195-0400][gc,heap,exit ] garbage-first heap total 1048576K, used 194401K [0x00000000c0000000, 0x0000000100000000) +[2021-05-10T04:48:11.195-0400][gc,heap,exit ] region size 1024K, 50 young (51200K), 7 survivors (7168K) +[2021-05-10T04:48:11.195-0400][gc,heap,exit ] Metaspace used 41188K, capacity 43790K, committed 44028K, reserved 1087488K +[2021-05-10T04:48:11.195-0400][gc,heap,exit ] class space used 5554K, capacity 6835K, committed 6912K, reserved 1048576K diff --git a/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log.0 b/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log.0 new file mode 100644 index 0000000..37d431f --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/kafkaServer-gc.log.0 @@ -0,0 +1,138 @@ +[2021-05-10T04:16:52.506-0400][gc,heap] Heap region size: 1M +[2021-05-10T04:16:52.511-0400][gc ] Using G1 +[2021-05-10T04:16:52.511-0400][gc,heap,coops] Heap address: 0x00000000c0000000, size: 1024 MB, Compressed Oops mode: 32-bit +[2021-05-10T04:16:52.511-0400][gc,cds ] Mark closed archive regions in map: [0x00000000fff00000, 0x00000000fff69ff8] +[2021-05-10T04:16:52.511-0400][gc,cds ] Mark open archive regions in map: [0x00000000ffe00000, 0x00000000ffe33ff8] +[2021-05-10T04:16:53.784-0400][gc,start ] GC(0) Pause Young (Normal) (GCLocker Initiated GC) +[2021-05-10T04:16:53.785-0400][gc,task ] GC(0) Using 2 workers of 2 for evacuation +[2021-05-10T04:16:53.797-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.0ms 
+[2021-05-10T04:16:53.797-0400][gc,phases ] GC(0) Evacuate Collection Set: 11.8ms +[2021-05-10T04:16:53.797-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.3ms +[2021-05-10T04:16:53.797-0400][gc,phases ] GC(0) Other: 0.3ms +[2021-05-10T04:16:53.797-0400][gc,heap ] GC(0) Eden regions: 51->0(44) +[2021-05-10T04:16:53.797-0400][gc,heap ] GC(0) Survivor regions: 0->7(7) +[2021-05-10T04:16:53.797-0400][gc,heap ] GC(0) Old regions: 2->2 +[2021-05-10T04:16:53.797-0400][gc,heap ] GC(0) Humongous regions: 0->0 +[2021-05-10T04:16:53.797-0400][gc,metaspace ] GC(0) Metaspace: 15538K->15538K(1062912K) +[2021-05-10T04:16:53.797-0400][gc ] GC(0) Pause Young (Normal) (GCLocker Initiated GC) 51M->7M(1024M) 12.521ms +[2021-05-10T04:16:53.797-0400][gc,cpu ] GC(0) User=0.02s Sys=0.01s Real=0.01s +[2021-05-10T04:16:54.243-0400][gc,start ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2021-05-10T04:16:54.243-0400][gc,task ] GC(1) Using 2 workers of 2 for evacuation +[2021-05-10T04:16:54.261-0400][gc,phases ] GC(1) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:16:54.261-0400][gc,phases ] GC(1) Evacuate Collection Set: 18.0ms +[2021-05-10T04:16:54.261-0400][gc,phases ] GC(1) Post Evacuate Collection Set: 0.6ms +[2021-05-10T04:16:54.261-0400][gc,phases ] GC(1) Other: 0.2ms +[2021-05-10T04:16:54.261-0400][gc,heap ] GC(1) Eden regions: 21->0(50) +[2021-05-10T04:16:54.261-0400][gc,heap ] GC(1) Survivor regions: 7->1(7) +[2021-05-10T04:16:54.261-0400][gc,heap ] GC(1) Old regions: 2->9 +[2021-05-10T04:16:54.261-0400][gc,heap ] GC(1) Humongous regions: 0->0 +[2021-05-10T04:16:54.261-0400][gc,metaspace ] GC(1) Metaspace: 20561K->20561K(1067008K) +[2021-05-10T04:16:54.262-0400][gc ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) 27M->7M(1024M) 18.912ms +[2021-05-10T04:16:54.262-0400][gc,cpu ] GC(1) User=0.02s Sys=0.01s Real=0.02s +[2021-05-10T04:16:54.262-0400][gc ] GC(2) Concurrent Cycle +[2021-05-10T04:16:54.262-0400][gc,marking ] GC(2) Concurrent 
Clear Claimed Marks +[2021-05-10T04:16:54.262-0400][gc,marking ] GC(2) Concurrent Clear Claimed Marks 0.073ms +[2021-05-10T04:16:54.262-0400][gc,marking ] GC(2) Concurrent Scan Root Regions +[2021-05-10T04:16:54.263-0400][gc,marking ] GC(2) Concurrent Scan Root Regions 1.139ms +[2021-05-10T04:16:54.263-0400][gc,marking ] GC(2) Concurrent Mark (1.759s) +[2021-05-10T04:16:54.263-0400][gc,marking ] GC(2) Concurrent Mark From Roots +[2021-05-10T04:16:54.263-0400][gc,task ] GC(2) Using 1 workers of 1 for marking +[2021-05-10T04:16:54.272-0400][gc,marking ] GC(2) Concurrent Mark From Roots 9.050ms +[2021-05-10T04:16:54.272-0400][gc,marking ] GC(2) Concurrent Preclean +[2021-05-10T04:16:54.272-0400][gc,marking ] GC(2) Concurrent Preclean 0.041ms +[2021-05-10T04:16:54.272-0400][gc,marking ] GC(2) Concurrent Mark (1.759s, 1.768s) 9.127ms +[2021-05-10T04:16:54.272-0400][gc,start ] GC(2) Pause Remark +[2021-05-10T04:16:54.280-0400][gc,stringtable] GC(2) Cleaned string and symbol table, strings: 5141 processed, 0 removed, symbols: 50637 processed, 45 removed +[2021-05-10T04:16:54.285-0400][gc ] GC(2) Pause Remark 8M->8M(1024M) 13.221ms +[2021-05-10T04:16:54.285-0400][gc,cpu ] GC(2) User=0.01s Sys=0.00s Real=0.01s +[2021-05-10T04:16:54.290-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets +[2021-05-10T04:16:54.294-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets 4.006ms +[2021-05-10T04:16:54.298-0400][gc,start ] GC(2) Pause Cleanup +[2021-05-10T04:16:54.298-0400][gc ] GC(2) Pause Cleanup 8M->8M(1024M) 0.223ms +[2021-05-10T04:16:54.298-0400][gc,cpu ] GC(2) User=0.00s Sys=0.00s Real=0.00s +[2021-05-10T04:16:54.298-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark +[2021-05-10T04:16:54.315-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark 16.215ms +[2021-05-10T04:16:54.315-0400][gc ] GC(2) Concurrent Cycle 53.170ms +[2021-05-10T04:16:55.218-0400][gc,start ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:16:55.218-0400][gc,task 
] GC(3) Using 2 workers of 2 for evacuation +[2021-05-10T04:16:55.230-0400][gc,phases ] GC(3) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:16:55.230-0400][gc,phases ] GC(3) Evacuate Collection Set: 10.9ms +[2021-05-10T04:16:55.230-0400][gc,phases ] GC(3) Post Evacuate Collection Set: 0.5ms +[2021-05-10T04:16:55.230-0400][gc,phases ] GC(3) Other: 0.2ms +[2021-05-10T04:16:55.230-0400][gc,heap ] GC(3) Eden regions: 50->0(46) +[2021-05-10T04:16:55.230-0400][gc,heap ] GC(3) Survivor regions: 1->5(7) +[2021-05-10T04:16:55.230-0400][gc,heap ] GC(3) Old regions: 9->9 +[2021-05-10T04:16:55.230-0400][gc,heap ] GC(3) Humongous regions: 129->129 +[2021-05-10T04:16:55.230-0400][gc,metaspace ] GC(3) Metaspace: 30031K->30031K(1077248K) +[2021-05-10T04:16:55.230-0400][gc ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) 186M->140M(1024M) 11.640ms +[2021-05-10T04:16:55.230-0400][gc,cpu ] GC(3) User=0.01s Sys=0.00s Real=0.00s +[2021-05-10T04:16:55.811-0400][gc,start ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2021-05-10T04:16:55.811-0400][gc,task ] GC(4) Using 2 workers of 2 for evacuation +[2021-05-10T04:16:55.834-0400][gc,mmu ] GC(4) MMU target violated: 21.0ms (20.0ms/21.0ms) +[2021-05-10T04:16:55.834-0400][gc,phases ] GC(4) Pre Evacuate Collection Set: 0.2ms +[2021-05-10T04:16:55.834-0400][gc,phases ] GC(4) Evacuate Collection Set: 22.9ms +[2021-05-10T04:16:55.834-0400][gc,phases ] GC(4) Post Evacuate Collection Set: 0.3ms +[2021-05-10T04:16:55.834-0400][gc,phases ] GC(4) Other: 0.2ms +[2021-05-10T04:16:55.834-0400][gc,heap ] GC(4) Eden regions: 22->0(49) +[2021-05-10T04:16:55.834-0400][gc,heap ] GC(4) Survivor regions: 5->2(7) +[2021-05-10T04:16:55.834-0400][gc,heap ] GC(4) Old regions: 9->14 +[2021-05-10T04:16:55.834-0400][gc,heap ] GC(4) Humongous regions: 129->129 +[2021-05-10T04:16:55.834-0400][gc,metaspace ] GC(4) Metaspace: 34347K->34347K(1081344K) +[2021-05-10T04:16:55.835-0400][gc ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) 
161M->142M(1024M) 23.586ms +[2021-05-10T04:16:55.835-0400][gc,cpu ] GC(4) User=0.02s Sys=0.00s Real=0.02s +[2021-05-10T04:16:55.835-0400][gc ] GC(5) Concurrent Cycle +[2021-05-10T04:16:55.835-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks +[2021-05-10T04:16:55.835-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks 0.085ms +[2021-05-10T04:16:55.835-0400][gc,marking ] GC(5) Concurrent Scan Root Regions +[2021-05-10T04:16:55.836-0400][gc,marking ] GC(5) Concurrent Scan Root Regions 1.070ms +[2021-05-10T04:16:55.836-0400][gc,marking ] GC(5) Concurrent Mark (3.331s) +[2021-05-10T04:16:55.836-0400][gc,marking ] GC(5) Concurrent Mark From Roots +[2021-05-10T04:16:55.836-0400][gc,task ] GC(5) Using 1 workers of 1 for marking +[2021-05-10T04:16:55.887-0400][gc,marking ] GC(5) Concurrent Mark From Roots 51.060ms +[2021-05-10T04:16:55.887-0400][gc,marking ] GC(5) Concurrent Preclean +[2021-05-10T04:16:55.887-0400][gc,marking ] GC(5) Concurrent Preclean 0.050ms +[2021-05-10T04:16:55.887-0400][gc,marking ] GC(5) Concurrent Mark (3.331s, 3.383s) 51.151ms +[2021-05-10T04:16:55.887-0400][gc,start ] GC(5) Pause Remark +[2021-05-10T04:16:55.892-0400][gc,stringtable] GC(5) Cleaned string and symbol table, strings: 10719 processed, 11 removed, symbols: 81858 processed, 17 removed +[2021-05-10T04:16:55.892-0400][gc ] GC(5) Pause Remark 144M->144M(1024M) 5.220ms +[2021-05-10T04:16:55.892-0400][gc,cpu ] GC(5) User=0.00s Sys=0.00s Real=0.01s +[2021-05-10T04:16:55.893-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets +[2021-05-10T04:16:55.906-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets 13.600ms +[2021-05-10T04:16:55.908-0400][gc,start ] GC(5) Pause Cleanup +[2021-05-10T04:16:55.908-0400][gc ] GC(5) Pause Cleanup 145M->145M(1024M) 0.317ms +[2021-05-10T04:16:55.909-0400][gc,cpu ] GC(5) User=0.00s Sys=0.00s Real=0.00s +[2021-05-10T04:16:55.909-0400][gc,marking ] GC(5) Concurrent Cleanup for Next Mark +[2021-05-10T04:16:55.924-0400][gc,marking ] GC(5) 
Concurrent Cleanup for Next Mark 15.509ms +[2021-05-10T04:16:55.924-0400][gc ] GC(5) Concurrent Cycle 89.613ms +[2021-05-10T04:28:58.721-0400][gc,start ] GC(6) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:28:58.721-0400][gc,task ] GC(6) Using 2 workers of 2 for evacuation +[2021-05-10T04:28:58.733-0400][gc,phases ] GC(6) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:28:58.733-0400][gc,phases ] GC(6) Evacuate Collection Set: 11.3ms +[2021-05-10T04:28:58.733-0400][gc,phases ] GC(6) Post Evacuate Collection Set: 0.5ms +[2021-05-10T04:28:58.733-0400][gc,phases ] GC(6) Other: 0.2ms +[2021-05-10T04:28:58.733-0400][gc,heap ] GC(6) Eden regions: 49->0(47) +[2021-05-10T04:28:58.733-0400][gc,heap ] GC(6) Survivor regions: 2->4(7) +[2021-05-10T04:28:58.733-0400][gc,heap ] GC(6) Old regions: 14->14 +[2021-05-10T04:28:58.733-0400][gc,heap ] GC(6) Humongous regions: 129->129 +[2021-05-10T04:28:58.733-0400][gc,metaspace ] GC(6) Metaspace: 40359K->40359K(1085440K) +[2021-05-10T04:28:58.733-0400][gc ] GC(6) Pause Young (Normal) (G1 Evacuation Pause) 191M->144M(1024M) 12.105ms +[2021-05-10T04:28:58.733-0400][gc,cpu ] GC(6) User=0.02s Sys=0.00s Real=0.01s +[2021-05-10T04:44:01.433-0400][gc,start ] GC(7) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:44:01.433-0400][gc,task ] GC(7) Using 2 workers of 2 for evacuation +[2021-05-10T04:44:01.455-0400][gc,mmu ] GC(7) MMU target violated: 21.0ms (20.0ms/21.0ms) +[2021-05-10T04:44:01.455-0400][gc,phases ] GC(7) Pre Evacuate Collection Set: 0.1ms +[2021-05-10T04:44:01.455-0400][gc,phases ] GC(7) Evacuate Collection Set: 20.9ms +[2021-05-10T04:44:01.455-0400][gc,phases ] GC(7) Post Evacuate Collection Set: 0.5ms +[2021-05-10T04:44:01.455-0400][gc,phases ] GC(7) Other: 0.4ms +[2021-05-10T04:44:01.455-0400][gc,heap ] GC(7) Eden regions: 47->0(46) +[2021-05-10T04:44:01.455-0400][gc,heap ] GC(7) Survivor regions: 4->5(7) +[2021-05-10T04:44:01.455-0400][gc,heap ] GC(7) Old regions: 14->15 
+[2021-05-10T04:44:01.455-0400][gc,heap ] GC(7) Humongous regions: 129->129 +[2021-05-10T04:44:01.455-0400][gc,metaspace ] GC(7) Metaspace: 42353K->42353K(1087488K) +[2021-05-10T04:44:01.455-0400][gc ] GC(7) Pause Young (Normal) (G1 Evacuation Pause) 191M->146M(1024M) 21.958ms +[2021-05-10T04:44:01.455-0400][gc,cpu ] GC(7) User=0.03s Sys=0.00s Real=0.02s +[2021-05-10T04:47:27.971-0400][gc,heap,exit ] Heap +[2021-05-10T04:47:27.971-0400][gc,heap,exit ] garbage-first heap total 1048576K, used 172962K [0x00000000c0000000, 0x0000000100000000) +[2021-05-10T04:47:27.971-0400][gc,heap,exit ] region size 1024K, 28 young (28672K), 5 survivors (5120K) +[2021-05-10T04:47:27.971-0400][gc,heap,exit ] Metaspace used 43219K, capacity 46080K, committed 46208K, reserved 1087488K +[2021-05-10T04:47:27.971-0400][gc,heap,exit ] class space used 5790K, capacity 7173K, committed 7296K, reserved 1048576K diff --git a/kafka/kafka_2.13-2.8.0/logs/log-cleaner.log b/kafka/kafka_2.13-2.8.0/logs/log-cleaner.log new file mode 100644 index 0000000..5a4248e --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/log-cleaner.log @@ -0,0 +1,12 @@ +[2021-05-10 04:16:54,369] INFO Starting the log cleaner (kafka.log.LogCleaner) +[2021-05-10 04:16:54,458] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) +[2021-05-10 04:47:26,132] INFO Shutting down the log cleaner. (kafka.log.LogCleaner) +[2021-05-10 04:47:26,132] INFO [kafka-log-cleaner-thread-0]: Shutting down (kafka.log.LogCleaner) +[2021-05-10 04:47:26,132] INFO [kafka-log-cleaner-thread-0]: Stopped (kafka.log.LogCleaner) +[2021-05-10 04:47:26,132] INFO [kafka-log-cleaner-thread-0]: Shutdown completed (kafka.log.LogCleaner) +[2021-05-10 04:47:40,961] INFO Starting the log cleaner (kafka.log.LogCleaner) +[2021-05-10 04:47:41,015] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) +[2021-05-10 04:48:07,751] INFO Shutting down the log cleaner. 
(kafka.log.LogCleaner) +[2021-05-10 04:48:07,751] INFO [kafka-log-cleaner-thread-0]: Shutting down (kafka.log.LogCleaner) +[2021-05-10 04:48:07,751] INFO [kafka-log-cleaner-thread-0]: Stopped (kafka.log.LogCleaner) +[2021-05-10 04:48:07,752] INFO [kafka-log-cleaner-thread-0]: Shutdown completed (kafka.log.LogCleaner) diff --git a/kafka/kafka_2.13-2.8.0/logs/server.log b/kafka/kafka_2.13-2.8.0/logs/server.log new file mode 100644 index 0000000..915baf7 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/server.log @@ -0,0 +1,2219 @@ +[2021-05-10 04:16:22,959] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:22,960] WARN config/zookeeper.properties is relative. Prepend ./ to indicate that you're sure! (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:22,965] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:22,965] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:22,969] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:16:22,969] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:16:22,970] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:16:22,970] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) +[2021-05-10 04:16:22,973] INFO Log4j 1.2 jmx support found and enabled. (org.apache.zookeeper.jmx.ManagedUtil) +[2021-05-10 04:16:22,998] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:23,003] WARN config/zookeeper.properties is relative. Prepend ./ to indicate that you're sure! 
(org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:23,003] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:23,003] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:16:23,004] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) +[2021-05-10 04:16:23,007] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:16:23,028] INFO Server environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:host.name=osboxes (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.version=11.0.4 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.vendor=Ubuntu (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.home=/usr/lib/jvm/java-11-openjdk-amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server 
environment:java.class.path=/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/activation-1.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/aopalliance-repackaged-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/argparse4j-0.7.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/audience-annotations-0.5.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-cli-1.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-lang3-3.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-api-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-file-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-json-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-client-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-runtime-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-transforms-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-api-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-locator-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-utils-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-core-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-databind-2.10.5.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/roo
t/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-paranamer-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.activation-api-1.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.annotation-api-1.3.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.inject-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.validation-api-2.0.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javassist-3.27.0-GA.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.servlet-api-3.1.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.ws.rs-api-2.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jaxb-api-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-client-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-common-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-core-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-hk2-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-media-jaxb-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-server-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-client-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-http-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-io-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-security-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-server-9.
4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jline-3.12.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jopt-simple-5.0.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0-sources.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-clients-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-log4j-appender-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-metadata-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-raft-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-shell-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-examples-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-tools-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/log4j-1.2.17.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/lz4-java-1.7.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/maven-artifact-3.6.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/metrics-core-2.2.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-buffer-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-codec-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13
-2.8.0/bin/../libs/netty-handler-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-resolver-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/osgi-resource-locator-1.0.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/paranamer-2.8.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/plexus-utils-3.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/reflections-0.9.12.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/rocksdbjni-5.18.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-library-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-logging_2.13-3.9.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-reflect-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-api-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-log4j12-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/snappy-java-1.1.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-jute-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.io.tmpdir=/tmp 
(org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,028] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,029] INFO Server environment:os.version=5.0.0-36-generic (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,029] INFO Server environment:user.name=root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,029] INFO Server environment:user.home=/root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,029] INFO Server environment:user.dir=/root/OFC_SC472/kafka/kafka_2.13-2.8.0 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,029] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,030] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,030] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,031] INFO minSessionTimeout set to 6000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,031] INFO maxSessionTimeout set to 60000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,031] INFO Created server with tickTime 3000 minSessionTimeout 6000 maxSessionTimeout 60000 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:16:23,063] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) +[2021-05-10 04:16:23,072] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 1 selector thread(s), 4 worker threads, and 64 kB direct 
buffers. (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:16:23,089] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:16:23,136] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) +[2021-05-10 04:16:23,141] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:16:23,145] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:16:23,178] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) +[2021-05-10 04:16:23,184] INFO Using checkIntervalMs=60000 maxPerMinute=10000 (org.apache.zookeeper.server.ContainerManager) +[2021-05-10 04:16:52,910] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) +[2021-05-10 04:16:53,247] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) +[2021-05-10 04:16:53,399] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:16:53,416] INFO starting (kafka.server.KafkaServer) +[2021-05-10 04:16:53,423] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) +[2021-05-10 04:16:53,505] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:16:53,510] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:host.name=osboxes (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.version=11.0.4 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.vendor=Ubuntu (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.home=/usr/lib/jvm/java-11-openjdk-amd64 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.class.path=/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/activation-1.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/aopalliance-repackaged-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/argparse4j-0.7.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/audience-annotations-0.5.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-cli-1.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-lang3-3.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-api-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-file-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-json-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-client-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-runtime-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-transforms-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-api-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-locator-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-u
tils-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-core-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-databind-2.10.5.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-paranamer-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.activation-api-1.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.annotation-api-1.3.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.inject-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.validation-api-2.0.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javassist-3.27.0-GA.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.servlet-api-3.1.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.ws.rs-api-2.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jaxb-api-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-client-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-common-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-core-2.31.jar:/root/OFC_SC472/kafka/kaf
ka_2.13-2.8.0/bin/../libs/jersey-hk2-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-media-jaxb-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-server-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-client-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-http-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-io-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-security-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-server-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jline-3.12.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jopt-simple-5.0.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0-sources.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-clients-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-log4j-appender-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-metadata-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-raft-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-shell-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-examples-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.
13-2.8.0/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-tools-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/log4j-1.2.17.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/lz4-java-1.7.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/maven-artifact-3.6.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/metrics-core-2.2.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-buffer-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-codec-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-handler-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-resolver-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/osgi-resource-locator-1.0.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/paranamer-2.8.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/plexus-utils-3.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/reflections-0.9.12.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/rocksdbjni-5.18.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-library-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-logging_2.13-3.9.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-reflect-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-api-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/b
in/../libs/slf4j-log4j12-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/snappy-java-1.1.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-jute-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:os.version=5.0.0-36-generic (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:user.dir=/root/OFC_SC472/kafka/kafka_2.13-2.8.0 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:os.memory.free=977MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,510] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,511] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,513] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@205d38da (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:16:53,518] 
INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) +[2021-05-10 04:16:53,539] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:16:53,555] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:16:53,559] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:16:53,579] INFO Socket connection established, initiating session, client: /127.0.0.1:58156, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:16:53,593] INFO Creating new log file: log.1 (org.apache.zookeeper.server.persistence.FileTxnLog) +[2021-05-10 04:16:53,608] INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x10001721e430000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:16:53,617] INFO [ZooKeeperClient Kafka server] Connected. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:16:53,805] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:16:53,811] INFO Feature ZK node at path: /feature does not exist (kafka.server.FinalizedFeatureChangeListener) +[2021-05-10 04:16:53,812] INFO Cleared cache (kafka.server.FinalizedFeatureCache) +[2021-05-10 04:16:54,011] INFO Cluster ID = QF8w599ZSney2uTLCq4H8Q (kafka.server.KafkaServer) +[2021-05-10 04:16:54,017] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint) +[2021-05-10 04:16:54,107] INFO KafkaConfig values: + advertised.host.name = null + advertised.listeners = null + advertised.port = null + alter.config.policy.class.name = null + alter.log.dirs.replication.quota.window.num = 11 + alter.log.dirs.replication.quota.window.size.seconds = 1 + authorizer.class.name = + auto.create.topics.enable = true + auto.leader.rebalance.enable = true + background.threads = 10 + broker.heartbeat.interval.ms = 2000 + broker.id = 0 + broker.id.generation.enable = true + broker.rack = null + broker.session.timeout.ms = 9000 + client.quota.callback.class = null + compression.type = producer + connection.failed.authentication.delay.ms = 100 + connections.max.idle.ms = 600000 + connections.max.reauth.ms = 0 + control.plane.listener.name = null + controlled.shutdown.enable = true + controlled.shutdown.max.retries = 3 + controlled.shutdown.retry.backoff.ms = 5000 + controller.listener.names = null + controller.quorum.append.linger.ms = 25 + controller.quorum.election.backoff.max.ms = 1000 + controller.quorum.election.timeout.ms = 1000 + controller.quorum.fetch.timeout.ms = 2000 + controller.quorum.request.timeout.ms = 2000 + controller.quorum.retry.backoff.ms = 20 + controller.quorum.voters = [] + controller.quota.window.num = 11 + controller.quota.window.size.seconds = 1 + controller.socket.timeout.ms = 30000 + 
create.topic.policy.class.name = null + default.replication.factor = 1 + delegation.token.expiry.check.interval.ms = 3600000 + delegation.token.expiry.time.ms = 86400000 + delegation.token.master.key = null + delegation.token.max.lifetime.ms = 604800000 + delegation.token.secret.key = null + delete.records.purgatory.purge.interval.requests = 1 + delete.topic.enable = true + fetch.max.bytes = 57671680 + fetch.purgatory.purge.interval.requests = 1000 + group.initial.rebalance.delay.ms = 0 + group.max.session.timeout.ms = 1800000 + group.max.size = 2147483647 + group.min.session.timeout.ms = 6000 + host.name = + initial.broker.registration.timeout.ms = 60000 + inter.broker.listener.name = null + inter.broker.protocol.version = 2.8-IV1 + kafka.metrics.polling.interval.secs = 10 + kafka.metrics.reporters = [] + leader.imbalance.check.interval.seconds = 300 + leader.imbalance.per.broker.percentage = 10 + listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + listeners = null + log.cleaner.backoff.ms = 15000 + log.cleaner.dedupe.buffer.size = 134217728 + log.cleaner.delete.retention.ms = 86400000 + log.cleaner.enable = true + log.cleaner.io.buffer.load.factor = 0.9 + log.cleaner.io.buffer.size = 524288 + log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 + log.cleaner.max.compaction.lag.ms = 9223372036854775807 + log.cleaner.min.cleanable.ratio = 0.5 + log.cleaner.min.compaction.lag.ms = 0 + log.cleaner.threads = 1 + log.cleanup.policy = [delete] + log.dir = /tmp/kafka-logs + log.dirs = /tmp/kafka-logs + log.flush.interval.messages = 9223372036854775807 + log.flush.interval.ms = null + log.flush.offset.checkpoint.interval.ms = 60000 + log.flush.scheduler.interval.ms = 9223372036854775807 + log.flush.start.offset.checkpoint.interval.ms = 60000 + log.index.interval.bytes = 4096 + log.index.size.max.bytes = 10485760 + log.message.downconversion.enable = true + log.message.format.version = 2.8-IV1 + 
log.message.timestamp.difference.max.ms = 9223372036854775807 + log.message.timestamp.type = CreateTime + log.preallocate = false + log.retention.bytes = -1 + log.retention.check.interval.ms = 300000 + log.retention.hours = 168 + log.retention.minutes = null + log.retention.ms = null + log.roll.hours = 168 + log.roll.jitter.hours = 0 + log.roll.jitter.ms = null + log.roll.ms = null + log.segment.bytes = 1073741824 + log.segment.delete.delay.ms = 60000 + max.connection.creation.rate = 2147483647 + max.connections = 2147483647 + max.connections.per.ip = 2147483647 + max.connections.per.ip.overrides = + max.incremental.fetch.session.cache.slots = 1000 + message.max.bytes = 1048588 + metadata.log.dir = null + metric.reporters = [] + metrics.num.samples = 2 + metrics.recording.level = INFO + metrics.sample.window.ms = 30000 + min.insync.replicas = 1 + node.id = -1 + num.io.threads = 8 + num.network.threads = 3 + num.partitions = 1 + num.recovery.threads.per.data.dir = 1 + num.replica.alter.log.dirs.threads = null + num.replica.fetchers = 1 + offset.metadata.max.bytes = 4096 + offsets.commit.required.acks = -1 + offsets.commit.timeout.ms = 5000 + offsets.load.buffer.size = 5242880 + offsets.retention.check.interval.ms = 600000 + offsets.retention.minutes = 10080 + offsets.topic.compression.codec = 0 + offsets.topic.num.partitions = 50 + offsets.topic.replication.factor = 1 + offsets.topic.segment.bytes = 104857600 + password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding + password.encoder.iterations = 4096 + password.encoder.key.length = 128 + password.encoder.keyfactory.algorithm = null + password.encoder.old.secret = null + password.encoder.secret = null + port = 9092 + principal.builder.class = null + process.roles = [] + producer.purgatory.purge.interval.requests = 1000 + queued.max.request.bytes = -1 + queued.max.requests = 500 + quota.consumer.default = 9223372036854775807 + quota.producer.default = 9223372036854775807 + quota.window.num = 11 + 
quota.window.size.seconds = 1 + replica.fetch.backoff.ms = 1000 + replica.fetch.max.bytes = 1048576 + replica.fetch.min.bytes = 1 + replica.fetch.response.max.bytes = 10485760 + replica.fetch.wait.max.ms = 500 + replica.high.watermark.checkpoint.interval.ms = 5000 + replica.lag.time.max.ms = 30000 + replica.selector.class = null + replica.socket.receive.buffer.bytes = 65536 + replica.socket.timeout.ms = 30000 + replication.quota.window.num = 11 + replication.quota.window.size.seconds = 1 + request.timeout.ms = 30000 + reserved.broker.max.id = 1000 + sasl.client.callback.handler.class = null + sasl.enabled.mechanisms = [GSSAPI] + sasl.jaas.config = null + sasl.kerberos.kinit.cmd = /usr/bin/kinit + sasl.kerberos.min.time.before.relogin = 60000 + sasl.kerberos.principal.to.local.rules = [DEFAULT] + sasl.kerberos.service.name = null + sasl.kerberos.ticket.renew.jitter = 0.05 + sasl.kerberos.ticket.renew.window.factor = 0.8 + sasl.login.callback.handler.class = null + sasl.login.class = null + sasl.login.refresh.buffer.seconds = 300 + sasl.login.refresh.min.period.seconds = 60 + sasl.login.refresh.window.factor = 0.8 + sasl.login.refresh.window.jitter = 0.05 + sasl.mechanism.controller.protocol = GSSAPI + sasl.mechanism.inter.broker.protocol = GSSAPI + sasl.server.callback.handler.class = null + security.inter.broker.protocol = PLAINTEXT + security.providers = null + socket.connection.setup.timeout.max.ms = 30000 + socket.connection.setup.timeout.ms = 10000 + socket.receive.buffer.bytes = 102400 + socket.request.max.bytes = 104857600 + socket.send.buffer.bytes = 102400 + ssl.cipher.suites = [] + ssl.client.auth = none + ssl.enabled.protocols = [TLSv1.2, TLSv1.3] + ssl.endpoint.identification.algorithm = https + ssl.engine.factory.class = null + ssl.key.password = null + ssl.keymanager.algorithm = SunX509 + ssl.keystore.certificate.chain = null + ssl.keystore.key = null + ssl.keystore.location = null + ssl.keystore.password = null + ssl.keystore.type = JKS + 
ssl.principal.mapping.rules = DEFAULT + ssl.protocol = TLSv1.3 + ssl.provider = null + ssl.secure.random.implementation = null + ssl.trustmanager.algorithm = PKIX + ssl.truststore.certificates = null + ssl.truststore.location = null + ssl.truststore.password = null + ssl.truststore.type = JKS + transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 + transaction.max.timeout.ms = 900000 + transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 + transaction.state.log.load.buffer.size = 5242880 + transaction.state.log.min.isr = 1 + transaction.state.log.num.partitions = 50 + transaction.state.log.replication.factor = 1 + transaction.state.log.segment.bytes = 104857600 + transactional.id.expiration.ms = 604800000 + unclean.leader.election.enable = false + zookeeper.clientCnxnSocket = null + zookeeper.connect = localhost:2181 + zookeeper.connection.timeout.ms = 18000 + zookeeper.max.in.flight.requests = 10 + zookeeper.session.timeout.ms = 18000 + zookeeper.set.acl = false + zookeeper.ssl.cipher.suites = null + zookeeper.ssl.client.enable = false + zookeeper.ssl.crl.enable = false + zookeeper.ssl.enabled.protocols = null + zookeeper.ssl.endpoint.identification.algorithm = HTTPS + zookeeper.ssl.keystore.location = null + zookeeper.ssl.keystore.password = null + zookeeper.ssl.keystore.type = null + zookeeper.ssl.ocsp.enable = false + zookeeper.ssl.protocol = TLSv1.2 + zookeeper.ssl.truststore.location = null + zookeeper.ssl.truststore.password = null + zookeeper.ssl.truststore.type = null + zookeeper.sync.time.ms = 2000 + (kafka.server.KafkaConfig) +[2021-05-10 04:16:54,115] INFO KafkaConfig values: + advertised.host.name = null + advertised.listeners = null + advertised.port = null + alter.config.policy.class.name = null + alter.log.dirs.replication.quota.window.num = 11 + alter.log.dirs.replication.quota.window.size.seconds = 1 + authorizer.class.name = + auto.create.topics.enable = true + auto.leader.rebalance.enable = true + 
background.threads = 10 + broker.heartbeat.interval.ms = 2000 + broker.id = 0 + broker.id.generation.enable = true + broker.rack = null + broker.session.timeout.ms = 9000 + client.quota.callback.class = null + compression.type = producer + connection.failed.authentication.delay.ms = 100 + connections.max.idle.ms = 600000 + connections.max.reauth.ms = 0 + control.plane.listener.name = null + controlled.shutdown.enable = true + controlled.shutdown.max.retries = 3 + controlled.shutdown.retry.backoff.ms = 5000 + controller.listener.names = null + controller.quorum.append.linger.ms = 25 + controller.quorum.election.backoff.max.ms = 1000 + controller.quorum.election.timeout.ms = 1000 + controller.quorum.fetch.timeout.ms = 2000 + controller.quorum.request.timeout.ms = 2000 + controller.quorum.retry.backoff.ms = 20 + controller.quorum.voters = [] + controller.quota.window.num = 11 + controller.quota.window.size.seconds = 1 + controller.socket.timeout.ms = 30000 + create.topic.policy.class.name = null + default.replication.factor = 1 + delegation.token.expiry.check.interval.ms = 3600000 + delegation.token.expiry.time.ms = 86400000 + delegation.token.master.key = null + delegation.token.max.lifetime.ms = 604800000 + delegation.token.secret.key = null + delete.records.purgatory.purge.interval.requests = 1 + delete.topic.enable = true + fetch.max.bytes = 57671680 + fetch.purgatory.purge.interval.requests = 1000 + group.initial.rebalance.delay.ms = 0 + group.max.session.timeout.ms = 1800000 + group.max.size = 2147483647 + group.min.session.timeout.ms = 6000 + host.name = + initial.broker.registration.timeout.ms = 60000 + inter.broker.listener.name = null + inter.broker.protocol.version = 2.8-IV1 + kafka.metrics.polling.interval.secs = 10 + kafka.metrics.reporters = [] + leader.imbalance.check.interval.seconds = 300 + leader.imbalance.per.broker.percentage = 10 + listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + 
listeners = null + log.cleaner.backoff.ms = 15000 + log.cleaner.dedupe.buffer.size = 134217728 + log.cleaner.delete.retention.ms = 86400000 + log.cleaner.enable = true + log.cleaner.io.buffer.load.factor = 0.9 + log.cleaner.io.buffer.size = 524288 + log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 + log.cleaner.max.compaction.lag.ms = 9223372036854775807 + log.cleaner.min.cleanable.ratio = 0.5 + log.cleaner.min.compaction.lag.ms = 0 + log.cleaner.threads = 1 + log.cleanup.policy = [delete] + log.dir = /tmp/kafka-logs + log.dirs = /tmp/kafka-logs + log.flush.interval.messages = 9223372036854775807 + log.flush.interval.ms = null + log.flush.offset.checkpoint.interval.ms = 60000 + log.flush.scheduler.interval.ms = 9223372036854775807 + log.flush.start.offset.checkpoint.interval.ms = 60000 + log.index.interval.bytes = 4096 + log.index.size.max.bytes = 10485760 + log.message.downconversion.enable = true + log.message.format.version = 2.8-IV1 + log.message.timestamp.difference.max.ms = 9223372036854775807 + log.message.timestamp.type = CreateTime + log.preallocate = false + log.retention.bytes = -1 + log.retention.check.interval.ms = 300000 + log.retention.hours = 168 + log.retention.minutes = null + log.retention.ms = null + log.roll.hours = 168 + log.roll.jitter.hours = 0 + log.roll.jitter.ms = null + log.roll.ms = null + log.segment.bytes = 1073741824 + log.segment.delete.delay.ms = 60000 + max.connection.creation.rate = 2147483647 + max.connections = 2147483647 + max.connections.per.ip = 2147483647 + max.connections.per.ip.overrides = + max.incremental.fetch.session.cache.slots = 1000 + message.max.bytes = 1048588 + metadata.log.dir = null + metric.reporters = [] + metrics.num.samples = 2 + metrics.recording.level = INFO + metrics.sample.window.ms = 30000 + min.insync.replicas = 1 + node.id = -1 + num.io.threads = 8 + num.network.threads = 3 + num.partitions = 1 + num.recovery.threads.per.data.dir = 1 + num.replica.alter.log.dirs.threads = null + 
num.replica.fetchers = 1 + offset.metadata.max.bytes = 4096 + offsets.commit.required.acks = -1 + offsets.commit.timeout.ms = 5000 + offsets.load.buffer.size = 5242880 + offsets.retention.check.interval.ms = 600000 + offsets.retention.minutes = 10080 + offsets.topic.compression.codec = 0 + offsets.topic.num.partitions = 50 + offsets.topic.replication.factor = 1 + offsets.topic.segment.bytes = 104857600 + password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding + password.encoder.iterations = 4096 + password.encoder.key.length = 128 + password.encoder.keyfactory.algorithm = null + password.encoder.old.secret = null + password.encoder.secret = null + port = 9092 + principal.builder.class = null + process.roles = [] + producer.purgatory.purge.interval.requests = 1000 + queued.max.request.bytes = -1 + queued.max.requests = 500 + quota.consumer.default = 9223372036854775807 + quota.producer.default = 9223372036854775807 + quota.window.num = 11 + quota.window.size.seconds = 1 + replica.fetch.backoff.ms = 1000 + replica.fetch.max.bytes = 1048576 + replica.fetch.min.bytes = 1 + replica.fetch.response.max.bytes = 10485760 + replica.fetch.wait.max.ms = 500 + replica.high.watermark.checkpoint.interval.ms = 5000 + replica.lag.time.max.ms = 30000 + replica.selector.class = null + replica.socket.receive.buffer.bytes = 65536 + replica.socket.timeout.ms = 30000 + replication.quota.window.num = 11 + replication.quota.window.size.seconds = 1 + request.timeout.ms = 30000 + reserved.broker.max.id = 1000 + sasl.client.callback.handler.class = null + sasl.enabled.mechanisms = [GSSAPI] + sasl.jaas.config = null + sasl.kerberos.kinit.cmd = /usr/bin/kinit + sasl.kerberos.min.time.before.relogin = 60000 + sasl.kerberos.principal.to.local.rules = [DEFAULT] + sasl.kerberos.service.name = null + sasl.kerberos.ticket.renew.jitter = 0.05 + sasl.kerberos.ticket.renew.window.factor = 0.8 + sasl.login.callback.handler.class = null + sasl.login.class = null + sasl.login.refresh.buffer.seconds = 300 
+ sasl.login.refresh.min.period.seconds = 60 + sasl.login.refresh.window.factor = 0.8 + sasl.login.refresh.window.jitter = 0.05 + sasl.mechanism.controller.protocol = GSSAPI + sasl.mechanism.inter.broker.protocol = GSSAPI + sasl.server.callback.handler.class = null + security.inter.broker.protocol = PLAINTEXT + security.providers = null + socket.connection.setup.timeout.max.ms = 30000 + socket.connection.setup.timeout.ms = 10000 + socket.receive.buffer.bytes = 102400 + socket.request.max.bytes = 104857600 + socket.send.buffer.bytes = 102400 + ssl.cipher.suites = [] + ssl.client.auth = none + ssl.enabled.protocols = [TLSv1.2, TLSv1.3] + ssl.endpoint.identification.algorithm = https + ssl.engine.factory.class = null + ssl.key.password = null + ssl.keymanager.algorithm = SunX509 + ssl.keystore.certificate.chain = null + ssl.keystore.key = null + ssl.keystore.location = null + ssl.keystore.password = null + ssl.keystore.type = JKS + ssl.principal.mapping.rules = DEFAULT + ssl.protocol = TLSv1.3 + ssl.provider = null + ssl.secure.random.implementation = null + ssl.trustmanager.algorithm = PKIX + ssl.truststore.certificates = null + ssl.truststore.location = null + ssl.truststore.password = null + ssl.truststore.type = JKS + transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 + transaction.max.timeout.ms = 900000 + transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 + transaction.state.log.load.buffer.size = 5242880 + transaction.state.log.min.isr = 1 + transaction.state.log.num.partitions = 50 + transaction.state.log.replication.factor = 1 + transaction.state.log.segment.bytes = 104857600 + transactional.id.expiration.ms = 604800000 + unclean.leader.election.enable = false + zookeeper.clientCnxnSocket = null + zookeeper.connect = localhost:2181 + zookeeper.connection.timeout.ms = 18000 + zookeeper.max.in.flight.requests = 10 + zookeeper.session.timeout.ms = 18000 + zookeeper.set.acl = false + zookeeper.ssl.cipher.suites = null + 
zookeeper.ssl.client.enable = false + zookeeper.ssl.crl.enable = false + zookeeper.ssl.enabled.protocols = null + zookeeper.ssl.endpoint.identification.algorithm = HTTPS + zookeeper.ssl.keystore.location = null + zookeeper.ssl.keystore.password = null + zookeeper.ssl.keystore.type = null + zookeeper.ssl.ocsp.enable = false + zookeeper.ssl.protocol = TLSv1.2 + zookeeper.ssl.truststore.location = null + zookeeper.ssl.truststore.password = null + zookeeper.ssl.truststore.type = null + zookeeper.sync.time.ms = 2000 + (kafka.server.KafkaConfig) +[2021-05-10 04:16:54,217] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:16:54,226] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:16:54,234] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:16:54,232] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:16:54,292] INFO Log directory /tmp/kafka-logs not found, creating it. (kafka.log.LogManager) +[2021-05-10 04:16:54,335] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:16:54,338] INFO Attempting recovery for all logs in /tmp/kafka-logs since no clean shutdown file was found (kafka.log.LogManager) +[2021-05-10 04:16:54,342] INFO Loaded 0 logs in 6ms. (kafka.log.LogManager) +[2021-05-10 04:16:54,342] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) +[2021-05-10 04:16:54,356] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) +[2021-05-10 04:16:54,757] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas) +[2021-05-10 04:16:54,761] INFO Awaiting socket connections on 0.0.0.0:9092. 
(kafka.network.Acceptor) +[2021-05-10 04:16:54,827] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) +[2021-05-10 04:16:54,879] INFO [broker-0-to-controller-send-thread]: Starting (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:16:54,899] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:54,924] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:54,924] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:54,934] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:54,998] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:16:55,059] INFO Creating /brokers/ids/0 (is it secure? 
false) (kafka.zk.KafkaZkClient) +[2021-05-10 04:16:55,119] INFO Stat of the created znode at /brokers/ids/0 is: 25,25,1620634615100,1620634615100,1,0,0,72059183683534848,198,0,25 + (kafka.zk.KafkaZkClient) +[2021-05-10 04:16:55,120] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://osboxes:9092, czxid (broker epoch): 25 (kafka.zk.KafkaZkClient) +[2021-05-10 04:16:55,277] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:55,287] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:55,299] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient) +[2021-05-10 04:16:55,319] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:55,332] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:16:55,337] INFO Feature ZK node created at path: /feature (kafka.server.FinalizedFeatureChangeListener) +[2021-05-10 04:16:55,342] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:16:55,431] INFO Updated cache from existing to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). (kafka.server.FinalizedFeatureCache) +[2021-05-10 04:16:55,439] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager) +[2021-05-10 04:16:55,440] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:16:55,451] INFO [TransactionCoordinator id=0] Startup complete. 
(kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:16:55,464] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:16:55,538] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:16:55,610] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:16:55,671] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Starting socket server acceptors and processors (kafka.network.SocketServer) +[2021-05-10 04:16:55,787] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started data-plane acceptor and processor(s) for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) +[2021-05-10 04:16:55,787] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started socket server acceptors and processors (kafka.network.SocketServer) +[2021-05-10 04:16:55,802] INFO Kafka version: 2.8.0 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:16:55,802] INFO Kafka commitId: ebb1d6e21cc92130 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:16:55,803] INFO Kafka startTimeMs: 1620634615789 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:16:55,803] INFO [KafkaServer id=0] started (kafka.server.KafkaServer) +[2021-05-10 04:16:55,938] INFO [broker-0-to-controller-send-thread]: Recorded new controller, from now on will use broker osboxes:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:18:51,223] INFO Creating topic my-topic with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) +[2021-05-10 04:18:51,345] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(my-topic-0) (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:18:51,412] INFO [Log partition=my-topic-0, 
dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:18:51,414] INFO Created log for partition my-topic-0 in /tmp/kafka-logs/my-topic-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:18:51,415] INFO [Partition my-topic-0 broker=0] No checkpointed highwatermark is found for partition my-topic-0 (kafka.cluster.Partition) +[2021-05-10 04:18:51,416] INFO [Partition my-topic-0 broker=0] Log loaded for partition my-topic-0 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:22:17,976] INFO Creating topic json-topic with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) +[2021-05-10 04:22:18,029] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(json-topic-0) (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:22:18,032] INFO [Log partition=json-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:22:18,032] INFO Created log for partition json-topic-0 in /tmp/kafka-logs/json-topic-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:22:18,033] INFO [Partition json-topic-0 broker=0] No checkpointed highwatermark is found for partition json-topic-0 (kafka.cluster.Partition) +[2021-05-10 04:22:18,036] INFO [Partition json-topic-0 broker=0] Log loaded for partition json-topic-0 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,448] INFO Creating topic __consumer_offsets with configuration {compression.type=producer, cleanup.policy=compact, segment.bytes=104857600} and initial partition assignment HashMap(0 -> ArrayBuffer(0), 1 -> ArrayBuffer(0), 2 -> ArrayBuffer(0), 3 -> ArrayBuffer(0), 4 -> ArrayBuffer(0), 5 -> ArrayBuffer(0), 6 -> ArrayBuffer(0), 7 -> ArrayBuffer(0), 8 -> ArrayBuffer(0), 9 -> ArrayBuffer(0), 10 -> ArrayBuffer(0), 11 -> ArrayBuffer(0), 12 -> ArrayBuffer(0), 13 -> ArrayBuffer(0), 14 -> ArrayBuffer(0), 15 -> ArrayBuffer(0), 16 -> ArrayBuffer(0), 17 -> ArrayBuffer(0), 18 -> ArrayBuffer(0), 19 -> ArrayBuffer(0), 20 -> ArrayBuffer(0), 21 -> ArrayBuffer(0), 22 -> ArrayBuffer(0), 23 -> ArrayBuffer(0), 24 -> ArrayBuffer(0), 25 -> ArrayBuffer(0), 26 -> ArrayBuffer(0), 27 -> ArrayBuffer(0), 28 -> ArrayBuffer(0), 29 -> ArrayBuffer(0), 30 -> ArrayBuffer(0), 31 -> ArrayBuffer(0), 32 -> ArrayBuffer(0), 33 -> ArrayBuffer(0), 34 -> ArrayBuffer(0), 35 -> ArrayBuffer(0), 36 -> ArrayBuffer(0), 37 -> ArrayBuffer(0), 38 -> ArrayBuffer(0), 39 -> ArrayBuffer(0), 40 -> ArrayBuffer(0), 41 -> ArrayBuffer(0), 42 -> ArrayBuffer(0), 43 -> ArrayBuffer(0), 44 -> ArrayBuffer(0), 45 -> ArrayBuffer(0), 46 -> ArrayBuffer(0), 47 -> ArrayBuffer(0), 48 -> ArrayBuffer(0), 49 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) +[2021-05-10 04:28:57,622] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-30, __consumer_offsets-25, __consumer_offsets-35, __consumer_offsets-37, __consumer_offsets-38, __consumer_offsets-13, __consumer_offsets-8, __consumer_offsets-21, __consumer_offsets-4, 
__consumer_offsets-27, __consumer_offsets-7, __consumer_offsets-9, __consumer_offsets-46, __consumer_offsets-41, __consumer_offsets-33, __consumer_offsets-23, __consumer_offsets-49, __consumer_offsets-47, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-17, __consumer_offsets-48, __consumer_offsets-19, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-43, __consumer_offsets-6, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-0, __consumer_offsets-44, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45, __consumer_offsets-1, __consumer_offsets-5, __consumer_offsets-26, __consumer_offsets-29, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:28:57,627] INFO [Log partition=__consumer_offsets-3, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,628] INFO Created log for partition __consumer_offsets-3 in /tmp/kafka-logs/__consumer_offsets-3 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 
9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) +[2021-05-10 04:28:57,630] INFO [Partition __consumer_offsets-3 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition) +[2021-05-10 04:28:57,630] INFO [Partition __consumer_offsets-3 broker=0] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,651] INFO [Log partition=__consumer_offsets-18, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,651] INFO Created log for partition __consumer_offsets-18 in /tmp/kafka-logs/__consumer_offsets-18 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,655] INFO [Partition __consumer_offsets-18 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition) +[2021-05-10 04:28:57,656] INFO [Partition __consumer_offsets-18 broker=0] Log loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,667] INFO [Log partition=__consumer_offsets-41, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,667] INFO Created log for partition __consumer_offsets-41 in /tmp/kafka-logs/__consumer_offsets-41 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,667] INFO [Partition __consumer_offsets-41 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition) +[2021-05-10 04:28:57,667] INFO [Partition __consumer_offsets-41 broker=0] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,673] INFO [Log partition=__consumer_offsets-10, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,674] INFO Created log for partition __consumer_offsets-10 in /tmp/kafka-logs/__consumer_offsets-10 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,674] INFO [Partition __consumer_offsets-10 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition) +[2021-05-10 04:28:57,674] INFO [Partition __consumer_offsets-10 broker=0] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,679] INFO [Log partition=__consumer_offsets-33, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,680] INFO Created log for partition __consumer_offsets-33 in /tmp/kafka-logs/__consumer_offsets-33 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,681] INFO [Partition __consumer_offsets-33 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition) +[2021-05-10 04:28:57,681] INFO [Partition __consumer_offsets-33 broker=0] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,702] INFO [Log partition=__consumer_offsets-48, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,708] INFO Created log for partition __consumer_offsets-48 in /tmp/kafka-logs/__consumer_offsets-48 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,709] INFO [Partition __consumer_offsets-48 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition) +[2021-05-10 04:28:57,709] INFO [Partition __consumer_offsets-48 broker=0] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,720] INFO [Log partition=__consumer_offsets-19, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,721] INFO Created log for partition __consumer_offsets-19 in /tmp/kafka-logs/__consumer_offsets-19 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,721] INFO [Partition __consumer_offsets-19 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition) +[2021-05-10 04:28:57,721] INFO [Partition __consumer_offsets-19 broker=0] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,734] INFO [Log partition=__consumer_offsets-34, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,735] INFO Created log for partition __consumer_offsets-34 in /tmp/kafka-logs/__consumer_offsets-34 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,736] INFO [Partition __consumer_offsets-34 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition) +[2021-05-10 04:28:57,736] INFO [Partition __consumer_offsets-34 broker=0] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,748] INFO [Log partition=__consumer_offsets-4, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,749] INFO Created log for partition __consumer_offsets-4 in /tmp/kafka-logs/__consumer_offsets-4 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,752] INFO [Partition __consumer_offsets-4 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition) +[2021-05-10 04:28:57,752] INFO [Partition __consumer_offsets-4 broker=0] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,760] INFO [Log partition=__consumer_offsets-11, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,760] INFO Created log for partition __consumer_offsets-11 in /tmp/kafka-logs/__consumer_offsets-11 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,760] INFO [Partition __consumer_offsets-11 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition) +[2021-05-10 04:28:57,760] INFO [Partition __consumer_offsets-11 broker=0] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,778] INFO [Log partition=__consumer_offsets-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,781] INFO Created log for partition __consumer_offsets-26 in /tmp/kafka-logs/__consumer_offsets-26 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,781] INFO [Partition __consumer_offsets-26 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition) +[2021-05-10 04:28:57,781] INFO [Partition __consumer_offsets-26 broker=0] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,792] INFO [Log partition=__consumer_offsets-49, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,794] INFO Created log for partition __consumer_offsets-49 in /tmp/kafka-logs/__consumer_offsets-49 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,795] INFO [Partition __consumer_offsets-49 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition) +[2021-05-10 04:28:57,795] INFO [Partition __consumer_offsets-49 broker=0] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,809] INFO [Log partition=__consumer_offsets-39, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,815] INFO Created log for partition __consumer_offsets-39 in /tmp/kafka-logs/__consumer_offsets-39 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,815] INFO [Partition __consumer_offsets-39 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition) +[2021-05-10 04:28:57,815] INFO [Partition __consumer_offsets-39 broker=0] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,822] INFO [Log partition=__consumer_offsets-9, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,824] INFO Created log for partition __consumer_offsets-9 in /tmp/kafka-logs/__consumer_offsets-9 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,824] INFO [Partition __consumer_offsets-9 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition) +[2021-05-10 04:28:57,825] INFO [Partition __consumer_offsets-9 broker=0] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,835] INFO [Log partition=__consumer_offsets-24, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,836] INFO Created log for partition __consumer_offsets-24 in /tmp/kafka-logs/__consumer_offsets-24 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,836] INFO [Partition __consumer_offsets-24 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition) +[2021-05-10 04:28:57,836] INFO [Partition __consumer_offsets-24 broker=0] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,845] INFO [Log partition=__consumer_offsets-31, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,848] INFO Created log for partition __consumer_offsets-31 in /tmp/kafka-logs/__consumer_offsets-31 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,848] INFO [Partition __consumer_offsets-31 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition) +[2021-05-10 04:28:57,848] INFO [Partition __consumer_offsets-31 broker=0] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,853] INFO [Log partition=__consumer_offsets-46, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,854] INFO Created log for partition __consumer_offsets-46 in /tmp/kafka-logs/__consumer_offsets-46 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,854] INFO [Partition __consumer_offsets-46 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition) +[2021-05-10 04:28:57,855] INFO [Partition __consumer_offsets-46 broker=0] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,871] INFO [Log partition=__consumer_offsets-1, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,875] INFO Created log for partition __consumer_offsets-1 in /tmp/kafka-logs/__consumer_offsets-1 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,878] INFO [Partition __consumer_offsets-1 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition) +[2021-05-10 04:28:57,879] INFO [Partition __consumer_offsets-1 broker=0] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,888] INFO [Log partition=__consumer_offsets-16, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,889] INFO Created log for partition __consumer_offsets-16 in /tmp/kafka-logs/__consumer_offsets-16 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,890] INFO [Partition __consumer_offsets-16 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition) +[2021-05-10 04:28:57,890] INFO [Partition __consumer_offsets-16 broker=0] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,898] INFO [Log partition=__consumer_offsets-2, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,902] INFO Created log for partition __consumer_offsets-2 in /tmp/kafka-logs/__consumer_offsets-2 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,902] INFO [Partition __consumer_offsets-2 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition) +[2021-05-10 04:28:57,902] INFO [Partition __consumer_offsets-2 broker=0] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,915] INFO [Log partition=__consumer_offsets-25, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,918] INFO Created log for partition __consumer_offsets-25 in /tmp/kafka-logs/__consumer_offsets-25 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,920] INFO [Partition __consumer_offsets-25 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition) +[2021-05-10 04:28:57,920] INFO [Partition __consumer_offsets-25 broker=0] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,936] INFO [Log partition=__consumer_offsets-40, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,937] INFO Created log for partition __consumer_offsets-40 in /tmp/kafka-logs/__consumer_offsets-40 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,937] INFO [Partition __consumer_offsets-40 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition) +[2021-05-10 04:28:57,937] INFO [Partition __consumer_offsets-40 broker=0] Log loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,950] INFO [Log partition=__consumer_offsets-47, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,951] INFO Created log for partition __consumer_offsets-47 in /tmp/kafka-logs/__consumer_offsets-47 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,951] INFO [Partition __consumer_offsets-47 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition) +[2021-05-10 04:28:57,951] INFO [Partition __consumer_offsets-47 broker=0] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,978] INFO [Log partition=__consumer_offsets-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,978] INFO Created log for partition __consumer_offsets-17 in /tmp/kafka-logs/__consumer_offsets-17 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,979] INFO [Partition __consumer_offsets-17 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition) +[2021-05-10 04:28:57,979] INFO [Partition __consumer_offsets-17 broker=0] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:57,988] INFO [Log partition=__consumer_offsets-32, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:57,989] INFO Created log for partition __consumer_offsets-32 in /tmp/kafka-logs/__consumer_offsets-32 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:57,989] INFO [Partition __consumer_offsets-32 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition) +[2021-05-10 04:28:57,989] INFO [Partition __consumer_offsets-32 broker=0] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,001] INFO [Log partition=__consumer_offsets-37, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,005] INFO Created log for partition __consumer_offsets-37 in /tmp/kafka-logs/__consumer_offsets-37 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,005] INFO [Partition __consumer_offsets-37 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition) +[2021-05-10 04:28:58,005] INFO [Partition __consumer_offsets-37 broker=0] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,020] INFO [Log partition=__consumer_offsets-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,024] INFO Created log for partition __consumer_offsets-7 in /tmp/kafka-logs/__consumer_offsets-7 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,024] INFO [Partition __consumer_offsets-7 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition) +[2021-05-10 04:28:58,025] INFO [Partition __consumer_offsets-7 broker=0] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,036] INFO [Log partition=__consumer_offsets-22, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,036] INFO Created log for partition __consumer_offsets-22 in /tmp/kafka-logs/__consumer_offsets-22 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,036] INFO [Partition __consumer_offsets-22 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition) +[2021-05-10 04:28:58,037] INFO [Partition __consumer_offsets-22 broker=0] Log loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,052] INFO [Log partition=__consumer_offsets-29, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,053] INFO Created log for partition __consumer_offsets-29 in /tmp/kafka-logs/__consumer_offsets-29 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,053] INFO [Partition __consumer_offsets-29 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition) +[2021-05-10 04:28:58,053] INFO [Partition __consumer_offsets-29 broker=0] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,061] INFO [Log partition=__consumer_offsets-44, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,061] INFO Created log for partition __consumer_offsets-44 in /tmp/kafka-logs/__consumer_offsets-44 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,061] INFO [Partition __consumer_offsets-44 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition) +[2021-05-10 04:28:58,061] INFO [Partition __consumer_offsets-44 broker=0] Log loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,070] INFO [Log partition=__consumer_offsets-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,071] INFO Created log for partition __consumer_offsets-14 in /tmp/kafka-logs/__consumer_offsets-14 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,071] INFO [Partition __consumer_offsets-14 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition) +[2021-05-10 04:28:58,071] INFO [Partition __consumer_offsets-14 broker=0] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,090] INFO [Log partition=__consumer_offsets-23, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,091] INFO Created log for partition __consumer_offsets-23 in /tmp/kafka-logs/__consumer_offsets-23 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,091] INFO [Partition __consumer_offsets-23 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition) +[2021-05-10 04:28:58,091] INFO [Partition __consumer_offsets-23 broker=0] Log loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,104] INFO [Log partition=__consumer_offsets-38, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,105] INFO Created log for partition __consumer_offsets-38 in /tmp/kafka-logs/__consumer_offsets-38 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,105] INFO [Partition __consumer_offsets-38 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition) +[2021-05-10 04:28:58,105] INFO [Partition __consumer_offsets-38 broker=0] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,116] INFO [Log partition=__consumer_offsets-8, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,116] INFO Created log for partition __consumer_offsets-8 in /tmp/kafka-logs/__consumer_offsets-8 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,116] INFO [Partition __consumer_offsets-8 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition) +[2021-05-10 04:28:58,116] INFO [Partition __consumer_offsets-8 broker=0] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,121] INFO [Log partition=__consumer_offsets-45, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,121] INFO Created log for partition __consumer_offsets-45 in /tmp/kafka-logs/__consumer_offsets-45 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,121] INFO [Partition __consumer_offsets-45 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition) +[2021-05-10 04:28:58,121] INFO [Partition __consumer_offsets-45 broker=0] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,133] INFO [Log partition=__consumer_offsets-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,133] INFO Created log for partition __consumer_offsets-15 in /tmp/kafka-logs/__consumer_offsets-15 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,133] INFO [Partition __consumer_offsets-15 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition) +[2021-05-10 04:28:58,133] INFO [Partition __consumer_offsets-15 broker=0] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,151] INFO [Log partition=__consumer_offsets-30, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,151] INFO Created log for partition __consumer_offsets-30 in /tmp/kafka-logs/__consumer_offsets-30 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,151] INFO [Partition __consumer_offsets-30 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition) +[2021-05-10 04:28:58,151] INFO [Partition __consumer_offsets-30 broker=0] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,165] INFO [Log partition=__consumer_offsets-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,166] INFO Created log for partition __consumer_offsets-0 in /tmp/kafka-logs/__consumer_offsets-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,166] INFO [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,166] INFO [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,177] INFO [Log partition=__consumer_offsets-35, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,177] INFO Created log for partition __consumer_offsets-35 in /tmp/kafka-logs/__consumer_offsets-35 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,178] INFO [Partition __consumer_offsets-35 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition) +[2021-05-10 04:28:58,178] INFO [Partition __consumer_offsets-35 broker=0] Log loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,196] INFO [Log partition=__consumer_offsets-5, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,197] INFO Created log for partition __consumer_offsets-5 in /tmp/kafka-logs/__consumer_offsets-5 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,197] INFO [Partition __consumer_offsets-5 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition) +[2021-05-10 04:28:58,197] INFO [Partition __consumer_offsets-5 broker=0] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,216] INFO [Log partition=__consumer_offsets-20, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,217] INFO Created log for partition __consumer_offsets-20 in /tmp/kafka-logs/__consumer_offsets-20 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,217] INFO [Partition __consumer_offsets-20 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition) +[2021-05-10 04:28:58,217] INFO [Partition __consumer_offsets-20 broker=0] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,233] INFO [Log partition=__consumer_offsets-27, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,235] INFO Created log for partition __consumer_offsets-27 in /tmp/kafka-logs/__consumer_offsets-27 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,235] INFO [Partition __consumer_offsets-27 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition) +[2021-05-10 04:28:58,235] INFO [Partition __consumer_offsets-27 broker=0] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,252] INFO [Log partition=__consumer_offsets-42, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,253] INFO Created log for partition __consumer_offsets-42 in /tmp/kafka-logs/__consumer_offsets-42 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,253] INFO [Partition __consumer_offsets-42 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition) +[2021-05-10 04:28:58,253] INFO [Partition __consumer_offsets-42 broker=0] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,268] INFO [Log partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,269] INFO Created log for partition __consumer_offsets-12 in /tmp/kafka-logs/__consumer_offsets-12 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,269] INFO [Partition __consumer_offsets-12 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition) +[2021-05-10 04:28:58,269] INFO [Partition __consumer_offsets-12 broker=0] Log loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,273] INFO [Log partition=__consumer_offsets-21, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,274] INFO Created log for partition __consumer_offsets-21 in /tmp/kafka-logs/__consumer_offsets-21 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,274] INFO [Partition __consumer_offsets-21 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition) +[2021-05-10 04:28:58,274] INFO [Partition __consumer_offsets-21 broker=0] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,284] INFO [Log partition=__consumer_offsets-36, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,285] INFO Created log for partition __consumer_offsets-36 in /tmp/kafka-logs/__consumer_offsets-36 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,285] INFO [Partition __consumer_offsets-36 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition) +[2021-05-10 04:28:58,285] INFO [Partition __consumer_offsets-36 broker=0] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,292] INFO [Log partition=__consumer_offsets-6, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,293] INFO Created log for partition __consumer_offsets-6 in /tmp/kafka-logs/__consumer_offsets-6 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,293] INFO [Partition __consumer_offsets-6 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition) +[2021-05-10 04:28:58,293] INFO [Partition __consumer_offsets-6 broker=0] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,311] INFO [Log partition=__consumer_offsets-43, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,312] INFO Created log for partition __consumer_offsets-43 in /tmp/kafka-logs/__consumer_offsets-43 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,312] INFO [Partition __consumer_offsets-43 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition) +[2021-05-10 04:28:58,312] INFO [Partition __consumer_offsets-43 broker=0] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,317] INFO [Log partition=__consumer_offsets-13, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,317] INFO Created log for partition __consumer_offsets-13 in /tmp/kafka-logs/__consumer_offsets-13 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,317] INFO [Partition __consumer_offsets-13 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition) +[2021-05-10 04:28:58,317] INFO [Partition __consumer_offsets-13 broker=0] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,321] INFO [Log partition=__consumer_offsets-28, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:28:58,322] INFO Created log for partition __consumer_offsets-28 in /tmp/kafka-logs/__consumer_offsets-28 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 104857600, flush.messages -> 9223372036854775807, message.format.version -> 2.8-IV1, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) +[2021-05-10 04:28:58,322] INFO [Partition __consumer_offsets-28 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition) +[2021-05-10 04:28:58,322] INFO [Partition __consumer_offsets-28 broker=0] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:28:58,325] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 3 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,331] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 18 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 41 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 10 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 33 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from 
__consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 48 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 19 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 34 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 4 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 11 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 26 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group 
metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 49 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,333] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,333] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 39 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 9 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 24 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 31 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 46 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of 
offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 1 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 16 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 2 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 25 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 40 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 47 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] 
Scheduling loading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 17 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 32 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 37 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 7 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,334] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 22 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,334] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 29 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO 
[GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 44 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 14 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 23 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 38 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 8 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 45 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 
04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 15 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 30 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 0 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 35 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 5 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 20 (kafka.coordinator.group.GroupCoordinator) 
+[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 27 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 42 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,335] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,335] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 12 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,342] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,342] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 21 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,342] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,342] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 36 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,342] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,343] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 6 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,343] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,343] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 43 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,343] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,343] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 13 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,343] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,343] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 28 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,343] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,338] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 7 milliseconds, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,344] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 11 milliseconds, of which 10 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,344] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 11 milliseconds, of which 11 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 12 milliseconds, of which 11 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 11 milliseconds, of which 11 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,345] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 11 milliseconds, of which 11 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,346] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 12 milliseconds, of which 11 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,346] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,346] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,346] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,346] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 13 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,347] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 13 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,348] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 7 milliseconds, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 7 milliseconds, of which 7 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 6 milliseconds, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 6 milliseconds, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 6 milliseconds, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,349] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 6 milliseconds, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,350] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 7 milliseconds, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:28:58,597] INFO [GroupCoordinator 0]: Dynamic Member with unknown member id joins group my-group in Empty state. Created a new member id kafka-python-2.0.2-7a82d7a8-c66e-4838-bff3-5b37a5933dda for this member and add to the group. 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,605] INFO [GroupCoordinator 0]: Preparing to rebalance group my-group in state PreparingRebalance with old generation 0 (__consumer_offsets-12) (reason: Adding new member kafka-python-2.0.2-7a82d7a8-c66e-4838-bff3-5b37a5933dda with group instance id None) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,615] INFO [GroupCoordinator 0]: Stabilized group my-group generation 1 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:28:58,633] INFO [GroupCoordinator 0]: Assignment received from leader for group my-group for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:30:35,874] INFO [GroupCoordinator 0]: Member kafka-python-2.0.2-7a82d7a8-c66e-4838-bff3-5b37a5933dda in group my-group has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:30:35,879] INFO [GroupCoordinator 0]: Preparing to rebalance group my-group in state PreparingRebalance with old generation 1 (__consumer_offsets-12) (reason: removing member kafka-python-2.0.2-7a82d7a8-c66e-4838-bff3-5b37a5933dda on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:30:35,882] INFO [GroupCoordinator 0]: Group my-group with generation 2 is now empty (__consumer_offsets-12) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:34:56,057] INFO [GroupCoordinator 0]: Dynamic Member with unknown member id joins group my-group in Empty state. Created a new member id kafka-python-2.0.2-2e62a041-35a3-48bc-992e-cf873414c0ac for this member and add to the group. 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:34:56,059] INFO [GroupCoordinator 0]: Preparing to rebalance group my-group in state PreparingRebalance with old generation 2 (__consumer_offsets-12) (reason: Adding new member kafka-python-2.0.2-2e62a041-35a3-48bc-992e-cf873414c0ac with group instance id None) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:34:56,062] INFO [GroupCoordinator 0]: Stabilized group my-group generation 3 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:34:56,075] INFO [GroupCoordinator 0]: Assignment received from leader for group my-group for generation 3. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:46:46,129] INFO Unable to read additional data from server sessionid 0x10001721e430000, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:47,288] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:46:47,291] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer) +[2021-05-10 04:46:47,292] INFO [KafkaServer id=0] Starting controlled shutdown (kafka.server.KafkaServer) +[2021-05-10 04:46:47,493] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:47,507] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:47,609] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:46:49,240] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:49,240] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:49,527] INFO [GroupCoordinator 0]: Member kafka-python-2.0.2-2e62a041-35a3-48bc-992e-cf873414c0ac in group my-group has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:46:49,528] INFO [GroupCoordinator 0]: Preparing to rebalance group my-group in state PreparingRebalance with old generation 3 (__consumer_offsets-12) (reason: removing member kafka-python-2.0.2-2e62a041-35a3-48bc-992e-cf873414c0ac on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:46:49,528] INFO [GroupCoordinator 0]: Group my-group with generation 4 is now empty (__consumer_offsets-12) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:46:50,500] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:50,501] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:52,291] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:52,292] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:53,823] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:53,824] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:55,089] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:55,090] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:56,197] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:46:56,540] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:56,541] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:58,563] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:46:58,564] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:00,656] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:00,657] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:01,868] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:01,870] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:03,790] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:03,791] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:05,702] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:05,702] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:07,475] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:07,476] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:08,947] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:08,947] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:10,554] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:10,554] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:12,324] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:12,325] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:13,566] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:13,566] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:15,147] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:15,147] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:16,453] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:16,579] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:16,579] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:16,854] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,214] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,718] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,752] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,783] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,814] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,845] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,877] INFO Terminating process due to signal SIGINT 
(org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,910] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,942] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:17,979] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,014] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,047] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,078] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,110] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,142] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,177] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,206] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,238] INFO Opening socket connection to server localhost/127.0.0.1:2181. 
Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:18,239] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:18,240] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,273] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,303] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,337] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,369] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,401] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,432] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,462] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,493] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,522] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,555] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,585] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,618] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,649] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) 
+[2021-05-10 04:47:18,683] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,716] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,747] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,779] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,811] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,844] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,873] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,905] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,932] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,964] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:18,995] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,032] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,063] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,097] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,127] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,155] INFO Terminating process due to signal SIGINT 
(org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,188] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,219] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,253] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,283] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,314] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:19,347] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:20,298] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:20,299] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:21,822] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:21,824] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:23,061] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:23,062] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:24,064] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,065] WARN config/zookeeper.properties is relative. 
Prepend ./ to indicate that you're sure! (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,072] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,073] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,074] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:24,074] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:24,074] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:24,074] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) +[2021-05-10 04:47:24,077] INFO Log4j 1.2 jmx support found and enabled. (org.apache.zookeeper.jmx.ManagedUtil) +[2021-05-10 04:47:24,085] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,086] WARN config/zookeeper.properties is relative. Prepend ./ to indicate that you're sure! 
(org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,086] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,086] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:24,086] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) +[2021-05-10 04:47:24,088] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:47:24,095] INFO Server environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server environment:host.name=osboxes (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server environment:java.version=11.0.4 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server environment:java.vendor=Ubuntu (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server environment:java.home=/usr/lib/jvm/java-11-openjdk-amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server 
environment:java.class.path=/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/activation-1.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/aopalliance-repackaged-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/argparse4j-0.7.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/audience-annotations-0.5.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-cli-1.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-lang3-3.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-api-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-file-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-json-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-client-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-runtime-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-transforms-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-api-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-locator-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-utils-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-core-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-databind-2.10.5.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/roo
t/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-paranamer-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.activation-api-1.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.annotation-api-1.3.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.inject-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.validation-api-2.0.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javassist-3.27.0-GA.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.servlet-api-3.1.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.ws.rs-api-2.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jaxb-api-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-client-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-common-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-core-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-hk2-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-media-jaxb-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-server-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-client-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-http-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-io-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-security-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-server-9.
4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jline-3.12.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jopt-simple-5.0.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0-sources.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-clients-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-log4j-appender-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-metadata-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-raft-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-shell-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-examples-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-tools-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/log4j-1.2.17.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/lz4-java-1.7.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/maven-artifact-3.6.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/metrics-core-2.2.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-buffer-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-codec-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13
-2.8.0/bin/../libs/netty-handler-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-resolver-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/osgi-resource-locator-1.0.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/paranamer-2.8.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/plexus-utils-3.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/reflections-0.9.12.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/rocksdbjni-5.18.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-library-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-logging_2.13-3.9.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-reflect-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-api-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-log4j12-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/snappy-java-1.1.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-jute-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,095] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:java.io.tmpdir=/tmp 
(org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.version=5.0.0-36-generic (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:user.name=root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:user.home=/root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:user.dir=/root/OFC_SC472/kafka/kafka_2.13-2.8.0 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,096] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,097] INFO minSessionTimeout set to 6000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,097] INFO maxSessionTimeout set to 60000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,097] INFO Created server with tickTime 3000 minSessionTimeout 6000 maxSessionTimeout 60000 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:24,102] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) +[2021-05-10 04:47:24,104] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 1 selector thread(s), 4 worker threads, and 64 kB direct 
buffers. (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:47:24,107] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:47:24,134] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) +[2021-05-10 04:47:24,135] INFO Reading snapshot /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileSnap) +[2021-05-10 04:47:24,210] INFO Snapshotting: 0x92 to /tmp/zookeeper/version-2/snapshot.92 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:47:24,245] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) +[2021-05-10 04:47:24,255] INFO Using checkIntervalMs=60000 maxPerMinute=10000 (org.apache.zookeeper.server.ContainerManager) +[2021-05-10 04:47:24,355] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:24,356] INFO Socket connection established, initiating session, client: /127.0.0.1:58264, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:24,367] INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x10001721e430000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:24,367] INFO [ZooKeeperClient Kafka server] Connected. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:24,440] INFO [KafkaServer id=0] Controlled shutdown succeeded (kafka.server.KafkaServer) +[2021-05-10 04:47:24,446] INFO [/config/changes-event-process-thread]: Shutting down (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:47:24,446] INFO [/config/changes-event-process-thread]: Stopped (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:47:24,447] INFO [/config/changes-event-process-thread]: Shutdown completed (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:47:24,447] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopping socket server request processors (kafka.network.SocketServer) +[2021-05-10 04:47:24,463] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopped socket server request processors (kafka.network.SocketServer) +[2021-05-10 04:47:24,466] INFO [data-plane Kafka Request Handler on Broker 0], shutting down (kafka.server.KafkaRequestHandlerPool) +[2021-05-10 04:47:24,477] INFO [data-plane Kafka Request Handler on Broker 0], shut down completely (kafka.server.KafkaRequestHandlerPool) +[2021-05-10 04:47:24,502] INFO [ExpirationReaper-0-AlterAcls]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,695] INFO [ExpirationReaper-0-AlterAcls]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,696] INFO [ExpirationReaper-0-AlterAcls]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,698] INFO [KafkaApi-0] Shutdown complete. 
(kafka.server.KafkaApis) +[2021-05-10 04:47:24,701] INFO [ExpirationReaper-0-topic]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,902] INFO [ExpirationReaper-0-topic]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,902] INFO [ExpirationReaper-0-topic]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:24,907] INFO [TransactionCoordinator id=0] Shutting down. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:47:24,909] INFO [ProducerId Manager 0]: Shutdown complete: last producerId assigned 0 (kafka.coordinator.transaction.ProducerIdManager) +[2021-05-10 04:47:24,910] INFO [Transaction State Manager 0]: Shutdown complete (kafka.coordinator.transaction.TransactionStateManager) +[2021-05-10 04:47:24,910] INFO [Transaction Marker Channel Manager 0]: Shutting down (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:47:24,910] INFO [Transaction Marker Channel Manager 0]: Shutdown completed (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:47:24,910] INFO [Transaction Marker Channel Manager 0]: Stopped (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:47:24,913] INFO [TransactionCoordinator id=0] Shutdown complete. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:47:24,914] INFO [GroupCoordinator 0]: Shutting down. 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:24,915] INFO [ExpirationReaper-0-Heartbeat]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,102] INFO [ExpirationReaper-0-Heartbeat]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,103] INFO [ExpirationReaper-0-Heartbeat]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,103] INFO [ExpirationReaper-0-Rebalance]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,303] INFO [ExpirationReaper-0-Rebalance]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,304] INFO [ExpirationReaper-0-Rebalance]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,305] INFO [GroupCoordinator 0]: Shutdown complete. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:25,306] INFO [ReplicaManager broker=0] Shutting down (kafka.server.ReplicaManager) +[2021-05-10 04:47:25,306] INFO [LogDirFailureHandler]: Shutting down (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:47:25,307] INFO [LogDirFailureHandler]: Shutdown completed (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:47:25,307] INFO [LogDirFailureHandler]: Stopped (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:47:25,308] INFO [ReplicaFetcherManager on broker 0] shutting down (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:47:25,320] INFO [ReplicaFetcherManager on broker 0] shutdown completed (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:47:25,321] INFO [ReplicaAlterLogDirsManager on broker 0] shutting down (kafka.server.ReplicaAlterLogDirsManager) +[2021-05-10 04:47:25,322] INFO [ReplicaAlterLogDirsManager on broker 0] shutdown completed (kafka.server.ReplicaAlterLogDirsManager) +[2021-05-10 
04:47:25,322] INFO [ExpirationReaper-0-Fetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,504] INFO [ExpirationReaper-0-Fetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,504] INFO [ExpirationReaper-0-Fetch]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,504] INFO [ExpirationReaper-0-Produce]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,706] INFO [ExpirationReaper-0-Produce]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,706] INFO [ExpirationReaper-0-Produce]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,706] INFO [ExpirationReaper-0-DeleteRecords]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,917] INFO [ExpirationReaper-0-DeleteRecords]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,917] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:25,917] INFO [ExpirationReaper-0-ElectLeader]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:26,114] INFO [ExpirationReaper-0-ElectLeader]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:26,115] INFO [ExpirationReaper-0-ElectLeader]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:26,128] INFO [ReplicaManager broker=0] Shut down completely (kafka.server.ReplicaManager) +[2021-05-10 04:47:26,128] INFO [broker-0-to-controller-send-thread]: Shutting down (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:47:26,128] INFO [broker-0-to-controller-send-thread]: 
Stopped (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:47:26,129] INFO [broker-0-to-controller-send-thread]: Shutdown completed (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:47:26,130] INFO Broker to controller channel manager for alterIsrChannel shutdown (kafka.server.BrokerToControllerChannelManagerImpl) +[2021-05-10 04:47:26,131] INFO Shutting down. (kafka.log.LogManager) +[2021-05-10 04:47:26,158] INFO [ProducerStateManager partition=json-topic-0] Writing producer snapshot at offset 1 (kafka.log.ProducerStateManager) +[2021-05-10 04:47:26,171] INFO [ProducerStateManager partition=my-topic-0] Writing producer snapshot at offset 20 (kafka.log.ProducerStateManager) +[2021-05-10 04:47:26,209] INFO [ProducerStateManager partition=__consumer_offsets-12] Writing producer snapshot at offset 161 (kafka.log.ProducerStateManager) +[2021-05-10 04:47:26,237] INFO Shutdown complete. (kafka.log.LogManager) +[2021-05-10 04:47:26,245] INFO [feature-zk-node-event-process-thread]: Shutting down (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:47:26,246] INFO [feature-zk-node-event-process-thread]: Stopped (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:47:26,246] INFO [feature-zk-node-event-process-thread]: Shutdown completed (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:47:26,247] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:26,248] INFO Creating new log file: log.93 (org.apache.zookeeper.server.persistence.FileTxnLog) +[2021-05-10 04:47:26,361] INFO Session: 0x10001721e430000 closed (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:26,361] INFO EventThread shut down for session: 0x10001721e430000 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:26,363] INFO [ZooKeeperClient Kafka server] Closed. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:26,364] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:26,908] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:26,908] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:26,908] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,291] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,291] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,292] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,292] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,292] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,292] INFO [ThrottledChannelReaper-ControllerMutation]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,915] INFO [ThrottledChannelReaper-ControllerMutation]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,915] INFO [ThrottledChannelReaper-ControllerMutation]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:27,916] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutting down socket server (kafka.network.SocketServer) +[2021-05-10 04:47:27,961] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutdown completed (kafka.network.SocketServer) +[2021-05-10 04:47:27,962] INFO 
Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:47:27,962] INFO Closing reporter org.apache.kafka.common.metrics.JmxReporter (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:47:27,962] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:47:27,963] INFO Broker and topic stats closed (kafka.server.BrokerTopicStats) +[2021-05-10 04:47:27,967] INFO App info kafka.server for 0 unregistered (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:47:27,967] INFO [KafkaServer id=0] shut down completed (kafka.server.KafkaServer) +[2021-05-10 04:47:34,568] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) +[2021-05-10 04:47:34,864] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) +[2021-05-10 04:47:34,966] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:47:34,969] INFO starting (kafka.server.KafkaServer) +[2021-05-10 04:47:34,969] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) +[2021-05-10 04:47:34,991] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:35,006] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,006] INFO Client environment:host.name=osboxes (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,006] INFO Client environment:java.version=11.0.4 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,006] INFO Client environment:java.vendor=Ubuntu (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,006] INFO Client environment:java.home=/usr/lib/jvm/java-11-openjdk-amd64 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,006] INFO Client environment:java.class.path=/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/activation-1.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/aopalliance-repackaged-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/argparse4j-0.7.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/audience-annotations-0.5.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-cli-1.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-lang3-3.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-api-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-file-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-json-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-client-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-runtime-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-transforms-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-api-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-locator-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-u
tils-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-core-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-databind-2.10.5.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-paranamer-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.activation-api-1.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.annotation-api-1.3.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.inject-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.validation-api-2.0.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javassist-3.27.0-GA.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.servlet-api-3.1.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.ws.rs-api-2.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jaxb-api-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-client-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-common-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-core-2.31.jar:/root/OFC_SC472/kafka/kaf
ka_2.13-2.8.0/bin/../libs/jersey-hk2-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-media-jaxb-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-server-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-client-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-http-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-io-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-security-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-server-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jline-3.12.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jopt-simple-5.0.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0-sources.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-clients-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-log4j-appender-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-metadata-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-raft-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-shell-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-examples-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.
13-2.8.0/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-tools-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/log4j-1.2.17.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/lz4-java-1.7.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/maven-artifact-3.6.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/metrics-core-2.2.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-buffer-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-codec-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-handler-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-resolver-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/osgi-resource-locator-1.0.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/paranamer-2.8.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/plexus-utils-3.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/reflections-0.9.12.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/rocksdbjni-5.18.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-library-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-logging_2.13-3.9.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-reflect-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-api-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/b
in/../libs/slf4j-log4j12-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/snappy-java-1.1.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-jute-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.version=5.0.0-36-generic (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:user.dir=/root/OFC_SC472/kafka/kafka_2.13-2.8.0 (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.memory.free=977MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,007] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,009] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@205d38da (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:47:35,017] 
INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) +[2021-05-10 04:47:35,027] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:35,047] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:35,061] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:35,075] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:36,181] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:36,182] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:37,285] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:37,286] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:38,387] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:38,388] INFO Socket error occurred: localhost/127.0.0.1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:38,674] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,675] WARN config/zookeeper.properties is relative. Prepend ./ to indicate that you're sure! 
(org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,680] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,680] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,681] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:38,681] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:38,682] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) +[2021-05-10 04:47:38,682] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) +[2021-05-10 04:47:38,684] INFO Log4j 1.2 jmx support found and enabled. (org.apache.zookeeper.jmx.ManagedUtil) +[2021-05-10 04:47:38,718] INFO Reading configuration from: config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,727] WARN config/zookeeper.properties is relative. Prepend ./ to indicate that you're sure! 
(org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,728] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,734] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2021-05-10 04:47:38,734] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) +[2021-05-10 04:47:38,745] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:47:38,774] INFO Server environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,775] INFO Server environment:host.name=osboxes (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.version=11.0.4 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.vendor=Ubuntu (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.home=/usr/lib/jvm/java-11-openjdk-amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server 
environment:java.class.path=/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/activation-1.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/aopalliance-repackaged-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/argparse4j-0.7.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/audience-annotations-0.5.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-cli-1.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/commons-lang3-3.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-api-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-basic-auth-extension-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-file-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-json-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-mirror-client-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-runtime-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/connect-transforms-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-api-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-locator-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/hk2-utils-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-annotations-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-core-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-databind-2.10.5.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-dataformat-csv-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-datatype-jdk8-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-base-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-jaxrs-json-provider-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-jaxb-annotations-2.10.5.jar:/roo
t/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-paranamer-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jackson-module-scala_2.13-2.10.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.activation-api-1.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.annotation-api-1.3.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.inject-2.6.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.validation-api-2.0.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javassist-3.27.0-GA.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.servlet-api-3.1.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/javax.ws.rs-api-2.1.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jaxb-api-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-client-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-common-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-container-servlet-core-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-hk2-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-media-jaxb-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jersey-server-2.31.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-client-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-continuation-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-http-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-io-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-security-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-server-9.
4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlet-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-servlets-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jetty-util-ajax-9.4.39.v20210325.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jline-3.12.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/jopt-simple-5.0.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka_2.13-2.8.0-sources.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-clients-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-log4j-appender-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-metadata-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-raft-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-shell-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-examples-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-scala_2.13-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-streams-test-utils-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/kafka-tools-2.8.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/log4j-1.2.17.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/lz4-java-1.7.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/maven-artifact-3.6.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/metrics-core-2.2.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-buffer-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-codec-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13
-2.8.0/bin/../libs/netty-handler-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-resolver-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-epoll-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/netty-transport-native-unix-common-4.1.62.Final.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/osgi-resource-locator-1.0.3.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/paranamer-2.8.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/plexus-utils-3.2.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/reflections-0.9.12.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/rocksdbjni-5.18.4.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-collection-compat_2.13-2.3.0.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-java8-compat_2.13-0.9.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-library-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-logging_2.13-3.9.2.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/scala-reflect-2.13.5.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-api-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/slf4j-log4j12-1.7.30.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/snappy-java-1.1.8.1.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zookeeper-jute-3.5.9.jar:/root/OFC_SC472/kafka/kafka_2.13-2.8.0/bin/../libs/zstd-jni-1.4.9-1.jar (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib/x86_64-linux-gnu/jni:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/usr/lib/jni:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.io.tmpdir=/tmp 
(org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,777] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:os.version=5.0.0-36-generic (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:user.name=root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:user.home=/root (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:user.dir=/root/OFC_SC472/kafka/kafka_2.13-2.8.0 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,778] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,779] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,780] INFO minSessionTimeout set to 6000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,783] INFO maxSessionTimeout set to 60000 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,784] INFO Created server with tickTime 3000 minSessionTimeout 6000 maxSessionTimeout 60000 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) +[2021-05-10 04:47:38,798] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) +[2021-05-10 04:47:38,806] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 1 selector thread(s), 4 worker threads, and 64 kB direct 
buffers. (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:47:38,822] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2021-05-10 04:47:38,851] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) +[2021-05-10 04:47:38,852] INFO Reading snapshot /tmp/zookeeper/version-2/snapshot.92 (org.apache.zookeeper.server.persistence.FileSnap) +[2021-05-10 04:47:38,899] INFO Snapshotting: 0x93 to /tmp/zookeeper/version-2/snapshot.93 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2021-05-10 04:47:38,938] INFO Using checkIntervalMs=60000 maxPerMinute=10000 (org.apache.zookeeper.server.ContainerManager) +[2021-05-10 04:47:38,938] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) +[2021-05-10 04:47:39,489] INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:39,490] INFO Socket connection established, initiating session, client: /127.0.0.1:58276, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:39,503] INFO Creating new log file: log.94 (org.apache.zookeeper.server.persistence.FileTxnLog) +[2021-05-10 04:47:39,513] INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x100018ebd740000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:47:39,515] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:47:39,656] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:47:39,810] INFO Updated cache from existing to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). 
(kafka.server.FinalizedFeatureCache) +[2021-05-10 04:47:39,814] INFO Cluster ID = QF8w599ZSney2uTLCq4H8Q (kafka.server.KafkaServer) +[2021-05-10 04:47:39,879] INFO KafkaConfig values: + advertised.host.name = null + advertised.listeners = null + advertised.port = null + alter.config.policy.class.name = null + alter.log.dirs.replication.quota.window.num = 11 + alter.log.dirs.replication.quota.window.size.seconds = 1 + authorizer.class.name = + auto.create.topics.enable = true + auto.leader.rebalance.enable = true + background.threads = 10 + broker.heartbeat.interval.ms = 2000 + broker.id = 0 + broker.id.generation.enable = true + broker.rack = null + broker.session.timeout.ms = 9000 + client.quota.callback.class = null + compression.type = producer + connection.failed.authentication.delay.ms = 100 + connections.max.idle.ms = 600000 + connections.max.reauth.ms = 0 + control.plane.listener.name = null + controlled.shutdown.enable = true + controlled.shutdown.max.retries = 3 + controlled.shutdown.retry.backoff.ms = 5000 + controller.listener.names = null + controller.quorum.append.linger.ms = 25 + controller.quorum.election.backoff.max.ms = 1000 + controller.quorum.election.timeout.ms = 1000 + controller.quorum.fetch.timeout.ms = 2000 + controller.quorum.request.timeout.ms = 2000 + controller.quorum.retry.backoff.ms = 20 + controller.quorum.voters = [] + controller.quota.window.num = 11 + controller.quota.window.size.seconds = 1 + controller.socket.timeout.ms = 30000 + create.topic.policy.class.name = null + default.replication.factor = 1 + delegation.token.expiry.check.interval.ms = 3600000 + delegation.token.expiry.time.ms = 86400000 + delegation.token.master.key = null + delegation.token.max.lifetime.ms = 604800000 + delegation.token.secret.key = null + delete.records.purgatory.purge.interval.requests = 1 + delete.topic.enable = true + fetch.max.bytes = 57671680 + fetch.purgatory.purge.interval.requests = 1000 + group.initial.rebalance.delay.ms = 0 + 
group.max.session.timeout.ms = 1800000 + group.max.size = 2147483647 + group.min.session.timeout.ms = 6000 + host.name = + initial.broker.registration.timeout.ms = 60000 + inter.broker.listener.name = null + inter.broker.protocol.version = 2.8-IV1 + kafka.metrics.polling.interval.secs = 10 + kafka.metrics.reporters = [] + leader.imbalance.check.interval.seconds = 300 + leader.imbalance.per.broker.percentage = 10 + listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + listeners = null + log.cleaner.backoff.ms = 15000 + log.cleaner.dedupe.buffer.size = 134217728 + log.cleaner.delete.retention.ms = 86400000 + log.cleaner.enable = true + log.cleaner.io.buffer.load.factor = 0.9 + log.cleaner.io.buffer.size = 524288 + log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 + log.cleaner.max.compaction.lag.ms = 9223372036854775807 + log.cleaner.min.cleanable.ratio = 0.5 + log.cleaner.min.compaction.lag.ms = 0 + log.cleaner.threads = 1 + log.cleanup.policy = [delete] + log.dir = /tmp/kafka-logs + log.dirs = /tmp/kafka-logs + log.flush.interval.messages = 9223372036854775807 + log.flush.interval.ms = null + log.flush.offset.checkpoint.interval.ms = 60000 + log.flush.scheduler.interval.ms = 9223372036854775807 + log.flush.start.offset.checkpoint.interval.ms = 60000 + log.index.interval.bytes = 4096 + log.index.size.max.bytes = 10485760 + log.message.downconversion.enable = true + log.message.format.version = 2.8-IV1 + log.message.timestamp.difference.max.ms = 9223372036854775807 + log.message.timestamp.type = CreateTime + log.preallocate = false + log.retention.bytes = -1 + log.retention.check.interval.ms = 300000 + log.retention.hours = 168 + log.retention.minutes = null + log.retention.ms = null + log.roll.hours = 168 + log.roll.jitter.hours = 0 + log.roll.jitter.ms = null + log.roll.ms = null + log.segment.bytes = 1073741824 + log.segment.delete.delay.ms = 60000 + max.connection.creation.rate = 2147483647 + 
max.connections = 2147483647 + max.connections.per.ip = 2147483647 + max.connections.per.ip.overrides = + max.incremental.fetch.session.cache.slots = 1000 + message.max.bytes = 1048588 + metadata.log.dir = null + metric.reporters = [] + metrics.num.samples = 2 + metrics.recording.level = INFO + metrics.sample.window.ms = 30000 + min.insync.replicas = 1 + node.id = -1 + num.io.threads = 8 + num.network.threads = 3 + num.partitions = 1 + num.recovery.threads.per.data.dir = 1 + num.replica.alter.log.dirs.threads = null + num.replica.fetchers = 1 + offset.metadata.max.bytes = 4096 + offsets.commit.required.acks = -1 + offsets.commit.timeout.ms = 5000 + offsets.load.buffer.size = 5242880 + offsets.retention.check.interval.ms = 600000 + offsets.retention.minutes = 10080 + offsets.topic.compression.codec = 0 + offsets.topic.num.partitions = 50 + offsets.topic.replication.factor = 1 + offsets.topic.segment.bytes = 104857600 + password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding + password.encoder.iterations = 4096 + password.encoder.key.length = 128 + password.encoder.keyfactory.algorithm = null + password.encoder.old.secret = null + password.encoder.secret = null + port = 9092 + principal.builder.class = null + process.roles = [] + producer.purgatory.purge.interval.requests = 1000 + queued.max.request.bytes = -1 + queued.max.requests = 500 + quota.consumer.default = 9223372036854775807 + quota.producer.default = 9223372036854775807 + quota.window.num = 11 + quota.window.size.seconds = 1 + replica.fetch.backoff.ms = 1000 + replica.fetch.max.bytes = 1048576 + replica.fetch.min.bytes = 1 + replica.fetch.response.max.bytes = 10485760 + replica.fetch.wait.max.ms = 500 + replica.high.watermark.checkpoint.interval.ms = 5000 + replica.lag.time.max.ms = 30000 + replica.selector.class = null + replica.socket.receive.buffer.bytes = 65536 + replica.socket.timeout.ms = 30000 + replication.quota.window.num = 11 + replication.quota.window.size.seconds = 1 + request.timeout.ms = 
30000 + reserved.broker.max.id = 1000 + sasl.client.callback.handler.class = null + sasl.enabled.mechanisms = [GSSAPI] + sasl.jaas.config = null + sasl.kerberos.kinit.cmd = /usr/bin/kinit + sasl.kerberos.min.time.before.relogin = 60000 + sasl.kerberos.principal.to.local.rules = [DEFAULT] + sasl.kerberos.service.name = null + sasl.kerberos.ticket.renew.jitter = 0.05 + sasl.kerberos.ticket.renew.window.factor = 0.8 + sasl.login.callback.handler.class = null + sasl.login.class = null + sasl.login.refresh.buffer.seconds = 300 + sasl.login.refresh.min.period.seconds = 60 + sasl.login.refresh.window.factor = 0.8 + sasl.login.refresh.window.jitter = 0.05 + sasl.mechanism.controller.protocol = GSSAPI + sasl.mechanism.inter.broker.protocol = GSSAPI + sasl.server.callback.handler.class = null + security.inter.broker.protocol = PLAINTEXT + security.providers = null + socket.connection.setup.timeout.max.ms = 30000 + socket.connection.setup.timeout.ms = 10000 + socket.receive.buffer.bytes = 102400 + socket.request.max.bytes = 104857600 + socket.send.buffer.bytes = 102400 + ssl.cipher.suites = [] + ssl.client.auth = none + ssl.enabled.protocols = [TLSv1.2, TLSv1.3] + ssl.endpoint.identification.algorithm = https + ssl.engine.factory.class = null + ssl.key.password = null + ssl.keymanager.algorithm = SunX509 + ssl.keystore.certificate.chain = null + ssl.keystore.key = null + ssl.keystore.location = null + ssl.keystore.password = null + ssl.keystore.type = JKS + ssl.principal.mapping.rules = DEFAULT + ssl.protocol = TLSv1.3 + ssl.provider = null + ssl.secure.random.implementation = null + ssl.trustmanager.algorithm = PKIX + ssl.truststore.certificates = null + ssl.truststore.location = null + ssl.truststore.password = null + ssl.truststore.type = JKS + transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 + transaction.max.timeout.ms = 900000 + transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 + transaction.state.log.load.buffer.size = 5242880 + 
transaction.state.log.min.isr = 1 + transaction.state.log.num.partitions = 50 + transaction.state.log.replication.factor = 1 + transaction.state.log.segment.bytes = 104857600 + transactional.id.expiration.ms = 604800000 + unclean.leader.election.enable = false + zookeeper.clientCnxnSocket = null + zookeeper.connect = localhost:2181 + zookeeper.connection.timeout.ms = 18000 + zookeeper.max.in.flight.requests = 10 + zookeeper.session.timeout.ms = 18000 + zookeeper.set.acl = false + zookeeper.ssl.cipher.suites = null + zookeeper.ssl.client.enable = false + zookeeper.ssl.crl.enable = false + zookeeper.ssl.enabled.protocols = null + zookeeper.ssl.endpoint.identification.algorithm = HTTPS + zookeeper.ssl.keystore.location = null + zookeeper.ssl.keystore.password = null + zookeeper.ssl.keystore.type = null + zookeeper.ssl.ocsp.enable = false + zookeeper.ssl.protocol = TLSv1.2 + zookeeper.ssl.truststore.location = null + zookeeper.ssl.truststore.password = null + zookeeper.ssl.truststore.type = null + zookeeper.sync.time.ms = 2000 + (kafka.server.KafkaConfig) +[2021-05-10 04:47:39,899] INFO KafkaConfig values: + advertised.host.name = null + advertised.listeners = null + advertised.port = null + alter.config.policy.class.name = null + alter.log.dirs.replication.quota.window.num = 11 + alter.log.dirs.replication.quota.window.size.seconds = 1 + authorizer.class.name = + auto.create.topics.enable = true + auto.leader.rebalance.enable = true + background.threads = 10 + broker.heartbeat.interval.ms = 2000 + broker.id = 0 + broker.id.generation.enable = true + broker.rack = null + broker.session.timeout.ms = 9000 + client.quota.callback.class = null + compression.type = producer + connection.failed.authentication.delay.ms = 100 + connections.max.idle.ms = 600000 + connections.max.reauth.ms = 0 + control.plane.listener.name = null + controlled.shutdown.enable = true + controlled.shutdown.max.retries = 3 + controlled.shutdown.retry.backoff.ms = 5000 + controller.listener.names = 
null + controller.quorum.append.linger.ms = 25 + controller.quorum.election.backoff.max.ms = 1000 + controller.quorum.election.timeout.ms = 1000 + controller.quorum.fetch.timeout.ms = 2000 + controller.quorum.request.timeout.ms = 2000 + controller.quorum.retry.backoff.ms = 20 + controller.quorum.voters = [] + controller.quota.window.num = 11 + controller.quota.window.size.seconds = 1 + controller.socket.timeout.ms = 30000 + create.topic.policy.class.name = null + default.replication.factor = 1 + delegation.token.expiry.check.interval.ms = 3600000 + delegation.token.expiry.time.ms = 86400000 + delegation.token.master.key = null + delegation.token.max.lifetime.ms = 604800000 + delegation.token.secret.key = null + delete.records.purgatory.purge.interval.requests = 1 + delete.topic.enable = true + fetch.max.bytes = 57671680 + fetch.purgatory.purge.interval.requests = 1000 + group.initial.rebalance.delay.ms = 0 + group.max.session.timeout.ms = 1800000 + group.max.size = 2147483647 + group.min.session.timeout.ms = 6000 + host.name = + initial.broker.registration.timeout.ms = 60000 + inter.broker.listener.name = null + inter.broker.protocol.version = 2.8-IV1 + kafka.metrics.polling.interval.secs = 10 + kafka.metrics.reporters = [] + leader.imbalance.check.interval.seconds = 300 + leader.imbalance.per.broker.percentage = 10 + listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + listeners = null + log.cleaner.backoff.ms = 15000 + log.cleaner.dedupe.buffer.size = 134217728 + log.cleaner.delete.retention.ms = 86400000 + log.cleaner.enable = true + log.cleaner.io.buffer.load.factor = 0.9 + log.cleaner.io.buffer.size = 524288 + log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 + log.cleaner.max.compaction.lag.ms = 9223372036854775807 + log.cleaner.min.cleanable.ratio = 0.5 + log.cleaner.min.compaction.lag.ms = 0 + log.cleaner.threads = 1 + log.cleanup.policy = [delete] + log.dir = /tmp/kafka-logs + log.dirs = 
/tmp/kafka-logs + log.flush.interval.messages = 9223372036854775807 + log.flush.interval.ms = null + log.flush.offset.checkpoint.interval.ms = 60000 + log.flush.scheduler.interval.ms = 9223372036854775807 + log.flush.start.offset.checkpoint.interval.ms = 60000 + log.index.interval.bytes = 4096 + log.index.size.max.bytes = 10485760 + log.message.downconversion.enable = true + log.message.format.version = 2.8-IV1 + log.message.timestamp.difference.max.ms = 9223372036854775807 + log.message.timestamp.type = CreateTime + log.preallocate = false + log.retention.bytes = -1 + log.retention.check.interval.ms = 300000 + log.retention.hours = 168 + log.retention.minutes = null + log.retention.ms = null + log.roll.hours = 168 + log.roll.jitter.hours = 0 + log.roll.jitter.ms = null + log.roll.ms = null + log.segment.bytes = 1073741824 + log.segment.delete.delay.ms = 60000 + max.connection.creation.rate = 2147483647 + max.connections = 2147483647 + max.connections.per.ip = 2147483647 + max.connections.per.ip.overrides = + max.incremental.fetch.session.cache.slots = 1000 + message.max.bytes = 1048588 + metadata.log.dir = null + metric.reporters = [] + metrics.num.samples = 2 + metrics.recording.level = INFO + metrics.sample.window.ms = 30000 + min.insync.replicas = 1 + node.id = -1 + num.io.threads = 8 + num.network.threads = 3 + num.partitions = 1 + num.recovery.threads.per.data.dir = 1 + num.replica.alter.log.dirs.threads = null + num.replica.fetchers = 1 + offset.metadata.max.bytes = 4096 + offsets.commit.required.acks = -1 + offsets.commit.timeout.ms = 5000 + offsets.load.buffer.size = 5242880 + offsets.retention.check.interval.ms = 600000 + offsets.retention.minutes = 10080 + offsets.topic.compression.codec = 0 + offsets.topic.num.partitions = 50 + offsets.topic.replication.factor = 1 + offsets.topic.segment.bytes = 104857600 + password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding + password.encoder.iterations = 4096 + password.encoder.key.length = 128 + 
password.encoder.keyfactory.algorithm = null + password.encoder.old.secret = null + password.encoder.secret = null + port = 9092 + principal.builder.class = null + process.roles = [] + producer.purgatory.purge.interval.requests = 1000 + queued.max.request.bytes = -1 + queued.max.requests = 500 + quota.consumer.default = 9223372036854775807 + quota.producer.default = 9223372036854775807 + quota.window.num = 11 + quota.window.size.seconds = 1 + replica.fetch.backoff.ms = 1000 + replica.fetch.max.bytes = 1048576 + replica.fetch.min.bytes = 1 + replica.fetch.response.max.bytes = 10485760 + replica.fetch.wait.max.ms = 500 + replica.high.watermark.checkpoint.interval.ms = 5000 + replica.lag.time.max.ms = 30000 + replica.selector.class = null + replica.socket.receive.buffer.bytes = 65536 + replica.socket.timeout.ms = 30000 + replication.quota.window.num = 11 + replication.quota.window.size.seconds = 1 + request.timeout.ms = 30000 + reserved.broker.max.id = 1000 + sasl.client.callback.handler.class = null + sasl.enabled.mechanisms = [GSSAPI] + sasl.jaas.config = null + sasl.kerberos.kinit.cmd = /usr/bin/kinit + sasl.kerberos.min.time.before.relogin = 60000 + sasl.kerberos.principal.to.local.rules = [DEFAULT] + sasl.kerberos.service.name = null + sasl.kerberos.ticket.renew.jitter = 0.05 + sasl.kerberos.ticket.renew.window.factor = 0.8 + sasl.login.callback.handler.class = null + sasl.login.class = null + sasl.login.refresh.buffer.seconds = 300 + sasl.login.refresh.min.period.seconds = 60 + sasl.login.refresh.window.factor = 0.8 + sasl.login.refresh.window.jitter = 0.05 + sasl.mechanism.controller.protocol = GSSAPI + sasl.mechanism.inter.broker.protocol = GSSAPI + sasl.server.callback.handler.class = null + security.inter.broker.protocol = PLAINTEXT + security.providers = null + socket.connection.setup.timeout.max.ms = 30000 + socket.connection.setup.timeout.ms = 10000 + socket.receive.buffer.bytes = 102400 + socket.request.max.bytes = 104857600 + socket.send.buffer.bytes = 
102400 + ssl.cipher.suites = [] + ssl.client.auth = none + ssl.enabled.protocols = [TLSv1.2, TLSv1.3] + ssl.endpoint.identification.algorithm = https + ssl.engine.factory.class = null + ssl.key.password = null + ssl.keymanager.algorithm = SunX509 + ssl.keystore.certificate.chain = null + ssl.keystore.key = null + ssl.keystore.location = null + ssl.keystore.password = null + ssl.keystore.type = JKS + ssl.principal.mapping.rules = DEFAULT + ssl.protocol = TLSv1.3 + ssl.provider = null + ssl.secure.random.implementation = null + ssl.trustmanager.algorithm = PKIX + ssl.truststore.certificates = null + ssl.truststore.location = null + ssl.truststore.password = null + ssl.truststore.type = JKS + transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 + transaction.max.timeout.ms = 900000 + transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 + transaction.state.log.load.buffer.size = 5242880 + transaction.state.log.min.isr = 1 + transaction.state.log.num.partitions = 50 + transaction.state.log.replication.factor = 1 + transaction.state.log.segment.bytes = 104857600 + transactional.id.expiration.ms = 604800000 + unclean.leader.election.enable = false + zookeeper.clientCnxnSocket = null + zookeeper.connect = localhost:2181 + zookeeper.connection.timeout.ms = 18000 + zookeeper.max.in.flight.requests = 10 + zookeeper.session.timeout.ms = 18000 + zookeeper.set.acl = false + zookeeper.ssl.cipher.suites = null + zookeeper.ssl.client.enable = false + zookeeper.ssl.crl.enable = false + zookeeper.ssl.enabled.protocols = null + zookeeper.ssl.endpoint.identification.algorithm = HTTPS + zookeeper.ssl.keystore.location = null + zookeeper.ssl.keystore.password = null + zookeeper.ssl.keystore.type = null + zookeeper.ssl.ocsp.enable = false + zookeeper.ssl.protocol = TLSv1.2 + zookeeper.ssl.truststore.location = null + zookeeper.ssl.truststore.password = null + zookeeper.ssl.truststore.type = null + zookeeper.sync.time.ms = 2000 + (kafka.server.KafkaConfig) 
+[2021-05-10 04:47:40,003] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:40,003] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:40,003] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:40,009] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:47:40,080] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,082] INFO Skipping recovery for all logs in /tmp/kafka-logs since clean shutdown file was found (kafka.log.LogManager) +[2021-05-10 04:47:40,169] INFO [Log partition=__consumer_offsets-9, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,204] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-9, topic=__consumer_offsets, partition=9, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 114ms (1/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,211] INFO [Log partition=__consumer_offsets-5, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,223] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-5, topic=__consumer_offsets, partition=5, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 17ms (2/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,227] INFO [Log partition=__consumer_offsets-16, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,234] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-16, 
topic=__consumer_offsets, partition=16, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (3/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,243] INFO [Log partition=__consumer_offsets-41, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,250] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-41, topic=__consumer_offsets, partition=41, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 16ms (4/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,255] INFO [Log partition=__consumer_offsets-44, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,266] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-44, topic=__consumer_offsets, partition=44, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 16ms (5/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,271] INFO [Log partition=__consumer_offsets-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,279] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-14, topic=__consumer_offsets, partition=14, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (6/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,283] INFO [Log partition=__consumer_offsets-31, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,290] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-31, topic=__consumer_offsets, partition=31, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 10ms (7/52 loaded in 
/tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,298] INFO [Log partition=__consumer_offsets-34, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,303] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-34, topic=__consumer_offsets, partition=34, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (8/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,312] INFO [Log partition=__consumer_offsets-38, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,317] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-38, topic=__consumer_offsets, partition=38, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 14ms (9/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,322] INFO [Log partition=__consumer_offsets-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,330] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-26, topic=__consumer_offsets, partition=26, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 13ms (10/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,335] INFO [Log partition=__consumer_offsets-13, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,336] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-13, topic=__consumer_offsets, partition=13, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 5ms (11/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,369] INFO [Log partition=__consumer_offsets-6, dir=/tmp/kafka-logs] Loading producer 
state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,385] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-6, topic=__consumer_offsets, partition=6, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 47ms (12/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,391] INFO [Log partition=__consumer_offsets-36, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,402] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-36, topic=__consumer_offsets, partition=36, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 17ms (13/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,408] INFO [Log partition=__consumer_offsets-2, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,409] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-2, topic=__consumer_offsets, partition=2, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 7ms (14/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,426] INFO [Log partition=__consumer_offsets-40, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,427] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-40, topic=__consumer_offsets, partition=40, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 18ms (15/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,442] INFO [Log partition=__consumer_offsets-49, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,445] INFO Completed load of 
Log(dir=/tmp/kafka-logs/__consumer_offsets-49, topic=__consumer_offsets, partition=49, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 17ms (16/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,461] INFO [Log partition=__consumer_offsets-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,464] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-0, topic=__consumer_offsets, partition=0, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (17/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,482] INFO [Log partition=__consumer_offsets-3, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,483] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-3, topic=__consumer_offsets, partition=3, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 20ms (18/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,488] INFO [Log partition=__consumer_offsets-4, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,489] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-4, topic=__consumer_offsets, partition=4, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 5ms (19/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,499] INFO [Log partition=__consumer_offsets-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,500] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-15, topic=__consumer_offsets, partition=15, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) 
with 1 segments in 12ms (20/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,505] INFO [Log partition=__consumer_offsets-29, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,506] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-29, topic=__consumer_offsets, partition=29, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 5ms (21/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,545] INFO [Log partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Loading producer state till offset 161 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,549] INFO [ProducerStateManager partition=__consumer_offsets-12] Loading producer state from snapshot file 'SnapshotFile(/tmp/kafka-logs/__consumer_offsets-12/00000000000000000161.snapshot,161)' (kafka.log.ProducerStateManager) +[2021-05-10 04:47:40,566] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-12, topic=__consumer_offsets, partition=12, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=161) with 1 segments in 59ms (22/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,580] INFO [Log partition=__consumer_offsets-46, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,595] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-46, topic=__consumer_offsets, partition=46, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 29ms (23/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,604] INFO [Log partition=__consumer_offsets-18, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,624] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-18, 
topic=__consumer_offsets, partition=18, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 28ms (24/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,631] INFO [Log partition=__consumer_offsets-27, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,634] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-27, topic=__consumer_offsets, partition=27, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 9ms (25/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,642] INFO [Log partition=json-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 1 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,642] INFO [ProducerStateManager partition=json-topic-0] Loading producer state from snapshot file 'SnapshotFile(/tmp/kafka-logs/json-topic-0/00000000000000000001.snapshot,1)' (kafka.log.ProducerStateManager) +[2021-05-10 04:47:40,661] INFO Completed load of Log(dir=/tmp/kafka-logs/json-topic-0, topic=json-topic, partition=0, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=1) with 1 segments in 21ms (26/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,669] INFO [Log partition=__consumer_offsets-32, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,670] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-32, topic=__consumer_offsets, partition=32, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 9ms (27/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,679] INFO [Log partition=__consumer_offsets-35, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,680] INFO Completed 
load of Log(dir=/tmp/kafka-logs/__consumer_offsets-35, topic=__consumer_offsets, partition=35, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 9ms (28/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,688] INFO [Log partition=__consumer_offsets-10, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,700] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-10, topic=__consumer_offsets, partition=10, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 20ms (29/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,709] INFO [Log partition=__consumer_offsets-8, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,710] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-8, topic=__consumer_offsets, partition=8, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 9ms (30/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,724] INFO [Log partition=my-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 20 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,724] INFO [ProducerStateManager partition=my-topic-0] Loading producer state from snapshot file 'SnapshotFile(/tmp/kafka-logs/my-topic-0/00000000000000000020.snapshot,20)' (kafka.log.ProducerStateManager) +[2021-05-10 04:47:40,726] INFO Completed load of Log(dir=/tmp/kafka-logs/my-topic-0, topic=my-topic, partition=0, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=20) with 1 segments in 16ms (31/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,741] INFO [Log partition=__consumer_offsets-43, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 
(kafka.log.Log) +[2021-05-10 04:47:40,744] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-43, topic=__consumer_offsets, partition=43, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 18ms (32/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,754] INFO [Log partition=__consumer_offsets-33, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,757] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-33, topic=__consumer_offsets, partition=33, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (33/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,763] INFO [Log partition=__consumer_offsets-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,764] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-17, topic=__consumer_offsets, partition=17, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 6ms (34/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,769] INFO [Log partition=__consumer_offsets-48, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,776] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-48, topic=__consumer_offsets, partition=48, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 11ms (35/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,782] INFO [Log partition=__consumer_offsets-1, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,783] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-1, topic=__consumer_offsets, 
partition=1, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 6ms (36/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,788] INFO [Log partition=__consumer_offsets-39, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,789] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-39, topic=__consumer_offsets, partition=39, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 6ms (37/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,793] INFO [Log partition=__consumer_offsets-21, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,794] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-21, topic=__consumer_offsets, partition=21, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 5ms (38/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,809] INFO [Log partition=__consumer_offsets-42, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,811] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-42, topic=__consumer_offsets, partition=42, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 16ms (39/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,816] INFO [Log partition=__consumer_offsets-47, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,817] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-47, topic=__consumer_offsets, partition=47, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 6ms (40/52 loaded in /tmp/kafka-logs) 
(kafka.log.LogManager) +[2021-05-10 04:47:40,822] INFO [Log partition=__consumer_offsets-23, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,823] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-23, topic=__consumer_offsets, partition=23, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 5ms (41/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,835] INFO [Log partition=__consumer_offsets-24, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,836] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-24, topic=__consumer_offsets, partition=24, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 12ms (42/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,852] INFO [Log partition=__consumer_offsets-11, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,853] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-11, topic=__consumer_offsets, partition=11, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 13ms (43/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,860] INFO [Log partition=__consumer_offsets-19, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,861] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-19, topic=__consumer_offsets, partition=19, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 8ms (44/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,865] INFO [Log partition=__consumer_offsets-45, dir=/tmp/kafka-logs] Loading producer state till offset 
0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,865] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-45, topic=__consumer_offsets, partition=45, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 4ms (45/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,874] INFO [Log partition=__consumer_offsets-37, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,877] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-37, topic=__consumer_offsets, partition=37, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 7ms (46/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,885] INFO [Log partition=__consumer_offsets-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,886] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-7, topic=__consumer_offsets, partition=7, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 8ms (47/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,889] INFO [Log partition=__consumer_offsets-30, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,890] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-30, topic=__consumer_offsets, partition=30, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 4ms (48/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,904] INFO [Log partition=__consumer_offsets-22, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,906] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-22, 
topic=__consumer_offsets, partition=22, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 16ms (49/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,915] INFO [Log partition=__consumer_offsets-25, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,916] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-25, topic=__consumer_offsets, partition=25, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 10ms (50/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,926] INFO [Log partition=__consumer_offsets-28, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,927] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-28, topic=__consumer_offsets, partition=28, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 11ms (51/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,931] INFO [Log partition=__consumer_offsets-20, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) +[2021-05-10 04:47:40,932] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-20, topic=__consumer_offsets, partition=20, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments in 4ms (52/52 loaded in /tmp/kafka-logs) (kafka.log.LogManager) +[2021-05-10 04:47:40,933] INFO Loaded 52 logs in 853ms. (kafka.log.LogManager) +[2021-05-10 04:47:40,936] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) +[2021-05-10 04:47:40,939] INFO Starting log flusher with a default period of 9223372036854775807 ms. 
(kafka.log.LogManager) +[2021-05-10 04:47:41,274] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas) +[2021-05-10 04:47:41,277] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor) +[2021-05-10 04:47:41,382] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) +[2021-05-10 04:47:41,447] INFO [broker-0-to-controller-send-thread]: Starting (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:47:41,469] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,470] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,482] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,486] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,496] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:47:41,578] INFO Creating /brokers/ids/0 (is it secure? 
false) (kafka.zk.KafkaZkClient) +[2021-05-10 04:47:41,620] INFO Stat of the created znode at /brokers/ids/0 is: 163,163,1620636461603,1620636461603,1,0,0,72059306613407744,198,0,163 + (kafka.zk.KafkaZkClient) +[2021-05-10 04:47:41,621] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://osboxes:9092, czxid (broker epoch): 163 (kafka.zk.KafkaZkClient) +[2021-05-10 04:47:41,693] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,694] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,694] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:41,705] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:41,774] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:41,858] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:1000,blockEndProducerId:1999) by writing to Zk with path version 2 (kafka.coordinator.transaction.ProducerIdManager) +[2021-05-10 04:47:41,879] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:47:41,901] INFO [TransactionCoordinator id=0] Startup complete. 
(kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:47:41,932] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:47:42,038] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:47:42,171] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:47:42,219] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Starting socket server acceptors and processors (kafka.network.SocketServer) +[2021-05-10 04:47:42,270] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started data-plane acceptor and processor(s) for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) +[2021-05-10 04:47:42,275] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Started socket server acceptors and processors (kafka.network.SocketServer) +[2021-05-10 04:47:42,288] INFO Kafka version: 2.8.0 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:47:42,303] INFO Kafka commitId: ebb1d6e21cc92130 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:47:42,306] INFO Kafka startTimeMs: 1620636462275 (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:47:42,308] INFO [KafkaServer id=0] started (kafka.server.KafkaServer) +[2021-05-10 04:47:42,397] INFO [broker-0-to-controller-send-thread]: Recorded new controller, from now on will use broker osboxes:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:47:42,477] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-30, __consumer_offsets-25, __consumer_offsets-35, __consumer_offsets-37, __consumer_offsets-13, __consumer_offsets-8, __consumer_offsets-21, __consumer_offsets-4, __consumer_offsets-27, __consumer_offsets-7, __consumer_offsets-9, 
__consumer_offsets-46, __consumer_offsets-41, __consumer_offsets-33, __consumer_offsets-23, __consumer_offsets-49, __consumer_offsets-47, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, my-topic-0, __consumer_offsets-38, __consumer_offsets-17, __consumer_offsets-48, __consumer_offsets-19, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-43, __consumer_offsets-6, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-0, __consumer_offsets-44, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45, __consumer_offsets-1, __consumer_offsets-5, __consumer_offsets-26, json-topic-0, __consumer_offsets-29, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:47:42,496] INFO [Partition __consumer_offsets-3 broker=0] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,515] INFO [Partition __consumer_offsets-18 broker=0] Log loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,522] INFO [Partition __consumer_offsets-41 broker=0] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,525] INFO [Partition __consumer_offsets-10 broker=0] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,530] INFO [Partition __consumer_offsets-33 broker=0] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,536] INFO [Partition __consumer_offsets-48 broker=0] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition) 
+[2021-05-10 04:47:42,539] INFO [Partition __consumer_offsets-19 broker=0] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,546] INFO [Partition __consumer_offsets-34 broker=0] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,549] INFO [Partition __consumer_offsets-4 broker=0] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,551] INFO [Partition __consumer_offsets-11 broker=0] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,556] INFO [Partition __consumer_offsets-26 broker=0] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,561] INFO [Partition __consumer_offsets-49 broker=0] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,565] INFO [Partition __consumer_offsets-39 broker=0] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,569] INFO [Partition __consumer_offsets-9 broker=0] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,577] INFO [Partition __consumer_offsets-24 broker=0] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,581] INFO [Partition __consumer_offsets-31 broker=0] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,586] INFO [Partition __consumer_offsets-46 broker=0] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,589] INFO [Partition 
__consumer_offsets-1 broker=0] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,591] INFO [Partition __consumer_offsets-16 broker=0] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,596] INFO [Partition __consumer_offsets-2 broker=0] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,603] INFO [Partition __consumer_offsets-25 broker=0] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,607] INFO [Partition __consumer_offsets-40 broker=0] Log loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,610] INFO [Partition __consumer_offsets-47 broker=0] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,613] INFO [Partition __consumer_offsets-17 broker=0] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,615] INFO [Partition json-topic-0 broker=0] Log loaded for partition json-topic-0 with initial high watermark 1 (kafka.cluster.Partition) +[2021-05-10 04:47:42,616] INFO [Partition __consumer_offsets-32 broker=0] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,633] INFO [Partition __consumer_offsets-37 broker=0] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,637] INFO [Partition __consumer_offsets-7 broker=0] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,644] INFO [Partition __consumer_offsets-22 broker=0] Log loaded for partition 
__consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,659] INFO [Partition __consumer_offsets-29 broker=0] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,663] INFO [Partition __consumer_offsets-44 broker=0] Log loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,666] INFO [Partition __consumer_offsets-14 broker=0] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,669] INFO [Partition __consumer_offsets-23 broker=0] Log loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,672] INFO [Partition __consumer_offsets-38 broker=0] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,674] INFO [Partition __consumer_offsets-8 broker=0] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,676] INFO [Partition __consumer_offsets-45 broker=0] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,697] INFO [Partition __consumer_offsets-15 broker=0] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,702] INFO [Partition __consumer_offsets-30 broker=0] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,705] INFO [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,708] INFO [Partition __consumer_offsets-35 broker=0] Log loaded for partition __consumer_offsets-35 with initial high 
watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,711] INFO [Partition __consumer_offsets-5 broker=0] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,713] INFO [Partition __consumer_offsets-20 broker=0] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,717] INFO [Partition __consumer_offsets-27 broker=0] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,721] INFO [Partition __consumer_offsets-42 broker=0] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,725] INFO [Partition __consumer_offsets-12 broker=0] Log loaded for partition __consumer_offsets-12 with initial high watermark 161 (kafka.cluster.Partition) +[2021-05-10 04:47:42,728] INFO [Partition __consumer_offsets-21 broker=0] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,731] INFO [Partition __consumer_offsets-36 broker=0] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,735] INFO [Partition __consumer_offsets-6 broker=0] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,738] INFO [Partition __consumer_offsets-43 broker=0] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,742] INFO [Partition __consumer_offsets-13 broker=0] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition) +[2021-05-10 04:47:42,746] INFO [Partition __consumer_offsets-28 broker=0] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition) 
+[2021-05-10 04:47:42,749] INFO [Partition my-topic-0 broker=0] Log loaded for partition my-topic-0 with initial high watermark 20 (kafka.cluster.Partition) +[2021-05-10 04:47:42,758] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 3 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,761] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,772] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 11 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,775] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 18 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,775] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,775] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,777] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 41 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,777] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,777] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,778] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 10 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,778] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,778] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,778] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 33 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,778] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,778] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,778] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 48 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,778] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,779] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 1 milliseconds, of which 0 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,780] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 19 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,780] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,781] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 1 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,782] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 34 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,782] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,783] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 1 milliseconds, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,783] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 4 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,783] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,783] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,783] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 11 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,783] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 1 milliseconds, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 26 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 49 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 39 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,784] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 9 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,785] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,785] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 24 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,784] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,786] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 1 milliseconds, of which 1 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,787] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,787] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 31 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,787] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,788] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 46 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,788] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,787] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,788] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,788] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 1 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,788] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,788] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 16 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,789] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,789] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 2 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,789] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,789] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 25 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,789] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,789] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 40 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,789] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,789] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 47 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,789] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from 
__consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,788] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 0 milliseconds, of which 0 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,790] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 2 milliseconds, of which 2 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,790] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 1 milliseconds, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,790] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 1 milliseconds, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,791] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 2 milliseconds, of which 2 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 3 milliseconds, of which 3 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 17 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 32 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 37 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 7 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 22 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 29 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from 
__consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 44 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 14 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,792] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 23 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,792] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 38 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 8 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 45 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group 
metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 15 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 30 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 0 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 35 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 5 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 20 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of 
offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 27 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 42 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 12 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 21 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 36 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 6 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] 
Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 43 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 13 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,793] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 28 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,793] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,806] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 17 milliseconds, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,806] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 14 milliseconds, of which 14 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,806] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 14 milliseconds, of which 14 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,807] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 15 milliseconds, of which 15 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,811] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 18 milliseconds, of which 18 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,812] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 19 milliseconds, of which 19 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,812] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 19 milliseconds, of which 19 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,812] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 19 milliseconds, of which 19 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,812] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 19 milliseconds, of which 19 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,813] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 20 milliseconds, of which 19 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,813] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 20 milliseconds, of which 20 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,880] INFO Loaded member MemberMetadata(memberId=kafka-python-2.0.2-7a82d7a8-c66e-4838-bff3-5b37a5933dda, groupInstanceId=None, clientId=kafka-python-2.0.2, clientHost=/127.0.0.1, sessionTimeoutMs=10000, rebalanceTimeoutMs=300000, supportedProtocols=List(range), ) in group my-group with generation 1. (kafka.coordinator.group.GroupMetadata$) +[2021-05-10 04:47:42,894] INFO Loaded member MemberMetadata(memberId=kafka-python-2.0.2-2e62a041-35a3-48bc-992e-cf873414c0ac, groupInstanceId=None, clientId=kafka-python-2.0.2, clientHost=/127.0.0.1, sessionTimeoutMs=10000, rebalanceTimeoutMs=300000, supportedProtocols=List(range), ) in group my-group with generation 3. 
(kafka.coordinator.group.GroupMetadata$) +[2021-05-10 04:47:42,923] INFO [GroupCoordinator 0]: Loading group metadata for my-group with generation 4 (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:42,923] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 130 milliseconds, of which 21 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,924] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 131 milliseconds, of which 131 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,927] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 134 milliseconds, of which 134 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,927] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 134 milliseconds, of which 134 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,927] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 134 milliseconds, of which 134 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,928] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 135 milliseconds, of which 135 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:42,928] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 135 milliseconds, of which 135 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2021-05-10 04:47:49,859] INFO [GroupCoordinator 0]: Dynamic Member with unknown member id joins group my-group in Empty state. Created a new member id kafka-python-2.0.2-9ff8f7c1-2f6d-4163-ba6f-158f15afc656 for this member and add to the group. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:49,864] INFO [GroupCoordinator 0]: Preparing to rebalance group my-group in state PreparingRebalance with old generation 4 (__consumer_offsets-12) (reason: Adding new member kafka-python-2.0.2-9ff8f7c1-2f6d-4163-ba6f-158f15afc656 with group instance id None) (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:49,868] INFO [GroupCoordinator 0]: Stabilized group my-group generation 5 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:47:49,882] INFO [GroupCoordinator 0]: Assignment received from leader for group my-group for generation 5. The group has 1 members, 0 of which are static. 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:48:06,562] INFO Terminating process due to signal SIGINT (org.apache.kafka.common.utils.LoggingSignalHandler) +[2021-05-10 04:48:06,580] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer) +[2021-05-10 04:48:06,587] INFO [KafkaServer id=0] Starting controlled shutdown (kafka.server.KafkaServer) +[2021-05-10 04:48:06,619] INFO [KafkaServer id=0] Controlled shutdown succeeded (kafka.server.KafkaServer) +[2021-05-10 04:48:06,639] INFO [/config/changes-event-process-thread]: Shutting down (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:48:06,647] INFO [/config/changes-event-process-thread]: Stopped (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:48:06,647] INFO [/config/changes-event-process-thread]: Shutdown completed (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2021-05-10 04:48:06,648] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopping socket server request processors (kafka.network.SocketServer) +[2021-05-10 04:48:06,656] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopped socket server request processors (kafka.network.SocketServer) +[2021-05-10 04:48:06,658] INFO [data-plane Kafka Request Handler on Broker 0], shutting down (kafka.server.KafkaRequestHandlerPool) +[2021-05-10 04:48:06,661] INFO [data-plane Kafka Request Handler on Broker 0], shut down completely (kafka.server.KafkaRequestHandlerPool) +[2021-05-10 04:48:06,681] INFO [ExpirationReaper-0-AlterAcls]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,833] INFO [ExpirationReaper-0-AlterAcls]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,833] INFO [ExpirationReaper-0-AlterAcls]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,834] INFO [KafkaApi-0] 
Shutdown complete. (kafka.server.KafkaApis) +[2021-05-10 04:48:06,834] INFO [ExpirationReaper-0-topic]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,902] INFO [ExpirationReaper-0-topic]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,902] INFO [ExpirationReaper-0-topic]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:06,904] INFO [TransactionCoordinator id=0] Shutting down. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:48:06,904] INFO [ProducerId Manager 0]: Shutdown complete: last producerId assigned 1000 (kafka.coordinator.transaction.ProducerIdManager) +[2021-05-10 04:48:06,905] INFO [Transaction State Manager 0]: Shutdown complete (kafka.coordinator.transaction.TransactionStateManager) +[2021-05-10 04:48:06,905] INFO [Transaction Marker Channel Manager 0]: Shutting down (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:48:06,906] INFO [Transaction Marker Channel Manager 0]: Stopped (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:48:06,906] INFO [Transaction Marker Channel Manager 0]: Shutdown completed (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2021-05-10 04:48:06,907] INFO [TransactionCoordinator id=0] Shutdown complete. (kafka.coordinator.transaction.TransactionCoordinator) +[2021-05-10 04:48:06,907] INFO [GroupCoordinator 0]: Shutting down. 
(kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:48:06,908] INFO [ExpirationReaper-0-Heartbeat]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,034] INFO [ExpirationReaper-0-Heartbeat]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,034] INFO [ExpirationReaper-0-Heartbeat]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,035] INFO [ExpirationReaper-0-Rebalance]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,185] INFO [ExpirationReaper-0-Rebalance]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,185] INFO [ExpirationReaper-0-Rebalance]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,186] INFO [GroupCoordinator 0]: Shutdown complete. (kafka.coordinator.group.GroupCoordinator) +[2021-05-10 04:48:07,186] INFO [ReplicaManager broker=0] Shutting down (kafka.server.ReplicaManager) +[2021-05-10 04:48:07,187] INFO [LogDirFailureHandler]: Shutting down (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:48:07,187] INFO [LogDirFailureHandler]: Stopped (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:48:07,187] INFO [LogDirFailureHandler]: Shutdown completed (kafka.server.ReplicaManager$LogDirFailureHandler) +[2021-05-10 04:48:07,187] INFO [ReplicaFetcherManager on broker 0] shutting down (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:48:07,188] INFO [ReplicaFetcherManager on broker 0] shutdown completed (kafka.server.ReplicaFetcherManager) +[2021-05-10 04:48:07,188] INFO [ReplicaAlterLogDirsManager on broker 0] shutting down (kafka.server.ReplicaAlterLogDirsManager) +[2021-05-10 04:48:07,188] INFO [ReplicaAlterLogDirsManager on broker 0] shutdown completed (kafka.server.ReplicaAlterLogDirsManager) +[2021-05-10 
04:48:07,188] INFO [ExpirationReaper-0-Fetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,306] INFO [ExpirationReaper-0-Fetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,306] INFO [ExpirationReaper-0-Fetch]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,307] INFO [ExpirationReaper-0-Produce]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,433] INFO [ExpirationReaper-0-Produce]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,433] INFO [ExpirationReaper-0-Produce]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,433] INFO [ExpirationReaper-0-DeleteRecords]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,633] INFO [ExpirationReaper-0-DeleteRecords]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,634] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,634] INFO [ExpirationReaper-0-ElectLeader]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,704] INFO [ExpirationReaper-0-ElectLeader]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,704] INFO [ExpirationReaper-0-ElectLeader]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2021-05-10 04:48:07,719] INFO [ReplicaManager broker=0] Shut down completely (kafka.server.ReplicaManager) +[2021-05-10 04:48:07,719] INFO [broker-0-to-controller-send-thread]: Shutting down (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:48:07,719] INFO [broker-0-to-controller-send-thread]: 
Stopped (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:48:07,720] INFO [broker-0-to-controller-send-thread]: Shutdown completed (kafka.server.BrokerToControllerRequestThread) +[2021-05-10 04:48:07,721] INFO Broker to controller channel manager for alterIsrChannel shutdown (kafka.server.BrokerToControllerChannelManagerImpl) +[2021-05-10 04:48:07,749] INFO Shutting down. (kafka.log.LogManager) +[2021-05-10 04:48:07,781] INFO [ProducerStateManager partition=my-topic-0] Writing producer snapshot at offset 22 (kafka.log.ProducerStateManager) +[2021-05-10 04:48:07,808] INFO [ProducerStateManager partition=__consumer_offsets-12] Writing producer snapshot at offset 164 (kafka.log.ProducerStateManager) +[2021-05-10 04:48:07,851] INFO Shutdown complete. (kafka.log.LogManager) +[2021-05-10 04:48:07,859] INFO [feature-zk-node-event-process-thread]: Shutting down (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:48:07,859] INFO [feature-zk-node-event-process-thread]: Stopped (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:48:07,859] INFO [feature-zk-node-event-process-thread]: Shutdown completed (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) +[2021-05-10 04:48:07,876] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:48:08,000] INFO Session: 0x100018ebd740000 closed (org.apache.zookeeper.ZooKeeper) +[2021-05-10 04:48:08,000] INFO EventThread shut down for session: 0x100018ebd740000 (org.apache.zookeeper.ClientCnxn) +[2021-05-10 04:48:08,001] INFO [ZooKeeperClient Kafka server] Closed. 
(kafka.zookeeper.ZooKeeperClient) +[2021-05-10 04:48:08,001] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:08,121] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:08,121] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:08,121] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:09,121] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:09,122] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:09,122] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:10,122] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:10,122] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:10,122] INFO [ThrottledChannelReaper-ControllerMutation]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:11,152] INFO [ThrottledChannelReaper-ControllerMutation]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:11,153] INFO [ThrottledChannelReaper-ControllerMutation]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) +[2021-05-10 04:48:11,153] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutting down socket server (kafka.network.SocketServer) +[2021-05-10 04:48:11,180] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutdown completed (kafka.network.SocketServer) +[2021-05-10 04:48:11,180] INFO 
Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:48:11,180] INFO Closing reporter org.apache.kafka.common.metrics.JmxReporter (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:48:11,180] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics) +[2021-05-10 04:48:11,181] INFO Broker and topic stats closed (kafka.server.BrokerTopicStats) +[2021-05-10 04:48:11,182] INFO App info kafka.server for 0 unregistered (org.apache.kafka.common.utils.AppInfoParser) +[2021-05-10 04:48:11,182] INFO [KafkaServer id=0] shut down completed (kafka.server.KafkaServer) diff --git a/kafka/kafka_2.13-2.8.0/logs/state-change.log b/kafka/kafka_2.13-2.8.0/logs/state-change.log new file mode 100644 index 0000000..e3323be --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/state-change.log @@ -0,0 +1,245 @@ +[2021-05-10 04:16:55,648] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 0 partitions (state.change.logger) +[2021-05-10 04:18:51,287] INFO [Controller id=0 epoch=1] Changed partition my-topic-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:18:51,287] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:18:51,294] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:18:51,320] INFO [Controller id=0 epoch=1] Changed partition my-topic-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:18:51,322] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 1 become-leader and 0 become-follower partitions (state.change.logger) +[2021-05-10 04:18:51,325] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 1 partitions 
(state.change.logger) +[2021-05-10 04:18:51,326] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:18:51,330] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 for 1 partitions (state.change.logger) +[2021-05-10 04:18:51,346] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 1 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger) +[2021-05-10 04:18:51,417] INFO [Broker id=0] Leader my-topic-0 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:18:51,436] INFO [Broker id=0] Finished LeaderAndIsr request in 107ms correlationId 1 from controller 0 for 1 partitions (state.change.logger) +[2021-05-10 04:18:51,469] INFO [Broker id=0] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger) +[2021-05-10 04:22:18,012] INFO [Controller id=0 epoch=1] Changed partition json-topic-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:22:18,012] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:22:18,012] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:22:18,026] INFO [Controller id=0 epoch=1] Changed partition json-topic-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:22:18,026] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 1 become-leader and 0 become-follower partitions 
(state.change.logger) +[2021-05-10 04:22:18,027] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 1 partitions (state.change.logger) +[2021-05-10 04:22:18,027] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:22:18,028] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 3 from controller 0 for 1 partitions (state.change.logger) +[2021-05-10 04:22:18,029] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 3 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger) +[2021-05-10 04:22:18,036] INFO [Broker id=0] Leader json-topic-0 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:22:18,039] INFO [Broker id=0] Finished LeaderAndIsr request in 11ms correlationId 3 from controller 0 for 1 partitions (state.change.logger) +[2021-05-10 04:22:18,041] INFO [Broker id=0] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-30 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-35 state from NonExistentPartition to 
NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-9 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 
04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-28 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-42 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition 
__consumer_offsets-3 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-18 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 state from NonExistentPartition to NewPartition 
with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,477] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 
04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-34 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2021-05-10 04:28:57,478] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:28:57,479] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-30 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 
epoch=1] Changed partition __consumer_offsets-35 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, 
isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-9 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition 
__consumer_offsets-28 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-42 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-3 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-18 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) 
(state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 from NewPartition to 
OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,592] INFO 
[Controller id=0 epoch=1] Changed partition __consumer_offsets-34 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,593] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,593] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,593] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isr=List(0), zkVersion=0) (state.change.logger) +[2021-05-10 04:28:57,593] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 50 become-leader and 0 become-follower partitions (state.change.logger) +[2021-05-10 04:28:57,593] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 50 partitions (state.change.logger) +[2021-05-10 04:28:57,594] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:28:57,595] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 5 from controller 0 for 50 partitions (state.change.logger) +[2021-05-10 04:28:57,622] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 5 from controller 0 epoch 1 as part of the become-leader transition for 50 partitions (state.change.logger) +[2021-05-10 04:28:57,630] INFO [Broker id=0] Leader __consumer_offsets-3 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:57,657] INFO [Broker id=0] Leader __consumer_offsets-18 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,667] INFO [Broker id=0] Leader __consumer_offsets-41 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,674] INFO [Broker id=0] Leader __consumer_offsets-10 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,681] INFO [Broker id=0] Leader __consumer_offsets-33 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,709] INFO [Broker id=0] Leader __consumer_offsets-48 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,721] INFO [Broker id=0] Leader __consumer_offsets-19 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,736] INFO [Broker id=0] Leader __consumer_offsets-34 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,752] INFO [Broker id=0] Leader __consumer_offsets-4 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:57,760] INFO [Broker id=0] Leader __consumer_offsets-11 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,781] INFO [Broker id=0] Leader __consumer_offsets-26 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,795] INFO [Broker id=0] Leader __consumer_offsets-49 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,815] INFO [Broker id=0] Leader __consumer_offsets-39 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,825] INFO [Broker id=0] Leader __consumer_offsets-9 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,839] INFO [Broker id=0] Leader __consumer_offsets-24 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,848] INFO [Broker id=0] Leader __consumer_offsets-31 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,855] INFO [Broker id=0] Leader __consumer_offsets-46 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:57,879] INFO [Broker id=0] Leader __consumer_offsets-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,890] INFO [Broker id=0] Leader __consumer_offsets-16 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,903] INFO [Broker id=0] Leader __consumer_offsets-2 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,920] INFO [Broker id=0] Leader __consumer_offsets-25 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,937] INFO [Broker id=0] Leader __consumer_offsets-40 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,952] INFO [Broker id=0] Leader __consumer_offsets-47 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,979] INFO [Broker id=0] Leader __consumer_offsets-17 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:57,989] INFO [Broker id=0] Leader __consumer_offsets-32 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:58,006] INFO [Broker id=0] Leader __consumer_offsets-37 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,025] INFO [Broker id=0] Leader __consumer_offsets-7 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,037] INFO [Broker id=0] Leader __consumer_offsets-22 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,053] INFO [Broker id=0] Leader __consumer_offsets-29 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,061] INFO [Broker id=0] Leader __consumer_offsets-44 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,071] INFO [Broker id=0] Leader __consumer_offsets-14 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,091] INFO [Broker id=0] Leader __consumer_offsets-23 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,105] INFO [Broker id=0] Leader __consumer_offsets-38 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:58,116] INFO [Broker id=0] Leader __consumer_offsets-8 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,121] INFO [Broker id=0] Leader __consumer_offsets-45 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,143] INFO [Broker id=0] Leader __consumer_offsets-15 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,152] INFO [Broker id=0] Leader __consumer_offsets-30 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,166] INFO [Broker id=0] Leader __consumer_offsets-0 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,178] INFO [Broker id=0] Leader __consumer_offsets-35 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,198] INFO [Broker id=0] Leader __consumer_offsets-5 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,217] INFO [Broker id=0] Leader __consumer_offsets-20 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:58,235] INFO [Broker id=0] Leader __consumer_offsets-27 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,253] INFO [Broker id=0] Leader __consumer_offsets-42 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,269] INFO [Broker id=0] Leader __consumer_offsets-12 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,274] INFO [Broker id=0] Leader __consumer_offsets-21 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,285] INFO [Broker id=0] Leader __consumer_offsets-36 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,293] INFO [Broker id=0] Leader __consumer_offsets-6 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,312] INFO [Broker id=0] Leader __consumer_offsets-43 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,317] INFO [Broker id=0] Leader __consumer_offsets-13 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:28:58,322] INFO [Broker id=0] Leader __consumer_offsets-28 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:28:58,343] INFO [Broker id=0] Finished LeaderAndIsr request in 748ms correlationId 5 from controller 0 for 50 partitions (state.change.logger) +[2021-05-10 04:28:58,352] INFO [Broker id=0] Add 50 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 6 (state.change.logger) +[2021-05-10 04:47:24,436] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2021-05-10 04:47:42,031] INFO [Controller id=0 epoch=2] Sending UpdateMetadata request to brokers HashSet(0) for 0 partitions (state.change.logger) +[2021-05-10 04:47:42,154] INFO [Controller id=0 epoch=2] Sending LeaderAndIsr request to broker 0 with 52 become-leader and 0 become-follower partitions (state.change.logger) +[2021-05-10 04:47:42,158] INFO [Controller id=0 epoch=2] Sending UpdateMetadata request to brokers HashSet(0) for 52 partitions (state.change.logger) +[2021-05-10 04:47:42,383] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 for 52 partitions (state.change.logger) +[2021-05-10 04:47:42,480] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 1 from controller 0 epoch 2 as part of the become-leader transition for 52 partitions (state.change.logger) +[2021-05-10 04:47:42,497] INFO [Broker id=0] Leader __consumer_offsets-3 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,515] INFO [Broker id=0] Leader __consumer_offsets-18 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,523] INFO [Broker id=0] Leader __consumer_offsets-41 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,525] INFO [Broker id=0] Leader __consumer_offsets-10 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,533] INFO [Broker id=0] Leader __consumer_offsets-33 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,536] INFO [Broker id=0] Leader __consumer_offsets-48 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,543] INFO [Broker id=0] Leader __consumer_offsets-19 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,546] INFO [Broker id=0] Leader __consumer_offsets-34 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,549] INFO [Broker id=0] Leader __consumer_offsets-4 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,552] INFO [Broker id=0] Leader __consumer_offsets-11 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,556] INFO [Broker id=0] Leader __consumer_offsets-26 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,562] INFO [Broker id=0] Leader __consumer_offsets-49 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,567] INFO [Broker id=0] Leader __consumer_offsets-39 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,570] INFO [Broker id=0] Leader __consumer_offsets-9 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,578] INFO [Broker id=0] Leader __consumer_offsets-24 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,581] INFO [Broker id=0] Leader __consumer_offsets-31 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,586] INFO [Broker id=0] Leader __consumer_offsets-46 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,589] INFO [Broker id=0] Leader __consumer_offsets-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,592] INFO [Broker id=0] Leader __consumer_offsets-16 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,598] INFO [Broker id=0] Leader __consumer_offsets-2 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,603] INFO [Broker id=0] Leader __consumer_offsets-25 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,607] INFO [Broker id=0] Leader __consumer_offsets-40 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,611] INFO [Broker id=0] Leader __consumer_offsets-47 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,613] INFO [Broker id=0] Leader __consumer_offsets-17 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,615] INFO [Broker id=0] Leader json-topic-0 starts at leader epoch 0 from offset 1 with high watermark 1 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,616] INFO [Broker id=0] Leader __consumer_offsets-32 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,633] INFO [Broker id=0] Leader __consumer_offsets-37 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,638] INFO [Broker id=0] Leader __consumer_offsets-7 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,644] INFO [Broker id=0] Leader __consumer_offsets-22 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,660] INFO [Broker id=0] Leader __consumer_offsets-29 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,663] INFO [Broker id=0] Leader __consumer_offsets-44 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,667] INFO [Broker id=0] Leader __consumer_offsets-14 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,669] INFO [Broker id=0] Leader __consumer_offsets-23 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,672] INFO [Broker id=0] Leader __consumer_offsets-38 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,674] INFO [Broker id=0] Leader __consumer_offsets-8 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,694] INFO [Broker id=0] Leader __consumer_offsets-45 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,698] INFO [Broker id=0] Leader __consumer_offsets-15 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,703] INFO [Broker id=0] Leader __consumer_offsets-30 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,706] INFO [Broker id=0] Leader __consumer_offsets-0 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,708] INFO [Broker id=0] Leader __consumer_offsets-35 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,711] INFO [Broker id=0] Leader __consumer_offsets-5 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,714] INFO [Broker id=0] Leader __consumer_offsets-20 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,717] INFO [Broker id=0] Leader __consumer_offsets-27 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,722] INFO [Broker id=0] Leader __consumer_offsets-42 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,727] INFO [Broker id=0] Leader __consumer_offsets-12 starts at leader epoch 0 from offset 161 with high watermark 161 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,728] INFO [Broker id=0] Leader __consumer_offsets-21 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,732] INFO [Broker id=0] Leader __consumer_offsets-36 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,735] INFO [Broker id=0] Leader __consumer_offsets-6 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,738] INFO [Broker id=0] Leader __consumer_offsets-43 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. 
(state.change.logger) +[2021-05-10 04:47:42,743] INFO [Broker id=0] Leader __consumer_offsets-13 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,746] INFO [Broker id=0] Leader __consumer_offsets-28 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,750] INFO [Broker id=0] Leader my-topic-0 starts at leader epoch 0 from offset 20 with high watermark 20 ISR [0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger) +[2021-05-10 04:47:42,818] INFO [Broker id=0] Finished LeaderAndIsr request in 418ms correlationId 1 from controller 0 for 52 partitions (state.change.logger) +[2021-05-10 04:47:42,837] INFO [Broker id=0] Add 52 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 2 with correlation id 2 (state.change.logger) +[2021-05-10 04:48:06,613] INFO [Controller id=0 epoch=2] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) diff --git a/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log new file mode 100644 index 0000000..c699d78 --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log @@ -0,0 +1,23 @@ +[2021-05-10T04:47:38.239-0400][gc,heap] Heap region size: 1M +[2021-05-10T04:47:38.242-0400][gc ] Using G1 +[2021-05-10T04:47:38.242-0400][gc,heap,coops] Heap address: 0x00000000e0000000, size: 512 MB, Compressed Oops mode: 32-bit +[2021-05-10T04:47:38.242-0400][gc,cds ] Mark closed archive regions in map: [0x00000000fff00000, 0x00000000fff69ff8] +[2021-05-10T04:47:38.242-0400][gc,cds ] Mark open archive regions in map: [0x00000000ffe00000, 0x00000000ffe33ff8] +[2021-05-10T04:47:39.625-0400][gc,start ] GC(0) Pause 
Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:47:39.625-0400][gc,task ] GC(0) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:39.632-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:47:39.632-0400][gc,phases ] GC(0) Evacuate Collection Set: 7.0ms +[2021-05-10T04:47:39.632-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.3ms +[2021-05-10T04:47:39.632-0400][gc,phases ] GC(0) Other: 0.3ms +[2021-05-10T04:47:39.632-0400][gc,heap ] GC(0) Eden regions: 25->0(21) +[2021-05-10T04:47:39.632-0400][gc,heap ] GC(0) Survivor regions: 0->4(4) +[2021-05-10T04:47:39.632-0400][gc,heap ] GC(0) Old regions: 2->4 +[2021-05-10T04:47:39.632-0400][gc,heap ] GC(0) Humongous regions: 0->0 +[2021-05-10T04:47:39.632-0400][gc,metaspace ] GC(0) Metaspace: 7589K->7589K(1056768K) +[2021-05-10T04:47:39.632-0400][gc ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 25M->5M(512M) 7.645ms +[2021-05-10T04:47:39.632-0400][gc,cpu ] GC(0) User=0.01s Sys=0.01s Real=0.01s +[2021-05-10T04:48:12.662-0400][gc,heap,exit ] Heap +[2021-05-10T04:48:12.662-0400][gc,heap,exit ] garbage-first heap total 524288K, used 9173K [0x00000000e0000000, 0x0000000100000000) +[2021-05-10T04:48:12.662-0400][gc,heap,exit ] region size 1024K, 8 young (8192K), 4 survivors (4096K) +[2021-05-10T04:48:12.662-0400][gc,heap,exit ] Metaspace used 7887K, capacity 8236K, committed 8448K, reserved 1056768K +[2021-05-10T04:48:12.662-0400][gc,heap,exit ] class space used 885K, capacity 1004K, committed 1024K, reserved 1048576K diff --git a/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.0 b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.0 new file mode 100644 index 0000000..b17463a --- /dev/null +++ b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.0 @@ -0,0 +1,23 @@ +[2021-05-10T04:16:22.320-0400][gc,heap] Heap region size: 1M +[2021-05-10T04:16:22.323-0400][gc ] Using G1 +[2021-05-10T04:16:22.323-0400][gc,heap,coops] Heap address: 0x00000000e0000000, size: 512 MB, Compressed Oops mode: 32-bit 
+[2021-05-10T04:16:22.323-0400][gc,cds ] Mark closed archive regions in map: [0x00000000fff00000, 0x00000000fff69ff8] +[2021-05-10T04:16:22.323-0400][gc,cds ] Mark open archive regions in map: [0x00000000ffe00000, 0x00000000ffe33ff8] +[2021-05-10T04:16:53.755-0400][gc,start ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:16:53.755-0400][gc,task ] GC(0) Using 2 workers of 2 for evacuation +[2021-05-10T04:16:53.764-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:16:53.764-0400][gc,phases ] GC(0) Evacuate Collection Set: 7.9ms +[2021-05-10T04:16:53.764-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.6ms +[2021-05-10T04:16:53.764-0400][gc,phases ] GC(0) Other: 0.2ms +[2021-05-10T04:16:53.764-0400][gc,heap ] GC(0) Eden regions: 25->0(21) +[2021-05-10T04:16:53.764-0400][gc,heap ] GC(0) Survivor regions: 0->4(4) +[2021-05-10T04:16:53.764-0400][gc,heap ] GC(0) Old regions: 2->4 +[2021-05-10T04:16:53.764-0400][gc,heap ] GC(0) Humongous regions: 0->0 +[2021-05-10T04:16:53.764-0400][gc,metaspace ] GC(0) Metaspace: 7539K->7539K(1056768K) +[2021-05-10T04:16:53.764-0400][gc ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 25M->5M(512M) 8.820ms +[2021-05-10T04:16:53.764-0400][gc,cpu ] GC(0) User=0.01s Sys=0.00s Real=0.01s +[2021-05-10T04:46:45.790-0400][gc,heap,exit ] Heap +[2021-05-10T04:46:45.790-0400][gc,heap,exit ] garbage-first heap total 524288K, used 12197K [0x00000000e0000000, 0x0000000100000000) +[2021-05-10T04:46:45.790-0400][gc,heap,exit ] region size 1024K, 11 young (11264K), 4 survivors (4096K) +[2021-05-10T04:46:45.790-0400][gc,heap,exit ] Metaspace used 8112K, capacity 8432K, committed 8704K, reserved 1056768K +[2021-05-10T04:46:45.790-0400][gc,heap,exit ] class space used 886K, capacity 1004K, committed 1024K, reserved 1048576K diff --git a/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.1 b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.1 new file mode 100644 index 0000000..19cc175 --- /dev/null +++ 
b/kafka/kafka_2.13-2.8.0/logs/zookeeper-gc.log.1 @@ -0,0 +1,24 @@ +[2021-05-10T04:47:23.725-0400][gc,heap] Heap region size: 1M +[2021-05-10T04:47:23.728-0400][gc ] Using G1 +[2021-05-10T04:47:23.728-0400][gc,heap,coops] Heap address: 0x00000000e0000000, size: 512 MB, Compressed Oops mode: 32-bit +[2021-05-10T04:47:23.728-0400][gc,cds ] Mark closed archive regions in map: [0x00000000fff00000, 0x00000000fff69ff8] +[2021-05-10T04:47:23.728-0400][gc,cds ] Mark open archive regions in map: [0x00000000ffe00000, 0x00000000ffe33ff8] +[2021-05-10T04:47:24.376-0400][gc,start ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) +[2021-05-10T04:47:24.376-0400][gc,task ] GC(0) Using 2 workers of 2 for evacuation +[2021-05-10T04:47:24.398-0400][gc,mmu ] GC(0) MMU target violated: 21.0ms (20.0ms/21.0ms) +[2021-05-10T04:47:24.398-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.0ms +[2021-05-10T04:47:24.398-0400][gc,phases ] GC(0) Evacuate Collection Set: 21.3ms +[2021-05-10T04:47:24.398-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.3ms +[2021-05-10T04:47:24.398-0400][gc,phases ] GC(0) Other: 0.3ms +[2021-05-10T04:47:24.398-0400][gc,heap ] GC(0) Eden regions: 25->0(21) +[2021-05-10T04:47:24.398-0400][gc,heap ] GC(0) Survivor regions: 0->4(4) +[2021-05-10T04:47:24.398-0400][gc,heap ] GC(0) Old regions: 2->4 +[2021-05-10T04:47:24.398-0400][gc,heap ] GC(0) Humongous regions: 0->0 +[2021-05-10T04:47:24.398-0400][gc,metaspace ] GC(0) Metaspace: 7436K->7436K(1056768K) +[2021-05-10T04:47:24.398-0400][gc ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 25M->6M(512M) 21.884ms +[2021-05-10T04:47:24.398-0400][gc,cpu ] GC(0) User=0.01s Sys=0.00s Real=0.02s +[2021-05-10T04:47:31.237-0400][gc,heap,exit ] Heap +[2021-05-10T04:47:31.237-0400][gc,heap,exit ] garbage-first heap total 524288K, used 8445K [0x00000000e0000000, 0x0000000100000000) +[2021-05-10T04:47:31.237-0400][gc,heap,exit ] region size 1024K, 7 young (7168K), 4 survivors (4096K) 
+[2021-05-10T04:47:31.237-0400][gc,heap,exit ] Metaspace used 7544K, capacity 7855K, committed 7936K, reserved 1056768K +[2021-05-10T04:47:31.237-0400][gc,heap,exit ] class space used 863K, capacity 1004K, committed 1024K, reserved 1048576K diff --git a/kafka/kafka_2.13-2.8.0/site-docs/kafka_2.13-2.8.0-site-docs.tgz b/kafka/kafka_2.13-2.8.0/site-docs/kafka_2.13-2.8.0-site-docs.tgz new file mode 100644 index 0000000..0d17c74 Binary files /dev/null and b/kafka/kafka_2.13-2.8.0/site-docs/kafka_2.13-2.8.0-site-docs.tgz differ diff --git a/kafka/pub.py b/kafka/pub.py new file mode 100644 index 0000000..12d3dc2 --- /dev/null +++ b/kafka/pub.py @@ -0,0 +1,12 @@ +from time import sleep +from json import dumps +from kafka import KafkaProducer + +producer = KafkaProducer(bootstrap_servers=['localhost:9092'], + value_serializer=lambda x: + dumps(x).encode('utf-8')) + +for e in range(10): + data = {'number' : e} + producer.send('my-topic', value=data) + sleep(5) diff --git a/kafka/sub.py b/kafka/sub.py new file mode 100644 index 0000000..f61ab4d --- /dev/null +++ b/kafka/sub.py @@ -0,0 +1,16 @@ +from kafka import KafkaConsumer +from json import loads + +consumer = KafkaConsumer( + 'my-topic', + bootstrap_servers=['localhost:9092'], + auto_offset_reset='earliest', + enable_auto_commit=True, + group_id='my-group', + value_deserializer=lambda x: loads(x.decode('utf-8'))) + + +for message in consumer: + message = message.value + print('{} received'.format(message)) +