MirrorMaker 0.9.0.1 -> Kafka 1.0 Cluster woes

# consumer.properties (MirrorMaker consumer, source cluster main-eqiad)
############################# Consumer Basics #############################
# Kafka Consumer group id
group.id=kafka-mirror-main-eqiad_to_jumbo-eqiad
partition.assignment.strategy=roundrobin
# Zookeeper connection string
# comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
zookeeper.connect=conf1001.eqiad.wmnet,conf1002.eqiad.wmnet,conf1003.eqiad.wmnet/kafka/main-eqiad
########################## Additional Properties ##########################
fetch.message.max.bytes=4242868
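The fetch size matches the 4 MiB message.max.bytes configured on the jumbo brokers below, plus slack; the "headroom" reading is my assumption, but the numbers come straight from the configs in this gist:

broker message.max.bytes         = 4 * 1024 * 1024 = 4194304
consumer fetch.message.max.bytes = 4194304 + 48564 = 4242868  (~47 KiB headroom)

The 0.9 consumer cannot make progress past a message larger than its fetch size, so fetch.message.max.bytes must be at least the largest message a broker will return.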

# kafka-mirror-maker invocation
/usr/bin/kafka-mirror-maker \
  --whitelist '.*' \
  --num.streams 2 \
  --offset.commit.interval.ms 5000 \
  --consumer.config /etc/kafka/mirror/main-eqiad_to_jumbo-eqiad/consumer.properties \
  --producer.config /etc/kafka/mirror/main-eqiad_to_jumbo-eqiad/producer.properties
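Since the consumer above commits offsets through ZooKeeper, mirror lag can be inspected with the ZooKeeper flavor of the group tool; a minimal sketch, assuming the same unsuffixed wrapper naming as /usr/bin/kafka-mirror-maker:

# Describe the mirror's consumer group against the source cluster's ZK chroot.
kafka-consumer-groups --zookeeper conf1001.eqiad.wmnet/kafka/main-eqiad \
  --describe --group kafka-mirror-main-eqiad_to_jumbo-eqiad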

# producer.properties (MirrorMaker producer, destination cluster jumbo-eqiad)
############################# Producer Basics #############################
# list of brokers used for bootstrapping
# format: host1:port1,host2:port2 ...
bootstrap.servers=kafka-jumbo1001.eqiad.wmnet:9092,kafka-jumbo1002.eqiad.wmnet:9092,kafka-jumbo1003.eqiad.wmnet:9092,kafka-jumbo1004.eqiad.wmnet:9092,kafka-jumbo1005.eqiad.wmnet:9092,kafka-jumbo1006.eqiad.wmnet:9092
# Logically identifies the application making the request.
client.id=kafka-mirror-main-eqiad_to_jumbo-eqiad-kafka1022-producer
# Required number of acks
acks=all
linger.ms=1000
# specify the compression codec for all data generated: none, gzip, snappy.
# the old config values work as well: 0, 1, 2 for none, gzip, snappy, respectively
compression.type=snappy
########################## Additional Properties ##########################
max.request.size=4242868
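A rough reading of why this cap sits above the broker's: max.request.size is enforced client-side against the uncompressed serialized records, while message.max.bytes on a 1.0 broker applies to the record batch after snappy compression, so a request near the producer cap will usually, but not always, fit under the broker cap:

producer max.request.size = 4242868  (client-side, pre-compression)
broker message.max.bytes  = 4194304  (broker-side, post-compression)

An incompressible payload in that gap would still be rejected broker-side with RecordTooLargeException.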

# server.properties (jumbo-eqiad broker)
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1001
# Always require a static broker id.
broker.id.generation.enable=false
broker.rack=A
listeners=PLAINTEXT://:9092,SSL://:9093
# Specify which version of the inter-broker protocol will be used. This is
# typically bumped after all brokers were upgraded to a new version.
inter.broker.protocol.version=1.0
# Define whether the timestamp in the message is message create time or log append time.
# The value should be either `CreateTime` or `LogAppendTime`
log.message.timestamp.type=LogAppendTime
message.max.bytes=4194304
replica.fetch.max.bytes=4194304
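# replica.fetch.max.bytes is kept equal to message.max.bytes: a follower that
# fetched less than the largest batch a broker accepts could never replicate
# it, and the partition would fall out of ISR. A rough end-to-end check of the
# 4 MiB limit from a shell ("size-test" is a placeholder topic; assumes GNU
# base64 and the unsuffixed tool names used elsewhere in this gist):
#   head -c 3000000 /dev/urandom | base64 -w0 > /tmp/big.line    # ~4,000,000 chars
#   kafka-console-producer --broker-list kafka-jumbo1001.eqiad.wmnet:9092 \
#     --topic size-test --producer-property max.request.size=4242868 < /tmp/big.line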
######################### ACL handling ##################################
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
allow.everyone.if.no.acl.found=true
super.users=User:CN=kafka_jumbo-eqiad_broker
######################### Socket Server Settings ########################
security.inter.broker.protocol=SSL
ssl.keystore.location=XXXX
ssl.keystore.password=XXXX
ssl.key.password=XXXX
ssl.truststore.location=XXXX
ssl.truststore.password=XXXX
ssl.enabled.protocols=TLSv1.2
ssl.cipher.suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
ssl.client.auth=requested
# The number of threads doing disk I/O
num.io.threads=1
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
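# With the SSL listener pinned to TLSv1.2 and a single ECDHE-ECDSA suite, the
# handshake can be sanity-checked from any host; no client cert is needed
# because ssl.client.auth is "requested" rather than "required":
#   openssl s_client -connect kafka-jumbo1001.eqiad.wmnet:9093 -tls1_2 </dev/null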
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/srv/kafka/data
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The default replication factor for automatically created topics.
# Default to the number of brokers in this cluster.
default.replication.factor=3
# Enables topic deletion
delete.topic.enable=true
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state".
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=3
# When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of
# replicas that must acknowledge a write for the write to be considered successful. If this minimum
# cannot be met, then the producer will raise an exception (either NotEnoughReplicas or
# NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to
# enforce greater durability guarantees. A typical scenario would be to create a topic with a
# replication factor of 3, set min.insync.replicas to 2, and produce with acks of "all". This will
# ensure that the producer raises an exception if a majority of replicas do not receive a write.
min.insync.replicas=1
# Enable auto creation of topic on the server. If this is set to true
# then attempts to produce, consume, or fetch metadata for a non-existent
# topic will automatically create it with the default replication factor
# and number of partitions.
auto.create.topics.enable=true
# If this is enabled the controller will automatically try to balance
# leadership for partitions among the brokers by periodically returning
# leadership to the "preferred" replica for each partition if it is available.
auto.leader.rebalance.enable=true
# Number of threads used to replicate messages from leaders. Increasing this
# value can increase the degree of I/O parallelism in the follower broker.
# This is useful to temporarily increase if you have a broker that needs
# to catch up on messages to get back into the ISR.
num.replica.fetchers=12
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy
# can be set to delete segments after a period of time, or after a given size
# has accumulated. A segment will be deleted whenever *either* of these
# criteria are met. Deletion always happens from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# Log retention window in minutes for the offsets topic. If an offset
# commit for a consumer group has not been received in this amount of
# time, Kafka will drop the offset commit and consumers in the group
# will have to start anew. This can be overridden in an offset commit
# request.
offsets.retention.minutes=10080
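# The two retention windows line up (presumably deliberately):
#   log.retention.hours       = 168    -> 168 / 24         = 7 days
#   offsets.retention.minutes = 10080  -> 10080 / 60 / 24  = 7 days
# Committed offsets thus live exactly as long as the data; a mirror group that
# stalls for a full week loses both its position and the unmirrored messages.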
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=conf1001.eqiad.wmnet,conf1002.eqiad.wmnet,conf1003.eqiad.wmnet/kafka/jumbo-eqiad
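The 4194304-byte limit above is only the cluster default; individual topics can carry a max.message.bytes override, which is worth checking when a mirrored message is rejected. A sketch using the 1.0-era ZooKeeper path of kafka-configs ("some-topic" is a placeholder; the unsuffixed tool name assumes the same wrapper convention as kafka-mirror-maker):

# List per-topic overrides (e.g. max.message.bytes) on the jumbo cluster.
kafka-configs --zookeeper conf1001.eqiad.wmnet/kafka/jumbo-eqiad \
  --entity-type topics --entity-name some-topic --describe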