I hereby claim:
- I am miguno on github.
- I am miguno (https://keybase.io/miguno) on keybase.
- I have a public key whose fingerprint is 0528 C791 1372 04EE C0CE 277A 796E 57B8 6D7C BA34
To claim this, I am signing this object:
package com.twitter.algebird.caliper

import com.google.caliper.{ Param, SimpleBenchmark }
import com.google.common.hash.{ HashFunction, Hashing }

/**
 * Benchmarks the hashing algorithms used by Count-Min sketch for CMS[BigInt].
 *
 * The input values are generated ahead of time to ensure that each trial uses the same input (and that the RNG is not
 * influencing the runtime of the trials).
 */
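The gist is cut off at this point. Below is a minimal sketch of what such a benchmark could look like, assuming Caliper 0.5's SimpleBenchmark conventions (a setUp() hook plus timeXxx(reps) methods) and Guava's murmur3_128 hash function; the class name, parameter values, and input generation are illustrative and not the original benchmark code.

// Illustrative sketch only (not the original gist): hashes pre-generated byte arrays
// with Guava's MurmurHash3 (128 bit) inside a Caliper 0.5 style benchmark.
class CMSHashingSketchBenchmark extends SimpleBenchmark {

  // Number of input values to hash per trial (hypothetical parameter).
  @Param(Array("1000", "10000"))
  val numInputs: Int = 0

  private var inputs: Seq[Array[Byte]] = Seq.empty
  private val murmur3: HashFunction = Hashing.murmur3_128()

  override def setUp(): Unit = {
    // Generate the inputs once, before timing starts, so the RNG does not influence the trials.
    val rng = new scala.util.Random(1)
    inputs = (1 to numInputs).map(_ => BigInt(2048, rng).toByteArray)
  }

  // Caliper times methods named `timeXxx(reps: Int)`.
  def timeMurmur3_128(reps: Int): Long = {
    var dummy = 0L
    var i = 0
    while (i < reps) {
      inputs.foreach(bytes => dummy += murmur3.hashBytes(bytes).asLong())
      i += 1
    }
    dummy
  }

}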
// Computes the Shannon entropy (in bits) of the empirical distribution defined by `counts`,
// i.e. -sum(p_i * log2(p_i)) with p_i = count_i / totalCount.
private def entropy(counts: Vector[Long], totalCount: Double): Double = {
  if (totalCount == 0) {
    return 0
  }
  def log2(x: Double) = scala.math.log(x) / scala.math.log(2)
  counts.filter(_ != 0).map { count =>
    val freq = 1.0 * count / totalCount
    freq * log2(freq)
  }.foldLeft(0.0) { case (agg, i) => agg - i }
}
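As a quick sanity check (not part of the gist): a uniform distribution over two outcomes yields exactly one bit of entropy, and a distribution concentrated on a single outcome yields zero.

// Hypothetical usage, assuming the helper were accessible (it is declared private above):
// entropy(Vector(1L, 1L), totalCount = 2.0)  // == 1.0 bit  (uniform over two outcomes)
// entropy(Vector(4L, 0L), totalCount = 4.0)  // == 0.0 bits (all mass on one outcome)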
#!/usr/bin/env bash
#
# File: kafka-move-leadership.sh
#
# Description
# ===========
#
# Generates a Kafka partition reassignment JSON snippet to STDOUT to move the leadership
# of any replicas away from the provided "source" broker to different, randomly selected
# "target" brokers.  Run this script with `-h` to show detailed usage instructions.
$ for zk in `seq 1 3`; do echo stat | nc zookeeper${zk} 2181 | egrep "^(Zxid|Node)"; done
Zxid: 0x46d1b3ebc | |
Node count: 4557 | |
Zxid: 0x46d1b3ebc | |
Node count: 4557 | |
Zxid: 0x46d1b3ebf | |
Node count: 4557 | |
# All values should be the same, though with ongoing transactions from clients you might see the values
# diverge slightly (e.g. if you wait 1 second in between querying `stat` from zookeeper1 and zookeeper2,
# the Zxid may have advanced in the meantime).
// This KStream contains information such as "alice" -> 13
KStream<String, Long> userClicksStream = builder.stream(..., "user-clicks-topic");

// This KTable contains information such as "alice" -> "europe"
KTable<String, String> userRegionsTable = builder.table(..., "user-regions-topic");

// Compute the number of clicks per region, e.g. "europe" -> 13
//
// The resulting KTable is continuously updated as new data records arrive in the
// input KStream `userClicksStream` and input KTable `userRegionsTable`.
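// NOTE: the gist is truncated here.  What follows is an illustrative sketch only, assuming the
// 0.10.1-era Kafka Streams DSL (KStream#leftJoin/#map/#groupByKey(Serde, Serde) and
// KGroupedStream#reduce(Reducer, String)) plus imports of org.apache.kafka.streams.KeyValue
// and org.apache.kafka.common.serialization.Serdes.  The store name "ClicksPerRegionStore"
// is a placeholder; exact overloads depend on the Kafka Streams version in use.
KTable<String, Long> clicksPerRegion = userClicksStream
    // Enrich each click event with the user's region via a table lookup on the user key;
    // users without a region record are bucketed under "UNKNOWN".
    .leftJoin(userRegionsTable,
        (clicks, region) -> KeyValue.pair(region == null ? "UNKNOWN" : region, clicks))
    // Re-key the stream from user to region so that the subsequent aggregation is per region.
    .map((user, regionAndClicks) -> KeyValue.pair(regionAndClicks.key, regionAndClicks.value))
    // Sum up the clicks per region.
    .groupByKey(Serdes.String(), Serdes.Long())
    .reduce((firstClicks, secondClicks) -> firstClicks + secondClicks, "ClicksPerRegionStore");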
# Key settings to enable client authentication and SSL encryption
bootstrap.servers=kafka.example.com:9093
security.protocol=SSL
ssl.truststore.location=/etc/security/tls/kafka.client.truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/etc/security/tls/kafka.client.keystore.jks
ssl.keystore.password=test1234
ssl.key.password=test1234 |
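For a quick smoke test of such a configuration, the properties above can be saved to a file and passed to Kafka's console clients. The file name and topic below are placeholders, and the available CLI options depend on the Kafka version.

# Hypothetical smoke test: file name and topic are placeholders.
kafka-console-producer.sh --broker-list kafka.example.com:9093 --topic test \
  --producer.config /etc/security/tls/client-ssl.properties

kafka-console-consumer.sh --bootstrap-server kafka.example.com:9093 --topic test \
  --consumer.config /etc/security/tls/client-ssl.properties --from-beginning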
// Code of your Java application that uses the Kafka Streams library
Properties settings = new Properties();
settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "secure-kafka-streams-app");
settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka.example.com:9093");
// ...further non-security related settings may follow here...
settings.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
settings.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/etc/security/tls/kafka.client.truststore.jks");
settings.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "test1234");
settings.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/etc/security/tls/kafka.client.keystore.jks");
settings.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "test1234");
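With the security settings in place, the rest of the application wiring is unchanged. A minimal sketch, assuming a 0.10.x-era API where `KafkaStreams` is constructed from a topology builder and a `StreamsConfig` (newer versions take a `Topology` and a plain `Properties` object), with `builder` standing in for the application's topology:

// Illustrative continuation (not part of the original snippet): build and start the application.
// `builder` is assumed to hold the application's processing topology.
KafkaStreams streams = new KafkaStreams(builder, new StreamsConfig(settings));
streams.start();

// Stop the application gracefully on shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(streams::close));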
# Misconfigured ssl.keystore.password
Exception in thread "main" org.apache.kafka.common.KafkaException: Failed to construct kafka producer
[...snip...]
Caused by: org.apache.kafka.common.KafkaException: org.apache.kafka.common.KafkaException: java.io.IOException: Keystore was tampered with, or password was incorrect
[...snip...]
Caused by: java.security.UnrecoverableKeyException: Password verification failed
# Misconfigured ssl.key.password
Exception in thread "main" org.apache.kafka.common.KafkaException: Failed to construct kafka producer
[...snip...]
Caused by: org.apache.kafka.common.KafkaException: org.apache.kafka.common.KafkaException: java.security.UnrecoverableKeyException: Cannot recover key
[...snip...]
Caused by: org.apache.kafka.common.KafkaException: java.security.UnrecoverableKeyException: Cannot recover key
[...snip...]
Caused by: java.security.UnrecoverableKeyException: Cannot recover key