I hereby claim:
- I am picadoh on github.
- I am picadoh (https://keybase.io/picadoh) on keybase.
- I have a public key whose fingerprint is A00B 4282 CCED A0A3 0809 90F4 7114 7A08 A373 B191
To claim this, I am signing this object:
# Spark Stateful Streaming with Python
# Takes text from an input network socket and prints the accumulated count for each word
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

# define the state update function: fold the counts from the current batch
# into the running total that Spark keeps per word
def updateTotalCount(currentCount, countState):
    if countState is None:
        countState = 0
    return sum(currentCount, countState)
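The function above is meant to be passed to updateStateByKey. A minimal driver wiring it up could look like this sketch; the app name, host, port, batch interval, and checkpoint directory are illustrative assumptions, not part of the original snippet:

sc = SparkContext(appName="StatefulNetworkWordCount")
ssc = StreamingContext(sc, 1)        # 1-second micro-batches (assumed)
ssc.checkpoint("checkpoint")         # stateful operations require checkpointing

lines = ssc.socketTextStream("localhost", 9999)   # hypothetical host/port
totals = lines.flatMap(lambda line: line.split(" ")) \
              .map(lambda word: (word, 1)) \
              .updateStateByKey(updateTotalCount)
totals.pprint()

ssc.start()
ssc.awaitTermination()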
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

import java.util.Properties;
import static java.util.Arrays.asList;
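These imports point at the pre-1.0 Kafka Streams API (KStreamBuilder). A minimal topology that exercises them might look like the following sketch; the application id, broker address, and topic names are assumptions, not taken from the original:

public class StreamsExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-example");   // assumed id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // use wall-clock time instead of the timestamps embedded in records
        props.put(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class);

        KStreamBuilder builder = new KStreamBuilder();
        builder.<String, String>stream("input-topic")                        // assumed topic
               .flatMapValues(line -> asList(line.split(" ")))               // one record per word
               .map((key, word) -> new KeyValue<>(word, word))
               .to("output-topic");                                          // assumed topic

        new KafkaStreams(builder, props).start();
    }
}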
# env
export KAFKA_HOST="my.kafka.hostname"
export KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf"

# create topics
kafka-topics --create --topic securing-kafka --replication-factor 1 --partitions 3 --zookeeper $KAFKA_HOST:2181

# producer acl
kafka-acls --authorizer-properties zookeeper.connect=$KAFKA_HOST:2181 --add --allow-principal User:kafkaclient --producer --topic securing-kafka
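The consumer side needs an equivalent grant; the --consumer convenience option also requires a consumer group, and the group name below is an assumption:

# consumer acl (group name assumed)
kafka-acls --authorizer-properties zookeeper.connect=$KAFKA_HOST:2181 --add --allow-principal User:kafkaclient --consumer --topic securing-kafka --group securing-kafka-group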
#!/bin/bash
# storage functions: an append-only key/value log, where reads resolve
# to the most recently written value for a key

db_set () {
    echo "$1,$2" >> event_store
}

db_get () {
    grep "^$1," event_store | sed -e "s/^$1,//" | tail -n 1
}
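A quick usage sketch (the key and values are illustrative): because db_set only appends, db_get returns the last value written for a key.

db_set 42 '{"city":"San Francisco"}'
db_set 42 '{"city":"Lisbon"}'
db_get 42    # prints {"city":"Lisbon"}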
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>netcoreapp2.0</TargetFramework>
  </PropertyGroup>
  <ItemGroup>
    <PackageReference Include="Confluent.Kafka" Version="0.11.2" />
  </ItemGroup>
</Project>
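A minimal producer against this project file might look like the following sketch; it targets the legacy 0.11.x Confluent.Kafka API pinned above, and the broker address and topic name are placeholders, not from the original:

using System;
using System.Collections.Generic;
using System.Text;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;

class Program
{
    static void Main(string[] args)
    {
        var config = new Dictionary<string, object>
        {
            { "bootstrap.servers", "localhost:9092" } // assumed broker address
        };

        // string-valued messages with no key, using the 0.11.x producer API
        using (var producer = new Producer<Null, string>(
            config, null, new StringSerializer(Encoding.UTF8)))
        {
            var msg = producer.ProduceAsync("test-topic", null, "hello world").Result;
            Console.WriteLine($"Delivered to {msg.TopicPartitionOffset}");
        }
    }
}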
version: '2'
services:
  zookeeper:
    image: wurstmeister/zookeeper:3.4.6
    ports:
      - "2181:2181"
  kafka:
    image: wurstmeister/kafka:0.11.0.1
    ports:
      - "9092:9092"
    depends_on:
      - zookeeper
    environment:
      # assumed values: the wurstmeister image needs a zookeeper connection
      # string and a host name to advertise that is reachable from the host
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_HOST_NAME: localhost
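With the assumed environment above, the stack comes up with standard Compose commands:

docker-compose up -d
docker-compose logs -f kafka    # watch the broker finish starting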
### Cloudwatch Events ###

# Event rule: runs at 8am (UTC) during working days
resource "aws_cloudwatch_event_rule" "start_instances_event_rule" {
  name                = "start_instances_event_rule"
  description         = "Starts stopped EC2 instances"
  schedule_expression = "cron(0 8 ? * MON-FRI *)"
  depends_on          = ["aws_lambda_function.ec2_start_scheduler_lambda"]
}
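The stop side mirrors the rule above; the following sketch assumes a stop Lambda named ec2_stop_scheduler_lambda, which is not in the original excerpt:

# Event rule: runs at 8pm (UTC) during working days
resource "aws_cloudwatch_event_rule" "stop_instances_event_rule" {
  name                = "stop_instances_event_rule"
  description         = "Stops running EC2 instances"
  schedule_expression = "cron(0 20 ? * MON-FRI *)"
  depends_on          = ["aws_lambda_function.ec2_stop_scheduler_lambda"]
}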
const std = @import("std");

const DivideError = error{DivisionByZero};

pub fn main() !void {
    const result = divideOrDefault(100, 0, 0);
    std.log.info("result = {d}", .{result});
}

// returns an error instead of hitting safety-checked illegal behavior
fn divide(numerator: u32, denominator: u32) DivideError!u32 {
    if (denominator == 0) return DivideError.DivisionByZero;
    return numerator / denominator;
}

// unwraps the division result, falling back to a default on error
fn divideOrDefault(numerator: u32, denominator: u32, default: u32) u32 {
    return divide(numerator, denominator) catch |err| switch (err) {
        DivideError.DivisionByZero => default,
    };
}