I hereby claim:
- I am rafaelfelix on github.
- I am rafaelfc (https://keybase.io/rafaelfc) on keybase.
- I have a public key ASCGxsBdB_ZnmKH-lhFUuCs_geoXR236mbQElQs78L7LhQo
To claim this, I am signing this object:
version: '2'
services:
  kibana:
    image: kibana
    links:
      - elasticsearch
    environment:
      - ELASTICSEARCH_URL=http://elk_elasticsearch_1:9200
  elasticsearch:
    image: elasticsearch:2.2
I hereby claim:
To claim this, I am signing this object:
import boto3
import os


class AWSLambdaConfig:
    """Read-only, dict-like view over environment variables.

    Falls back to AWS SSM Parameter Store when a variable is not set
    in the environment.

    Inspiration:
    https://aws.amazon.com/blogs/compute/sharing-secrets-with-aws-lambda-using-aws-systems-manager-parameter-store/
    """

    def __init__(self, path):
        """Create a config view.

        Args:
            path: SSM path prefix under which fallback parameters live.
        """
        self._ssm_path = path
        # Create the SSM client once and reuse it for every lookup.
        self._ssm_client = boto3.client('ssm')
{
  "StartAt": "Concatenate Strings",
  "States": {
    "Concatenate Strings": {
      "Type": "Task",
      "Resource": "arn:aws:lambda:region:account:function:concatenate_strings",
      "ResultPath": "$.preparedInput",
      "Parameters": {
        "market.$": "$.market",
        "model.$": "$.model",
def lambda_handler(event, context):
    """AWS Lambda entry point: build a slash-separated S3-style key prefix.

    Args:
        event: dict with required keys "bucket", "model" and "market".
        context: Lambda context object (unused).

    Returns:
        str: "<bucket>/<model>/<market>".

    Raises:
        KeyError: if any required key is missing from *event*.
    """
    return f'{event["bucket"]}/{event["model"]}/{event["market"]}'
{
  "StartAt": "Step1",
  "States": {
    "Step1": {
      "Type": "Task",
      "Resource": "arn:aws:lambda:region:account:function:step1",
      "ResultPath": "$.result",
      "Next": "Step2"
    },
    "Step2": {
#!/bin/bash
# Configure Spark for client mode inside a Kubernetes pod by editing
# spark-defaults.conf in place.

# Point spark.master at the in-cluster Kubernetes API server
# (host/port come from the standard in-pod service environment variables).
sed -i "s/#spark.master.*/spark.master\ k8s\:\/\/https\:\/\/$KUBERNETES_SERVICE_HOST\:$KUBERNETES_PORT_443_TCP_PORT/g" "$SPARK_HOME/conf/spark-defaults.conf"

# Name the driver pod after this pod so executor pods are garbage-collected
# with it (https://spark.apache.org/docs/latest/running-on-kubernetes.html#client-mode-executor-pod-garbage-collection)
sed -i "s/#spark.kubernetes.driver.pod.name.*/spark.kubernetes.driver.pod.name\ $HOSTNAME/g" "$SPARK_HOME/conf/spark-defaults.conf"

# spark.driver.host must resolve to this pod: build its FQDN from the pod's
# hostname and the (headless) service DNS name.
# NOTE(review): "{{ .Release.Namespace }}" is a Helm template placeholder —
# this script is expected to be rendered by Helm before it runs; confirm.
THIS_POD_FQDN=$HOSTNAME.servicename.{{ .Release.Namespace }}.svc.cluster.local
rules:
  - pattern: Hadoop<service=HBase, name=RegionServer, sub=Regions><>Namespace_([^\W_]+)_table_([^\W_]+)_region_([^\W_]+)_metric_(\w+)
    name: HBase_RegionServer_Regions_metric_$4
    labels:
      namespace: "$1"
      table: "$2"
      region: "$3"
  - pattern: Hadoop<service=HBase, name=RegionServer, sub=(TableLatencies|Tables)><>Namespace_([^\W_]+)_table_([^\W_]+)_metric_(\w+)
    name: HBase_RegionServer_$1_metric_$4
    labels:
#!/bin/bash
# Download and unpack an Apache Spark binary distribution into ./spark,
# then export SPARK_HOME pointing at it.
# NOTE: the export is only visible to the caller if this script is sourced.
MIRROR=archive.apache.org
SPARK_VERSION=2.4.4
SPARK_VERSION_SUFFIX=-bin-hadoop2.7

# Fetch the tarball (follow redirects), unpack, and normalize the directory
# name to plain "spark"; each step only runs if the previous one succeeded.
curl -Lo spark.tgz "https://${MIRROR}/dist/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}${SPARK_VERSION_SUFFIX}.tgz" && \
    tar xvzf spark.tgz && mv "spark-${SPARK_VERSION}${SPARK_VERSION_SUFFIX}" spark

export SPARK_HOME=$(pwd)/spark
#!/bin/bash
# Start a local minikube cluster (VirtualBox driver) with a pinned
# Kubernetes version and enough CPU/RAM for Spark-on-K8s experiments.
K8S_VERSION=v1.15.4
MINIKUBE_VMDRIVER=virtualbox

minikube start --cpus=4 --memory=4000mb --vm-driver=${MINIKUBE_VMDRIVER} --kubernetes-version=${K8S_VERSION}