I hereby claim:
- I am jkordish on github.
- I am jkordish (https://keybase.io/jkordish) on keybase.
- I have a public key whose fingerprint is DA0A 79EC EC04 A015 5C47 1BE8 863C 2392 C6B8 20EF
To claim this, I am signing this object:
I hereby claim:
To claim this, I am signing this object:
# Ansible play: apply kernel sysctl tunables on the local machine.
# NOTE(review): fragment is truncated below (the list continues past view) and
# lines carry ' | |' extraction residue — confirm against the original gist.
- hosts: localhost | |
tasks: | |
- name: update sysctl | |
# 'state=present reload=yes' persists each setting and reloads sysctl for it.
action: sysctl state=present reload=yes {{ item }} | |
with_items: | |
# max open files | |
- name=fs.file-max value=65535 | |
# lets not use swap. | |
- name=vm.swappiness value=0 | |
# enable syn cookies. this can actually cause overhead.
#!/bin/bash -e | |
### | |
# | |
# Using custom DNS in your AWS VPC and tired of it breaking EMR? | |
# Assumes you have a internal DNS server at the network address at .5 and DNS01 | |
# Just an example -- hack away to fit your specific needs. | |
# Upload this fix to an S3 bucket, then add it as your first bootstrap step either via the console or the CLI
# e.g. | |
# --bootstrap-actions Path=s3://<bucket>/emr_fix_dns.sh Path=s3://support.elasticmapreduce/spark/install-spark |
# Report Elasticsearch shards of index "indice" whose file recovery is not yet
# complete: query the _recovery API and print each shard's id, recovery
# percentage, and target host.
# NOTE(review): "indice" looks like a placeholder — substitute your real index
# name, or use '.[]' in the jq filter to cover every index.
# Fix: dropped the stray trailing '|' (extraction residue) that left the shell
# waiting for another pipeline stage.
curl -s localhost:9200/_recovery | jq '.["indice"] | .shards[] | select(.index.files.percent != "100.0%") | { id: .id , percent: .index.files.percent, host: .target.host}'
#!/bin/bash -e | |
# Reassign UNASSIGNED Elasticsearch shards: for every index that has
# unassigned shards (per _cat/shards), explicitly allocate each one to a data
# node through the _cluster/reroute API.
# NOTE(review): fragment is truncated — the reroute JSON body is cut off after
# the "index" field (shard/node fields, loop bodies, and 'done' terminators
# are missing), and lines carry ' | |' extraction residue.
for index in $(curl -sXGET http://localhost:9200/_cat/shards | grep UNASSIGNED | awk '{print $1}' | uniq); do | |
i=1 | |
# Inner loop: one allocate command per unassigned shard of this index.
for shard in $(curl -XGET http://localhost:9200/_cat/shards?index=$index | grep UNASSIGNED | awk '{print $2}'); do | |
echo "$shard on $index going on datanode$i" | |
curl -sXPOST 'localhost:9200/_cluster/reroute' -d '{ | |
"commands" : [ { | |
"allocate" : { | |
"index" : "'$index'", |
#!/bin/bash
# Fix Hive's metastore JDBC URL on Amazon EMR: hive-default.xml ships pointing
# at MySQL on localhost, but the metastore MySQL actually runs on the master
# node, so rewrite localhost -> master hostname.
#
# Upload this script to the S3 bucket you are using for your cluster and add
# the following after the Spark install bootstrap action:
#   --bootstrap-actions Name="Fix Hive",Path="s3://<s3_bucket>/hive-fix.sh"
#
# Options live in 'set' rather than the shebang so they survive being run as
# 'bash hive-fix.sh'.
set -eux

# The namenode runs on the master instance — that hostname is where the
# metastore MySQL listens on 3306.
MASTER=$(hdfs getconf -namenodes)
export MASTER

hive_conf=/home/hadoop/hive/conf/hive-default.xml
if [ -f "$hive_conf" ]; then
  # '|' as the sed delimiter avoids escaping every '/' in the JDBC URL.
  sed -i "s|jdbc:mysql://localhost:3306|jdbc:mysql://$MASTER:3306|g" "$hive_conf"
fi
# Upstart job: bring up the weave overlay network once the Docker daemon has
# started; stop it outside of runlevels 2-5.
# NOTE(review): fragment is truncated — the 'script' stanza body is cut off
# after the master-host branch, and lines carry ' | |' extraction residue.
description "weave" | |
start on started docker | |
stop on runlevel [!2345] | |
#respawn | |
console log | |
script | |
# Only the host whose short name ends in "-mslavepub01" launches the weave
# master; the prefix before the first '-' is the site/cluster name.
if [ "$(hostname -s)" = "$(hostname -s | cut -d- -f1)-mslavepub01" ]; then | |
echo "starting up master weave" |
#cloud-config | |
# CoreOS cloud-config fragment: mask the lvm2 metadata/monitor units so they
# never start on this host.
# NOTE(review): truncated — the body of the format@... unit is cut off below,
# and lines carry ' | |' extraction residue.
coreos: | |
units: | |
- name: lvm2-lvmetad.service | |
mask: true | |
- name: lvm2-lvmetad.socket | |
mask: true | |
- name: lvm2-monitor.service | |
mask: true | |
- name: [email protected] |
# Expose the boot2docker VM's Docker API on the host loopback: stop the VM,
# add a VirtualBox NAT port-forward rule, restart, and point the docker
# client at the forwarded port.
boot2docker down | |
# Rule "docker": forward host 127.0.0.1:2376 -> guest port 2376.
vboxmanage modifyvm "boot2docker-vm" --natpf1 "docker,tcp,127.0.0.1,2376,,2376" | |
boot2docker up | |
# Make the docker CLI in this shell talk to the forwarded port.
export DOCKER_HOST=tcp://127.0.0.1:2376
# Upstart pre-start stanza: size tap-cbs from the machine's free RAM and CPU
# count before the service launches.
# NOTE(review): fragment is truncated — 'end script' is not visible — and
# lines carry ' | |' extraction residue.
pre-start script | |
set -e | |
# Memory budget: MemFree is reported in kB, so $2 * 1024 converts to bytes;
# subtract 1610612736 (1.5 GiB) of headroom, then clamp the result to the
# range [134217728 (128 MiB), 4294967296 (4 GiB)].
MEM=$(awk '/^MemFree/ {free=$2 * 1024 - 1610612736}; END \ | |
{ if ( free > 4294967296 ) print 4294967296; \ | |
else if ( free < 134217728 ) print 134217728; \ | |
else print free }' /proc/meminfo) | |
# Digester count: all processors minus two when more than two are present
# (reserving two cores), otherwise use them all.
CPU=$(awk '/^processor/ { n++ }; END { if ( n > 2) print n-2; else print n+0}' /proc/cpuinfo) | |
# Rewrite the tap-cbs config in place with the computed values.
sed -i "s/^chunkMemQueueBytes.*/chunkMemQueueBytes\ =\ $MEM/" /opt/tap-cbs/config.txt | |
sed -i "s/^nDigesters.*/nDigesters\ =\ $CPU/" /opt/tap-cbs/config.txt |