#!/bin/bash

: ${TRACE:=1}
: ${DEBUG:=1}
: ${DEPLOYER_LOCATION:="/var/lib/cloudbreak-deployment"}
: ${AWS_BIN_LOCATION:="/opt/aws/bin"}
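# Error handling: _trap_error is attached to the ERR trap in main(). Bash
# hands it the exit status, the failing line, the caller's line number, the
# failing command, and the function call stack; besides printing a diagnostic,
# it forwards the failure to the CloudFormation wait handle via signal_failure
# so the stack fails fast instead of waiting for the timeout.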
_trap_error () {
    local err=$1
    local line=$2
    local linecallfunc=$3
    local command="$4"
    local funcstack="$5"
    echo -e "\n---"
    echo "ERROR: command '${command}' exited with status: ${err} (line ${line})"
    if [ "${funcstack}" != "::" ]; then
        echo -n " at ${funcstack} "
        if [ "${linecallfunc}" != "" ]; then
            echo -n "(line ${linecallfunc})"
        fi
    else
        echo -n " internal debug info from function ${FUNCNAME} (line $linecallfunc)"
    fi
    echo -e "\n---"
    signal_failure ${err} "command '${command}' exited with status: ${err} (line ${line})"
}
debug() {
    [[ "$DEBUG" ]] && echo "-----> $*" 1>&2
}
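# Strip ANSI color escape sequences and carriage returns so that cbd output
# can be parsed reliably with grep/cut below.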
colorless() {
    sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mK]//g' | tr -d '\015'
}
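# Block until the Docker daemon responds; cbd cannot pull or start containers
# before it is up.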
wait_for_docker() {
    debug "wait for docker ..."
    while ! (docker info &>/dev/null); do echo -n .; sleep 1; done
}
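# Write the cbd Profile from the CloudFormation parameters, install or update
# the cloudbreak-deployer binary if the requested CBD_VERSION differs from the
# installed one, start Cloudbreak, and register the default AWS credential.
# PUBLIC_IP is written in single quotes on purpose: it is re-resolved from the
# EC2 metadata service each time the Profile is sourced, because the public
# hostname changes across instance stop/start cycles.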
start_cloudbreak() {
    cd $DEPLOYER_LOCATION
    rm -f Profile
    echo 'export PUBLIC_IP=$(curl -4fs 169.254.169.254/latest/meta-data/public-hostname)' > Profile
    echo "export COMPOSE_HTTP_TIMEOUT=240" >> Profile
    echo "export UAA_DEFAULT_USER_EMAIL=${USERNAME}" >> Profile
    echo "export UAA_DEFAULT_USER_PW=${ADMIN_PASSWORD}" >> Profile
    echo "export CB_AWS_DEFAULT_CF_TAG=${CLOUDBREAK_ID}" >> Profile
    echo "export ULU_HWX_CLOUD_DEFAULT_CREDENTIAL=aws-access" >> Profile
    echo "export ULU_HWX_CLOUD_DEFAULT_REGION=${REGION}" >> Profile
    echo "export ULU_HWX_CLOUD_DEFAULT_VPC_ID=${VPC_ID}" >> Profile
    echo "export ULU_HWX_CLOUD_DEFAULT_IGW_ID=${IGW_ID}" >> Profile
    echo "export ULU_HWX_CLOUD_DEFAULT_SSH_KEY=${KEYPAIR_NAME}" >> Profile
    echo "export DOCKER_IMAGE_CLOUDBREAK_WEB=hortonworks/cloud-web" >> Profile
    echo "export DOCKER_IMAGE_CLOUDBREAK_AUTH=hortonworks/cloud-auth" >> Profile
    echo "export AWS_ACCOUNT_ID=${ACCOUNT_ID}" >> Profile
    echo "export CLOUDBREAK_TELEMETRY_MAIL_ADDRESS=aws-marketplace@hortonworks.com" >> Profile
    echo "export HWX_CLOUD_TYPE=${HWX_CLOUD_TYPE}" >> Profile
    echo "export HWX_CLOUD_TEMPLATE_VERSION=${CBD_VERSION}" >> Profile
    echo "export AWS_AMI_ID=$(curl -4fs 169.254.169.254/latest/meta-data/ami-id)" >> Profile
    echo "export AWS_INSTANCE_ID=$(curl -4fs 169.254.169.254/latest/meta-data/instance-id)" >> Profile
    if [ "$CBD_VERSION" == "snapshot" ]; then
        echo "export DOCKER_TAG_ULUWATU=latest" >> Profile
        echo "export DOCKER_TAG_SULTANS=latest" >> Profile
    else
        echo "export DOCKER_TAG_ULUWATU=${CBD_VERSION}" >> Profile
        echo "export DOCKER_TAG_SULTANS=${CBD_VERSION}" >> Profile
    fi
    echo "export CB_BLUEPRINT_DEFAULTS='EDW-ETL: Apache Hive 1.2.1, Apache Spark 1.6=hdp-etl-edw;Data Science: Apache Spark 1.6, Zeppelin=hdp25-data-science;25EDW-ETL: Apache Hive 1.2.1, Apache Spark 1.6=hdp25-etl-edw;EDW-ETL: Apache Spark 2.0-preview=hdp25-etl-edw-spark2;EDW-Analytics: Apache Hive 2 LLAP, Apache Zeppelin=hdp25-edw-analytics'" >> Profile
    if [ "${PRODUCT_TELEMETRY}" == "I Have Read and Opt In to SmartSense Telemetry" ]; then
        echo "export CB_SMARTSENSE_CONFIGURE=true" >> Profile
    else
        echo "export CB_SMARTSENSE_CONFIGURE=false" >> Profile
    fi
    CURRENT_VERSION=$(cbd version 2> /dev/null | colorless | grep "local version" | cut -d: -f 2)
    if [ "$CBD_VERSION" != "$(echo $CURRENT_VERSION | cut -d'-' -f 1-2)" ]; then
        debug "Updating cbd to [$CBD_VERSION] from [$CURRENT_VERSION]"
        curl -Ls s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/cloudbreak/cloudbreak-deployer_${CBD_VERSION}_$(uname)_x86_64.tgz | tar -xz -C /bin cbd
        cbd generate
        cbd pull-parallel
    else
        debug "cbd version is [$CURRENT_VERSION], update not needed."
    fi
    debug "Starting Cloudbreak."
    cbd regenerate
    cbd-start-wait
    debug "Cloudbreak started."
    debug "Creating default credential."
    cat > ./cmdfile << EOF
credential create --AWS --name aws-access --roleArn $CREDENTIAL_ROLE_ARN --sshKeyUrl http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key --existingKeyPairName $KEYPAIR_NAME
EOF
    run_shell_on_cmdfile
    debug "Default credential created."
}
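# Retry wrapper around `cbd start-wait`: while the command produces no output,
# retry up to 10 times at 5-second intervals; once the retries are exhausted,
# run it one final time so its real exit status reaches the ERR trap.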
TRIES_LEFT=10
cbd-start-wait() {
    if [[ $TRIES_LEFT -gt 0 ]] && [[ ! $(cbd start-wait) ]]; then
        ((TRIES_LEFT--))
        sleep 5
        cbd-start-wait
    elif [[ $TRIES_LEFT -eq 0 ]]; then
        cbd start-wait
    fi
}
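# Record acceptance of the Terms of Use with the service listening on
# localhost:3001 (presumably the sultans/cloud-auth container started by cbd).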
send_terms_mail() {
    curl -sX POST 'http://127.0.0.1:3001/acceptTerms'
}
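# Lowercase the cluster name and cap it at 39 characters, presumably to
# satisfy Cloudbreak/AWS resource naming constraints.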
trim_cluster_name() {
    export CLUSTER_NAME=$(echo "$CLUSTER_NAME" | cut -c1-39 | tr '[:upper:]' '[:lower:]')
}
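# Map the human-readable HDP_TYPE parameter to an HDP version and default
# blueprint, then drive cloudbreak-shell via ./cmdfile: create master/worker
# templates (volumes sized per instance type by the get-volume-* helpers),
# select the credential and blueprint, create the network and a security group
# that only admits the customer's REMOTE_LOCATION plus this controller
# instance itself, and finally create the stack and cluster, waiting for
# completion.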
start_cluster() {
    AMBARI_VERSION="2.4"
    if [ "${HDP_TYPE}" == "HDP 2.4: Apache Hive 1.2.1, Apache Spark 1.6" ]; then
        HDP_VERSION="2.4"
        BLUEPRINT="EDW-ETL: Apache Hive 1.2.1, Apache Spark 1.6"
    elif [ "${HDP_TYPE}" == "HDP 2.5 (Technical Preview): Apache Hive 1.2.1, Apache Spark 1.6" ]; then
        HDP_VERSION="2.5"
        BLUEPRINT="25EDW-ETL: Apache Hive 1.2.1, Apache Spark 1.6"
    elif [ "${HDP_TYPE}" == "HDP 2.5 (Technical Preview): Apache Hive 2 LLAP, Apache Zeppelin" ]; then
        HDP_VERSION="2.5"
        BLUEPRINT="EDW-Analytics: Apache Hive 2 LLAP, Apache Zeppelin"
    elif [ "${HDP_TYPE}" == "HDP 2.5 (Technical Preview): Apache Spark 1.6, Apache Zeppelin" ]; then
        HDP_VERSION="2.5"
        BLUEPRINT="Data Science: Apache Spark 1.6, Zeppelin"
    elif [ "${HDP_TYPE}" == "HDP 2.5 (Technical Preview): Apache Spark 2.0-preview" ]; then
        HDP_VERSION="2.5"
        BLUEPRINT="EDW-ETL: Apache Spark 2.0-preview"
    fi
    : ${HDP_VERSION:? required}
    : ${BLUEPRINT:? required}
    debug "Creating default cluster."
    cat > ./cmdfile << EOF
template create --AWS --name master-node --instanceType ${MASTER_INSTANCE_TYPE} --volumeCount $(get-volume-count $MASTER_INSTANCE_TYPE) --volumeSize $(get-volume-size $MASTER_INSTANCE_TYPE) --volumeType $(get-volume-type $MASTER_INSTANCE_TYPE)
template create --AWS --name worker-node --instanceType $INSTANCE_TYPE --volumeCount $(get-volume-count $INSTANCE_TYPE) --volumeSize $(get-volume-size $INSTANCE_TYPE) --volumeType $(get-volume-type $INSTANCE_TYPE)
credential select --name aws-access
blueprint select --name "${BLUEPRINT}"
instancegroup configure --instanceGroup master --nodecount 1 --templateName master-node --ambariServer true
instancegroup configure --instanceGroup worker --nodecount $CLUSTER_SIZE --templateName worker-node --ambariServer false
network create --AWS --NEW_SUBNET --name vpc-subnet-1 --subnet 10.0.1.0/24 --vpcID $VPC_ID --internetGatewayID $IGW_ID
securitygroup create --name default-sg --description "only allow secure access from a remote location" --rules ${REMOTE_LOCATION}:tcp:443,22;$(curl -4fs 169.254.169.254/latest/meta-data/public-ipv4)/32:tcp:22,443,9443
stack create --AWS --wait yes --name $CLUSTER_NAME --region $REGION --instanceProfileStrategy USE_EXISTING --s3Role $S3_ROLE --ambariVersion $AMBARI_VERSION --hdpVersion $HDP_VERSION
cluster create --wait yes --userName admin --password $ADMIN_PASSWORD
EOF
    run_shell_on_cmdfile
    debug "Default cluster started."
}
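# Query Cloudbreak for the master instance group's metadata and pull the
# Ambari server IP out of the JSON with jq; signal failure if no IP is found.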
get_ambari_url() {
    cat > /tmp/stack_metadata << EOF
stack metadata --name $CLUSTER_NAME --outputType JSON --instancegroup master
EOF
    export AMBARI_IP=$(cat /tmp/stack_metadata | cbd util cloudbreak-shell-quiet | colorless | grep '^{"master":\[.*\]}$' | jq .master[] -r)
    if [ -z "$AMBARI_IP" ]; then
        signal_failure 1 "Couldn't find Ambari IP address."
        exit 1
    fi
}
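# Execute the commands staged in ./cmdfile through cloudbreak-shell (the cbd
# util appears to pick up ./cmdfile from the working directory and write its
# log to ./output.log), surface any "[FAILED] ... REASON: ..." message to
# CloudFormation, and archive the cmdfile under a timestamped name so it
# cannot be replayed.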
run_shell_on_cmdfile() {
    cbd util cloudbreak-shell-quiet
    export ERROR_REASON=$(cat output.log | grep '\[FAILED\]' | sed 's/^.*REASON: \(.*\)\]/\1/')
    cat ./output.log
    rm ./output.log
    mv ./cmdfile ./cmdfile_$(date +"%s")
    if [ -n "$ERROR_REASON" ]; then
        signal_failure 1 "$ERROR_REASON"
        exit 1
    fi
}
check_terms_of_use() {
    if [ "${TERMS_OF_USE}" != "I Have Read and Agree to Terms of Use" ]; then
        signal_failure 1 "You must accept the Terms of Use to create a cluster!"
        exit 1
    fi
}
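# Instance types that ship with ephemeral (instance-store) disks, encoded as
# "type:COUNTxSIZE_GB" pairs, e.g. d2.xlarge:3x2000 means 3 local volumes of
# 2000 GB each. The get-volume-* helpers below look types up in this map and
# fall back to 2 x 100 GB gp2 EBS volumes for anything not listed. For example
# (m4.xlarge is just an illustrative type absent from the map):
#   get-volume-count d2.xlarge -> 3      get-volume-type d2.xlarge -> ephemeral
#   get-volume-size  m4.xlarge -> 100    get-volume-type m4.xlarge -> gp2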
EPHEMERALS="c3.xlarge:2x40,c3.2xlarge:2x80,c3.4xlarge:2x160,c3.8xlarge:2x320,d2.xlarge:3x2000,d2.2xlarge:6x2000,d2.4xlarge:12x2000,d2.8xlarge:24x2000,hs1.8xlarge:24x2000,i2.xlarge:1x800,i2.2xlarge:2x800,i2.4xlarge:4x800,i2.8xlarge:8x800,m3.large:1x32,m3.xlarge:2x40,m3.2xlarge:2x80,r3.large:1x32,r3.xlarge:1x80,r3.2xlarge:1x160,r3.4xlarge:1x320,r3.8xlarge:2x320,x1.32xlarge:2x1920"
get-volume-count() {
    : ${1:? required}
    if [[ ",$EPHEMERALS" = *",$1:"* ]]; then
        volume=$(get-volume-params $1)
        echo $volume | cut -d"x" -f1
    else
        echo 2
    fi
}
get-volume-size() {
    : ${1:? required}
    if [[ ",$EPHEMERALS" = *",$1:"* ]]; then
        volume=$(get-volume-params $1)
        echo $volume | cut -d"x" -f2
    else
        echo 100
    fi
}
get-volume-params() {
    : ${1:? required}
    echo ",$EPHEMERALS" | egrep -o ",$1:[0-9]{1,2}x[0-9]+" | cut -d":" -f2
}
get-volume-type() {
    : ${1:? required}
    if [[ ",$EPHEMERALS" = *",$1:"* ]]; then
        echo "ephemeral"
    else
        echo "gp2"
    fi
}
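# CloudFormation signaling: cfn-signal posts key/value results (Ambari URL,
# cluster name, IP) to the stack's wait condition handles. On failure the
# handle is chosen by deployment mode, so the WaitCondition the template is
# actually blocking on receives the error reason.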
signal_cluster_success() {
    $AWS_BIN_LOCATION/cfn-signal -s true -e 0 --id "$1" --data "$2" "${CLUSTER_WAIT_HANDLE_URL}"
}
signal_stack_name_success() {
    $AWS_BIN_LOCATION/cfn-signal -s true -e 0 --id "$1" --data "$2" "${STACK_NAME_WAIT_HANDLE_URL}"
}
signal_failure() {
    debug "Error occurred, sending failure signal. Reason: $2"
    if [ "$BUILD_CLUSTER" == "true" ]; then
        WAIT_HANDLE_URL=${STACK_NAME_WAIT_HANDLE_URL}
    else
        WAIT_HANDLE_URL=${CLUSTER_WAIT_HANDLE_URL}
    fi
    $AWS_BIN_LOCATION/cfn-signal -s false -e "$1" --id "cbd-init" --reason "$2" "${WAIT_HANDLE_URL}"
}
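# Install a cloud-init per-boot hook: after a stop/start the instance gets a
# new public hostname, so cbd must be restarted to regenerate its config with
# the new domain name.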
create-cbd-per-boot-script() {
    cat > /var/lib/cloud/scripts/per-boot/cbd-per-boot.sh << ENDOF
#!/bin/bash
cd /var/lib/cloudbreak-deployment
echo "Restart cbd (kill-regenerate-start) to configure the new public domain name."
cbd restart
ENDOF
    chmod +x /var/lib/cloud/scripts/per-boot/cbd-per-boot.sh
}
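# Entry point: validate required parameters, enable strict shell modes and
# trace logging when the script is executed directly (not sourced), install
# the ERR trap, bring up Cloudbreak, and optionally build the first cluster,
# signaling results back to CloudFormation.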
main() {
    : ${CREDENTIAL_ROLE_ARN:? required}
    : ${KEYPAIR_NAME:? required}
    : ${USERNAME:? required}
    : ${ADMIN_PASSWORD:? required}
    : ${PRODUCT_TELEMETRY:? required}
    : ${BUILD_CLUSTER:? required}
    : ${CLOUDBREAK_ID:? required}
    : ${CBD_VERSION:? required}
    : ${VPC_ID:? required}
    : ${IGW_ID:? required}
    : ${REGION:? required}
    : ${ACCOUNT_ID:? required}
    : ${HWX_CLOUD_TYPE:? required}
    if [ "$BUILD_CLUSTER" == "true" ]; then
        : ${CLUSTER_SIZE:? required}
        : ${MASTER_INSTANCE_TYPE:? required}
        : ${INSTANCE_TYPE:? required}
        : ${HDP_TYPE:? required}
        : ${CLUSTER_NAME:? required}
        : ${S3_ROLE:? required}
        : ${REMOTE_LOCATION:? required}
        : ${STACK_NAME_WAIT_HANDLE_URL:? required}
        : ${TERMS_OF_USE:? required}
        check_terms_of_use
    fi
    : ${CLUSTER_WAIT_HANDLE_URL:? required}

    if [[ "$0" == "$BASH_SOURCE" ]]; then
        exec > >(tee /var/log/cbd-quick-start.log | logger -t user-data -s 2> /dev/console) 2>&1
        set -o errexit
        set -o errtrace
        set -o nounset
        set -o noclobber
    fi

    if [[ "$TRACE" ]]; then
        : ${START_TIME:=$(date +%s)}
        export START_TIME
        export PS4='+ [TRACE $BASH_SOURCE:$LINENO][elapsed: $(( $(date +%s) - $START_TIME ))] '
        set -x
    fi

    trap '_trap_error $? $LINENO $BASH_LINENO "$BASH_COMMAND" $(printf "::%s" ${FUNCNAME[@]})' ERR

    wait_for_docker
    create-cbd-per-boot-script
    start_cloudbreak

    if [ "$BUILD_CLUSTER" == "true" ]; then
        send_terms_mail
        trim_cluster_name
        start_cluster
        get_ambari_url
        signal_stack_name_success "clusterStackName" "${CLUSTER_NAME}-1"
        signal_cluster_success "ambariUrl" "https://$AMBARI_IP/ambari/"
        signal_cluster_success "clusterName" "$CLUSTER_NAME"
        signal_cluster_success "ambariIP" "$AMBARI_IP"
    else
        signal_cluster_success "cloudbreak" "success"
    fi
}

[[ "$0" == "$BASH_SOURCE" ]] && main "$@"