Created
August 9, 2017 05:33
-
-
Save jsilberm/d066de30d921595f7a58089970a9fdcc to your computer and use it in GitHub Desktop.
Take a cluster that has been deployed through Tectonic, and make it ready to run/deploy Portworx
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3.6 | |
# | |
# ready4px: Take a cluster that has been deployed through Tectonic, | |
# and make it ready to run/deploy Portworx | |
# | |
# Inputs: | |
# Environment: | |
# CLUSTER : Corresponds to CLUSTER from Tectonic | |
# AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, AWS_REGION : Amazon credentials and region | |
# AWS_VOL_TYPE: standard'|'io1'|'gp2'|'sc1'|'st1' | |
# AWS_VOL_SIZE: in GBs | |
# AWS_VOL_NAME: /dev name for attach | |
# AWS_NUM_VOLS: number of EBS vols to create | |
import os | |
import sys | |
import time | |
import boto3 | |
from pprint import pprint | |
from botocore.exceptions import ClientError | |
# TCP ports Portworx needs open between worker nodes.
pxports = [9001, 9002, 9003, 9010, 9012, 9014]

# Environment variables that must be defined before this script runs
# (validated by check_prereqs()).
envars = [
    "AWS_SECRET_ACCESS_KEY",
    "AWS_ACCESS_KEY_ID",
    "AWS_REGION",
    "CLUSTER",
    "AWS_VOL_TYPE",
    "AWS_VOL_SIZE",
    "AWS_VOL_NAME",
    "AWS_NUM_VOLS",
]
def check_prereqs():
    """Verify that every environment variable in `envars` is set.

    Improvement over the original: collects and reports *all* missing
    variables (not just the first one) so the user can fix their
    environment in a single pass, and writes the diagnostics to stderr
    rather than stdout.

    Exits the process with a non-zero status if anything is missing;
    returns None otherwise.
    """
    missing = [e for e in envars if not os.getenv(e)]
    for e in missing:
        print("FATAL: {} is not defined".format(e), file=sys.stderr)
    if missing:
        sys.exit(-1)
def asg_to_iids(asgname):
    """Resolve an auto-scaling-group name to its member instance IDs.

    asgname : name of the auto-scaling group to look up
    Returns a list of EC2 instance-ID strings (empty if the group has
    no instances or does not exist).
    """
    asclient = boto3.client('autoscaling')
    response = asclient.describe_auto_scaling_groups(
        AutoScalingGroupNames=[asgname]
    )
    # Flatten every instance of every matching group into one list.
    return [
        member['InstanceId']
        for group in response['AutoScalingGroups']
        for member in group['Instances']
    ]
def list_instances(grp, iids):
    """Print one line per instance ID in `iids`, prefixed by group name `grp`."""
    for instance_id in iids:
        print('{} : {}'.format(grp, instance_id))
def pxify_masters(ec2, master_sg):
    """Open TCP port 30062 to the world on the masters' security group.

    ec2       : boto3 EC2 *resource* (not client)
    master_sg : security-group ID of the master nodes

    BUG FIX: a duplicate ingress rule is reported by EC2 with error code
    'InvalidPermission.Duplicate'; 'EntityAlreadyExists' is an IAM error
    code and can never match here, so idempotent re-runs used to fall
    into the "Unexpected error" branch.
    """
    print ("px-ifying ", master_sg)
    msg = ec2.SecurityGroup(master_sg)
    try:
        msg.authorize_ingress( IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=30062, ToPort=30062 )
    except ClientError as e:
        if e.response['Error']['Code'] == 'InvalidPermission.Duplicate':
            print ("Object already exists")
        else:
            print ("Unexpected error: %s" % e)
#
# Add ports to the workers Security Group
#
def pxify_workers_sg (ec2, worker_sg, wvpc):
    """Allow the PX ports (module-level `pxports`) between workers by
    adding one self-referencing ingress rule per port on the workers'
    security group.

    ec2       : boto3 EC2 resource
    worker_sg : security-group ID of the worker nodes
    wvpc      : VPC ID the security group belongs to

    BUG FIX: a pre-existing security-group rule surfaces as
    'InvalidPermission.Duplicate' (EC2), not 'EntityAlreadyExists'
    (IAM), so re-runs used to land in the "Unexpected error" branch.
    """
    print ("px-ifying security group", worker_sg)
    wsg = ec2.SecurityGroup(worker_sg)
    for p in pxports:
        try:
            # The rule references the worker SG itself, so traffic is
            # only opened between members of the group.
            wsg.authorize_ingress( IpPermissions=[{'FromPort': p, 'IpProtocol': 'tcp', 'ToPort': p,
                               'UserIdGroupPairs' : [{'GroupId': worker_sg, 'VpcId' : wvpc}]}])
            print ("Added port {} to security_group {}".format(p, worker_sg))
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidPermission.Duplicate':
                print ("Object already exists")
            else:
                print ("Unexpected error: %s" % e)
def waitfor_vol (ec2r, volid, timeout=None):
    """Poll until the EBS volume `volid` reaches the 'available' state.

    ec2r    : boto3 EC2 resource
    volid   : volume ID to wait for
    timeout : optional maximum seconds to wait; the default (None)
              preserves the original behavior of waiting forever.

    Raises TimeoutError if the volume is not available within `timeout`.
    """
    deadline = None if timeout is None else time.monotonic() + timeout
    # Re-fetch the Volume each iteration so we see fresh state.
    while ec2r.Volume(volid).state != "available":
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("timed out waiting for volume {}".format(volid))
        print ("Waiting for ", volid)
        time.sleep(2)
#
# Add and attach volumes to the workers instances
#
def pxify_workers_vols (ec2c, ec2r, workers):
    """Create one EBS volume per worker instance and attach it.

    ec2c    : boto3 EC2 client (used for create_volume)
    ec2r    : boto3 EC2 resource (used for Instance/Volume lookups)
    workers : list of worker instance IDs

    Volume size, type and device name come from AWS_VOL_SIZE,
    AWS_VOL_TYPE and AWS_VOL_NAME.

    NOTE(review): AWS_NUM_VOLS is required by check_prereqs() but was
    never consumed here -- exactly one volume per worker is created.
    Confirm the intended semantics before honoring it (multiple volumes
    would also need distinct device names).
    """
    # Read and convert the settings once, outside the loop.
    vol_size = int(os.getenv("AWS_VOL_SIZE"))
    vol_type = os.getenv("AWS_VOL_TYPE")
    vol_name = os.getenv("AWS_VOL_NAME")
    for w in workers:
        inst = ec2r.Instance(w)
        az = inst.placement['AvailabilityZone']
        print ("{} has az {}".format(w, az))
        # Count the number of disks per instance (informational only).
        nblkdevs = len(inst.block_device_mappings)
        print ("Instance {} has {} devices attached".format(w, nblkdevs))
        try:
            # The new volume must live in the same AZ as the instance.
            vol = ec2c.create_volume ( AvailabilityZone=az, Size=vol_size, VolumeType=vol_type)
            print ("Created ", vol['VolumeId'])
            waitfor_vol (ec2r, vol['VolumeId'])
            # attach_volume returns a (truthy) attachment description
            # on success.
            if inst.attach_volume( Device=vol_name, VolumeId=vol['VolumeId']):
                print ("Attached {} to {}".format(vol['VolumeId'], w))
        except ClientError as e:
            # BUG FIX: 'EntityAlreadyExists' is an IAM error code and is
            # never raised by create_volume/attach_volume, so the old
            # "already exists" branch was dead; report every ClientError.
            print ("Unexpected error: %s" % e)
if __name__ == "__main__":
    check_prereqs()

    # Tectonic names its ASGs "<cluster>-masters" / "<cluster>-workers".
    cluster = os.getenv("CLUSTER")
    masters_asg = '{}-{}'.format(cluster, "masters")
    workers_asg = '{}-{}'.format(cluster, "workers")

    # Resolve the ASG names to their member instance IDs.
    masters = asg_to_iids(masters_asg)
    workers = asg_to_iids(workers_asg)

    ec2c = boto3.client('ec2')
    ec2r = boto3.resource('ec2')

    # Masters: open the NodePort on the first master's security group.
    list_instances(masters_asg, masters)
    master_sg = ec2r.Instance(masters[0]).security_groups[0]['GroupId']
    print (master_sg)
    pxify_masters(ec2r, master_sg)

    # Workers: open the PX ports within their security group, then
    # create and attach the EBS volumes.
    list_instances(workers_asg, workers)
    first_worker = ec2r.Instance(workers[0])
    worker_sg = first_worker.security_groups[0]['GroupId']
    print (worker_sg)
    worker_vpc = first_worker.vpc_id
    pxify_workers_sg (ec2r, worker_sg, worker_vpc)
    pxify_workers_vols (ec2c, ec2r, workers)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment