Skip to content

Instantly share code, notes, and snippets.

@kudosqujo
Last active September 22, 2021 00:25
Show Gist options
  • Save kudosqujo/0a6ae36c549182174d29b7b8e606861e to your computer and use it in GitHub Desktop.
[AWS CLI] #aws #cheatsheet #ec2 #s3 #eks
### Walkthrough #1: Create our first virtual machine in the cloud
# get list of commands useful for EC2
# NOTE: '\s' is a GNU grep extension and is treated as a literal on POSIX/BSD
# grep; '[[:space:]]' is the portable POSIX character class
aws ec2 help | grep '[[:space:]]describe'
# create an SSH keypair for AWS; --query extracts just the private key material,
# which is redirected into MyKeyPair.pem (AWS returns it only once -- it cannot
# be re-downloaded later)
aws ec2 create-key-pair --key-name MyKeyPair --query 'KeyMaterial' --output text > MyKeyPair.pem
# verify that AWS knows about our newly created key
aws ec2 describe-key-pairs --key-name MyKeyPair
# the private key is used to access all machines in AWS that it is assigned to,
# so it is important to KEEP IT SAFE!!!
# restrict the key file to owner-read-only; ssh refuses keys with permissive modes
chmod 400 MyKeyPair.pem
# we're also going to need a security group that is going to be attached to the instance
# A security group defines access permissions for the resources it is attached to.
# create a security group which will get attached to our instance
# (no --vpc-id is given, so this presumably lands in the default VPC -- TODO confirm)
aws ec2 create-security-group --group-name my-sg --description "My security group"
# verify its creation
aws ec2 describe-security-groups --group-names my-sg
# since security groups don't auto-create rules upon initialization, we will need to create some ourselves
# add an inbound rule to the group that allows TCP traffic on port 22 from the IP range of 203.0.113.0/24
# (203.0.113.0/24 is the RFC 5737 documentation range -- substitute your own source CIDR)
aws ec2 authorize-security-group-ingress --group-name my-sg --protocol tcp --port 22 --cidr 203.0.113.0/24
# get a list of AMIs, filter list to only include Xenial AMIs
# should return an image w/ an ID of `ami-785db401`
# NOTE(review): the AMI ID above differs from the one passed to run-instances
# below (ami-760aaa0f); AMI IDs are region-specific -- confirm against your region
aws ec2 describe-images --filters "Name=description,Values=*xenial*"
# set the size of our EC2 instance to `t2.micro` -- which is a free tier for new customers
# and run the instance
aws ec2 run-instances --instance-type t2.micro --key-name MyKeyPair --security-groups my-sg --image-id ami-760aaa0f
# verify that the instance is running
# (the instance ID below is an example -- use the InstanceId returned by run-instances)
aws ec2 describe-instances --instance-ids i-04240cbed0bac7da6
# terminate the instance (irreversible: the instance and its instance-store data are destroyed)
aws ec2 terminate-instances --instance-ids i-04240cbed0bac7da6
# if we wanted to STOP the instance instead of TERMINATE it, we'd run
aws ec2 stop-instances --instance-ids i-04240cbed0bac7da6
# every instance has a metadata endpoint accessible from within the instance via the address:
# 169.254.169.254
# run an instance, this time attaching an IAM instance profile so the instance
# can obtain temporary credentials for the role
aws ec2 run-instances --instance-type t2.micro --key-name MyKeyPair --security-groups my-sg --image-id ami-760aaa0f --iam-instance-profile Name="role1-instance-profile"
# ssh into instance (the IP below is example-specific -- use your instance's public IP)
ssh -i /srv/resources/MyKeyPair.pem [email protected]
# configure curl to start a new line after the result of a call
echo '-w "\n"' >> ~/.curlrc
# hit metadata endpoint
# NOTE(review): these are unauthenticated (IMDSv1-style) requests; instances
# configured to require IMDSv2 need a session token header -- confirm per instance
curl 169.254.169.254/
# checkout latest metadata
curl 169.254.169.254/latest/meta-data/
# checkout IAM role (returns the temporary credentials for the attached role)
curl 169.254.169.254/latest/meta-data/iam/security-credentials/role1
# get availability zone
curl 169.254.169.254/latest/meta-data/placement/availability-zone
# Here's an example for `fish` which will get all clusters and show their API endpoints
# NOTE: this is fish-shell syntax (for/end, no 'do'); it will not run under bash
for cluster in (aws eks list-clusters | jq -r '.clusters[]')
echo "$cluster"
aws eks describe-cluster --name "$cluster" | jq -r '.cluster.endpoint'
end
# get a list of all current EKS clusters
aws eks list-clusters | jq -r '.clusters[]'
# get an API endpoint for a specific cluster
# ($cluster must already hold a cluster name, e.g. from the list-clusters call above)
aws eks describe-cluster --name "$cluster" | jq -r '.cluster.endpoint'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllObjectAccess",
      "Effect": "Deny",
      "Principal": "*",
      "Action": [
        "s3:GetObject"
      ],
      "Resource": "arn:aws:s3:::mybucket/*"
    }
  ]
}
# create an S3 bucket (bucket names are globally unique across all AWS accounts)
aws s3 mb s3://<bucket_name>
# list all buckets owned by the user
aws s3 ls
# when shelled inside an EC2 instance
aws s3 ls # list all of the buckets you have access to
# copy local file to s3 bucket (note: the destination file name can be omitted if it will be the same as the source)
aws s3 cp test_file s3://mybucket
# list contents of a bucket
aws s3 ls s3://mybucket
# copy a file from S3 to local machine
# (s3api get-object takes the bucket, the object key, and the local output filename)
aws s3api get-object --bucket mybucket --key test_file copy
# can attach a bucket policy (which is a JSON file controlling access rights to buckets)
aws s3api put-bucket-policy --policy file://policy.json --bucket mybucket
# sync <source> folder with <target> folder, downloading the missing files
aws s3 sync s3://mybucket/my/folder ./tmp/myfolder
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment