#######################################################################
# TOPICS
#######################################################################
# 0. ASSUMPTIONS
# 1. CONFIGURE CLOUD9 (EC2) ENVIRONMENT
# 2. INSTALL APPMESH
# 3. DEPLOY OUR APPS TO K8S WITHOUT APPMESH
# 4. MESHIFY THE BACKEND COMPONENTS
# 5. MESHIFY THE FRONTEND COMPONENTS
# 6. WEIGHTING THE ROUTES
#######################################################################
#####################################################################
# 0. ASSUMPTIONS
#####################################################################
# this walkthrough assumes an existing EKS cluster named "dev" and a Cloud9 (EC2)
# environment whose credentials can administer EKS, IAM, App Mesh and Cloud Map
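# if the "dev" cluster does not exist yet, it could be created along these lines
# (a sketch only - the node settings are illustrative, adjust or skip as appropriate):
# eksctl create cluster --name dev --nodes 2 --node-type t3.medium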
#####################################################################
# 1. CONFIGURE CLOUD9 (EC2) ENVIRONMENT
#####################################################################
export AWS_DEFAULT_REGION=$(curl --silent http://169.254.169.254/latest/meta-data/placement/region)
cluster=dev
namespace=passthru
app_name=passthru
# install AWS CLI v2, eksctl, kubectl, helm
sudo mv /usr/local/bin/aws /usr/local/bin/aws.old
sudo mv /usr/bin/aws /usr/bin/aws.old
curl --silent "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bin
k8s_version=1.19 # assumed value - set this to match your cluster's Kubernetes minor version
curl -LO https://storage.googleapis.com/kubernetes-release/release/v${k8s_version}.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
# verify this worked
which aws eksctl kubectl helm
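# optionally confirm the versions of each tool as well
aws --version
eksctl version
kubectl version --client
helm version --short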
# install the kubectl neat add-on (https://krew.sigs.k8s.io/docs/user-guide/setup/install/ | https://github.com/itaysk/kubectl-neat)
(
  set -x; cd "$(mktemp -d)" &&
  curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz" &&
  tar zxvf krew.tar.gz &&
  KREW=./krew-"$(uname | tr '[:upper:]' '[:lower:]')_$(uname -m | sed -e 's/x86_64/amd64/' -e 's/arm.*$/arm/')" &&
  "$KREW" install krew
)
echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >> ~/.bashrc
source ~/.bashrc
kubectl krew install neat
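# optionally confirm krew can see the plugin and that it responds
kubectl krew list
kubectl neat --help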
#####################################################################
# 2. INSTALL APPMESH
#####################################################################
kubectl create ns appmesh-system
eksctl utils associate-iam-oidc-provider \
  --cluster ${cluster} \
  --approve
eksctl create iamserviceaccount \
  --cluster ${cluster} \
  --namespace appmesh-system \
  --name appmesh-controller \
  --attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess,arn:aws:iam::aws:policy/AWSAppMeshFullAccess \
  --override-existing-serviceaccounts \
  --approve
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
helm repo add eks https://aws.github.io/eks-charts
helm upgrade -i appmesh-controller eks/appmesh-controller \
  --namespace appmesh-system \
  --set region=${AWS_DEFAULT_REGION} \
  --set serviceAccount.create=false \
  --set serviceAccount.name=appmesh-controller \
  --set tracing.provider=x-ray
kubectl -n appmesh-system get all # check it's ready
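# not in the original gist: the controller also registers the App Mesh CRDs, so listing them
# is another quick readiness check (expect meshes, virtualnodes, virtualrouters, virtualservices, ...)
kubectl get crds | grep appmesh.k8s.aws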
#####################################################################
# 3. DEPLOY OUR APPS TO K8S WITHOUT APPMESH
#####################################################################
mkdir -p ~/environment/${namespace}
# build the namespace manifest
kubectl create namespace ${namespace} -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/namespace.yaml
# build the backend deployment manifests (x2) with config volume mount
for version in blue green; do
kubectl create deployment backend-${version} -n ${namespace} --image nginx:1.18-alpine -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/backend-${version}-deploy.yaml
cat << EOF >> ~/environment/${namespace}/backend-${version}-deploy.yaml
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        volumeMounts:
        - name: backend-${version}-conf
          mountPath: /usr/share/nginx/html/index.html
          subPath: index.html
          readOnly: true
      volumes:
      - name: backend-${version}-conf
        configMap:
          name: backend-${version}-conf
          items:
          - key: index.html
            path: index.html
EOF
done
# build the frontend deployment manifest with config volume mount
kubectl create deployment frontend -n ${namespace} --image nginx:1.18-alpine -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/frontend-deploy.yaml
cat << EOF >> ~/environment/${namespace}/frontend-deploy.yaml
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        volumeMounts:
        - name: frontend-conf
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
          readOnly: true
      volumes:
      - name: frontend-conf
        configMap:
          name: frontend-conf
          items:
          - key: nginx.conf
            path: nginx.conf
EOF
# build the backend configmaps (x2), each of which overwrites the default nginx homepage
for version in blue green; do
cat << EOF > ~/environment/${namespace}/backend-${version}-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: backend-${version}-conf
  namespace: ${namespace}
data:
  index.html: |
    <html><head></head><body><p>${version}</p></body></html>
EOF
done
# build the frontend configmap, which initially configures nginx to pass all traffic through to the backend-blue service
cat << EOF > ~/environment/${namespace}/frontend-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend-conf
  namespace: ${namespace}
data:
  nginx.conf: |
    events {
    }
    http {
      server {
        location / {
          proxy_pass http://backend-blue:80;
        }
      }
    }
EOF
# build the backend services (x2), using the standard clusterip type
for version in blue green; do
kubectl create service clusterip backend-${version} -n ${namespace} --tcp=80 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/backend-${version}-svc.yaml
done
# build the frontend service, exposed as a loadbalancer
kubectl create service loadbalancer frontend -n ${namespace} --tcp=80 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/frontend-svc.yaml
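# optional sanity check (not in the original gist): client-side dry-run the generated manifests
# to catch any YAML mistakes before anything reaches the cluster
kubectl apply --dry-run=client -f ~/environment/${namespace}/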
# deploy it, namespace first
kubectl apply -f ~/environment/${namespace}/namespace.yaml
kubectl apply -f ~/environment/${namespace}/
# verify our namespace has native k8s objects and check external load balancer is working (takes ~2 mins)
kubectl -n ${namespace} get deployments,pods,services,configmaps -o wide
sleep 2
lb_dnsname=$(kubectl -n ${namespace} get service -l app=frontend -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}')
while true; do curl ${lb_dnsname}; echo $(date); sleep 0.5; done # <--- ctrl+c to quit loop
#####################################################################
# 4. MESHIFY THE BACKEND COMPONENTS
#####################################################################
# create a service mesh
cat > ~/environment/${namespace}/mesh.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: Mesh
metadata:
  name: ${namespace}
spec:
  egressFilter:
    type: ALLOW_ALL
  namespaceSelector:
    matchLabels:
      mesh: ${namespace}
EOF
kubectl apply -f ~/environment/${namespace}/mesh.yaml
aws appmesh describe-mesh --mesh-name ${namespace} # check it produced an AWS resource
# activate our namespace for use with our new mesh (don't restart any deployments yet!)
cat << EOF >> ~/environment/${namespace}/namespace.yaml
  labels:
    mesh: ${namespace}
    appmesh.k8s.aws/sidecarInjectorWebhook: enabled
EOF
kubectl apply -f ~/environment/${namespace}/namespace.yaml
kubectl describe namespace ${namespace} # check the labels have been applied
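# the same check in a more compact form
kubectl get namespace ${namespace} --show-labels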
# build the backend virtualnode manifests (x2) - each one wraps its corresponding k8s backend service
for version in blue green; do
cat > ~/environment/${namespace}/backend-${version}-virtualnode.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: vn-backend-${version}
  namespace: ${namespace}
spec:
  awsName: vn-backend-${version}
  podSelector:
    matchLabels:
      app: backend-${version}
  listeners:
    - portMapping:
        port: 80
        protocol: http
  serviceDiscovery:
    dns:
      hostname: backend-${version}
EOF
kubectl apply -f ~/environment/${namespace}/backend-${version}-virtualnode.yaml
done
# confirm that pods currently have one container each
kubectl -n ${namespace} get pods
# upon restart, each backend pod will be injected with an envoy container (sidecar)
for version in blue green; do
kubectl -n ${namespace} rollout restart deployment backend-${version}
done
# wait a few seconds then confirm that the backend pods now possess two containers each
kubectl -n ${namespace} get pods
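# optionally list the container names per pod to confirm the envoy sidecar is now present alongside nginx
kubectl -n ${namespace} get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].name}{"\n"}{end}'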
#####################################################################
# 5. MESHIFY THE FRONTEND COMPONENTS
#####################################################################
# the frontend virtualnode was deferred until now because it depends upon the backend being fully meshified
# that involves introducing a virtualrouter and a virtualservice to marshal requests to the backends
# a virtualrouter will distribute backend requests (blue and green) using weighted routes
# create a single virtualrouter which, for now, sends 100% of requests to blue
cat > ~/environment/${namespace}/backend-virtualrouter.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualRouter
metadata:
  name: vr-backend
  namespace: ${namespace}
spec:
  awsName: vr-backend
  listeners:
    - portMapping:
        port: 80
        protocol: http
  routes:
    - name: vrr-backend-blue
      httpRoute:
        match:
          prefix: /
        action:
          weightedTargets:
            - virtualNodeRef:
                name: vn-backend-blue
              weight: 100
EOF
kubectl apply -f ~/environment/${namespace}/backend-virtualrouter.yaml
# ensure that ARNs exist for the virtualrouter, then check in AWS
kubectl -n ${namespace} get virtualrouters
aws appmesh list-virtual-routers --mesh-name ${namespace}
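# optionally drill into the route itself to see the weighted targets as App Mesh records them
# (names below match the route and router declared in the manifest above)
aws appmesh describe-route \
  --mesh-name ${namespace} \
  --virtual-router-name vr-backend \
  --route-name vrr-backend-blue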
# it's important to ensure that a matching k8s service exists for each virtualservice (names must be identical!)
# the k8s service doesn't need to resolve to any pods, it just needs to surface a cluster IP address that the mesh can use
kubectl -n ${namespace} create service clusterip vs-backend --tcp=80 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/vs-backend-svc.yaml
kubectl apply -f ~/environment/${namespace}/vs-backend-svc.yaml
# compare the backend-blue and vs-backend services
# observe that our new vs-backend service intentionally has no physical endpoints
kubectl -n ${namespace} describe svc backend-blue vs-backend
# in the same way that k8s pods send requests to other pods via k8s services,
# virtualnodes (which wrap k8s services) send requests to other virtualnodes via virtualservices
# we already have a virtualrouter, which knows how to locate the backend virtualnodes
# now we create a single virtualservice that forwards all its traffic to the virtualrouter
cat > ~/environment/${namespace}/vs-backend-virtualservice.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualService
metadata:
  name: vs-backend
  namespace: ${namespace}
spec:
  awsName: vs-backend
  provider:
    virtualRouter:
      virtualRouterRef:
        name: vr-backend
EOF
kubectl apply -f ~/environment/${namespace}/vs-backend-virtualservice.yaml
# ensure that ARNs exist for the virtualservice, then check in AWS
kubectl -n ${namespace} get virtualservices
aws appmesh list-virtual-services --mesh-name ${namespace}
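# optionally inspect the virtualservice in AWS to confirm its provider is the vr-backend virtualrouter
aws appmesh describe-virtual-service \
  --mesh-name ${namespace} \
  --virtual-service-name vs-backend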
# finally, we build a virtualnode for the frontend which is required before envoy can be injected there
# we could have applied this manifest before now, but since its dependencies were not yet available
# it would have been stuck in a pending state without the means to produce a corresponding AWS resource
cat > ~/environment/${namespace}/frontend-virtualnode.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: vn-frontend
  namespace: ${namespace}
spec:
  awsName: vn-frontend
  podSelector:
    matchLabels:
      app: frontend
  listeners:
    - portMapping:
        port: 80
        protocol: http
  serviceDiscovery:
    dns:
      hostname: frontend
  backends:
    - virtualService:
        virtualServiceRef:
          name: vs-backend
EOF
kubectl apply -f ~/environment/${namespace}/frontend-virtualnode.yaml
# ensure that ARNs exist for all our virtualnodes, then check in AWS
kubectl -n ${namespace} get virtualnodes
aws appmesh list-virtual-nodes --mesh-name ${namespace}
# before we restart the frontend pod, let's reconfigure it
# the nginx instance currently forwards all requests to backend-blue
# henceforth, all requests will go to the virtualservice vs-backend
sed -i "s/backend-blue/vs-backend/g" ./passthru/frontend-conf.yaml
kubectl apply -f ./passthru/frontend-conf.yaml
# restart the frontend pod, which now has a matching virtualnode
kubectl -n ${namespace} rollout restart deployment frontend
# wait a few seconds then confirm that the frontend pod now possesses two containers
kubectl -n ${namespace} get pods
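# optional check (not in the original gist): requests via the load balancer should still all return
# "blue", now travelling frontend -> envoy -> vs-backend -> vr-backend -> vn-backend-blue
# (re-uses the lb_dnsname variable captured in section 3)
while true; do curl ${lb_dnsname}; echo $(date); sleep 0.5; done # <--- ctrl+c to quit loop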
#####################################################################
# 6. WEIGHTING THE ROUTES
#####################################################################
# after the following changes have been applied, the requests should be split 50/50 between blue and green
sed -i "s/weight: 100/weight: 50/g" ./passthru/backend-virtualrouter.yaml
cat << EOF >> ./passthru/backend-virtualrouter.yaml
    - name: vrr-backend-green
      httpRoute:
        match:
          prefix: /
        action:
          weightedTargets:
            - virtualNodeRef:
                name: vn-backend-green
              weight: 50
EOF
kubectl apply -f ~/environment/${namespace}/backend-virtualrouter.yaml
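# optional check (not in the original gist): give envoy a few seconds to pick up the updated route,
# then sample the load balancer and count the responses - expect a rough 50/50 split of blue and green
for i in $(seq 1 20); do curl -s ${lb_dnsname}; done | sort | uniq -c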
### TODO what now? at what point was I unable to curl the frontend, and why? is it inevitable that I'll need a virtual gateway?