#######################################################################
# TOPICS
#######################################################################
# 0. ASSUMPTIONS
# 1. CONFIGURE CLOUD9 (EC2) ENVIRONMENT
# 2. INSTALL APPMESH
# 3. BUILD CONTAINER IMAGE
# 4. PUSH CONTAINER IMAGE TO ECR
# 5. DEPLOY OUR APPS TO K8S WITHOUT APPMESH
# 6. MESHIFY THE BACKEND COMPONENTS
# 7. MESHIFY THE FRONTEND COMPONENTS
# 8. WEIGHTING THE ROUTES
#######################################################################
#####################################################################
# 0. ASSUMPTIONS
#####################################################################
# TBD
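# a hedged sketch of what the rest of this gist appears to assume:
# - you are working from a Cloud9 (EC2) environment with docker and the standard AWS tooling available
# - an EKS cluster named "dev" already exists in this region and kubectl can reach it
# if you still need a cluster, something along these lines would do (names/sizes are illustrative):
#   eksctl create cluster --name dev --nodes 2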
#####################################################################
# 1. CONFIGURE CLOUD9 (EC2) ENVIRONMENT
#####################################################################
export AWS_DEFAULT_REGION=$(curl --silent http://169.254.169.254/latest/meta-data/placement/region)
export AWS_PAGER=""
cluster=dev
namespace=passthru
app_name=passthru
app_version=1.0.42
k8s_version=1.19 # used by the kubectl download below; 1.19 is just an example, match your cluster's version
# install AWS CLI v2, eksctl, kubectl, helm
sudo mv /usr/local/bin/aws /usr/local/bin/aws.old
sudo mv /usr/bin/aws /usr/bin/aws.old
curl --silent "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bin
curl -LO https://storage.googleapis.com/kubernetes-release/release/v${k8s_version}.0/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
# verify this worked
which aws eksctl kubectl helm
# install the kubectl neat add-on (https://krew.sigs.k8s.io/docs/user-guide/setup/install/ | https://github.com/itaysk/kubectl-neat)
(
set -x; cd "$(mktemp -d)" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz" &&
tar zxvf krew.tar.gz &&
KREW=./krew-"$(uname | tr '[:upper:]' '[:lower:]')_$(uname -m | sed -e 's/x86_64/amd64/' -e 's/arm.*$/arm/')" &&
"$KREW" install krew
)
echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >> ~/.bashrc
source ~/.bashrc
kubectl krew install neat
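# optional sanity check: neat reads manifests from stdin and strips the clutter
kubectl get namespace default -o yaml | kubectl neat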
#####################################################################
# 2. INSTALL APPMESH
#####################################################################
kubectl create ns appmesh-system
eksctl utils associate-iam-oidc-provider \
--cluster ${cluster} \
--approve
eksctl create iamserviceaccount \
--cluster ${cluster} \
--namespace appmesh-system \
--name appmesh-controller \
--attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess,arn:aws:iam::aws:policy/AWSAppMeshFullAccess \
--override-existing-serviceaccounts \
--approve
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
helm repo add eks https://aws.github.io/eks-charts
helm upgrade -i appmesh-controller eks/appmesh-controller \
--namespace appmesh-system \
--set region=${AWS_DEFAULT_REGION} \
--set serviceAccount.create=false \
--set serviceAccount.name=appmesh-controller \
--set tracing.enabled=true \
--set tracing.provider=x-ray
kubectl -n appmesh-system get all # check it's ready
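# optional: the controller installation also registers the App Mesh CRDs, which we rely on later
kubectl get crds | grep appmesh.k8s.aws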
#####################################################################
# 3. BUILD CONTAINER IMAGE
#####################################################################
mkdir -p ~/environment/${namespace}/src
# NodeJS application code
cat << EOF > ~/environment/${namespace}/src/server.js
const http = require("http");
const app = require("express")();
const xray = require("aws-xray-sdk");
const port = process.env.PORT || 3000
const hostname = (process.env.HOSTNAME || require("os").hostname() || "unknown")
const response = (process.env.RESPONSE || "unknown")
var nextfail = (process.env.RELIABILITY || -1);
app.use(xray.express.openSegment(response));
app.get('/', (req, res) => {
  if (nextfail-- == 0) {
    // simulate periodic server failure
    const result = "[status=500,hostname=" + hostname + ",uptime=" + process.uptime() + "]";
    console.log(result);
    res.status(500).send(result + "\n");
    nextfail = (process.env.RELIABILITY || -1);
    return;
  }
  if (typeof process.env.FWD_URL !== 'undefined') {
    // frontends will forward the request as configured ... (e.g. export FWD_URL=http://checkip.amazonaws.com:80)
    const result = "[hostname=" + hostname + ",uptime=" + process.uptime() + ",forwarding=" + process.env.FWD_URL + "]";
    console.log(result);
    req.pipe(http.request(process.env.FWD_URL, (res_fwd) => { res_fwd.pipe(res); }));
    return;
  }
  // ... backends will generate a response
  const result = "[status=200,hostname=" + hostname + ",uptime=" + process.uptime() + ",response=" + response + "]";
  console.log(result);
  res.send(result + "\n");
})
app.use(xray.express.closeSegment());
app.listen(port, () => { console.log("listening on " + port); })
EOF
# dependencies for application
cat << EOF > ~/environment/${namespace}/src/package.json
{
  "dependencies": {
    "aws-xray-sdk": "^3.2.0",
    "express": "^4.17.1"
  },
  "private": "true"
}
EOF
# Dockerfile for application
cat << EOF > ~/environment/${namespace}/src/Dockerfile
FROM node:14-alpine
COPY package*.json server.js ./
RUN npm install --no-package-lock
EXPOSE 3000
CMD [ "node", "server.js" ]
EOF
# assume docker local install, keep it neat, kill everything!
for i in $(docker ps -q); do docker kill $i; done
docker system prune --all --force
# build the docker image and run a container instance
docker build -t ${app_name} ~/environment/${namespace}/src/
# run a quick test
container_id=$(docker run --detach --rm -p 3000:3000 ${app_name})
sleep 2 && curl http://localhost:3000
docker stop ${container_id}
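# optional: exercise the env vars server.js understands (values here are illustrative)
# RELIABILITY=2 should make every third request come back as a 500
container_id=$(docker run --detach --rm -e RESPONSE=local-test -e RELIABILITY=2 -p 3000:3000 ${app_name})
sleep 2 && for i in 1 2 3; do curl http://localhost:3000; done
docker stop ${container_id}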
#####################################################################
# 4. PUSH CONTAINER IMAGE TO ECR
#####################################################################
# create ECR repo and push image
repo_uri=$( \
aws ecr create-repository \
--repository-name ${app_name} \
--image-scanning-configuration scanOnPush=true \
--query 'repository.repositoryUri' \
--output text \
)
aws ecr get-login-password | docker login --username AWS --password-stdin ${repo_uri}
docker tag ${app_name}:latest ${repo_uri}:${app_version}
docker push ${repo_uri}:${app_version}
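# optional: confirm the tagged image is now visible in the repository
aws ecr describe-images --repository-name ${app_name} --query 'imageDetails[].imageTags'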
#####################################################################
# 5. DEPLOY OUR APPS TO K8S WITHOUT APPMESH
#####################################################################
mkdir -p ~/environment/${namespace}/deployment
# build the namespace manifest
kubectl create namespace ${namespace} -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/namespace.yaml
# build the backend deployment manifests (x2)
for version in blue green; do
kubectl -n ${namespace} create deployment backend-${version} --image ${repo_uri}:${app_version} -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/deploy-backend-${version}.yaml
cat << EOF >> ~/environment/${namespace}/deployment/deploy-backend-${version}.yaml
        imagePullPolicy: Always
        ports:
        - containerPort: 3000
        env:
        - name: RESPONSE
          value: backend-${version}
        - name: RELIABILITY
          value: "8"
EOF
done
# build the frontend deployment manifest
kubectl -n ${namespace} create deployment frontend --image ${repo_uri}:${app_version} -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/deploy-frontend.yaml
cat << EOF >> ~/environment/${namespace}/deployment/deploy-frontend.yaml
        imagePullPolicy: Always
        ports:
        - containerPort: 3000
        env:
        - name: RESPONSE
          value: frontend
        - name: FWD_URL
          value: http://backend-blue:3000
EOF
# build the backend services (x2), using the standard clusterip type
for version in blue green; do
kubectl -n ${namespace} create service clusterip backend-${version} --tcp=3000 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/svc-backend-${version}.yaml
done
# build the frontend service, exposed as a loadbalancer
kubectl -n ${namespace} create service loadbalancer frontend --tcp=3000 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/svc-frontend.yaml
# deploy it, namespace first
kubectl apply -f ~/environment/${namespace}/deployment/namespace.yaml
kubectl apply -f ~/environment/${namespace}/deployment/
# verify our namespace has native k8s objects and check external load balancer is working (takes ~2 mins)
kubectl -n ${namespace} get deployments,pods,services -o wide
sleep 2 && lb_dnsname=$(kubectl -n ${namespace} get service -l app=frontend -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}')
while true; do curl ${lb_dnsname}:3000; sleep 0.5; done # <--- ctrl+c to quit loop
#####################################################################
# 6. MESHIFY THE BACKEND COMPONENTS
#####################################################################
# create a service mesh
cat > ~/environment/${namespace}/deployment/mesh.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: Mesh
metadata:
  name: ${namespace}
spec:
  egressFilter:
    type: ALLOW_ALL
  namespaceSelector:
    matchLabels:
      mesh: ${namespace}
EOF
kubectl apply -f ~/environment/${namespace}/deployment/mesh.yaml
sleep 2 && aws appmesh describe-mesh --mesh-name ${namespace} # check it produced an AWS resource
# activate our namespace for use with our new mesh (don't restart any deployments yet!)
cat << EOF >> ~/environment/${namespace}/deployment/namespace.yaml
  labels:
    mesh: ${namespace}
    appmesh.k8s.aws/sidecarInjectorWebhook: enabled
EOF
kubectl apply -f ~/environment/${namespace}/deployment/namespace.yaml
kubectl describe namespace ${namespace} # check the labels have been applied
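# sidecar injection is performed by a mutating webhook owned by the appmesh controller
# if pods fail to gain sidecars later, this (optional) check is a good first place to look
kubectl get mutatingwebhookconfigurations | grep -i appmesh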
# build the backend virtualnode manifests (x2) - each one wraps its corresponding k8s backend service
for version in blue green; do
cat > ~/environment/${namespace}/deployment/vn-backend-${version}.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: vn-backend-${version}
  namespace: ${namespace}
spec:
  awsName: vn-backend-${version}
  podSelector:
    matchLabels:
      app: backend-${version}
  listeners:
    - portMapping:
        port: 3000
        protocol: http
  serviceDiscovery:
    dns:
      hostname: backend-${version}
EOF
kubectl apply -f ~/environment/${namespace}/deployment/vn-backend-${version}.yaml
done
# confirm that pods currently have one container each
kubectl -n ${namespace} get pods
# upon restart, each backend pod will be injected with an envoy container (sidecar)
for version in blue green; do
kubectl -n ${namespace} rollout restart deployment backend-${version}
done
# wait a few seconds then confirm that the backend pods now possess more than one container each
# the second container is envoy
# the third container is xray-daemon (if enabled when the app mesh controller was installed)
kubectl -n ${namespace} get pods
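# optional: list the container names per pod to see exactly what was injected
kubectl -n ${namespace} get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].name}{"\n"}{end}'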
#####################################################################
# 7. MESHIFY THE FRONTEND COMPONENTS
#####################################################################
# the frontend virtualnode was deferred until now because it depends upon the backend being fully meshified
# that involves introducing a virtualrouter and a virtualservice to marshal requests through to the backends
# a virtualrouter will distribute backend requests (blue and green) using weighted routes
# create a single virtualrouter which, for now, sends 100% of requests to blue
cat > ~/environment/${namespace}/deployment/vr-backend.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualRouter
metadata:
  name: vr-backend
  namespace: ${namespace}
spec:
  awsName: vr-backend
  listeners:
    - portMapping:
        port: 3000
        protocol: http
  routes:
    - name: vrr-backend
      httpRoute:
        match:
          prefix: /
        action:
          weightedTargets:
            - virtualNodeRef:
                name: vn-backend-blue
              weight: 100
            - virtualNodeRef:
                name: vn-backend-green
              weight: 0
EOF
kubectl apply -f ~/environment/${namespace}/deployment/vr-backend.yaml
# ensure that ARNs exist for the virtualrouter, then check in AWS
kubectl -n ${namespace} get virtualrouters
sleep 2 && aws appmesh list-virtual-routers --mesh-name ${namespace}
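# optional: inspect the route itself to confirm the 100/0 weighting reached AWS
# (this assumes the controller surfaces the route under its k8s name, vrr-backend)
aws appmesh describe-route \
--mesh-name ${namespace} \
--virtual-router-name vr-backend \
--route-name vrr-backend \
--query 'route.spec.httpRoute.action.weightedTargets'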
# it's important to ensure that a matching k8s service exists for each virtualservice
# it doesn't need to resolve to any pods, it just needs to surface a cluster IP address
kubectl -n ${namespace} create service clusterip vs-backend --tcp=3000 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/deployment/svc-vs-backend.yaml
kubectl apply -f ~/environment/${namespace}/deployment/svc-vs-backend.yaml
# in the same way that k8s pods send requests to other pods via k8s services,
# virtualnodes (which wrap k8s services) send requests to other virtualnodes via virtualservices
# we already have a virtualrouter, which knows how to locate the backend virtualnodes
# now we create a single virtualservice that forwards all its traffic to the virtualrouter
cat > ~/environment/${namespace}/deployment/vs-backend.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualService
metadata:
  name: vs-backend
  namespace: ${namespace}
spec:
  awsName: vs-backend
  provider:
    virtualRouter:
      virtualRouterRef:
        name: vr-backend
EOF
kubectl apply -f ~/environment/${namespace}/deployment/vs-backend.yaml
# ensure that ARNs exist for the virtualservice, then check in AWS
kubectl -n ${namespace} get virtualservices
sleep 2 && aws appmesh list-virtual-services --mesh-name ${namespace}
# finally we build a virtualnode for the frontend which is required before envoy can be injected there
# we could have applied this manifest before now, but since its dependencies were not yet available
# it would be stuck in a pending state without the means to produce a corresponding AWS resource
cat > ~/environment/${namespace}/deployment/vn-frontend.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: vn-frontend
  namespace: ${namespace}
spec:
  awsName: vn-frontend
  podSelector:
    matchLabels:
      app: frontend
  listeners:
    - portMapping:
        port: 3000
        protocol: http
  serviceDiscovery:
    dns:
      hostname: frontend
  backends:
    - virtualService:
        virtualServiceRef:
          name: vs-backend
EOF
kubectl apply -f ~/environment/${namespace}/deployment/vn-frontend.yaml
# ensure that ARNs exist for all our virtualnodes, then check in AWS
kubectl -n ${namespace} get virtualnodes
sleep 2 && aws appmesh list-virtual-nodes --mesh-name ${namespace}
# let's reconfigure the frontend and deploy it again
# the frontend currently forwards all requests to backend-blue
# henceforth, all requests will go to the virtualservice vs-backend
sed -i "s/backend-blue/vs-backend/g" ./passthru/deployment/deploy-frontend.yaml
kubectl apply -f ./passthru/deployment/deploy-frontend.yaml
# note: as we've applied an updated deployment manifest, the restart is implicit
# wait a few seconds then confirm that all pods now possess multiple containers each
kubectl -n ${namespace} get pods
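# with the frontend now forwarding to vs-backend, traffic should still flow end-to-end (all blue for now)
lb_dnsname=$(kubectl -n ${namespace} get service -l app=frontend -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}')
while true; do curl ${lb_dnsname}:3000; sleep 0.5; done # <--- ctrl+c to quit loop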
#####################################################################
# 8. WEIGHTING THE ROUTES
#####################################################################
# after the following changes have been applied, the requests should be split 50/50 between blue and green
sed -i "s/weight: 100/weight: 50/g" ./passthru/deployment/vr-backend.yaml
sed -i "s/weight: 0/weight: 50/g" ./passthru/deployment/vr-backend.yaml
kubectl apply -f ./passthru/deployment/vr-backend.yaml
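# re-run the curl loop from earlier and the responses should now alternate between backend-blue and
# backend-green in roughly equal measure; describe-route (as shown in section 7) is another way to confirm
while true; do curl ${lb_dnsname}:3000; sleep 0.5; done # <--- ctrl+c to quit loop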
#####################################################################
# NEXT STEPS - can I get this working on Cloud9?
#####################################################################
# Get one of the pods that matches the Envoy daemonset
ENVOY_POD=$(kubectl -n projectcontour get pod -l app=envoy -o name | head -1)
# Do the port forward to that pod
kubectl -n projectcontour port-forward $ENVOY_POD 9001
# what's the problem with nginx? - why does the proxy mechanism not work with envoy?