export AWS_DEFAULT_REGION=$(curl --silent http://169.254.169.254/latest/meta-data/placement/region)
cluster=dev
namespace=passthru
mkdir -p ~/environment/${namespace}/
# build the namespace manifest
kubectl create namespace ${namespace} -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/namespace.yaml
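# (for reference, the generated namespace.yaml should look roughly like this once neat has stripped the noise:
#   apiVersion: v1
#   kind: Namespace
#   metadata:
#     name: ${namespace}
# the mesh labels appended further down rely on this layout)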
# build the backend deployment manifests (x2) with config volume mount
for version in blue green; do
kubectl create deployment backend-${version} -n ${namespace} --image nginx:1.9.5 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/backend-${version}-deploy.yaml
cat << EOF >> ~/environment/${namespace}/backend-${version}-deploy.yaml
        volumeMounts:
        - name: backend-${version}-conf
          mountPath: /usr/share/nginx/html/index.html
          subPath: index.html
          readOnly: true
      volumes:
      - name: backend-${version}-conf
        configMap:
          name: backend-${version}-conf
          items:
          - key: index.html
            path: index.html
EOF
done
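# note: the indentation of the appended block matters - volumeMounts must line up with the fields of the
# generated nginx container and volumes with the pod spec, so the tail of each backend deploy manifest
# should now read roughly like this (sketch, assuming default kubectl/neat output):
#       containers:
#       - image: nginx:1.9.5
#         name: nginx
#         volumeMounts:
#         - name: backend-blue-conf
#           ...
#       volumes:
#       - name: backend-blue-conf
#         ...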
# build the frontend deployment manifest with config volume mount
kubectl create deployment frontend -n ${namespace} --image nginx:1.9.5 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/frontend-deploy.yaml
cat << EOF >> ~/environment/${namespace}/frontend-deploy.yaml
        volumeMounts:
        - name: frontend-conf
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
          readOnly: true
      volumes:
      - name: frontend-conf
        configMap:
          name: frontend-conf
          items:
          - key: nginx.conf
            path: nginx.conf
EOF
# build the backend configmaps (x2), which overwrite the nginx homepage
for version in blue green; do
cat << EOF > ~/environment/${namespace}/backend-${version}-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: backend-${version}-conf
  namespace: ${namespace}
data:
  index.html: |
    <html><head></head><body><p>${version}</p></body></html>
EOF
done
# build the frontend configmap, which initially configures nginx to pass all traffic through to the backend-blue service
cat << EOF > ~/environment/${namespace}/frontend-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend-conf
  namespace: ${namespace}
data:
  nginx.conf: |
    events {
    }
    http {
      server {
        location / {
          proxy_pass http://backend-blue:9080;
        }
      }
    }
EOF
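# (note: without a mesh, a blue/green cutover would mean editing proxy_pass to http://backend-green:9080,
# re-applying this configmap and restarting the frontend deployment - App Mesh, further down, turns this
# into a weighted routing change instead)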
# build the backend services (x2), using the standard clusterip type
for version in blue green; do
kubectl create service clusterip backend-${version} -n ${namespace} --tcp=9080:80 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/backend-${version}-svc.yaml
done
# build the frontend service, exposed as a loadbalancer
kubectl create service loadbalancer frontend -n ${namespace} --tcp=9080:80 -o yaml --dry-run=client | kubectl neat > ~/environment/${namespace}/frontend-svc.yaml
# deploy it, namespace first
kubectl apply -f ~/environment/${namespace}/namespace.yaml
kubectl apply -f ~/environment/${namespace}/
# ... verify ...
kubectl get all,configmap -n ${namespace}
lb_dnsname=$(kubectl get service -l app=frontend -n ${namespace} -o jsonpath='{.items[0].status.loadBalancer.ingress[0].hostname}')
while true; do curl ${lb_dnsname}:9080; sleep 0.5; done # <--- ctrl+c to quit loop
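# expected output: for now every response comes from blue, i.e. repeated lines of
#   <html><head></head><body><p>blue</p></body></html>
# (it may take a minute or two for the new load balancer DNS name to resolve)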
# ----------------------------------------------------------------------------------------
# the remainder of this gist is a work in progress exploring AWS App Mesh
# install the app mesh controller
kubectl create ns appmesh-system
eksctl utils associate-iam-oidc-provider \
  --cluster ${cluster} \
  --approve
eksctl create iamserviceaccount \
  --cluster ${cluster} \
  --namespace appmesh-system \
  --name appmesh-controller \
  --attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess,arn:aws:iam::aws:policy/AWSAppMeshFullAccess \
  --override-existing-serviceaccounts \
  --approve
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
helm repo add eks https://aws.github.io/eks-charts
helm upgrade -i appmesh-controller eks/appmesh-controller \
  --namespace appmesh-system \
  --set region=${AWS_DEFAULT_REGION} \
  --set serviceAccount.create=false \
  --set serviceAccount.name=appmesh-controller
kubectl get all -n appmesh-system # check it's ready
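# optionally wait for the controller to become available before continuing
# (assumes the chart creates a deployment named after the release, as above)
kubectl -n appmesh-system rollout status deployment appmesh-controller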
# create a service mesh
cat > ~/environment/${namespace}/mesh.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: Mesh
metadata:
  name: ${namespace}
spec:
  egressFilter:
    type: ALLOW_ALL
  namespaceSelector:
    matchLabels:
      mesh: ${namespace}
EOF
kubectl apply -f ~/environment/${namespace}/mesh.yaml
aws appmesh describe-mesh --mesh-name ${namespace} # check it produced an AWS resource
# activate our namespace for use with our new mesh (don't restart any deployments yet!)
cat << EOF >> ~/environment/${namespace}/namespace.yaml
  labels:
    mesh: ${namespace}
    appmesh.k8s.aws/sidecarInjectorWebhook: enabled
EOF
kubectl apply -f ~/environment/${namespace}/namespace.yaml
kubectl describe namespace ${namespace} # check the labels have been applied
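# a quicker check of just the labels (optional)
kubectl get namespace ${namespace} --show-labels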
# build the backend virtualnode manifests (x2) - each one wraps its corresponding k8s backend service
for version in blue green; do
cat > ~/environment/${namespace}/backend-${version}-virtualnode.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: backend-${version}
  namespace: ${namespace}
spec:
  podSelector:
    matchLabels:
      app: backend-${version}
  listeners:
    - portMapping:
        port: 9080
        protocol: http
  serviceDiscovery:
    dns:
      hostname: backend-${version}.${namespace}.svc.cluster.local
EOF
kubectl apply -f ~/environment/${namespace}/backend-${version}-virtualnode.yaml
done
# ensure that ARNs exist for all virtualnodes, then check in AWS
kubectl get virtualnodes -n ${namespace}
aws appmesh list-virtual-nodes --mesh-name ${namespace}
# confirm that pods have one container each (for now)
kubectl get pods -n ${namespace}
# restart the backend pods, which now have matching virtualnodes
for version in blue green; do
kubectl rollout restart deployment -n ${namespace} backend-${version}
done
# confirm that the backend pods now have two containers (the additional one is the sidecar container, envoy)
kubectl get pods -n ${namespace}
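# to see the injected envoy sidecar by name, list the containers per pod
# (jsonpath sketch, relies on the app=backend-blue label that kubectl create deployment sets)
kubectl get pods -n ${namespace} -l app=backend-blue -o jsonpath='{.items[*].spec.containers[*].name}{"\n"}'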
# create a single virtualrouter which sends 100% of traffic to blue (for now)
cat > ~/environment/${namespace}/backend-virtualrouter.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualRouter
metadata:
  name: backend
  namespace: ${namespace}
spec:
  listeners:
    - portMapping:
        port: 9080
        protocol: http
  routes:
    - name: backend-blue
      httpRoute:
        match:
          prefix: /
        action:
          weightedTargets:
            - virtualNodeRef:
                name: backend-blue
              weight: 100
EOF
kubectl apply -f ~/environment/${namespace}/backend-virtualrouter.yaml
# ensure that ARNs exist for the virtualrouter, then check in AWS
kubectl get virtualrouter -n ${namespace}
aws appmesh list-virtual-routers --mesh-name ${namespace}
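# when it's time to shift traffic to green, the same route can be re-applied with split weights,
# e.g. (sketch only, not applied here):
#           weightedTargets:
#             - virtualNodeRef:
#                 name: backend-blue
#               weight: 50
#             - virtualNodeRef:
#                 name: backend-green
#               weight: 50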
# create a single virtualservice that forwards all traffic to the virtualrouter
cat > ~/environment/${namespace}/backend-virtualservice.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualService
metadata:
  name: backend
  namespace: ${namespace}
spec:
  awsName: backend.${namespace}.svc.cluster.local
  provider:
    virtualRouter:
      virtualRouterRef:
        name: backend
EOF
kubectl apply -f ~/environment/${namespace}/backend-virtualservice.yaml
# ensure that ARNs exist for the virtualservice, then check in AWS
kubectl get virtualservice -n ${namespace}
aws appmesh list-virtual-services --mesh-name ${namespace}
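# build the frontend virtualnode manifest, declaring the backend virtualservice as a backend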
cat > ~/environment/${namespace}/frontend-virtualnode.yaml << EOF
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: frontend
  namespace: ${namespace}
spec:
  podSelector:
    matchLabels:
      app: frontend
  listeners:
    - portMapping:
        port: 9080
        protocol: http
  serviceDiscovery:
    dns:
      hostname: frontend
  backends:
    - virtualService:
        virtualServiceRef:
          name: backend
EOF
kubectl apply -f ~/environment/${namespace}/frontend-virtualnode.yaml
# ensure that ARNs exist for all our virtualnodes, then check in AWS
kubectl get virtualnodes -n ${namespace}
aws appmesh list-virtual-nodes --mesh-name ${namespace}
# restart the frontend pod, which now has a matching virtualnode
kubectl rollout restart deployment -n ${namespace} frontend
# confirm that the frontend pod now has two containers (the additional one is the sidecar container, envoy)
kubectl get pods -n ${namespace}
# ... tear it down
kubectl delete -f ~/environment/${namespace}/
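# optionally remove the App Mesh controller as well (assumes the helm release and namespace created above)
helm uninstall appmesh-controller --namespace appmesh-system
kubectl delete namespace appmesh-system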