Skip to content

Instantly share code, notes, and snippets.

@mikesparr
Last active April 17, 2021 23:31
Show Gist options
Save mikesparr/4acfe7a432417b89eec8181dbddf4233 to your computer and use it in GitHub Desktop.
Example HTTPS load balancer in front of GKE cluster using NEGs
#!/usr/bin/env bash
# Set up an external HTTPS load balancer in front of a GKE cluster using
# standalone NEGs (network endpoint groups).
export PROJECT_ID=$(gcloud config get-value project)
export PROJECT_USER=$(gcloud config get-value core/account) # set current user
export PROJECT_NUMBER=$(gcloud projects describe "$PROJECT_ID" --format="value(projectNumber)")
export IDNS="${PROJECT_ID}.svc.id.goog" # workload identity domain
export GCP_REGION="us-west1"
export GKE_CLUSTER_NAME="west-v116"
export GKE_CLUSTER_VERSION="1.16.15-gke.12500"
export GKE_CLUSTER_CHANNEL="None"
export TEST_NAMESPACE="test1"
export TEST_NS_2="test2"
export NETWORK_NAME="default"
export DOMAIN="msparr.com"
# enable required APIs
gcloud services enable compute.googleapis.com \
  container.googleapis.com
# create regional GKE cluster; --enable-ip-alias makes it VPC-native,
# which is required for standalone NEGs
gcloud container --project "$PROJECT_ID" clusters create "$GKE_CLUSTER_NAME" \
  --region "$GCP_REGION" \
  --num-nodes 1 \
  --enable-ip-alias \
  --cluster-version "$GKE_CLUSTER_VERSION" \
  --release-channel "$GKE_CLUSTER_CHANNEL"
# create namespace
kubectl create ns "$TEST_NAMESPACE"
# create echo deployment
kubectl create deployment echo --image=k8s.gcr.io/echoserver:1.4 -n "$TEST_NAMESPACE"
# scale to 3 replicas
kubectl scale deployment echo --replicas 3 -n "$TEST_NAMESPACE"
# expose deployment with ClusterIP; the cloud.google.com/neg annotation tells
# GKE to provision a standalone NEG for port 80 so the external HTTPS LB
# can route straight to pod endpoints
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: ilb-service
  namespace: $TEST_NAMESPACE
  annotations:
    cloud.google.com/neg: '{"exposed_ports": {"80":{}}}'
    #networking.gke.io/load-balancer-type: "Internal"
  labels:
    app: echo
spec:
  type: ClusterIP
  selector:
    app: echo
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
EOF
# create HTTP health check; --use-serving-port probes each endpoint on the
# port the NEG reports for it
gcloud compute health-checks create http "health-check-${TEST_NAMESPACE}" \
  --use-serving-port \
  --request-path="/healthz"
# create backend services: a catch-all default plus one (with the health
# check) that will receive the NEG backends
gcloud compute backend-services create backend-service-default \
  --global
gcloud compute backend-services create "backend-service-${TEST_NAMESPACE}" \
  --global \
  --health-checks "health-check-${TEST_NAMESPACE}"
# create URL map routing everything to the default backend for now
gcloud compute url-maps create "${TEST_NAMESPACE}-url-map" \
  --global \
  --default-service backend-service-default
# crude wait for the URL map to finish creating before mutating it
# NOTE(review): a fixed sleep is racy; polling `gcloud compute url-maps
# describe` would be more robust
sleep 10
# add path rules: /echo/* goes to the NEG-backed service, everything else
# falls through to the default backend
gcloud compute url-maps add-path-matcher "${TEST_NAMESPACE}-url-map" \
  --global \
  --path-matcher-name="${TEST_NAMESPACE}-matcher" \
  --default-service=backend-service-default \
  --backend-service-path-rules="/echo/*=backend-service-${TEST_NAMESPACE}"
# reserve global static IP for the HTTPS forwarding rule
gcloud compute addresses create "${TEST_NAMESPACE}-static" \
  --ip-version=IPV4 \
  --global
# look up the reserved static IP so the operator can point DNS at it
export STATIC_IP=$(gcloud compute addresses describe "${TEST_NAMESPACE}-static" --global --format="value(address)")
# block until the operator confirms the DNS record exists (managed certs
# cannot provision until the domain resolves to the LB IP)
while true; do
  read -p "Did you create DNS record for ${DOMAIN} with ${STATIC_IP}? " -n 1 -r yn
  echo
  case "$yn" in
    [Yy]* ) break;;
    [Nn]* ) exit;;
    * ) echo "Please answer yes or no.";;
  esac
done
# create Google-managed SSL cert for the test subdomain
gcloud beta compute ssl-certificates create "${TEST_NAMESPACE}-cert" \
  --domains "${TEST_NAMESPACE}.${DOMAIN}"
# create target HTTPS proxy that terminates TLS and routes via the URL map
gcloud compute target-https-proxies create "${TEST_NAMESPACE}-https-proxy" \
  --ssl-certificates="${TEST_NAMESPACE}-cert" \
  --url-map="${TEST_NAMESPACE}-url-map"
# create global forwarding rule binding the reserved IP to the proxy on 443
gcloud compute forwarding-rules create "${TEST_NAMESPACE}-fw-rule" \
  --target-https-proxy="${TEST_NAMESPACE}-https-proxy" \
  --global \
  --ports=443 \
  --address="${TEST_NAMESPACE}-static"
# check cert status (managed cert provisioning may take 10-20 minutes)
gcloud beta compute ssl-certificates describe "${TEST_NAMESPACE}-cert"
# read the NEG name from the neg-status annotation GKE wrote on the Service
# (jq -r emits the bare string; the original piped `jq .name` without -r,
# which leaked JSON quotes into the command line)
export NEG_NAME=$(kubectl get svc ilb-service -n "$TEST_NAMESPACE" \
  -o jsonpath="{.metadata.annotations.cloud\.google\.com/neg-status}" \
  | jq -r '.network_endpoint_groups["80"]')
# add the NEG to the backend service in every zone it was provisioned in
kubectl get svc ilb-service -n "$TEST_NAMESPACE" \
  -o jsonpath="{.metadata.annotations.cloud\.google\.com/neg-status}" \
  | jq -r '.zones[]' \
  | xargs -I {} gcloud compute backend-services add-backend "backend-service-${TEST_NAMESPACE}" \
      --global \
      --network-endpoint-group "$NEG_NAME" \
      --network-endpoint-group-zone={} \
      --balancing-mode=RATE \
      --max-rate-per-endpoint=100
# allow Google health-check source ranges to reach the pods
gcloud compute firewall-rules create fw-allow-health-checks \
  --network="$NETWORK_NAME" \
  --action=ALLOW \
  --direction=INGRESS \
  --source-ranges=35.191.0.0/16,130.211.0.0/22 \
  --rules=tcp
# verify backend endpoints report healthy
gcloud compute backend-services get-health \
  --global "backend-service-${TEST_NAMESPACE}"
# ----------- NOW TEST IF ILB CAN WORK IN CLUSTER ---------------
# create second namespace
kubectl create ns "$TEST_NS_2"
# create echo deployment
kubectl create deployment echo --image=k8s.gcr.io/echoserver:1.4 -n "$TEST_NS_2"
# scale to 3 replicas
kubectl scale deployment echo --replicas 3 -n "$TEST_NS_2"
# expose deployment with an internal LoadBalancer (no NEG annotation here;
# the networking.gke.io annotation requests a regional internal TCP/UDP LB)
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: ilb-service
  namespace: $TEST_NS_2
  annotations:
    #cloud.google.com/neg: '{"exposed_ports": {"80":{}}}'
    networking.gke.io/load-balancer-type: "Internal"
  labels:
    app: echo
spec:
  type: LoadBalancer
  selector:
    app: echo
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
EOF
#!/usr/bin/env bash
# Teardown script: deletes the load balancer resources and the GKE cluster
# created by the setup script. Values must match the setup script.
export PROJECT_ID=$(gcloud config get-value project)
export PROJECT_USER=$(gcloud config get-value core/account) # set current user
export PROJECT_NUMBER=$(gcloud projects describe "$PROJECT_ID" --format="value(projectNumber)")
export IDNS="${PROJECT_ID}.svc.id.goog" # workload identity domain
export GCP_REGION="us-west1"
export GKE_CLUSTER_NAME="west-v116"
export GKE_CLUSTER_VERSION="1.16.15-gke.12500"
export GKE_CLUSTER_CHANNEL="None"
export TEST_NAMESPACE="test1"
export TEST_NS_2="test2"
export NETWORK_NAME="default"
export DOMAIN="msparr.com"
# confirm the operator is deleting from the right project before proceeding
while true; do
  read -p "Do you wish to delete LB and Cluster from ${PROJECT_ID}? " -n 1 -r yn
  echo
  case "$yn" in
    [Yy]* ) break;;
    [Nn]* ) exit;;
    * ) echo "Please answer yes or no.";;
  esac
done
# delete resources in reverse dependency order: forwarding rule -> proxy ->
# cert -> address -> URL map -> backend services -> health check
gcloud --quiet compute firewall-rules delete fw-allow-health-checks
# delete forwarding rule
gcloud --quiet compute forwarding-rules delete "${TEST_NAMESPACE}-fw-rule" \
  --global
# delete target HTTPS proxy
gcloud --quiet compute target-https-proxies delete "${TEST_NAMESPACE}-https-proxy"
# delete managed SSL cert
gcloud --quiet beta compute ssl-certificates delete "${TEST_NAMESPACE}-cert"
# delete static IP
gcloud --quiet compute addresses delete "${TEST_NAMESPACE}-static" \
  --global
# delete URL map
gcloud --quiet compute url-maps delete "${TEST_NAMESPACE}-url-map" \
  --global
# delete backend services
gcloud --quiet compute backend-services delete backend-service-default \
  --global
gcloud --quiet compute backend-services delete "backend-service-${TEST_NAMESPACE}" \
  --global
# delete health check
gcloud --quiet compute health-checks delete "health-check-${TEST_NAMESPACE}"
# delete namespaces
kubectl delete ns "$TEST_NS_2"
kubectl delete ns "$TEST_NAMESPACE"
# delete cluster
# BUG FIX: the original had a trailing backslash after --region, which made
# `echo "Done"` part of the gcloud argument list (gcloud errored on the
# unrecognized arguments and "Done" was never printed)
gcloud --quiet container --project "$PROJECT_ID" clusters delete "$GKE_CLUSTER_NAME" \
  --region "$GCP_REGION"
echo "Done"
@mikesparr
Copy link
Author

HTTPS load balancer works exposing 1 ClusterIP service using NEG as backend

Screen Shot 2021-03-31 at 9 25 18 AM

Internal Load Balancer works for non-exposed services

Screen Shot 2021-03-31 at 9 55 43 AM

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment