GCP with Docker, K8s & Jenkins
## This script covers
# * Running Docker containers on a host.
# * Storing Docker images in the Google Container Registry (GCR).
# * Deploying GCR images on Kubernetes.
# * Pushing updates onto Kubernetes.
# * Automating deployments to Kubernetes using Jenkins.
GCP_PROJECT_ID="qwiklabs-gcp-04-17810074a304"
# Copy lab files from a Cloud Storage bucket into the Google Cloud Shell
gsutil cp -r gs://spls/gsp021/* .
# Clone a Cloud Source Repository
gcloud source repos clone valkyrie-app --project=$GCP_PROJECT_ID
# Requires SSH access setup
git clone ssh://[email protected]@source.developers.google.com:2022/p/qwiklabs-gcp-04-17810074a304/r/valkyrie-app
## Docker commands
# Build image from Dockerfile
# Note: the "." means current directory, so you need to run this command from within the directory that has the Dockerfile
# --file lets you specify a Dockerfile with a name other than the default "Dockerfile"
# -t or --tag lets us tag this docker image with a name and version
docker build --tag "app:latest" --file "app.dockerfile" .
docker build -t node-app:0.1 .
docker build -t valkyrie-app:v0.0.1 .
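# A minimal sketch of what a Dockerfile for a Node.js app like node-app above
# might look like (assumed layout: package.json and an app.js listening on port 80):
cat << EOF > Dockerfile
FROM node:lts
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 80
CMD ["node", "app.js"]
EOF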
## Run a container
# The --name flag allows you to name the container if you like
# The -p flag maps ports: it instructs Docker to map the host's port 4000 to the container's port 80, so the server is reachable at http://localhost:4000
# Without port mapping, you would not be able to reach the container at localhost
docker run -i -t --ipc=host --shm-size="1g" "app:latest"
docker run -p 4000:80 --name my-app node-app:0.1
docker run -p 8080:80 --name my-app-2 -d node-app:0.2
docker run -p 8080:8080 valkyrie-app:v0.0.1 & # Run in the background
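# Quick smoke test of a port mapping (assumes the container serves HTTP on the mapped host port, e.g. 4000 for my-app above)
curl http://localhost:4000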
# See all containers
docker ps -a
# Stop all containers
docker stop $(docker ps -q)
# Remove all containers
docker rm $(docker ps -aq)
# Remove all images
docker rmi $(docker images -aq)
# See logs of a container
docker logs -f <container id>
# Inspect metadata of a container, e.g. extract its IP address
docker inspect --format='{{.NetworkSettings.IPAddress}}' <container id>
# Enter into bash of a container
docker exec -it <container id> bash
## Tag and push a docker image for publishing to e.g. Google Container Registry (gcr):
# [hostname]= gcr.io
# [project-id]= your project's ID
# [image]= your image name
# [tag]= any string tag of your choice. If unspecified, it defaults to "latest"
# e.g. gcr.io/$(gcloud config list project)/node-app:0.2
docker tag node-app:0.2 "gcr.io/$GCP_PROJECT_ID/node-app:0.2"
docker push "gcr.io/$GCP_PROJECT_ID/node-app:0.2"
docker tag valkyrie-app:v0.0.1 "gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.1"
docker push "gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.1"
docker tag valkyrie-app:v0.0.2 "gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.2"
docker push "gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.2"
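# If pushes to gcr.io fail with an authentication error, register gcloud as a Docker credential helper first (one-time setup)
gcloud auth configure-docker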
## Pull and run a remote docker image e.g. from Google Container Registry (gcr)
docker pull "gcr.io/$GCP_PROJECT_ID/node-app:0.2"
docker run -p 4000:80 -d "gcr.io/$GCP_PROJECT_ID/node-app:0.2"
## General commands
# Get active account name
gcloud auth list
# List the project ID
gcloud config list project
# Find the default region and zone for a project
gcloud compute project-info describe --project <GCP Project ID>
# Set default region
gcloud config set compute/region us-east1
# Set default zone
gcloud config set compute/zone us-east1-b
# Create server / compute instance with a specific machine type
gcloud compute instances create jooli-webserver --machine-type n1-standard-1
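# Optionally SSH into the new instance to verify it is up (opens an interactive shell)
gcloud compute ssh jooli-webserver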
## Create a Kubernetes service cluster
# Create a Kubernetes cluster with a specific machine-type (if --num-nodes is omitted, it defaults to 3)
# An alternative set of scopes, e.g. to enable Jenkins to access Cloud Source Repositories and Google Container Registry, is https://www.googleapis.com/auth/source.read_write,cloud-platform
gcloud container clusters create jooli-cluster --machine-type n1-standard-1 --num-nodes 2 --scopes "https://www.googleapis.com/auth/projecthosting,storage-rw"
gcloud container clusters create valkyrie-dev --machine-type n1-standard-1 --scopes "https://www.googleapis.com/auth/source.read_write,cloud-platform"
# Get info on the cluster: control plane IP, KubeDNS, metrics etc.
kubectl cluster-info
# Authenticate kubectl against the Kubernetes cluster
gcloud container clusters get-credentials jooli-cluster
gcloud container clusters get-credentials valkyrie-dev
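# Sanity check: confirm kubectl is now pointed at the cluster and the nodes are Ready
kubectl get nodes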
# Deploy an application to the cluster from a remote Docker image
kubectl create deployment jooli-server --image gcr.io/google-samples/hello-app:2.0
# Create a deployment or service from a YAML config file
# To use a GCR image in a deployment, reference the full GCR path and version of the image, e.g. gcr.io/qwiklabs-gcp-02-0bfdce3256e7/valkyrie-app:v0.0.1
kubectl create -f k8s/deployment.yaml
kubectl create -f k8s/service.yaml
kubectl replace --force -f k8s/deployment.yaml # Delete and re-create the pods
kubectl describe pod valkyrie-dev-58dd8cdb67-p8rgj # Drill down into why a pod failed
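# A minimal sketch of what k8s/deployment.yaml might look like (names, replica count and ports are illustrative, not the lab's actual manifest):
cat << EOF > k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: valkyrie-dev
spec:
  replicas: 2
  selector:
    matchLabels:
      app: valkyrie-dev
  template:
    metadata:
      labels:
        app: valkyrie-dev
    spec:
      containers:
      - name: valkyrie-app
        image: gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.1
        ports:
        - containerPort: 8080
EOF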
# Create TLS certs and configmaps for e.g. nginx
kubectl create secret generic tls-certs --from-file tls/
kubectl create configmap nginx-frontend-conf --from-file=nginx/frontend.conf
# Set up Helm for using e.g. Jenkins charts
helm repo add jenkins https://charts.jenkins.io
# Ensure the helm repo is up to date
helm repo update
# Use the Helm CLI to deploy the chart with your config settings:
helm install cd jenkins/jenkins -f jenkins/values.yaml --version 1.2.2 --wait
# Configure a Jenkins service account to be able to deploy to the cluster
kubectl create clusterrolebinding jenkins-deploy --clusterrole=cluster-admin --serviceaccount=default:cd-jenkins
# Set up port forwarding to the Jenkins UI from the Cloud Shell
export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/component=jenkins-master" -l "app.kubernetes.io/instance=cd" -o jsonpath="{.items[0].metadata.name}")
kubectl port-forward $POD_NAME 8080:8080 >> /dev/null &
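# Verify the tunnel: the Jenkins login page should now respond on localhost
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8080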
# The Kubernetes Plugin (https://plugins.jenkins.io/kubernetes/) is used so that builder nodes are automatically launched as necessary when the Jenkins master requests them.
# Upon completion of their work, they are automatically torn down and their resources added back to the cluster's resource pool.
# Notice that this service exposes ports 8080 and 50000 for any pods that match the selector.
# This exposes the Jenkins web UI and builder/agent registration ports within the Kubernetes cluster.
# Additionally, the jenkins-ui service is exposed using a ClusterIP so that it is not accessible from outside the cluster.
# Retrieve the Jenkins admin password
printf $(kubectl get secret cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
# Create a repository and push it to the Cloud Source Repositories service
gcloud source repos create default
git init
git config credential.helper gcloud.sh
git remote add origin https://source.developers.google.com/p/$DEVSHELL_PROJECT_ID/r/default
git config --global user.email "[EMAIL_ADDRESS]"
git config --global user.name "[USERNAME]"
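# A typical first commit and push to the new repo (the default branch may be master or main depending on your git version)
git add .
git commit -m "Initial commit"
git push origin master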
# Expose the Kubernetes Service to the outside world
kubectl expose deployment jooli-server --type LoadBalancer --port 8080
# Get pods, services, replicaSets and deployments running
kubectl get pods
kubectl get services
kubectl get replicasets
kubectl get deployments
# Increase the number of replica pods of a deployment
kubectl scale deployment <deployment> --replicas=3
# Trigger, pause, resume, undo and view status of a rolling update on a deployment
kubectl edit deployment <deployment>
kubectl rollout pause deployment/<deployment>
kubectl rollout resume deployment/<deployment>
kubectl rollout undo deployment/<deployment>
kubectl rollout status deployment/<deployment>
# View the rollout history
kubectl rollout history deployment/<deployment>
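# A common way to trigger a rolling update without editing the manifest is to point the deployment at a new image tag (deployment and container names here are illustrative)
kubectl set image deployment/valkyrie-dev valkyrie-app=gcr.io/$GCP_PROJECT_ID/valkyrie-app:v0.0.2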
# See which version of a deployment is in use
curl -ks https://`kubectl get svc <frontend> -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`/version
# See which version of a container image is deployed to a pod
kubectl get pods -o jsonpath --template='{range .items[*]}{.metadata.name}{"\t"}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
# Create an interactive shell inside a pod
kubectl exec --stdin --tty <pod> -c <container> -- /bin/sh
# Check that the service has been exposed
kubectl get service
kubectl get # list resources
kubectl describe # show detailed information about a resource
kubectl logs # print the logs from a container in a pod
kubectl exec # execute a command on a container in a pod
# Get list of compute images
gcloud compute images list
## Set up an HTTP load balancer
# Create a startup script
cat << EOF > startup.sh
#! /bin/bash
apt-get update
apt-get install -y nginx
service nginx start
sed -i -- 's/nginx/Google Cloud Platform - '"\$HOSTNAME"'/' /var/www/html/index.nginx-debian.html
EOF
# Create an instance template
gcloud compute instance-templates create lb-backend-template \
    --region us-east1 \
    --network default \
    --machine-type n1-standard-1 \
    --subnet default \
    --tags allow-health-check \
    --image-family debian-9 \
    --image-project debian-cloud \
    --metadata-from-file startup-script=startup.sh
# Create a target pool
gcloud compute target-pools create lb-pool
# Create a managed instance group with a target pool,
# which should receive incoming traffic from forwarding rules
gcloud compute instance-groups managed create lb-backend-group \
    --template lb-backend-template \
    --size 2 \
    --target-pool lb-pool
# Check compute instances
gcloud compute instances list
# Create a simple firewall rule
gcloud compute firewall-rules create fw-allow-health-check --allow tcp:80
# Or a more explicit firewall rule
gcloud compute firewall-rules create fw-allow-health-check \
    --network default \
    --action allow \
    --direction ingress \
    --source-ranges 130.211.0.0/22,35.191.0.0/16 \
    --target-tags allow-health-check \
    --rules tcp:80
# Create a forwarding rule from the outside world to the target pool
gcloud compute forwarding-rules create nginx-lb \
    --region us-east1 \
    --ports 80 \
    --target-pool lb-pool
# Create a reserved IPv4 address (optional)
gcloud compute addresses create lb-ipv4-1 \
    --ip-version IPV4 \
    --global
# Get the IPv4 address (optional)
gcloud compute addresses describe lb-ipv4-1 \
    --format "get(address)" \
    --global
# Create an HTTP health check
gcloud compute http-health-checks create http-basic-check --port 80
# Ensure the health check service can reach the instance-group on http port 80
# See gcloud compute instance-groups set-named-ports --help for more information
gcloud compute instance-groups managed set-named-ports lb-backend-group \
    --named-ports http:80
# Create a backend service...
gcloud compute backend-services create web-backend-service \
    --protocol HTTP \
    --http-health-checks http-basic-check \
    --global
# ...and attach the managed instance group
gcloud compute backend-services add-backend web-backend-service \
    --instance-group lb-backend-group \
    --instance-group-zone us-east1-b \
    --global
# Create a URL map
gcloud compute url-maps create web-map-http --default-service web-backend-service
# Target the HTTP proxy to route requests to your URL map
gcloud compute target-http-proxies create http-lb-proxy --url-map web-map-http
# Create a global forwarding rule from the outside world to the lb-proxy
gcloud compute forwarding-rules create http-content-rule \
    --global \
    --target-http-proxy http-lb-proxy \
    --ports 80
# --address lb-ipv4-1
# Check the forwarding rule is active
gcloud compute forwarding-rules list
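# Smoke test: fetch the load balancer's IP and curl it (it can take a few minutes after creation before the backends pass health checks and start serving)
LB_IP=$(gcloud compute forwarding-rules describe http-content-rule --global --format="get(IPAddress)")
curl http://$LB_IP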
# Undo/Delete all of the resources created above
gcloud compute forwarding-rules delete http-content-rule --global && gcloud compute target-http-proxies delete http-lb-proxy && gcloud compute url-maps delete web-map-http && gcloud compute backend-services delete web-backend-service --global
gcloud compute http-health-checks delete http-basic-check
gcloud compute firewall-rules delete fw-allow-health-check && gcloud compute instance-groups managed delete lb-backend-group && gcloud compute instance-templates delete lb-backend-template