k8s configuration
# label workers
kubectl label node vps-nl-ams-kube-2 node-role.kubernetes.io/worker=worker
kubectl label node vps-us-atl-kube-3 node-role.kubernetes.io/worker=worker
kubectl label node esxi-ger-bs-kube-4 node-role.kubernetes.io/worker=worker
kubectl label node esxi-ger-bs-kube-5 node-role.kubernetes.io/worker=worker
kubectl label node esxi-ger-bs-kube-6 node-role.kubernetes.io/worker=worker
# label ESXI workers
kubectl label node esxi-ger-bs-kube-4 location=lan nodetype=esxi
kubectl label node esxi-ger-bs-kube-5 location=lan nodetype=esxi
kubectl label node esxi-ger-bs-kube-6 location=lan nodetype=esxi
# label VPS workers
kubectl label node vps-ger-nue-kube-1 location=wan nodetype=vps
kubectl label node vps-nl-ams-kube-2 location=wan nodetype=vps
kubectl label node vps-us-atl-kube-3 location=wan nodetype=vps
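# quick check that the labels landed (example selectors using the labels above):
kubectl get nodes -l nodetype=esxi
kubectl get nodes -l location=wan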
#Allow scheduling of pods on the master
kubectl taint nodes --all node-role.kubernetes.io/master-
#Verify the master isn't tainted
kubectl describe node | egrep -i taint
# expected output: Taints: <none>
# LB
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/metallb.yaml
# On first install only
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
kubectl apply -f configmap.yaml #from metallb folder in k8s on dropbox
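# configmap.yaml isn't in this gist; a minimal Layer 2 config for MetalLB v0.9 looks
# roughly like this (the address range is a placeholder, adjust to the local network):
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250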
# dashboard
# start with "kubectl proxy"
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml
# comment out the "kind: Secret" section (kubernetes-dashboard-certs) in recommended.yaml, then apply
kubectl apply -f recommended.yaml
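# the section to comment out in recommended.yaml is roughly this one, so the custom
# cert secret created below is used instead of an auto-generated one:
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque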
# generate certificate
mkdir $HOME/certs
cd $HOME/certs
openssl genrsa -out dashboard.key 2048
openssl rsa -in dashboard.key -out dashboard.key
openssl req -sha256 -new -key dashboard.key -out dashboard.csr -subj '/CN=localhost'
openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
kubectl apply -f https://raw.githubusercontent.com/marvin-marvin/k8s-cfg/master/dashboard/create-admin.yaml
kubectl apply -f https://raw.githubusercontent.com/marvin-marvin/k8s-cfg/master/dashboard/create-admin-binding.yaml
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
kubectl apply -f service.yaml
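# service.yaml isn't included here; if it exposes the dashboard via MetalLB, a sketch
# could look like this (the name "kubernetes-dashboard-lb" is made up for the example):
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard-lb
  namespace: kubernetes-dashboard
spec:
  type: LoadBalancer
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443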
# Client: kubectl proxy
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
# Log in on the dashboard page with the token from the previous command
# commands
kubeadm token create --print-join-command
kubectl get pods --all-namespaces
kubectl get all --all-namespaces
kubectl cluster-info
kubectl describe nodes
kubectl get pods --show-labels -o wide
kubectl get pods -o wide
kubectl get nodes --show-labels
# troubleshooting
kubectl get pods -n=kube-system
kubectl describe pods weave-net-km9pm -n=kube-system
kubectl --v=8 logs weave-net-km9pm -n=kube-system -c weave
# notes
# pull the image on every pod start
imagePullPolicy: Always
# pin a pod to the LAN (ESXi) nodes via the labels set above
nodeSelector:
  location: lan
kubectl config set-context kubernetes --namespace=lan
kubectl config use-context kubernetes
kubectl create secret docker-registry regcred --docker-server=https://index.docker.io/v1/ --docker-username=<USERNAME> --docker-password=<PASSWORD> --docker-email=<EMAIL> -n <NAMESPACE>
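# putting the notes above together: a minimal Deployment sketch that pins pods to the
# LAN nodes, always re-pulls the image and uses the regcred pull secret (the name and
# image below are made-up placeholders):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
  namespace: lan
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      nodeSelector:
        location: lan
      imagePullSecrets:
      - name: regcred
      containers:
      - name: example-app
        image: <USERNAME>/example-app:latest
        imagePullPolicy: Always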
kubeadm token create --print-join-command
kubectl get deploy homebridge -o yaml --export
# PV/PVC can't be deleted; stays in "Terminating"
k edit pvc k8s-pvc
# remove (or comment out) the finalizers entry in the PVC metadata
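# alternative one-liner: drop the finalizers with a patch instead of editing
kubectl patch pvc k8s-pvc -p '{"metadata":{"finalizers":null}}'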
# remove a namespace stuck in Terminating state
kubectl get namespace rook-ceph -o json > /tmp/rook-ceph.json
# edit /tmp/rook-ceph.json and empty the spec.finalizers list, then:
kubectl replace --raw "/api/v1/namespaces/rook-ceph/finalize" -f /tmp/rook-ceph.json
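# same thing as a one-liner, assuming jq is available:
kubectl get namespace rook-ceph -o json | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/rook-ceph/finalize" -f -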
# metrics
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# in components.yaml, change the metrics-server container spec so it allows insecure kubelet TLS:
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.3
        imagePullPolicy: Always
        args: [ "--kubelet-insecure-tls" ]
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
k apply -f components.yaml
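# once the metrics-server pod is Running, usage data should appear after a minute or so:
kubectl top nodes
kubectl top pods --all-namespaces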
# Rook
git clone --single-branch --branch v1.5.7 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
nano operator.yaml
# set ROOK_ENABLE_DISCOVERY_DAEMON and ROOK_ENABLE_FLEX_DRIVER to "true"
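# in Rook v1.5 these are env vars on the rook-ceph-operator Deployment; after the edit
# they should read roughly like this (sketch, verify against your operator.yaml):
        - name: ROOK_ENABLE_DISCOVERY_DAEMON
          value: "true"
        - name: ROOK_ENABLE_FLEX_DRIVER
          value: "true"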
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
# change mon: count: 3 to 6 (6 nodes) in cluster.yaml
kubectl create -f cluster.yaml
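# the edited mon section of cluster.yaml ends up roughly like this (sketch; note that
# Ceph monitors normally run with an odd count such as 3 or 5):
  mon:
    count: 6
    allowMultiplePerNode: false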
nano csi/rbd/storageclass.yaml
# change replica to 6
kubectl apply -f ./csi/rbd/storageclass.yaml
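# after the edit, the CephBlockPool section of storageclass.yaml should contain (sketch):
spec:
  failureDomain: host
  replicated:
    size: 6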
nano pvc-rook-ceph-block.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ics-dhcp-pvc
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
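# apply the claim and check that it binds against the rook-ceph-block storage class:
kubectl apply -f pvc-rook-ceph-block.yaml
kubectl get pvc ics-dhcp-pvc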
kubectl apply -f toolbox.yaml
# get status
kubectl -n rook-ceph exec -it rook-ceph-tools-84c5795776-x7qpj -- ceph status
# show volumes
kubectl -n rook-ceph exec -it rook-ceph-tools-84c5795776-x7qpj -- rbd list replicapool
# show volume information
kubectl -n rook-ceph exec -it rook-ceph-tools-84c5795776-x7qpj -- rbd info replicapool/<volume>