# Inspect the existing deny-metadata NetworkPolicy and the egress schema.
kubectl get networkpolicy deny-metadata -o yaml
kubectl explain networkpolicy.spec.egress
kubectl explain networkpolicy.spec.egress.to
kubectl explain networkpolicy.spec.egress.to.ipBlock
# Start a throwaway shell pod in the default namespace.
# NOTE: the command must be separated from the kubectl flags with `--`,
# and --restart=Never keeps kubectl run from creating a Deployment.
kubectl run busybox --image=busybox --rm -it --restart=Never -- /bin/sh
# Inside the pod: general egress works in the default namespace.
wget https://www.google.co.in
kubectl create namespace test
# Same throwaway pod, but in the test namespace (no deny-metadata policy there).
kubectl run busybox --image=busybox --rm -it -n test --restart=Never -- /bin/sh
# Inside the pod: the cloud metadata endpoint is reachable from the test
# namespace, so IAM credentials can be harvested -- this is exactly the
# traffic an egress NetworkPolicy should block.
wget 169.254.169.254
role=$(wget -qO- 169.254.169.254/latest/meta-data/iam/security-credentials)
wget -qO- 169.254.169.254/latest/meta-data/iam/security-credentials/$role
# NetworkPolicy: only pods labeled app-tier=cache may reach app-tier=web
# pods, and only on port 80. (Indentation restored -- the flattened YAML
# was invalid.)
cat > app-policy.yaml <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: app-tiers
  namespace: test
spec:
  podSelector:
    matchLabels:
      app-tier: web
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app-tier: cache
    ports:
    - port: 80
EOF
kubectl create -f app-policy.yaml
kubectl run web-server -n test -l app-tier=web --image=nginx:1.15.1 --port 80
# Get the web server pod's IP address
web_ip=$(kubectl get pod -n test -o jsonpath='{.items[0].status.podIP}')
# A pod labeled app-tier=cache is allowed by the policy.
# NOTE: the `--` separator before /bin/sh is required by kubectl run.
kubectl run busybox -n test -l app-tier=cache --image=busybox --env="web_ip=$web_ip" --rm -it --restart=Never -- /bin/sh
wget $web_ip
exit
# An unlabeled pod is denied by the policy: this wget should time out.
kubectl run busybox -n test --image=busybox --env="web_ip=$web_ip" --rm -it --restart=Never -- /bin/sh
wget $web_ip
Last active
November 21, 2022 02:03
-
-
Save debu999/bee47f6ba1c2a33d5af139fdbe671f98 to your computer and use it in GitHub Desktop.
# Enable kubectl bash completion for this shell session.
source <(kubectl completion bash)
# Quick cluster orientation: nodes, control plane, namespaces, system pods.
kubectl get nodes
kubectl describe nodes -l node-role.kubernetes.io/control-plane | more
kubectl get namespace
kubectl get pods --namespace=kube-system
# ConfigMap with per-role MySQL config fragments; the init container picks
# master.cnf for ordinal 0 and slave.cnf for replicas.
# (Indentation restored -- the flattened YAML was invalid.)
cat <<EOF > mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the primary.
    [mysqld]
    log-bin
  slave.cnf: |
    # Apply this config only on replicas.
    [mysqld]
    super-read-only
EOF
kubectl create -f mysql-configmap.yaml
# Two services: a headless one for stable per-pod DNS, and a normal
# ClusterIP one for load-balanced reads.
# (Indentation restored -- the flattened YAML was invalid.)
cat <<EOF > mysql-services.yaml
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the primary: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
EOF
kubectl create -f mysql-services.yaml
# StorageClass backing the StatefulSet's volumeClaimTemplates (AWS EBS gp2).
# (Indentation restored -- the flattened YAML was invalid.)
cat <<EOF > mysql-storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: general
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
EOF
kubectl create -f mysql-storageclass.yaml
# Replicated MySQL StatefulSet (primary = ordinal 0, replicas clone from the
# previous peer via xtrabackup). Indentation restored: the flattened YAML was
# invalid, and the unindented inner `EOF` (the mysql heredoc terminator) would
# have terminated this outer heredoc early. With the YAML properly indented,
# that inner EOF line carries leading spaces inside the file, so the quoted
# outer <<'EOF' delimiter is unaffected; after YAML block-scalar dedent it
# sits at column 0 of the container script, as the shell requires.
cat <<'EOF' > mysql-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7.35
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on primary (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing replica.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from primary. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 2Gi
      storageClassName: general
EOF
kubectl create -f mysql-statefulset.yaml
kubectl get pods -l app=mysql --watch
kubectl describe pv
kubectl describe pvc
kubectl get statefulset
# Write on the primary (mysql-0).
# NOTE: the original lines ended in `--\`; the backslash-newline joins the
# next line directly onto `--`, producing the invalid token `--/usr/bin/mysql`.
# A space before the continuation (`-- \`) is required.
kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never -- \
  /usr/bin/mysql -h mysql-0.mysql -e "CREATE DATABASE mydb; CREATE TABLE mydb.notes (note VARCHAR(250)); INSERT INTO mydb.notes VALUES ('k8s Cloud Academy Lab');"
# Read through the load-balanced read service.
kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never -- \
  /usr/bin/mysql -h mysql-read -e "SELECT * FROM mydb.notes"
# Loop showing reads hitting different server IDs.
kubectl run mysql-client-loop --image=mysql:5.7 -i -t --rm --restart=Never -- \
  bash -ic "while sleep 1; do /usr/bin/mysql -h mysql-read -e 'SELECT @@server_id'; done"
kubectl get pod -o wide
node=$(kubectl get pods --field-selector metadata.name=mysql-2 -o=jsonpath='{.items[0].spec.nodeName}')
# --delete-local-data is deprecated; --delete-emptydir-data is its replacement.
kubectl drain $node --force --delete-emptydir-data --ignore-daemonsets
kubectl get pod -o wide --watch
kubectl uncordon $node
kubectl delete pod mysql-2
kubectl get pod mysql-2 -o wide --watch
kubectl scale --replicas=5 statefulset mysql
kubectl get pods -l app=mysql --watch
kubectl run mysql-client-loop --image=mysql:5.7 -i -t --rm --restart=Never -- \
  bash -ic "while sleep 1; do /usr/bin/mysql -h mysql-read -e 'SELECT @@server_id'; done"
kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never -- \
  /usr/bin/mysql -h mysql-4.mysql -e "SELECT * FROM mydb.notes"
kubectl get services mysql-read
# Append `type: LoadBalancer` under spec: of the last document (mysql-read);
# two spaces of indentation places it as a child of spec.
echo "  type: LoadBalancer" >> mysql-services.yaml
kubectl apply -f mysql-services.yaml
kubectl describe services mysql-read | grep "LoadBalancer Ingress"
load_balancer=$(kubectl get services mysql-read -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}')
kubectl run mysql-client-loop --image=mysql:5.7 -i -t --rm --restart=Never -- \
  bash -ic "while sleep 1; do /usr/bin/mysql -h $load_balancer -e 'SELECT @@server_id'; done"
# ServiceAccount + ClusterRoleBinding granting the dashboard cluster-admin.
# (Indentation restored -- the flattened YAML was invalid.)
cat << EOF > dashboard-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard
EOF
kubectl delete -f dashboard-admin.yaml # delete the default role binding
kubectl create -f dashboard-admin.yaml
# Issue a login token for the dashboard and expose it locally.
kubectl -n kubernetes-dashboard create token dashboard-admin
sudo kubectl port-forward -n kubernetes-dashboard --address 0.0.0.0 service/kubernetes-dashboard 8001:443
-
helm search hub [KEYWORD]
-
helm search repo [KEYWORD]
-
helm pull [CHART]
-
helm install [NAME] [CHART]
-
helm upgrade [RELEASE] [CHART]
-
helm rollback [RELEASE] [REVISION]
-
helm uninstall [RELEASE]
-
helm repo list
-
helm repo add [NAME] [URL]
-
helm repo remove [NAME]
-
helm repo update
-
helm repo index [DIR]
-
helm status [RELEASE]
-
helm list
-
helm history [RELEASE]
-
helm get manifest [RELEASE]
-
helm create [NAME]
-
helm template [NAME] [CHART]
-
helm package [CHART]
-
helm lint [CHART]
-
Helm Commands
- Chart Management
- Repository Management
- Release Management
values.yaml supplies the default values that are substituted into the chart templates.
Running `helm upgrade demo ./abc-app --set=service.port=9808`
overrides that value for the release; the values.yaml file itself is not modified.
The templates folder holds all of the manifests to deploy; every file in the templates folder goes through the template rendering process.
kubectl explain pod.spec.securityContext | more
kubectl explain pod.spec.containers.securityContext | more
# Baseline pod with no securityContext, for comparison with the next two.
# (Indentation restored -- the flattened YAML was invalid.)
cat << EOF > pod-no-security-context.yaml
apiVersion: v1
kind: Pod
metadata:
  name: security-context-test-1
spec:
  containers:
  - image: busybox:1.30.1
    name: busybox
    args:
    - sleep
    - "3600"
EOF
kubectl create -f pod-no-security-context.yaml
kubectl exec security-context-test-1 -- ls /dev
kubectl delete -f pod-no-security-context.yaml
# Privileged pod: the container gets host-level device access.
# (Indentation restored -- the flattened YAML was invalid.)
cat > pod-privileged.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: security-context-test-2
spec:
  containers:
  - image: busybox:1.30.1
    name: busybox
    args:
    - sleep
    - "3600"
    securityContext:
      privileged: true
EOF
kubectl create -f pod-privileged.yaml
kubectl delete -f pod-privileged.yaml
# Pod-level securityContext sets defaults; the container-level runAsUser: 2000
# overrides the pod-level runAsUser: 1000.
# (Indentation restored -- the flattened YAML was invalid.)
cat << EOF > pod-runas.yaml
apiVersion: v1
kind: Pod
metadata:
  name: security-context-test-3
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
  containers:
  - image: busybox:1.30.1
    name: busybox
    args:
    - sleep
    - "3600"
    securityContext:
      runAsUser: 2000
      readOnlyRootFilesystem: true
EOF
kubectl create -f pod-runas.yaml
kubectl exec security-context-test-3 -it -- /bin/sh
# Inside the pod: expected to fail, the root filesystem is read-only.
touch /tmp/test-file
exit
kubectl delete -f pod-runas.yaml
# Create namespace
kubectl create namespace persistence
# Set namespace as the default for the current context
kubectl config set-context $(kubectl config current-context) --namespace=persistence
# PVC for the MongoDB data directory.
# (Indentation restored -- the flattened YAML was invalid.)
cat << 'EOF' > pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: db-data
spec:
  # Only one node can mount the volume in Read/Write
  # mode at a time
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
EOF
kubectl create -f pvc.yaml
kubectl get pvc
kubectl get pv
# MongoDB pod mounting the db-data PVC at the data directory, so data
# survives pod deletion.
# (Indentation restored -- the flattened YAML was invalid.)
cat << 'EOF' > db.yaml
apiVersion: v1
kind: Pod
metadata:
  name: db
spec:
  containers:
  - image: mongo:4.0.6
    name: mongodb
    # Mount as volume
    volumeMounts:
    - name: data
      mountPath: /data/db
    ports:
    - containerPort: 27017
      protocol: TCP
  volumes:
  - name: data
    # Declare the PVC to use for the volume
    persistentVolumeClaim:
      claimName: db-data
EOF
kubectl create -f db.yaml
kubectl exec db -it -- mongo testdb --quiet --eval \
  'db.messages.insert({"message": "I was here"}); db.messages.findOne().message'
* `kubectl delete -f db.yaml`
* `kubectl create -f db.yaml`
* `kubectl exec db -it -- mongo testdb --quiet --eval 'db.messages.findOne().message'`
# Create namespace
kubectl create namespace configmaps
# Set namespace as the default for the current context
kubectl config set-context $(kubectl config current-context) --namespace=configmaps
kubectl create configmap app-config --from-literal=DB_NAME=testdb \
  --from-literal=COLLECTION_NAME=messages
# Use the full command name for consistency with the rest of the lab
# (the original used the `k` alias, which may not be defined in every shell).
kubectl describe configmap app-config
kubectl get configmaps app-config -o yaml
# Pod consuming the app-config ConfigMap as a volume: each key becomes a
# file under /config.
# (Indentation restored -- the flattened YAML was invalid.)
cat << 'EOF' > pod-configmap.yaml
apiVersion: v1
kind: Pod
metadata:
  name: db
spec:
  containers:
  - image: mongo
    name: mongodb
    # Mount as volume
    volumeMounts:
    - name: config
      mountPath: /config
    ports:
    - containerPort: 27017
      protocol: TCP
  volumes:
  - name: config
    # Declare the configMap to use for the volume
    configMap:
      name: app-config
EOF
kubectl create -f pod-configmap.yaml
kubectl exec db -it -- ls /config
kubectl exec db -it -- cat /config/DB_NAME && echo
kubectl create configmap --help | more
Aliases:
configmap, cm
Examples:
# Create a new config map named my-config based on folder bar
kubectl create configmap my-config --from-file=path/to/bar
# Create a new config map named my-config with specified keys instead of file basenames on disk
kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt
# Create a new config map named my-config with key1=config1 and key2=config2
kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2
# Create a new config map named my-config from the key=value pairs in the file
kubectl create configmap my-config --from-file=path/to/bar
# Create a new config map named my-config from an env file
kubectl create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env
kubectl create secret generic app-secret --from-literal=password=123457
kubectl get secret app-secret -o yaml
# Secret data is base64-encoded at rest; decode to show the raw value.
kubectl get secret app-secret -o jsonpath="{.data.password}" \
  | base64 --decode \
  && echo
# Pod consuming the secret as an environment variable.
# (Indentation restored -- the flattened YAML was invalid.)
cat << EOF > pod-secret.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-secret
spec:
  containers:
  - image: busybox:1.30.1
    name: busybox
    args:
    - sleep
    - "3600"
    env:
    - name: PASSWORD # Name of environment variable
      valueFrom:
        secretKeyRef:
          name: app-secret # Name of secret
          key: password # Name of secret key
EOF
kubectl create -f pod-secret.yaml
kubectl exec pod-secret -- /bin/sh -c 'echo $PASSWORD'
Note: When you use kubectl create secret, the value is automatically encoded. If you use kubectl create -f, and specify a resource file, you need to encode the value yourself when setting the data: mapping. See the next instruction for how to achieve this. Alternatively, you can set a stringData: mapping instead which will perform the encoding for you. See kubectl explain secret for more details about the two options.
# Create namespace
kubectl create namespace serviceaccounts
# Set namespace as the default for the current context
kubectl config set-context $(kubectl config current-context) --namespace=serviceaccounts
kubectl get serviceaccounts
kubectl run default-pod --image=mongo
kubectl get pod default-pod -o yaml | more
kubectl create serviceaccount app-sa
# Pod using a custom ServiceAccount.
# (Indentation restored; serviceAccountName is a pod.spec-level field, and
# the legacy `serviceAccount` field name is deprecated.)
cat << 'EOF' > pod-custom-sa.yaml
apiVersion: v1
kind: Pod
metadata:
  name: custom-sa-pod
spec:
  serviceAccountName: app-sa
  containers:
  - image: mongo
    name: mongodb
EOF
kubectl create -f pod-custom-sa.yaml
kubectl get pod custom-sa-pod -o yaml | more
# Create namespace
kubectl create namespace ephemeral
# Set namespace as the default for the current context
kubectl config set-context $(kubectl config current-context) --namespace=ephemeral
# Pod logging random coin tosses into an emptyDir volume.
# NOTE: the heredoc delimiter must be quoted ('EOF'); with an unquoted EOF the
# *host* shell expands $RANDOM and $((...)) once at apply time, baking a fixed
# result into the container script instead of tossing a coin each second.
# (YAML indentation also restored; the body lines of the folded scalar are
# indented deeper than `while`/`do`/`done` so their newlines are preserved.)
cat << 'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: coin-toss
spec:
  containers:
  - name: coin-toss
    image: busybox:1.33.1
    command: ["/bin/sh", "-c"]
    args:
    - >
      while true;
      do
        # Record coin tosses
        if [[ $(($RANDOM % 2)) -eq 0 ]]; then echo Heads; else echo Tails; fi >> /var/log/tosses.txt;
        sleep 1;
      done
    # Mount the log directory /var/log using a volume
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  # Declare log directory volume an emptyDir ephemeral volume
  volumes:
  - name: varlog
    emptyDir: {}
EOF
# Locate the emptyDir's backing directory on the host node.
pod_node=$(kubectl get pod coin-toss -o jsonpath='{.status.hostIP}')
pod_id=$(kubectl get pod coin-toss -o jsonpath='{.metadata.uid}')
ssh $pod_node -oStrictHostKeyChecking=no sudo ls /var/lib/kubelet/pods/$pod_id/volumes/kubernetes.io~empty-dir/varlog
kubectl explain pod.spec.volumes.emptyDir
The medium can be configured to use tmpfs memory-based storage by setting medium: Memory. This results in higher performance than disk-backed volumes; however, the memory used does count against container memory limits. Note that when using memory (RAM), the data will not survive node restarts, in contrast to using disk.
# Count tosses, then restart the container via an image change: the emptyDir
# (and its tosses.txt) survives a container restart within the same pod.
kubectl exec coin-toss -- wc -l /var/log/tosses.txt
kubectl set image pod coin-toss coin-toss=busybox:1.34.0
kubectl get pods -w
kubectl exec coin-toss -- wc -l /var/log/tosses.txt
# Deleting the pod destroys the emptyDir and its contents.
kubectl delete pod coin-toss
# Redis pod with a tiny (1Ki) ephemeral-storage limit and emptyDir sizeLimit,
# to demonstrate eviction when the limit is exceeded.
# (Indentation restored -- the flattened YAML was invalid.)
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: cache
spec:
  containers:
  - name: cache
    image: redis:6.2.5-alpine
    resources:
      requests:
        ephemeral-storage: "1Ki"
      limits:
        ephemeral-storage: "1Ki"
    volumeMounts:
    - name: ephemeral
      mountPath: "/data"
  volumes:
  - name: ephemeral
    emptyDir:
      sizeLimit: 1Ki
EOF
kubectl describe pod cache
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment