show
kubectl create namespace mynamespace
kubectl run nginx --image=nginx --restart=Never -n mynamespace

Create the pod that was just described using YAML

show

Easily generate YAML with:

kubectl run nginx --image=nginx --restart=Never -n mynamespace --dry-run -o yaml > pod.yaml
cat pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml

Create a busybox pod (using kubectl command) that runs the command "env". Run it and see the output

show
kubectl run busybox --image=busybox --command --restart=Never -it -- env # -it will help in seeing the output
# or, just run it without -it
kubectl run busybox --image=busybox --command --restart=Never -- env
# and then, check its logs
kubectl logs busybox

Create the YAML for a new namespace called 'myns' without creating it

show
kubectl create namespace myns -o yaml --dry-run

Create the YAML for a new ResourceQuota called 'myrq' without creating it

show
kubectl create resourcequota myrq -o yaml --dry-run
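For reference, if you also pass hard limits (the flag values below are illustrative, not part of the original exercise), the generated manifest looks roughly like this:

kubectl create resourcequota myrq --hard=cpu=1,memory=1G,pods=2 -o yaml --dry-run

apiVersion: v1
kind: ResourceQuota
metadata:
  creationTimestamp: null
  name: myrq
spec:
  hard:
    cpu: "1"
    memory: 1G
    pods: "2"
status: {}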
Get pods on all namespaces

show

kubectl get po --all-namespaces

Create a pod with image nginx called nginx and allow traffic on port 80

show
kubectl run nginx --image=nginx --restart=Never --port=80

Change pod's image to nginx:1.7.1. Observe that the pod will be killed and recreated as soon as the image gets pulled
show
# kubectl set image TYPE NAME CONTAINER_NAME=IMAGE_NAME:TAG
kubectl set image pod nginx nginx=nginx:1.7.1
kubectl describe po nginx # you will see an event 'Container will be killed and recreated'
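Another quick way to confirm the new image (a jsonpath sketch, not part of the original answer):

kubectl get po nginx -o jsonpath='{.spec.containers[0].image}' # should print nginx:1.7.1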
kubectl get po nginx -w # watch it

Get nginx pod's ip created in previous step, use a temp busybox image to wget its '/'

show
kubectl get po -o wide # get the IP, will be something like '10.1.1.131'
# create a temp busybox pod
kubectl run busybox --image=busybox --rm -it --restart=Never -- sh
# run wget on specified IP:Port
wget -O- 10.1.1.131:80
exit

Get the pod's YAML

show

kubectl get po nginx -o yaml --export

Get information about the pod, including details about potential issues (e.g. pod hasn't started)

show

kubectl describe po nginx

Get the pod's logs

show

kubectl logs nginx

If the pod crashed and restarted, get logs about the previous instance

show

kubectl logs nginx -p

Execute a simple shell on the nginx pod

show

kubectl exec -it nginx -- /bin/sh

Create a busybox pod that echoes 'hello world' and then exits

show
kubectl run busybox --image=busybox -it --restart=Never -- echo 'hello world'
# or
kubectl run busybox --image=busybox -it --restart=Never -- /bin/sh -c 'echo hello world'
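For reference, anything after -- ends up in the container's args (since --command was not passed); the relevant part of the generated spec looks roughly like this:

spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - echo hello world
    image: busybox
    name: busybox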
Do the same, but have the pod deleted automatically when it's completed

show

kubectl run busybox --image=busybox -it --rm --restart=Never -- /bin/sh -c 'echo hello world'
kubectl get po # nowhere to be found :)

Create an nginx pod and set an env value as 'var1=val1'. Check the env value existence within the pod
show
kubectl run nginx --image=nginx --restart=Never --env=var1=val1
# then
kubectl exec -it nginx -- env
# or
kubectl describe po nginx | grep val1
# or
kubectl run busybox --restart=Never --image=busybox --env=var1=val1 -it --rm -- env

Create a Pod with two containers, both with image busybox and command "echo hello; sleep 3600". Connect to the second container and run 'ls'
show
Easiest way to do it is create a pod with a single container and save its definition in a YAML file:
kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run -- /bin/sh -c 'echo hello;sleep 3600' > pod.yaml
vi pod.yaml

Copy/paste the container-related values, so your final YAML should contain the following two containers (make sure those containers have a different name):
containers:
- args:
  - /bin/sh
  - -c
  - echo hello;sleep 3600
  image: busybox
  imagePullPolicy: IfNotPresent
  name: busybox
  resources: {}
- args:
  - /bin/sh
  - -c
  - echo hello;sleep 3600
  image: busybox
  name: busybox2

kubectl create -f pod.yaml
# Connect to the busybox2 container within the pod
kubectl exec -it busybox -c busybox2 -- /bin/sh
ls
exit
# you can do some cleanup
kubectl delete po busybox

Create 3 pods with names nginx1, nginx2 and nginx3. All of them should have the label app=v1

show
kubectl run nginx1 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx2 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx3 --image=nginx --restart=Never --labels=app=v1

Show all labels of the pods

show

kubectl get po --show-labels

Change the labels of pod 'nginx2' to be app=v2

show

kubectl label po nginx2 app=v2 --overwrite

Get the label 'app' for the pods

show

kubectl get po -L app

Get only the 'app=v2' pods

show
kubectl get po -l app=v2
# or
kubectl get po -l 'app in (v2)'

Remove the 'app' label from the pods we created before

show
kubectl label po nginx1 nginx2 nginx3 app-
# or
kubectl label po nginx{1..3} app-

Create a pod that will be deployed to a Node that has the label 'accelerator=nvidia-tesla-p100'

show
We can use the 'nodeSelector' property on the Pod YAML:
apiVersion: v1
kind: Pod
metadata:
  name: cuda-test
spec:
  containers:
  - name: cuda-test
    image: "k8s.gcr.io/cuda-vector-add:v0.1"
  nodeSelector: # add this
    accelerator: nvidia-tesla-p100 # the selection label

You can easily find out where in the YAML it should be placed by:

kubectl explain po.spec

Annotate pods nginx1, nginx2, nginx3 with "description='my description'" value

show
kubectl annotate po nginx1 nginx2 nginx3 description='my description'

Check the annotations for pod nginx1

show

kubectl describe po nginx1 | grep -i 'annotations'

Remove the annotations for these three pods

show

kubectl annotate po nginx{1..3} description-

Remove these pods to have a clean state in your cluster

show

kubectl delete po nginx{1..3}

Create a deployment with image nginx:1.7.8, 2 replicas, defining port 80 as the port that this container exposes (don't create a service for this deployment)
show
kubectl run nginx --image=nginx:1.7.8 --replicas=2 --port=80

View the YAML of this deployment

show

kubectl get deploy nginx --export -o yaml

View the YAML of the replica set that was created by this deployment

show
kubectl describe deploy nginx # you'll see the name of the replica set on the Events section and in the 'NewReplicaSet' property
# you could also just do kubectl get rs
kubectl get rs nginx-7bf7478b77 --export -o yaml

Get the YAML for one of the pods

show
kubectl get po # get all the pods
kubectl get po nginx-7bf7478b77-gjzp8 -o yaml --export

Check how the deployment rollout is going

show
kubectl rollout status deploy nginx

Update the nginx image to nginx:1.7.9

show
kubectl set image deploy nginx nginx=nginx:1.7.9
# alternatively...
kubectl edit deploy nginx # change the .spec.template.spec.containers[0].image

The syntax of the 'kubectl set image' command is: kubectl set image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N [options]

Check the rollout history and confirm that the replicas are OK

show
kubectl rollout history deploy nginx
kubectl get deploy nginx
kubectl get rs # check that a new replica set has been created
kubectl get po

Undo the latest rollout and verify that new pods have the old image (nginx:1.7.8)

show
kubectl rollout undo deploy nginx
# wait a bit
kubectl get po # select one 'Running' Pod
kubectl describe po nginx-5ff4457d65-nslcl | grep -i image # should be nginx:1.7.8

Do an on-purpose update of the deployment with a wrong image nginx:1.91

show
kubectl set image deploy nginx nginx=nginx:1.91
# or
kubectl edit deploy nginx
# change the image to nginx:1.91
# vim tip: type (without quotes) '/image' and Enter, to navigate quickly

Verify that something's wrong with the rollout

show
kubectl rollout status deploy nginx
# or
kubectl get po # you'll see 'ErrImagePull'

Return the deployment to the second revision (number 2) and verify the image is nginx:1.7.9

show
kubectl rollout undo deploy nginx --to-revision=2
kubectl describe deploy nginx | grep Image:
kubectl rollout status deploy nginx # Everything should be OK

Check the details of the third revision (number 3)

show
kubectl rollout history deploy nginx --revision=3 # You'll also see the wrong image displayed here

Scale the deployment to 5 replicas

show
kubectl scale deploy nginx --replicas=5
kubectl get po
kubectl describe deploy nginx

Autoscale the deployment, pods between 5 and 10, targeting CPU utilization at 80%

show
kubectl autoscale deploy nginx --min=5 --max=10 --cpu-percent=80
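For reference, the command above creates a HorizontalPodAutoscaler behind the scenes; a minimal equivalent manifest sketch (the scaleTargetRef apiVersion depends on your cluster version):

apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nginx
spec:
  maxReplicas: 10
  minReplicas: 5
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  targetCPUUtilizationPercentage: 80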
Pause the rollout of the deployment

show

kubectl rollout pause deploy nginx

Update the image to nginx:1.9.1 and check that there's nothing going on, since we paused the rollout
show
kubectl set image deploy nginx nginx=nginx:1.9.1
# or
kubectl edit deploy nginx
# change the image to nginx:1.9.1
kubectl rollout history deploy nginx # no new revision

Resume the rollout and check that the nginx:1.9.1 image has been applied

show
kubectl rollout resume deploy nginx
kubectl rollout history deploy nginx
kubectl rollout history deploy nginx --revision=6 # insert the number of your latest revision

Delete the deployment and the horizontal pod autoscaler you created

show
kubectl delete deploy nginx
kubectl delete hpa nginx

Create a job with image perl that runs default command with arguments "perl -Mbignum=bpi -wle 'print bpi(2000)'"
show
kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'
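For reference, with --restart=OnFailure that command creates a Job; the generated manifest looks roughly like this (a sketch for the kubectl version of this era):

apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: pi
  name: pi
spec:
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: pi
    spec:
      containers:
      - args:
        - perl
        - -Mbignum=bpi
        - -wle
        - print bpi(2000)
        image: perl
        name: pi
        resources: {}
      restartPolicy: OnFailure
status: {}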
Wait till it's done, get the output

show

kubectl get jobs -w # wait till 'SUCCESSFUL' is 1 (will take some time, perl image might be big)
kubectl get po # get the pod name
kubectl logs pi-**** # get the pi numbers
kubectl delete job pi

Create a job with the image busybox that executes the command 'echo hello;sleep 30;echo world'

show
kubectl run busybox --image=busybox --restart=OnFailure -- /bin/sh -c 'echo hello;sleep 30;echo world'

Follow the logs for the pod (you'll wait for 30 seconds)

show
kubectl get po # find the job pod
kubectl logs busybox-ptx58 -f # follow the logs

See the status of the job, describe it and see the logs

show
kubectl get jobs
kubectl describe jobs busybox
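A quick status check via jsonpath (illustrative, not part of the original answer):

kubectl get job busybox -o jsonpath='{.status.succeeded}' # prints 1 once the job has completed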
kubectl logs job/busybox

Delete the job

show
kubectl delete job busybox

Create the same job, make it run 5 times, one after the other. Verify its status and delete it

show
kubectl run busybox --image=busybox --restart=OnFailure --dry-run -o yaml -- /bin/sh -c 'echo hello;sleep 30;echo world' > job.yaml
vi job.yaml

Add job.spec.completions=5:
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  completions: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}

kubectl create -f job.yaml

Verify that it has been completed:
kubectl get job busybox -w # will take two and a half minutes
kubectl delete jobs busybox

Create the same job, but make it run 5 parallel times

show
vi job.yaml

Add job.spec.parallelism=5:
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  parallelism: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}

kubectl create -f job.yaml
kubectl get jobs

It will take some time for the parallel jobs to finish (>= 30 seconds)

kubectl delete job busybox

Create a cron job with image busybox that runs on a schedule of "*/1 * * * *" and writes 'date; echo Hello from the Kubernetes cluster' to standard output
show
kubectl run busybox --image=busybox --restart=OnFailure --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster'
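For reference, the --schedule flag makes kubectl generate a CronJob; a sketch of the manifest (the API version varies with your cluster version):

apiVersion: batch/v1beta1
kind: CronJob
metadata:
  creationTimestamp: null
  name: busybox
spec:
  schedule: '*/1 * * * *'
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: busybox
    spec:
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: busybox
            resources: {}
          restartPolicy: OnFailure
status: {}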
See its logs and delete it

show

kubectl get cj
kubectl get jobs --watch
kubectl get po --show-labels # observe that the pods have a label that mentions their 'parent' job
kubectl logs busybox-1529745840-m867r
# Bear in mind that Kubernetes will run a new job/pod for each new cron job
kubectl delete cj busybox

Create a configmap named config with values foo=lala,foo2=lolo

show
kubectl create configmap config --from-literal=foo=lala --from-literal=foo2=lolo

Display its values

show
kubectl get cm config -o yaml --export
# or
kubectl describe cm config

Create and display a configmap from a file

Create the file with:

echo -e "foo3=lili\nfoo4=lele" > config.txt

show
kubectl create cm configmap2 --from-file=config.txt
kubectl get cm configmap2 -o yaml --exportCreate the file with the command
echo -e "var1=val1\n# this is a comment\n\nvar2=val2\n#anothercomment" > config.envshow
kubectl create cm configmap3 --from-file=config.txt
kubectl get cm configmap2 -o yaml --exportCreate the file with
echo -e "var3=val3\nvar4=val4" > config3.txtshow
kubectl create cm configmap4 --from-file=special=config2.txt
kubectl describe cm configmap4
kubectl get cm configmap4 -o --exportCreate a configMap called 'options' with the value var5=val5. Create a new nginx pod that loads the value from variable 'var5' in an env variable called 'option'
show
kubectl create cm options --from-literal=var5=val5
kubectl run nginx --image=nginx --restart=Never --dry-run -o yaml > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    env:
    - name: option # name of the env variable
      valueFrom:
        configMapKeyRef:
          name: options # name of config map
          key: var5 # name of the entity in config map
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl exec -it nginx -- env | grep option # will show 'option=val5'

Create a configMap 'anotherone' with values 'var6=val6', 'var7=val7'. Load this configMap as env variables into a new nginx pod
show
kubectl create configmap anotherone --from-literal=var6=val6 --from-literal=var7=val7
kubectl run --restart=Never nginx --image=nginx -o yaml --dry-run > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    envFrom: # different than previous one, that was 'env'
    - configMapRef: # different from the previous one, was 'configMapKeyRef'
        name: anotherone # the name of the config map
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl exec -it nginx -- env

Create a configMap 'cmvolume' with values 'var8=val8', 'var9=val9'. Load this as a volume inside an nginx pod on path '/etc/lala'. Create the pod and 'ls' into the '/etc/lala' directory.
show
kubectl create configmap cmvolume --from-literal=var8=val8 --from-literal=var9=val9
kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  volumes: # add a volumes list
  - name: myvolume # just a name, you'll reference this in the pods
    configMap:
      name: cmvolume # name of your configmap
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    volumeMounts: # your volume mounts are listed here
    - name: myvolume # the name that you specified in pod.spec.volumes.name
      mountPath: /etc/lala # the path inside your container
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl exec -it nginx -- /bin/sh
cd /etc/lala
ls # will show var8 var9
cat var8 # will show val8

Create the YAML for an nginx pod that runs with the user ID 101. No need to create the pod

show
kubectl run nginx --image=nginx --restart=Never --dry-run -o yaml > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  securityContext: # insert this line
    runAsUser: 101 # UID for the user
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

Create the YAML for an nginx pod that has the capabilities "NET_ADMIN", "SYS_TIME" added on its single container
show
kubectl run nginx --image=nginx --restart=Never --dry-run -o yaml > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    securityContext: # insert this line
      capabilities: # and this
        add: ["NET_ADMIN", "SYS_TIME"] # this as well
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

Create an nginx pod with requests cpu=100m,memory=256Mi and limits cpu=200m,memory=512Mi

show
kubectl run nginx --image=nginx --restart=Never --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi'
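For reference, those flags translate into the container's resources section; a sketch of the relevant part of the generated spec:

spec:
  containers:
  - image: nginx
    name: nginx
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        cpu: 200m
        memory: 512Mi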
Create a secret called mysecret with the values password=mypass

show

kubectl create secret generic mysecret --from-literal=password=mypass

Create a secret called mysecret2 that gets key/value from a file

Create a file called username with the value admin:
echo admin > username

show
kubectl create secret generic mysecret2 --from-file=username

Get the value of mysecret2

show
kubectl get secret mysecret2 -o yaml --export
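The output will look roughly like this (YWRtaW4K is the base64 encoding of 'admin' plus a trailing newline):

apiVersion: v1
data:
  username: YWRtaW4K
kind: Secret
metadata:
  name: mysecret2
type: Opaque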
echo YWRtaW4K | base64 -d # shows 'admin'

Create an nginx pod that mounts the secret mysecret2 in a volume on path /etc/foo

show
kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  volumes: # specify the volumes
  - name: foo # this name will be used for reference inside the container
    secret: # we want a secret
      secretName: mysecret2 # name of the secret - this must already exist on pod creation
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    volumeMounts: # our volume mounts
    - name: foo # name on pod.spec.volumes
      mountPath: /etc/foo # our mount path
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl exec -it nginx -- /bin/bash
ls /etc/foo # shows username
cat /etc/foo/username # shows admin

Delete the pod you just created and mount the variable 'username' from secret mysecret2 onto a new nginx pod in env variable called 'USERNAME'
show
kubectl delete po nginx
kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    env: # our env variables
    - name: USERNAME # asked name
      valueFrom:
        secretKeyRef: # secret reference
          name: mysecret2 # our secret's name
          key: username # the key of the data in the secret
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl exec -it nginx -- env | grep USERNAME | cut -d '=' -f 2 # will show 'admin'

See all the service accounts of the cluster in all namespaces

show
kubectl get sa --all-namespaces

Create a new serviceaccount called 'myuser'

show
kubectl create sa myuser

Alternatively:
# let's get a template easily
kubectl get sa default -o yaml --export > sa.yaml
vim sa.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: myuser

kubectl create -f sa.yaml

Create an nginx pod that uses 'myuser' as a service account

show
kubectl run nginx --image=nginx --restart=Never -o yaml --dry-run > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  serviceAccountName: myuser # we use pod.spec.serviceAccountName
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl describe pod nginx # will see that a new secret called myuser-token-***** has been mounted

Or you can add it directly with the kubectl run command:
kubectl run nginx --image=nginx --restart=Never --serviceaccount=myuser -o yaml --dry-run > pod.yaml
kubectl apply -f pod.yaml

Create an nginx pod with a liveness probe that just runs the command 'ls'. Save its YAML in pod.yaml. Run it, check its probe status, delete it.
show
kubectl run nginx --image=nginx --restart=Never --dry-run -o yaml > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    livenessProbe: # our probe
      exec: # add this line
        command: # command definition
        - ls # ls command
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl describe pod nginx | grep -i liveness # run this to see that liveness probe works
kubectl delete -f pod.yaml

Modify the pod.yaml file so that the liveness probe starts kicking in after 5 seconds whereas the period of probing would be 10 seconds. Run it, check the probe, delete it.
show
kubectl explain pod.spec.containers.livenessProbe # get the exact names

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    livenessProbe:
      initialDelaySeconds: 5 # add this line
      periodSeconds: 10 # add this line as well
      exec:
        command:
        - ls
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl describe po nginx | grep -i liveness
kubectl delete -f pod.yaml

Create an nginx pod (that includes port 80) with an HTTP readinessProbe on path '/' on port 80. Again, run it, check the readinessProbe, delete it.
show
kubectl run nginx --image=nginx --dry-run -o yaml --restart=Never --port=80 > pod.yaml
vi pod.yaml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx
    resources: {}
    ports:
    - containerPort: 80
    readinessProbe: # declare the readiness probe
      httpGet: # add this line
        path: / #
        port: 80 #
  dnsPolicy: ClusterFirst
  restartPolicy: Never
status: {}

kubectl create -f pod.yaml
kubectl describe pod nginx | grep -i readiness # to see the pod readiness details
kubectl delete -f pod.yaml

Create a busybox pod that runs 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'. Check its logs
show
kubectl run busybox --image=busybox --restart=Never -- /bin/sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'
kubectl logs busybox -f # follow the logs

Create a busybox pod that runs 'ls /notexist'. Determine if there's an error (of course there is), see it. In the end, delete the pod
show
kubectl run busybox --restart=Never --image=busybox -- /bin/sh -c 'ls /notexist'
# show that there's an error
kubectl logs busybox
kubectl describe po busybox
kubectl delete po busybox

Create a busybox pod that runs 'notexist'. Determine if there's an error (of course there is), see it. In the end, delete the pod forcefully with a 0 grace period
show
kubectl run busybox --restart=Never --image=busybox -- notexist
kubectl logs busybox # will bring nothing! container never started
kubectl describe po busybox # in the events section, you'll see the error
# also...
kubectl get events | grep -i error # you'll see the error here as well
kubectl delete po busybox --force --grace-period=0

Get CPU/memory utilization for nodes (metrics-server must be running)

show
kubectl top nodes

Create a pod with image nginx called nginx and expose its port 80

show
kubectl run nginx --image=nginx --restart=Never --port=80 --expose
# observe that a pod as well as a service are created

Confirm that ClusterIP has been created. Also check endpoints

show
kubectl get svc nginx # services
kubectl get ep # endpoints

Get service's ClusterIP, create a temp busybox pod and 'hit' that IP with wget

show
kubectl get svc nginx # get the IP (something like 10.108.93.130)
kubectl run busybox --rm --image=busybox -it --restart=Never -- sh
wget -O- IP:80
exit

Convert the ClusterIP to NodePort and find the NodePort port. Hit it using Node's IP. Delete the service and the pod
show
kubectl edit svc nginx

apiVersion: v1
kind: Service
metadata:
  creationTimestamp: 2018-06-25T07:55:16Z
  name: nginx
  namespace: default
  resourceVersion: "93442"
  selfLink: /api/v1/namespaces/default/services/nginx
  uid: 191e3dac-784d-11e8-86b1-00155d9f663c
spec:
  clusterIP: 10.97.242.220
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: nginx
  sessionAffinity: None
  type: NodePort # change cluster IP to nodeport
status:
  loadBalancer: {}

kubectl get svc

# result:
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        1d
nginx        NodePort    10.107.253.138   <none>        80:31931/TCP   3m

wget -O- NODE_IP:31931 # if you're using Kubernetes with Docker for Windows/Mac, try 127.0.0.1

Create a deployment called foo using image 'dgkanatsios/simpleapp' (a simple server that returns hostname) and 3 replicas. Label it as 'app=foo'. Declare that containers in this pod will accept traffic on port 8080 (do NOT create a service yet)
show
kubectl run foo --image=dgkanatsios/simpleapp --labels=app=foo --port=8080 --replicas=3

Get the pod IPs. Create a temp busybox pod and try hitting them on port 8080

show
kubectl get pods -l app=foo -o wide # 'wide' will show pod IPs
kubectl run busybox --image=busybox --restart=Never -it --rm -- sh
wget -O- POD_IP:8080 # do not try with pod name, will not work
# try hitting all IPs to confirm that hostname is different
exit

Create a service that exposes the deployment on port 6262. Verify its existence, check the endpoints
show
kubectl expose deploy foo --port=6262 --target-port=8080
kubectl get service foo # you will see ClusterIP as well as port 6262
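For reference, the expose command above generates a Service roughly like this (the ClusterIP itself is assigned by the cluster):

apiVersion: v1
kind: Service
metadata:
  labels:
    app: foo
  name: foo
spec:
  ports:
  - port: 6262
    protocol: TCP
    targetPort: 8080
  selector:
    app: foo
  type: ClusterIP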
kubectl get endpoints foo # you will see the IPs of the three replica pods, listening on port 8080

Create a temp busybox pod and connect via wget to foo service. Verify that each time there's a different hostname returned. Delete deployment and services to cleanup the cluster
show
kubectl get svc # get the foo service ClusterIP
kubectl run busybox --image=busybox -it --rm --restart=Never -- sh
wget -O- foo:6262 # DNS works! run it many times, you'll see different pods responding
wget -O- SERVICE_CLUSTER_IP:6262 # ClusterIP works as well
# you can also kubectl logs on deployment pods to see the container logs
kubectl delete svc foo
kubectl delete deploy foo

Create an nginx deployment of 2 replicas, expose it via a ClusterIP service on port 80. Create a NetworkPolicy so that only pods with labels 'access: true' can access the deployment and apply it
show
kubectl run nginx --image=nginx --replicas=2 --port=80 --expose
kubectl describe svc nginx # see the 'run=nginx' selector for the pods
# or
kubectl get svc nginx -o yaml --export
vi policy.yaml

kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: access-nginx # pick a name
spec:
  podSelector:
    matchLabels:
      run: nginx # selector for the pods
  ingress: # allow ingress traffic
  - from:
    - podSelector: # from pods
        matchLabels: # with this label
          access: 'true' # 'true' *needs* quotes in YAML, apparently

kubectl create -f policy.yaml
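A quick way to test the policy (a sketch, not part of the original answer; it assumes your cluster runs a network plugin that enforces NetworkPolicies):

# with the required label: should succeed
kubectl run busybox --image=busybox --rm -it --restart=Never --labels=access=true -- wget -O- http://nginx:80 -T 2
# without the label: should time out
kubectl run busybox --image=busybox --rm -it --restart=Never -- wget -O- http://nginx:80 -T 2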
Create busybox pod with two containers, each one will have the image busybox and will run the 'sleep 3600' command. Make both pods mount an emptyDir at '/etc/foo'. Connect to the second busybox, write the first column of '/etc/passwd' file to '/etc/foo/passwd'. Connect to the first busybox and write '/etc/foo/passwd' file to standard output. Delete pod.

show
This question is probably a better fit for the 'Multi-container-pods' section but I'm keeping it here as it will help you get acquainted with state
Easiest way to do this is to create a template pod with:
kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run -- /bin/sh -c 'sleep 3600' > pod.yaml
vi pod.yaml

Copy/paste the container definition and type the lines that have a comment in the end:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  dnsPolicy: ClusterFirst
  restartPolicy: Never
  containers:
  - args:
    - /bin/sh
    - -c
    - sleep 3600
    image: busybox
    imagePullPolicy: IfNotPresent
    name: busybox
    resources: {}
    volumeMounts: #
    - name: myvolume #
      mountPath: /etc/foo #
  - args:
    - /bin/sh
    - -c
    - sleep 3600
    image: busybox
    name: busybox2 # don't forget to change the name during copy paste, must be different from the first container's name!
    volumeMounts: #
    - name: myvolume #
      mountPath: /etc/foo #
  volumes: #
  - name: myvolume #
    emptyDir: {} #
status: {}

kubectl create -f pod.yaml

Connect to the second container:
kubectl exec -it busybox -c busybox2 -- /bin/sh
cat /etc/passwd | cut -f 1 -d ':' > /etc/foo/passwd
cat /etc/foo/passwd # confirm that stuff has been written successfully
exit

Connect to the first container:
kubectl exec -it busybox -c busybox -- /bin/sh
mount | grep foo # confirm the mounting
cat /etc/foo/passwd
exit
kubectl delete po busybox

Create a PersistentVolume of 20GB, called 'myvolume'. Make it have accessMode of 'ReadWriteOnce' and 'ReadWriteMany', storageClassName 'normal', mounted on hostPath '/etc/foo'. Save it on pv.yaml, add it to the cluster. Show the PersistentVolumes that exist on the cluster
show
vi pv.yaml

kind: PersistentVolume
apiVersion: v1
metadata:
  name: myvolume
spec:
  storageClassName: normal
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
    - ReadWriteMany
  hostPath:
    path: /etc/foo

Show the PersistentVolumes:

kubectl create -f pv.yaml
# will have status 'Available'
kubectl get pv

Create a PersistentVolumeClaim for this storage class, called mypvc, a request of 4GB and save it on pvc.yaml. Create it on the cluster. Show the PersistentVolumeClaims of the cluster. Show the PersistentVolumes of the cluster
show
vi pvc.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mypvc
spec:
  storageClassName: normal
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 4Gi

Show the PersistentVolumeClaims and PersistentVolumes:

kubectl get pvc # will show as 'Bound'
kubectl get pv # will show as 'Bound' as well

Create a busybox pod with command 'sleep 3600', save it on pod.yaml. Mount the PersistentVolumeClaim to '/etc/foo'. Connect to the 'busybox' pod, and copy the '/etc/passwd' file to '/etc/foo'
show
Create a skeleton pod:
kubectl run busybox --image=busybox --restart=Never -o yaml --dry-run -- /bin/sh -c 'sleep 3600' > pod.yaml
vi pod.yaml

Add the lines that finish with a comment:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - sleep 3600
    image: busybox
    imagePullPolicy: IfNotPresent
    name: busybox
    resources: {}
    volumeMounts: #
    - name: myvolume #
      mountPath: /etc/foo #
  dnsPolicy: ClusterFirst
  restartPolicy: Never
  volumes: #
  - name: myvolume #
    persistentVolumeClaim: #
      claimName: mypvc #
status: {}

Create the pod:

kubectl create -f pod.yaml

Connect to the pod and copy '/etc/passwd' to '/etc/foo/passwd':

kubectl exec busybox -it -- cp /etc/passwd /etc/foo/passwd

Create a second pod which is identical with the one you just created (you can easily do it by changing the 'name' property on pod.yaml). Connect to it and verify that '/etc/foo' contains the 'passwd' file. Delete pods to cleanup
show
Create the second pod, called busybox2:
vim pod.yaml
# change 'name: busybox' to 'name: busybox2'
kubectl create -f pod.yaml
kubectl exec busybox2 -- ls /etc/foo # will show 'passwd'
# cleanup
kubectl delete po busybox busybox2

Create a busybox pod with 'sleep 3600' as arguments. Copy '/etc/passwd' from the pod to your local folder
show
kubectl run busybox --image=busybox --restart=Never -- sleep 3600
kubectl cp busybox:/etc/passwd . # kubectl cp command
cat passwd
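If kubectl cp is not available, an equivalent sketch using exec:

kubectl exec busybox -- cat /etc/passwd > passwd
cat passwd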