# Show the leader-election record kept in the annotations of the
# kube-controller-manager endpoint object in kube-system.
kubectl get --raw=/api/v1/namespaces/kube-system/endpoints/kube-controller-manager | jq .metadata.annotations
The same query when kubectl is routed through the Rancher API proxy (cluster ID c-zslh4):
# Same endpoint query, but through Rancher's cluster proxy path (/k8s/clusters/<cluster-id>/...).
kubectl get --raw=/k8s/clusters/c-zslh4/api/v1/namespaces/kube-system/endpoints/kube-scheduler
# Deploy a "nettest" debug DaemonSet (amouat/network-utils) on every node for
# cluster-wide network troubleshooting. The embedded YAML had lost all
# indentation in these notes and would not parse; structure restored here.
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nettest
  labels:
    app: nettest
spec:
  selector:
    matchLabels:
      name: nettest
  template:
    metadata:
      labels:
        name: nettest
    spec:
      tolerations:
        # Also schedule onto control-plane nodes; both taint keys are listed
        # because the key was renamed master -> control-plane across versions.
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
      containers:
        - name: nettest
          image: amouat/network-utils
          # Keep the pod alive so we can kubectl exec into it.
          command: ["sh"]
          args: ["-c", "while true; do sleep 100; done;"]
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 200Mi
EOF
Equivalent DaemonSet for a cluster enforcing PodSecurityPolicy (PSP), with the ServiceAccount, Role, and RoleBinding it needs:
---
# nettest DaemonSet (kubectl-exported form) plus the RBAC needed on a PSP
# cluster: a ServiceAccount and a Role/RoleBinding granting `use` on the
# eks.privileged PodSecurityPolicy.
# NOTE(review): the original used extensions/v1beta1, which was removed in
# Kubernetes 1.16 — migrated to apps/v1. The extensions-only field
# `templateGeneration` and the deprecated duplicate `serviceAccount:` key
# were dropped, as was `creationTimestamp: null` export cruft.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: nettest
  name: nettest
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: nettest
  template:
    metadata:
      labels:
        name: nettest
    spec:
      containers:
        - args:
            - -c
            - while true; do sleep 100; done;
          command:
            - sh
          image: amouat/network-utils
          imagePullPolicy: Always
          name: nettest
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 200Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      serviceAccountName: nettest
      securityContext: {}
      terminationGracePeriodSeconds: 30
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nettest
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nettest
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nettest
subjects:
  - kind: ServiceAccount
    name: nettest
    # NOTE(review): hard-coded namespace — must match where the
    # ServiceAccount above is actually created; confirm before applying.
    namespace: canh
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nettest
rules:
  # Broad allow-all rule — debugging convenience only; tighten for real use.
  - apiGroups:
      - '*'
    resources:
      - '*'
    verbs:
      - '*'
  # Allow the pods to `use` the privileged PSP so they pass admission.
  - apiGroups:
      - policy
    resourceNames:
      - eks.privileged
    resources:
      - podsecuritypolicies
    verbs:
      - use
# Run nslookup in every nettest pod to verify in-cluster DNS resolution node by node.
kubectl get pod -l name=nettest --no-headers | awk '{print $1}' | xargs -I % kubectl exec % -- nslookup google.com
# Dump the rendered nginx.conf from every ingress-nginx pod into a local file
# named after the node the pod runs on (nginx-<node>.conf).
# Fixes vs. original: jsonpath {range} was missing its required {end}
# terminator, and the generated `kubectl exec` lacked the `--` separator
# (the bare form is deprecated and ambiguous with kubectl flags).
kubectl get pod -l app=ingress-nginx -o \
  jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | \
  awk '{print "kubectl exec "$1" -- cat /etc/nginx/nginx.conf > nginx-"$2".conf"}' | bash
# Transcript: removing a dead container fails with "device or resource busy"
# because another process still holds its shm mount. Steps below find the holder.
docker ps -a -f status=dead
# docker rm 7f225410b273
Error response from daemon: unable to remove filesystem for 7f225410b273777661eb92c3c9c2692ad2c4d1f71a03206fddfa5fb874dc1759: remove /u02/docker_graph/docker/containers/7f225410b273777661eb92c3c9c2692ad2c4d1f71a03206fddfa5fb874dc1759/shm: device or resource busy
# Find which PID still has the container's shm mounted (here: 32095):
# grep 7f225410b273 /proc/*/mountinfo
/proc/32095/mountinfo:910 906 0:58 / /u02/docker_graph/docker/containers/7f225410b273777661eb92c3c9c2692ad2c4d1f71a03206fddfa5fb874dc1759/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,seclabel,size=65536k
# Map that PID back to the running container holding the mount (a filebeat pod):
# docker ps -q | xargs docker inspect --format '{{.State.Pid}}, {{.Name}}' | grep 32095
32095, /k8s_filebeat_logsmain-tcloud-logs-agent-filebeat-rrb7g_xxxx_28a48f1d-fca2-11e8-a990-00505695c450_0
#!/usr/bin/env bash
# Force-drain helper: force-delete pods that block a drain (Vault pods with a
# PDB, pods stuck Terminating) on one node, then drain it.
# Usage: ./force-drain.sh <node-name>
# Fix: original shebang was reversed ("!#"), which bash rejects.
node_name=$1
if [ -z "${node_name}" ]; then
  echo "Usage: $0 <node-name>" >&2
  exit 1
fi

echo " Delete all pods with Vault (that have PDB) on ${node_name}"
# `get pods -A` prints NAMESPACE NAME ... so $1 is the namespace, $2 the pod.
kubectl get pods -A --field-selector "spec.nodeName=${node_name}" -l app=vault --no-headers | awk '{print "kubectl delete pod "$2" -n "$1" --force --grace-period=0"}' | bash

echo "Delete all pod in terminating states on node ${node_name}"
kubectl get pods -A --field-selector "spec.nodeName=${node_name}" --no-headers | grep Terminating | awk '{print "kubectl delete pod "$2" -n "$1" --force --grace-period=0"}' | bash

kubectl drain --ignore-daemonsets --delete-local-data "${node_name}"