Kubernetes-Installation
### Requirements
sudo apt install docker.io socat
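The CERTS section below also needs cfssl and cfssljson, which are not in the base image. A minimal install sketch, assuming the CloudFlare R1.2 binaries are still published at pkg.cfssl.org:
wget -q https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl
sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
cfssl version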
### KUBECTL
wget https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin/.
kubectl version
### CERTS
## Certificate Authority
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
RESULTS:
ca-key.pem
ca.pem
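Optional sanity check on the new CA (a sketch using openssl, which is assumed to be installed):
openssl x509 -in ca.pem -noout -text | grep -A1 'Basic Constraints'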
## Client and Server Certificates
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:masters",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
RESULTS:
admin-key.pem
admin.pem
## The Kubelet Client Certificates
for instance in fran-worker-0 fran-worker-1; do
cat > ${instance}-csr.json <<EOF
{
  "CN": "system:node:${instance}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
done
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=fran-worker-0,<PUBLIC_IP>,<PRIVATE_IP> \
-profile=kubernetes \
fran-worker-0-csr.json | cfssljson -bare fran-worker-0
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=fran-worker-1,<PUBLIC_IP>,<PRIVATE_IP> \
-profile=kubernetes \
fran-worker-1-csr.json | cfssljson -bare fran-worker-1
RESULTS:
fran-worker-0-key.pem
fran-worker-0.pem
fran-worker-1-key.pem
fran-worker-1.pem
# NOTE: get the node's internal IP -> INTERNAL_IP=$(ifconfig ens4 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
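On hosts where ifconfig is unavailable or prints the newer output format, the same value can be read with iproute2 (a sketch; replace ens4 with the actual interface name):
INTERNAL_IP=$(ip -4 -o addr show ens4 | awk '{print $4}' | cut -d/ -f1)
echo ${INTERNAL_IP}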
## The kube-proxy Client Certificate
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:node-proxier",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare kube-proxy
RESULTS:
kube-proxy-key.pem
kube-proxy.pem
## The Kubernetes API Server Certificate
KUBERNETES_PUBLIC_ADDRESS=
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-hostname=10.3.0.1,<PRIVATE_IP>,<PRIVATE_IP>,<PRIVATE_IP>,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,kubernetes.default \
-profile=kubernetes \
kubernetes-csr.json | cfssljson -bare kubernetes
RESULTS:
kubernetes-key.pem
kubernetes.pem
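Before distributing kubernetes.pem it is worth checking that every address passed to -hostname made it into the SAN list (openssl assumed to be installed):
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'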
## Distribute the Client and Server Certificates
# WORKERS
scp ca.pem fran-worker-0-key.pem fran-worker-0.pem root@<PUBLIC_IP>:~/
scp ca.pem fran-worker-1-key.pem fran-worker-1.pem root@<PUBLIC_IP>:~/
# CONTROLLERS
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem root@<PUBLIC_IP>:~/
### KUBECONFIG
## KUBELET
KUBERNETES_PUBLIC_ADDRESS=
for instance in fran-worker-0 fran-worker-1; do
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
--kubeconfig=${instance}.kubeconfig
kubectl config set-credentials system:node:${instance} \
--client-certificate=${instance}.pem \
--client-key=${instance}-key.pem \
--embed-certs=true \
--kubeconfig=${instance}.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=system:node:${instance} \
--kubeconfig=${instance}.kubeconfig
kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done
RESULTS:
fran-worker-0.kubeconfig
fran-worker-1.kubeconfig
## KUBE-PROXY
KUBERNETES_PUBLIC_ADDRESS=
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
RESULTS:
kube-proxy.kubeconfig
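Each generated kubeconfig can be inspected before copying it out, e.g.:
kubectl config view --kubeconfig=fran-worker-0.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig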
## COPY TO WORKERS
scp fran-worker-0.kubeconfig kube-proxy.kubeconfig root@<PUBLIC_IP>:~/
scp fran-worker-1.kubeconfig kube-proxy.kubeconfig root@<PUBLIC_IP>:~/
### ETCD
wget -q --show-progress --https-only --timestamping \
"https://github.com/coreos/etcd/releases/download/v3.2.6/etcd-v3.2.6-linux-amd64.tar.gz"
tar -xvf etcd-v3.2.6-linux-amd64.tar.gz
sudo mv etcd-v3.2.6-linux-amd64/etcd* /usr/local/bin/
sudo mkdir -p /etc/etcd /var/lib/etcd
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
#### CHECK INTERFACE
INTERNAL_IP=$(ifconfig eth1 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
ETCD_NAME=$(hostname -s)
cat > etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \\
--name ${ETCD_NAME} \\
--cert-file=/etc/etcd/kubernetes.pem \\
--key-file=/etc/etcd/kubernetes-key.pem \\
--peer-cert-file=/etc/etcd/kubernetes.pem \\
--peer-key-file=/etc/etcd/kubernetes-key.pem \\
--trusted-ca-file=/etc/etcd/ca.pem \\
--peer-trusted-ca-file=/etc/etcd/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
--listen-peer-urls https://${INTERNAL_IP}:2380 \\
--listen-client-urls https://${INTERNAL_IP}:2379,http://127.0.0.1:2379 \\
--advertise-client-urls https://${INTERNAL_IP}:2379 \\
--initial-cluster-token etcd-cluster-0 \\
--initial-cluster ${ETCD_NAME}=https://${INTERNAL_IP}:2380 \\
--initial-cluster-state new \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv etcd.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl restart etcd
## VERIFICATION
ETCDCTL_API=3 etcdctl member list
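The plain command above works because the unit also listens on http://127.0.0.1:2379; to exercise the TLS endpoint instead, something like this should work with the v3 etcdctl (cert paths assume /etc/etcd as set up above):
ETCDCTL_API=3 etcdctl member list \
--endpoints=https://${INTERNAL_IP}:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem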
### CONTROL PLANE
## DOWNLOAD
Download binaries
export VERSION=v1.6.2
wget "https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-apiserver" \
"https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-controller-manager" \
"https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kube-scheduler" \
"https://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/kubectl"
chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
## CERTS
sudo mkdir -p /var/lib/kubernetes/
sudo cp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
## UNIT
# APISERVER
INTERNAL_IP=$(ifconfig eth1 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
cat > kube-apiserver.service <<EOF
[Unit]
Description=apiserver
Documentation=https://kubernetes.io
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--advertise-address=${INTERNAL_IP} \\
--kubelet-preferred-address-types=InternalIP \\
--allow-privileged=true \\
--apiserver-count=1 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/audit.log \\
--authorization-mode=RBAC \\
--bind-address=0.0.0.0 \\
--client-ca-file=/var/lib/kubernetes/ca.pem \\
--enable-swagger-ui=true \\
--etcd-cafile=/var/lib/kubernetes/ca.pem \\
--etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
--etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
--etcd-servers=https://${INTERNAL_IP}:2379 \\
--event-ttl=1h \\
--insecure-bind-address=0.0.0.0 \\
--kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
--kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
--kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
--kubelet-https=true \\
--runtime-config=rbac.authorization.k8s.io/v1alpha1 \\
--service-account-key-file=/var/lib/kubernetes/ca-key.pem \\
--service-cluster-ip-range=10.3.0.0/24 \\
--service-node-port-range=30000-32767 \\
--tls-ca-file=/var/lib/kubernetes/ca.pem \\
--tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
--tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# CONTROLLER
INTERNAL_IP=$(ifconfig eth1 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
cat > kube-controller-manager.service <<EOF
[Unit]
Description=controller
Documentation=https://kubernetes.io
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--address=0.0.0.0 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.2.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
--leader-elect=true \\
--master=http://${INTERNAL_IP}:8080 \\
--root-ca-file=/var/lib/kubernetes/ca.pem \\
--service-account-private-key-file=/var/lib/kubernetes/ca-key.pem \\
--service-cluster-ip-range=10.3.0.0/24 \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# SCHEDULER
INTERNAL_IP=$(ifconfig eth1 | grep 'inet addr' | cut -d ':' -f 2 | cut -d ' ' -f 1)
cat > kube-scheduler.service <<EOF
[Unit]
Description=scheduler
Documentation=https://kubernetes.io
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--leader-elect=true \\
--master=http://${INTERNAL_IP}:8080 \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
## START CONTROL PLANE
sudo mv kube-apiserver.service kube-scheduler.service kube-controller-manager.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler
sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler
fran@controller-a:~$ kubectl get componentstatuses
NAME                 STATUS      MESSAGE                                                                   ERROR
controller-manager   Healthy     ok
scheduler            Healthy     ok
etcd-0               Unhealthy   Get https://10.132.0.2:2379/health: remote error: tls: bad certificate
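The Unhealthy etcd-0 line is a TLS handshake failure between the API server's health probe and etcd; with --client-cert-auth enabled this is usually either the probe not presenting a client certificate etcd trusts, or the controller's internal IP missing from the -hostname SAN list of kubernetes.pem. A way to narrow it down before changing anything (a sketch using the cert paths from the ETCD section):
# check etcd directly over TLS with the same cert pair the API server uses
ETCDCTL_API=3 etcdctl endpoint health \
--endpoints=https://10.132.0.2:2379 \
--cacert=/etc/etcd/ca.pem \
--cert=/etc/etcd/kubernetes.pem \
--key=/etc/etcd/kubernetes-key.pem
# confirm the controller's internal IP appears in the server cert SANs
openssl x509 -in /etc/etcd/kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'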
### FLANNEL
Create from the master:
cat <<EOF | kubectl create -f -
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "10.2.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conf
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
EOF
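Once the manifest is applied, the DaemonSet should schedule one flannel pod per worker; a quick check from a host with admin credentials:
kubectl -n kube-system get daemonset kube-flannel-ds
kubectl -n kube-system get pods -l app=flannel -o wide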
### WORKERS
## DOWNLOAD
sudo apt install docker.io socat
sudo mkdir -p \
/etc/cni/net.d \
/opt/cni/bin \
/var/lib/kubelet \
/var/lib/kube-proxy \
/var/lib/kubernetes \
/var/run/kubernetes
export CNI_VERSION=v0.6.0
export KUBE_VERSION=v1.6.2
wget https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz \
https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl \
https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kube-proxy \
https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubelet
chmod +x kubectl kube-proxy kubelet
sudo tar -xvf cni-plugins-amd64-${CNI_VERSION}.tgz -C /opt/cni/bin/
sudo mv kubectl kube-proxy kubelet /usr/local/bin/
## KUBELET
export NODE=$(hostname -s)
sudo cp ${NODE}.pem ${NODE}-key.pem /var/lib/kubelet/
sudo cp ca.pem /var/lib/kubernetes/
sudo cp ${NODE}.kubeconfig /var/lib/kubelet/kubeconfig
## UNIT
export NODE=$(hostname -s)
cat > kubelet.service <<EOF
[Unit]
Description=kubelet
Documentation=https://kubernetes.io
[Service]
ExecStart=/usr/local/bin/kubelet \\
--allow-privileged=true \\
--cluster-dns=10.3.0.10 \\
--cluster-domain=cluster.local \\
--image-pull-progress-deadline=2m \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\
--network-plugin=cni \\
--pod-cidr=10.2.0.0/16 \\
--register-node=true \\
--require-kubeconfig \\
--runtime-request-timeout=10m \\
--tls-cert-file=/var/lib/kubelet/${NODE}.pem \\
--tls-private-key-file=/var/lib/kubelet/${NODE}-key.pem \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv kubelet.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl restart kubelet
## PROXY
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
cat > proxy.service <<EOF
[Unit]
Description=proxy
Documentation=https://kubernetes.io
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--cluster-cidr=10.2.0.0/16 \\
--kubeconfig=/var/lib/kube-proxy/kubeconfig \\
--proxy-mode=iptables \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv proxy.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable proxy
sudo systemctl start proxy
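With the kubelet and proxy units running on both workers, node registration can be confirmed from the controller:
kubectl get nodes -o wide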
### REMOTE KUBECONFIG
KUBERNETES_PUBLIC_ADDRESS=
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem
kubectl config set-context kubernetes-the-hard-way \
--cluster=kubernetes-the-hard-way \
--user=admin
kubectl config use-context kubernetes-the-hard-way
cat ~/.kube/config
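Quick end-to-end check of the remote kubeconfig (assumes the steps above ran in the directory holding admin.pem and ca.pem):
kubectl get componentstatuses
kubectl get nodes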
### DNS
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  clusterIP: 10.3.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  selector:
    k8s-app: kube-dns
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
  name: kube-dns
  namespace: kube-system
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: kube-dns
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      creationTimestamp: null
      labels:
        k8s-app: kube-dns
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        ports:
        - name: dns-local
          containerPort: 10053
          protocol: UDP
        - name: dns-tcp-local
          containerPort: 10053
          protocol: TCP
        - name: metrics
          containerPort: 10055
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        ports:
        - name: dns
          containerPort: 53
          protocol: UDP
        - name: dns-tcp
          containerPort: 53
          protocol: TCP
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        ports:
        - name: metrics
          containerPort: 10054
          protocol: TCP
        resources:
          requests:
            cpu: 10m
            memory: 20Mi
      dnsPolicy: Default
      restartPolicy: Always
      serviceAccount: kube-dns
      serviceAccountName: kube-dns
      terminationGracePeriodSeconds: 30
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      volumes:
      - name: kube-dns-config
        configMap:
          defaultMode: 420
          name: kube-dns
          optional: true
EOF
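Once the add-on pods are up, DNS can be smoke-tested with a throwaway busybox pod (a sketch; kubectl run on v1.6 creates a Deployment labelled run=busybox):
kubectl run busybox --image=busybox --command -- sleep 3600
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
kubectl exec -ti ${POD_NAME} -- nslookup kubernetes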