moby-025000000007:/# kubectl get nodes
NAME                STATUS     AGE       VERSION
moby-025000000007   NotReady   50s       v1.6.2
moby-025000000008   NotReady   13s       v1.6.2
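Both nodes report NotReady, which is the thread running through everything below: unready nodes are not considered by the scheduler, so any pod that still needs to be placed (kube-dns, tiller-deploy) stays Pending. On a freshly bootstrapped cluster this often just means the nodes are still coming up or that no pod-network (CNI) add-on is running yet. One way to check, using the node name from the listing above:

kubectl describe node moby-025000000007   # look at Conditions for why the node is not Ready
kubectl get pods -n kube-system -o wide   # check whether a network add-on is present and running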
moby-025000000007:/# helm init --net-host
Creating /root/.helm
Creating /root/.helm/repository
Creating /root/.helm/repository/cache
Creating /root/.helm/repository/local
Creating /root/.helm/plugins
Creating /root/.helm/starters
Creating /root/.helm/repository/repositories.yaml
$HELM_HOME has been configured at /root/.helm.
Tiller (the helm server side component) has been installed into your Kubernetes Cluster.
Happy Helming!
moby-025000000007:/# kubectl get pods -n kube-system
NAME                                         READY     STATUS    RESTARTS   AGE
etcd-moby-025000000007                       1/1       Running   0          1m
kube-apiserver-moby-025000000007             1/1       Running   0          1m
kube-controller-manager-moby-025000000007    1/1       Running   0          1m
kube-dns-3913472980-tmlg9                    0/3       Pending   0          1m
kube-proxy-9v5gp                             1/1       Running   0          30s
kube-proxy-r3ch5                             1/1       Running   0          1m
kube-scheduler-moby-025000000007             1/1       Running   0          1m
tiller-deploy-632052818-kg1st                0/1       Pending   0          9s
moby-025000000007:/# kubectl describe svc tiller-deploy -n kube-system
Name:              tiller-deploy
Namespace:         kube-system
Labels:            app=helm
                   name=tiller
Annotations:       <none>
Selector:          app=helm,name=tiller
Type:              ClusterIP
IP:                10.109.174.42
Port:              tiller  44134/TCP
Endpoints:         <none>
Session Affinity:  None
Events:            <none>
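Endpoints is <none> simply because no Running tiller pod backs the Service yet; the Service itself (selector app=helm,name=tiller, port 44134) looks correct. Once the pod is scheduled this can be re-checked with, for example:

kubectl get endpoints tiller-deploy -n kube-system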
moby-025000000007:/# kubectl describe po/tiller-deploy-632052818-kg1st -n kube-system
Name:           tiller-deploy-632052818-kg1st
Namespace:      kube-system
Node:           /
Labels:         app=helm
                name=tiller
                pod-template-hash=632052818
Annotations:    kubernetes.io/created-by={"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"kube-system","name":"tiller-deploy-632052818","uid":"e4a32e1d-35a9-11e7-8f93-56e5...
Status:         Pending
IP:
Controllers:    ReplicaSet/tiller-deploy-632052818
Containers:
  tiller:
    Image:        gcr.io/kubernetes-helm/tiller:v2.4.1
    Port:         44134/TCP
    Liveness:     http-get http://:44135/liveness delay=1s timeout=1s period=10s #success=1 #failure=3
    Readiness:    http-get http://:44135/readiness delay=1s timeout=1s period=10s #success=1 #failure=3
    Environment:
      TILLER_NAMESPACE:  kube-system
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-d9165 (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  default-token-d9165:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-d9165
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.alpha.kubernetes.io/notReady=:Exists:NoExecute for 300s
                 node.alpha.kubernetes.io/unreachable=:Exists:NoExecute for 300s
Events:
  FirstSeen  LastSeen  Count  From               SubObjectPath  Type     Reason            Message
  ---------  --------  -----  ----               -------------  ----     ------            -------
  1m         23s       8      default-scheduler                 Warning  FailedScheduling  no nodes available to schedule pods
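The only event is the scheduler refusing placement ("no nodes available to schedule pods"), which matches the two NotReady nodes in the first listing; nothing in the pod spec itself is being rejected. Assuming the nodes eventually become Ready, the pod should be scheduled without further action, which can be watched with:

kubectl get nodes -w                  # wait for at least one node to report Ready
kubectl get pods -n kube-system -w    # tiller-deploy should leave Pending shortly afterwards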
kubectl get po/tiller-deploy-632052818-kg1st -n kube-system -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubernetes.io/created-by: |
      {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"kube-system","name":"tiller-deploy-632052818","uid":"e4a32e1d-35a9-11e7-8f93-56e59be3ff0f","apiVersion":"extensions","resourceVersion":"426"}}
  creationTimestamp: 2017-05-10T17:55:45Z
  generateName: tiller-deploy-632052818-
  labels:
    app: helm
    name: tiller
    pod-template-hash: "632052818"
  name: tiller-deploy-632052818-kg1st
  namespace: kube-system
  ownerReferences:
  - apiVersion: extensions/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: tiller-deploy-632052818
    uid: e4a32e1d-35a9-11e7-8f93-56e59be3ff0f
  resourceVersion: "433"
  selfLink: /api/v1/namespaces/kube-system/pods/tiller-deploy-632052818-kg1st
  uid: e4a42275-35a9-11e7-8f93-56e59be3ff0f
spec:
  containers:
  - env:
    - name: TILLER_NAMESPACE
      value: kube-system
    image: gcr.io/kubernetes-helm/tiller:v2.4.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /liveness
        port: 44135
        scheme: HTTP
      initialDelaySeconds: 1
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: tiller
    ports:
    - containerPort: 44134
      hostPort: 44134
      name: tiller
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readiness
        port: 44135
        scheme: HTTP
      initialDelaySeconds: 1
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-d9165
      readOnly: true
  dnsPolicy: ClusterFirst
  hostNetwork: true
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.alpha.kubernetes.io/notReady
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.alpha.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-d9165
    secret:
      defaultMode: 420
      secretName: default-token-d9165
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: 2017-05-10T17:55:45Z
    message: no nodes available to schedule pods
    reason: Unschedulable
    status: "False"
    type: PodScheduled
  phase: Pending
  qosClass: BestEffort
[root@arjunshanka1 user]# kubectl get po/tiller-deploy-597c48f967-2v7fw -n kube-system -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: 2018-07-10T20:16:20Z
  generateName: tiller-deploy-597c48f967-
  labels:
    app: helm
    name: tiller
    pod-template-hash: "1537049523"
  name: tiller-deploy-597c48f967-2v7fw
  namespace: kube-system
  ownerReferences:
  - blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: tiller-deploy-597c48f967
    uid: 1c2c5d61-847e-11e8-8795-0e5090de90b6
  resourceVersion: "19098"
  selfLink: /api/v1/namespaces/kube-system/pods/tiller-deploy-597c48f967-2v7fw
  uid: 1c34fed4-847e-11e8-8795-0e5090de90b6
spec:
  containers:
  - env:
    - name: TILLER_NAMESPACE
      value: kube-system
    - name: TILLER_HISTORY_MAX
      value: "0"
    image: gcr.io/kubernetes-helm/tiller:v2.9.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /liveness
        port: 44135
        scheme: HTTP
      initialDelaySeconds: 1
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: tiller
    ports:
    - containerPort: 44134
      name: tiller
      protocol: TCP
    - containerPort: 44135
      name: http
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readiness
        port: 44135
        scheme: HTTP
      initialDelaySeconds: 1
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-46n8t
      readOnly: true
  dnsPolicy: ClusterFirst
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-46n8t
    secret:
      defaultMode: 420
      secretName: default-token-46n8t
status:
  conditions:
  - lastTransitionTime: 2018-07-10T20:16:20Z
    message: '0/1 nodes are available: 1 node(s) were not ready.'
    reason: Unschedulable
    status: "False"
    type: PodScheduled
  phase: Pending
  qosClass: BestEffort
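This later report (tiller v2.9.1, July 2018) shows the same symptom with a more explicit scheduler message: '0/1 nodes are available: 1 node(s) were not ready.' The next step is again to find out why the single node is NotReady, for example (placeholder node name):

kubectl get nodes
kubectl describe node <node-name>   # inspect Conditions and events for the unready node
journalctl -u kubelet               # on the node itself (systemd-based hosts), if describe is not conclusive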