Last active
October 31, 2017 05:06
-
-
Save tamalsaha/944624d58b4cff3d79609b56a450cfb9 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/usr/bin/kubelet
--allow-privileged=true
--authorization-mode=Webhook
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf
--cadvisor-port=0
--cert-dir=/var/lib/kubelet/pki
--client-ca-file=/etc/kubernetes/pki/ca.crt
--cloud-provider=external
--cluster-dns=10.96.0.10
--cluster-domain=cluster.local
--cni-bin-dir=/opt/cni/bin
--cni-conf-dir=/etc/cni/net.d
--kubeconfig=/etc/kubernetes/kubelet.conf
--network-plugin=cni
--node-labels=cloud.appscode.com/pool=master
--pod-manifest-path=/etc/kubernetes/manifests
--rotate-certificates=true
kube-controller-manager
--address=127.0.0.1
--allocate-node-cidrs=true
--cluster-cidr=192.168.0.0/16
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
--cluster-signing-key-file=/etc/kubernetes/pki/ca.key
--controllers=*,bootstrapsigner,tokencleaner
--kubeconfig=/etc/kubernetes/controller-manager.conf
--leader-elect=true
--node-cidr-mask-size=24
--root-ca-file=/etc/kubernetes/pki/ca.crt
--service-account-private-key-file=/etc/kubernetes/pki/sa.key
--use-service-account-credentials=true
kube-apiserver
--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
--advertise-address=138.197.19.244
--allow-privileged=true
--authorization-mode=Node,RBAC
--client-ca-file=/etc/kubernetes/pki/ca.crt
--enable-bootstrap-token-auth=true
--etcd-servers=http://127.0.0.1:2379
--insecure-port=0
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
--kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
--kubelet-preferred-address-types=InternalIP,ExternalIP
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
--requestheader-allowed-names=front-proxy-client
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
--requestheader-extra-headers-prefix=X-Remote-Extra-
--requestheader-group-headers=X-Remote-Group
--requestheader-username-headers=X-Remote-User
--secure-port=6443
--service-account-key-file=/etc/kubernetes/pki/sa.pub
--service-cluster-ip-range=10.96.0.0/12
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt
--tls-private-key-file=/etc/kubernetes/pki/apiserver.key
kube-scheduler
--address=127.0.0.1
--kubeconfig=/etc/kubernetes/scheduler.conf
--leader-elect=true
/usr/local/bin/kube-proxy
--cluster-cidr=192.168.0.0/16
--kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
root@c1-master:~# docker logs 12dc0a50ff77 | |
I1031 02:53:43.698887 1 controllermanager.go:109] Version: v1.8.0 | |
I1031 02:53:43.707984 1 leaderelection.go:174] attempting to acquire leader lease... | |
E1031 02:53:43.708659 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: Get https://159.203.139.40:6443/api/v1/namespaces/kube-system/endpoints/kube-controller-manager: dial tcp 159.203.139.40:6443: getsockopt: connection refused | |
E1031 02:53:47.163276 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: Get https://159.203.139.40:6443/api/v1/namespaces/kube-system/endpoints/kube-controller-manager: dial tcp 159.203.139.40:6443: getsockopt: connection refused | |
E1031 02:53:51.424193 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: Get https://159.203.139.40:6443/api/v1/namespaces/kube-system/endpoints/kube-controller-manager: dial tcp 159.203.139.40:6443: getsockopt: connection refused | |
E1031 02:53:55.030984 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: Get https://159.203.139.40:6443/api/v1/namespaces/kube-system/endpoints/kube-controller-manager: dial tcp 159.203.139.40:6443: getsockopt: connection refused | |
E1031 02:53:58.099955 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: endpoints "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get endpoints in the namespace "kube-system" | |
E1031 02:54:01.121253 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: endpoints "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get endpoints in the namespace "kube-system" | |
E1031 02:54:04.771283 1 leaderelection.go:224] error retrieving resource lock kube-system/kube-controller-manager: endpoints "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get endpoints in the namespace "kube-system" | |
I1031 02:54:06.947383 1 leaderelection.go:184] successfully acquired lease kube-system/kube-controller-manager | |
W1031 02:54:06.948331 1 controllermanager.go:138] --use-service-account-credentials was specified without providing a --service-account-private-key-file | |
I1031 02:54:06.948072 1 event.go:218] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"kube-controller-manager", UID:"c2fa7933-bde6-11e7-9f28-7a8f0192438d", APIVersion:"v1", ResourceVersion:"115", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' c1-master became leader | |
E1031 02:54:06.955539 1 controllermanager.go:384] Server isn't healthy yet. Waiting a little while. | |
I1031 02:54:08.015059 1 plugins.go:101] No cloud provider specified. | |
I1031 02:54:08.021776 1 controller_utils.go:1041] Waiting for caches to sync for tokens controller | |
I1031 02:54:08.122761 1 controller_utils.go:1048] Caches are synced for tokens controller | |
I1031 02:54:08.152146 1 controllermanager.go:487] Started "disruption" | |
I1031 02:54:08.152400 1 disruption.go:288] Starting disruption controller | |
I1031 02:54:08.152417 1 controller_utils.go:1041] Waiting for caches to sync for disruption controller | |
I1031 02:54:08.194047 1 controllermanager.go:487] Started "cronjob" | |
I1031 02:54:08.194827 1 cronjob_controller.go:98] Starting CronJob Manager | |
I1031 02:54:08.250521 1 controllermanager.go:487] Started "bootstrapsigner" | |
W1031 02:54:08.250560 1 core.go:135] configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes. | |
W1031 02:54:08.250568 1 controllermanager.go:484] Skipping "route" | |
I1031 02:54:08.329569 1 controllermanager.go:487] Started "resourcequota" | |
I1031 02:54:08.329756 1 resource_quota_controller.go:238] Starting resource quota controller | |
I1031 02:54:08.329785 1 controller_utils.go:1041] Waiting for caches to sync for resource quota controller | |
I1031 02:54:08.364395 1 controllermanager.go:487] Started "podgc" | |
I1031 02:54:08.364726 1 gc_controller.go:76] Starting GC controller | |
I1031 02:54:08.364783 1 controller_utils.go:1041] Waiting for caches to sync for GC controller | |
I1031 02:54:08.445920 1 controllermanager.go:487] Started "horizontalpodautoscaling" | |
I1031 02:54:08.446232 1 horizontal.go:145] Starting HPA controller | |
I1031 02:54:08.446321 1 controller_utils.go:1041] Waiting for caches to sync for HPA controller | |
I1031 02:54:08.561494 1 controllermanager.go:487] Started "csrsigning" | |
I1031 02:54:08.561699 1 certificate_controller.go:109] Starting certificate controller | |
I1031 02:54:08.561818 1 controller_utils.go:1041] Waiting for caches to sync for certificate controller | |
I1031 02:54:08.707976 1 controllermanager.go:487] Started "csrapproving" | |
W1031 02:54:08.708063 1 controllermanager.go:484] Skipping "persistentvolume-expander" | |
I1031 02:54:08.708139 1 certificate_controller.go:109] Starting certificate controller | |
I1031 02:54:08.708195 1 controller_utils.go:1041] Waiting for caches to sync for certificate controller | |
I1031 02:54:08.971285 1 controllermanager.go:487] Started "endpoint" | |
I1031 02:54:08.971432 1 endpoints_controller.go:153] Starting endpoint controller | |
I1031 02:54:08.971466 1 controller_utils.go:1041] Waiting for caches to sync for endpoint controller | |
I1031 02:54:09.248578 1 controllermanager.go:487] Started "namespace" | |
I1031 02:54:09.248943 1 namespace_controller.go:186] Starting namespace controller | |
I1031 02:54:09.248971 1 controller_utils.go:1041] Waiting for caches to sync for namespace controller | |
I1031 02:54:09.470627 1 controllermanager.go:487] Started "serviceaccount" | |
I1031 02:54:09.470904 1 serviceaccounts_controller.go:113] Starting service account controller | |
I1031 02:54:09.471139 1 controller_utils.go:1041] Waiting for caches to sync for service account controller | |
I1031 02:54:10.530552 1 controllermanager.go:487] Started "garbagecollector" | |
I1031 02:54:10.531195 1 garbagecollector.go:136] Starting garbage collector controller | |
I1031 02:54:10.531227 1 controller_utils.go:1041] Waiting for caches to sync for garbage collector controller | |
I1031 02:54:10.531285 1 graph_builder.go:321] GraphBuilder running | |
I1031 02:54:10.585806 1 controllermanager.go:487] Started "job" | |
I1031 02:54:10.586731 1 job_controller.go:138] Starting job controller | |
I1031 02:54:10.586871 1 controller_utils.go:1041] Waiting for caches to sync for job controller | |
I1031 02:54:10.651981 1 controllermanager.go:487] Started "replicaset" | |
I1031 02:54:10.652228 1 replica_set.go:156] Starting replica set controller | |
I1031 02:54:10.652762 1 controller_utils.go:1041] Waiting for caches to sync for replica set controller | |
I1031 02:54:10.747493 1 controllermanager.go:487] Started "statefulset" | |
I1031 02:54:10.747642 1 stateful_set.go:146] Starting stateful set controller | |
I1031 02:54:10.747714 1 controller_utils.go:1041] Waiting for caches to sync for stateful set controller | |
I1031 02:54:11.005697 1 controllermanager.go:487] Started "ttl" | |
I1031 02:54:11.006491 1 ttl_controller.go:116] Starting TTL controller | |
I1031 02:54:11.006637 1 controller_utils.go:1041] Waiting for caches to sync for TTL controller | |
I1031 02:54:11.109686 1 controllermanager.go:487] Started "replicationcontroller" | |
I1031 02:54:11.109899 1 replication_controller.go:151] Starting RC controller | |
I1031 02:54:11.110228 1 controller_utils.go:1041] Waiting for caches to sync for RC controller | |
I1031 02:54:11.379546 1 controllermanager.go:487] Started "attachdetach" | |
I1031 02:54:11.379709 1 attach_detach_controller.go:255] Starting attach detach controller | |
I1031 02:54:11.379730 1 controller_utils.go:1041] Waiting for caches to sync for attach detach controller | |
I1031 02:54:11.608781 1 node_controller.go:249] Sending events to api server. | |
I1031 02:54:21.618371 1 range_allocator.go:70] Sending events to api server. | |
I1031 02:54:21.618618 1 range_allocator.go:85] No Service CIDR provided. Skipping filtering out service addresses. | |
I1031 02:54:21.618677 1 range_allocator.go:91] Node c1-master has no CIDR, ignoring | |
I1031 02:54:21.618835 1 taint_controller.go:158] Sending events to api server. | |
I1031 02:54:21.619236 1 controllermanager.go:487] Started "node" | |
I1031 02:54:21.619362 1 node_controller.go:516] Starting node controller | |
I1031 02:54:21.619411 1 controller_utils.go:1041] Waiting for caches to sync for node controller | |
I1031 02:54:21.641734 1 controllermanager.go:487] Started "deployment" | |
I1031 02:54:21.641919 1 deployment_controller.go:151] Starting deployment controller | |
I1031 02:54:21.642383 1 controller_utils.go:1041] Waiting for caches to sync for deployment controller | |
I1031 02:54:21.673142 1 controllermanager.go:487] Started "tokencleaner" | |
E1031 02:54:21.707384 1 core.go:70] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail. | |
W1031 02:54:21.707429 1 controllermanager.go:484] Skipping "service" | |
I1031 02:54:21.757992 1 controllermanager.go:487] Started "persistentvolume-binder" | |
I1031 02:54:21.758175 1 pv_controller_base.go:259] Starting persistent volume controller | |
I1031 02:54:21.759188 1 controller_utils.go:1041] Waiting for caches to sync for persistent volume controller | |
I1031 02:54:21.828878 1 controllermanager.go:487] Started "daemonset" | |
I1031 02:54:21.831965 1 daemon_controller.go:230] Starting daemon sets controller | |
I1031 02:54:21.831996 1 controller_utils.go:1041] Waiting for caches to sync for daemon sets controller | |
E1031 02:54:21.882840 1 actual_state_of_world.go:483] Failed to set statusUpdateNeeded to needed true because nodeName="c1-master" does not exist | |
E1031 02:54:21.883026 1 actual_state_of_world.go:497] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true because nodeName="c1-master" does not exist | |
I1031 02:54:21.912608 1 range_allocator.go:249] Set node c1-master PodCIDR to 192.168.0.0/24 | |
I1031 02:54:21.913847 1 controller_utils.go:1048] Caches are synced for TTL controller | |
I1031 02:54:21.913921 1 controller_utils.go:1048] Caches are synced for RC controller | |
I1031 02:54:21.914020 1 controller_utils.go:1048] Caches are synced for certificate controller | |
I1031 02:54:21.919584 1 controller_utils.go:1048] Caches are synced for node controller | |
I1031 02:54:21.919750 1 taint_controller.go:181] Starting NoExecuteTaintManager | |
I1031 02:54:21.919794 1 node_controller.go:563] Initializing eviction metric for zone: | |
W1031 02:54:21.919895 1 node_controller.go:916] Missing timestamp for Node c1-master. Assuming now as a timestamp. | |
I1031 02:54:21.920327 1 event.go:218] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"c1-master", UID:"bfffffa1-bde6-11e7-9f28-7a8f0192438d", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node c1-master event: Registered Node c1-master in Controller | |
I1031 02:54:21.932760 1 controller_utils.go:1048] Caches are synced for daemon sets controller | |
I1031 02:54:21.943068 1 controller_utils.go:1048] Caches are synced for deployment controller | |
I1031 02:54:21.947863 1 controller_utils.go:1048] Caches are synced for HPA controller | |
I1031 02:54:21.955336 1 controller_utils.go:1048] Caches are synced for namespace controller | |
I1031 02:54:21.955934 1 controller_utils.go:1048] Caches are synced for disruption controller | |
I1031 02:54:21.955982 1 disruption.go:296] Sending events to api server. | |
I1031 02:54:21.956567 1 controller_utils.go:1048] Caches are synced for replica set controller | |
I1031 02:54:21.962010 1 controller_utils.go:1048] Caches are synced for certificate controller | |
I1031 02:54:21.962322 1 controller_utils.go:1048] Caches are synced for persistent volume controller | |
I1031 02:54:21.966983 1 controller_utils.go:1048] Caches are synced for GC controller | |
I1031 02:54:21.973233 1 controller_utils.go:1048] Caches are synced for service account controller | |
I1031 02:54:21.973907 1 controller_utils.go:1048] Caches are synced for endpoint controller | |
I1031 02:54:21.980286 1 controller_utils.go:1048] Caches are synced for attach detach controller | |
I1031 02:54:21.988224 1 event.go:218] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"kube-dns", UID:"c3f24119-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"168", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kube-dns-545bc4bfd4 to 1 | |
I1031 02:54:21.988708 1 controller_utils.go:1048] Caches are synced for job controller | |
I1031 02:54:21.990258 1 event.go:218] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"calico-kube-controllers", UID:"c48b41f4-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"197", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set calico-kube-controllers-6ff88bf6d4 to 1 | |
I1031 02:54:22.024282 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"c3fd3f93-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"175", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-pdkzd | |
I1031 02:54:22.037599 1 controller_utils.go:1048] Caches are synced for resource quota controller | |
I1031 02:54:22.038471 1 controller_utils.go:1048] Caches are synced for garbage collector controller | |
I1031 02:54:22.038554 1 garbagecollector.go:145] Garbage collector: all resource monitors have synced. Proceeding to collect garbage | |
I1031 02:54:22.050146 1 controller_utils.go:1048] Caches are synced for stateful set controller | |
I1031 02:54:22.073173 1 event.go:218] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"calico-kube-controllers-6ff88bf6d4", UID:"cbecb309-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"302", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-kube-controllers-6ff88bf6d4-zxdpw | |
I1031 02:54:22.079643 1 event.go:218] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"kube-dns-545bc4bfd4", UID:"cbec6b8e-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"301", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-dns-545bc4bfd4-fj4q9 | |
I1031 02:54:22.325496 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"cloud-controller-manager", UID:"c537c7e5-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"237", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: cloud-controller-manager-m2v46 | |
E1031 02:54:22.374907 1 daemon_controller.go:263] kube-system/cloud-controller-manager failed with : error storing status for daemon set &v1beta1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"cloud-controller-manager", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/extensions/v1beta1/namespaces/kube-system/daemonsets/cloud-controller-manager", UID:"c537c7e5-bde6-11e7-9f28-7a8f0192438d", ResourceVersion:"237", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{sec:63645015250, nsec:0, loc:(*time.Location)(0x4bb7dc0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"app":"cloud-controller-manager"}, Annotations:map[string]string{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{},\"name\":\"cloud-controller-manager\",\"namespace\":\"kube-system\"},\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"scheduler.alpha.kubernetes.io/critical-pod\":\"\"},\"labels\":{\"app\":\"cloud-controller-manager\"}},\"spec\":{\"containers\":[{\"command\":[\"/bin/digitalocean-cloud-controller-manager\",\"--cloud-provider=digitalocean\",\"--leader-elect=false\"],\"env\":[{\"name\":\"DO_ACCESS_TOKEN\",\"valueFrom\":{\"secretKeyRef\":{\"key\":\"token\",\"name\":\"digitalocean\"}}}],\"image\":\"digitalocean/digitalocean-cloud-controller-manager:v0.1.2\",\"name\":\"ccm\",\"resources\":{\"requests\":{\"cpu\":\"100m\",\"memory\":\"50Mi\"}}}],\"dnsPolicy\":\"Default\",\"hostNetwork\":true,\"nodeSelector\":{\"node-role.kubernetes.io/master\":\"\"},\"serviceAccountName\":\"cloud-controller-manager\",\"tolerations\":[{\"effect\":\"NoSchedule\",\"key\":\"node.cloudprovider.kubernetes.io/uninitialized\",\"value\":\"true\"},{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"},{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}}}\n"}, 
OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1beta1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc42109a5c0), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{sec:0, nsec:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"app":"cloud-controller-manager"}, Annotations:map[string]string{"scheduler.alpha.kubernetes.io/critical-pod":""}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume(nil), InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"ccm", Image:"digitalocean/digitalocean-cloud-controller-manager:v0.1.2", Command:[]string{"/bin/digitalocean-cloud-controller-manager", "--cloud-provider=digitalocean", "--leader-elect=false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"DO_ACCESS_TOKEN", Value:"", ValueFrom:(*v1.EnvVarSource)(0xc42109a720)}}, Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"50Mi", Format:"BinarySI"}}}, VolumeMounts:[]v1.VolumeMount(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, 
TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc4201beed8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"Default", NodeSelector:map[string]string{"node-role.kubernetes.io/master":""}, ServiceAccountName:"cloud-controller-manager", DeprecatedServiceAccount:"cloud-controller-manager", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, SecurityContext:(*v1.PodSecurityContext)(0xc4205fd500), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.cloudprovider.kubernetes.io/uninitialized", Operator:"", Value:"true", Effect:"NoSchedule", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"node-role.kubernetes.io/master", Operator:"", Value:"", Effect:"NoSchedule", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil)}}, UpdateStrategy:v1beta1.DaemonSetUpdateStrategy{Type:"OnDelete", RollingUpdate:(*v1beta1.RollingUpdateDaemonSet)(nil)}, MinReadySeconds:0, TemplateGeneration:1, RevisionHistoryLimit:(*int32)(0xc4201bef68)}, Status:v1beta1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil)}}: Operation cannot be fulfilled on daemonsets.extensions "cloud-controller-manager": the object has been modified; please apply your changes to the latest version and try again | |
I1031 02:54:26.920185 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 02:54:31.920601 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 02:54:36.920985 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 02:54:41.921548 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 02:54:43.749111 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kubeadm-probe", UID:"c4e60e81-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"332", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubeadm-probe-w99lt | |
I1031 02:54:43.749780 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"calico-node", UID:"c4889e73-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"317", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-node-5qrsr | |
I1031 02:54:43.843068 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"calico-etcd", UID:"c482a459-bde6-11e7-9f28-7a8f0192438d", APIVersion:"extensions", ResourceVersion:"303", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-etcd-z88bw | |
I1031 02:54:46.922588 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:54:51.923171 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:54:56.923540 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:01.924002 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:06.924541 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:11.926637 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:16.927090 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:21.927586 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:27.035589 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:32.036224 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:37.036761 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:42.037291 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:47.037612 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:52.038104 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:55:57.038468 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:02.039015 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:07.039958 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:12.040270 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:17.040906 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:22.041986 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:27.042541 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:32.043076 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:37.043421 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:42.044020 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:47.044858 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:52.045276 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:56:57.045790 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:02.046349 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:07.047345 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:12.047730 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:17.048120 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:22.048566 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:27.049012 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:32.049924 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:37.050411 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:42.050814 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:47.051376 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:52.052048 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:57:57.052481 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:02.053010 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:07.053684 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:12.054412 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:17.054955 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:22.055495 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:27.056323 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:32.056735 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:37.057210 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:42.057659 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:47.058121 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:52.058511 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:58:57.058928 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:59:02.059410 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:59:07.060019 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:59:12.060461 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:59:17.060940 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
I1031 02:59:22.061365 1 node_controller.go:563] Initializing eviction metric for zone: nyc3:: | |
root@c1-master:~# |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/usr/bin/kubelet
--allow-privileged=true
--authorization-mode=Webhook
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf
--cadvisor-port=0
--cert-dir=/var/lib/kubelet/pki
--client-ca-file=/etc/kubernetes/pki/ca.crt
--cloud-provider=external
--cluster-dns=10.96.0.10
--cluster-domain=cluster.local
--cni-bin-dir=/opt/cni/bin
--cni-conf-dir=/etc/cni/net.d
--kubeconfig=/etc/kubernetes/kubelet.conf
--network-plugin=cni
--pod-manifest-path=/etc/kubernetes/manifests
--rotate-certificates=true
kube-controller-manager
--address=127.0.0.1
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
--cluster-signing-key-file=/etc/kubernetes/pki/ca.key
--controllers=*,bootstrapsigner,tokencleaner
--kubeconfig=/etc/kubernetes/controller-manager.conf
--leader-elect=true
--root-ca-file=/etc/kubernetes/pki/ca.crt
--service-account-private-key-file=/etc/kubernetes/pki/sa.key
--use-service-account-credentials=true
kube-apiserver | |
--admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota | |
--advertise-address=45.55.41.241 | |
--allow-privileged=true | |
--authorization-mode=Node,RBAC | |
--client-ca-file=/etc/kubernetes/pki/ca.crt | |
--enable-bootstrap-token-auth=true | |
--etcd-servers=http://127.0.0.1:2379 | |
--insecure-port=0 | |
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt | |
--kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key | |
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname | |
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt | |
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key | |
--requestheader-allowed-names=front-proxy-client | |
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt | |
--requestheader-extra-headers-prefix=X-Remote-Extra- | |
--requestheader-group-headers=X-Remote-Group | |
--requestheader-username-headers=X-Remote-User | |
--secure-port=6443 | |
--service-account-key-file=/etc/kubernetes/pki/sa.pub | |
--service-cluster-ip-range=10.96.0.0/12 | |
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt | |
--tls-private-key-file=/etc/kubernetes/pki/apiserver.key | |
kube-scheduler | |
--address=127.0.0.1 | |
--kubeconfig=/etc/kubernetes/scheduler.conf | |
--leader-elect=true | |
/usr/local/bin/kube-proxy | |
--kubeconfig=/var/lib/kube-proxy/kubeconfig.conf |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
root@c1-master:~# kubectl logs -f -n kube-system kube-controller-manager-c1-master | |
I1031 04:57:06.821958 1 controllermanager.go:109] Version: v1.8.0 | |
I1031 04:57:06.826910 1 leaderelection.go:174] attempting to acquire leader lease... | |
I1031 04:57:06.849020 1 leaderelection.go:184] successfully acquired lease kube-system/kube-controller-manager | |
W1031 04:57:06.849170 1 controllermanager.go:138] --use-service-account-credentials was specified without providing a --service-account-private-key-file | |
I1031 04:57:06.849159 1 event.go:218] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"kube-controller-manager", UID:"f1beb9f7-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"v1", ResourceVersion:"250", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' c1-master became leader | |
I1031 04:57:06.884989 1 plugins.go:101] No cloud provider specified. | |
I1031 04:57:06.888166 1 controller_utils.go:1041] Waiting for caches to sync for tokens controller | |
I1031 04:57:06.988406 1 controller_utils.go:1048] Caches are synced for tokens controller | |
I1031 04:57:07.047478 1 controllermanager.go:487] Started "deployment" | |
I1031 04:57:07.047820 1 deployment_controller.go:151] Starting deployment controller | |
I1031 04:57:07.047895 1 controller_utils.go:1041] Waiting for caches to sync for deployment controller | |
I1031 04:57:07.072700 1 controllermanager.go:487] Started "replicaset" | |
I1031 04:57:07.073605 1 replica_set.go:156] Starting replica set controller | |
I1031 04:57:07.073710 1 controller_utils.go:1041] Waiting for caches to sync for replica set controller | |
I1031 04:57:07.102274 1 controllermanager.go:487] Started "csrapproving" | |
I1031 04:57:07.102453 1 certificate_controller.go:109] Starting certificate controller | |
I1031 04:57:07.102495 1 controller_utils.go:1041] Waiting for caches to sync for certificate controller | |
I1031 04:57:07.128753 1 controllermanager.go:487] Started "replicationcontroller" | |
I1031 04:57:07.129089 1 replication_controller.go:151] Starting RC controller | |
I1031 04:57:07.129152 1 controller_utils.go:1041] Waiting for caches to sync for RC controller | |
W1031 04:57:07.160962 1 shared_informer.go:304] resyncPeriod 59644390577746 is smaller than resyncCheckPeriod 83829992603544 and the informer has already started. Changing it to 83829992603544 | |
I1031 04:57:07.161191 1 controllermanager.go:487] Started "resourcequota" | |
I1031 04:57:07.161742 1 resource_quota_controller.go:238] Starting resource quota controller | |
I1031 04:57:07.161884 1 controller_utils.go:1041] Waiting for caches to sync for resource quota controller | |
I1031 04:57:07.235332 1 controllermanager.go:487] Started "namespace" | |
I1031 04:57:07.235468 1 namespace_controller.go:186] Starting namespace controller | |
I1031 04:57:07.235576 1 controller_utils.go:1041] Waiting for caches to sync for namespace controller | |
I1031 04:57:07.273295 1 controllermanager.go:487] Started "serviceaccount" | |
I1031 04:57:07.274828 1 serviceaccounts_controller.go:113] Starting service account controller | |
I1031 04:57:07.274920 1 controller_utils.go:1041] Waiting for caches to sync for service account controller | |
I1031 04:57:07.379085 1 controllermanager.go:487] Started "daemonset" | |
I1031 04:57:07.379260 1 daemon_controller.go:230] Starting daemon sets controller | |
W1031 04:57:07.379346 1 core.go:135] configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes. | |
W1031 04:57:07.379492 1 controllermanager.go:484] Skipping "route" | |
W1031 04:57:07.379515 1 controllermanager.go:484] Skipping "persistentvolume-expander" | |
I1031 04:57:07.379467 1 controller_utils.go:1041] Waiting for caches to sync for daemon sets controller | |
I1031 04:57:07.633226 1 controllermanager.go:487] Started "endpoint" | |
I1031 04:57:07.633799 1 endpoints_controller.go:153] Starting endpoint controller | |
I1031 04:57:07.633913 1 controller_utils.go:1041] Waiting for caches to sync for endpoint controller | |
I1031 04:57:08.686041 1 controllermanager.go:487] Started "garbagecollector" | |
I1031 04:57:08.686096 1 garbagecollector.go:136] Starting garbage collector controller | |
I1031 04:57:08.686123 1 controller_utils.go:1041] Waiting for caches to sync for garbage collector controller | |
I1031 04:57:08.686167 1 graph_builder.go:321] GraphBuilder running | |
I1031 04:57:08.745773 1 controllermanager.go:487] Started "horizontalpodautoscaling" | |
I1031 04:57:08.746063 1 horizontal.go:145] Starting HPA controller | |
I1031 04:57:08.746116 1 controller_utils.go:1041] Waiting for caches to sync for HPA controller | |
I1031 04:57:08.831614 1 controllermanager.go:487] Started "statefulset" | |
I1031 04:57:08.831670 1 stateful_set.go:146] Starting stateful set controller | |
I1031 04:57:08.831797 1 controller_utils.go:1041] Waiting for caches to sync for stateful set controller | |
I1031 04:57:09.084765 1 controllermanager.go:487] Started "persistentvolume-binder" | |
I1031 04:57:09.084954 1 pv_controller_base.go:259] Starting persistent volume controller | |
I1031 04:57:09.085016 1 controller_utils.go:1041] Waiting for caches to sync for persistent volume controller | |
I1031 04:57:09.329304 1 node_controller.go:249] Sending events to api server. | |
I1031 04:57:19.337469 1 range_allocator.go:70] Sending events to api server. | |
I1031 04:57:19.337828 1 range_allocator.go:85] No Service CIDR provided. Skipping filtering out service addresses. | |
I1031 04:57:19.337850 1 range_allocator.go:91] Node c1-master has no CIDR, ignoring | |
I1031 04:57:19.338280 1 taint_controller.go:158] Sending events to api server. | |
I1031 04:57:19.344868 1 controllermanager.go:487] Started "node" | |
I1031 04:57:19.345375 1 node_controller.go:516] Starting node controller | |
I1031 04:57:19.345453 1 controller_utils.go:1041] Waiting for caches to sync for node controller | |
I1031 04:57:19.388445 1 controllermanager.go:487] Started "podgc" | |
I1031 04:57:19.388901 1 gc_controller.go:76] Starting GC controller | |
I1031 04:57:19.389038 1 controller_utils.go:1041] Waiting for caches to sync for GC controller | |
I1031 04:57:19.415143 1 controllermanager.go:487] Started "disruption" | |
I1031 04:57:19.415492 1 disruption.go:288] Starting disruption controller | |
I1031 04:57:19.415507 1 controller_utils.go:1041] Waiting for caches to sync for disruption controller | |
I1031 04:57:19.430221 1 controllermanager.go:487] Started "csrsigning" | |
I1031 04:57:19.430552 1 certificate_controller.go:109] Starting certificate controller | |
I1031 04:57:19.430629 1 controller_utils.go:1041] Waiting for caches to sync for certificate controller | |
I1031 04:57:19.457265 1 controllermanager.go:487] Started "bootstrapsigner" | |
E1031 04:57:19.489562 1 core.go:70] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail. | |
W1031 04:57:19.489592 1 controllermanager.go:484] Skipping "service" | |
I1031 04:57:19.515144 1 controllermanager.go:487] Started "job" | |
I1031 04:57:19.515612 1 job_controller.go:138] Starting job controller | |
I1031 04:57:19.515640 1 controller_utils.go:1041] Waiting for caches to sync for job controller | |
I1031 04:57:19.552855 1 controllermanager.go:487] Started "cronjob" | |
I1031 04:57:19.553215 1 cronjob_controller.go:98] Starting CronJob Manager | |
I1031 04:57:19.731081 1 controllermanager.go:487] Started "ttl" | |
I1031 04:57:19.731612 1 ttl_controller.go:116] Starting TTL controller | |
I1031 04:57:19.731728 1 controller_utils.go:1041] Waiting for caches to sync for TTL controller | |
I1031 04:57:19.979381 1 controllermanager.go:487] Started "tokencleaner" | |
I1031 04:57:20.232070 1 controllermanager.go:487] Started "attachdetach" | |
I1031 04:57:20.238306 1 attach_detach_controller.go:255] Starting attach detach controller | |
I1031 04:57:20.238400 1 controller_utils.go:1041] Waiting for caches to sync for attach detach controller | |
I1031 04:57:20.312296 1 controller_utils.go:1048] Caches are synced for certificate controller | |
E1031 04:57:20.317106 1 actual_state_of_world.go:483] Failed to set statusUpdateNeeded to needed true because nodeName="c1-master" does not exist | |
E1031 04:57:20.317614 1 actual_state_of_world.go:497] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true because nodeName="c1-master" does not exist | |
I1031 04:57:20.330444 1 range_allocator.go:249] Set node c1-master PodCIDR to 192.168.0.0/24 | |
I1031 04:57:20.331744 1 controller_utils.go:1048] Caches are synced for certificate controller | |
I1031 04:57:20.332037 1 controller_utils.go:1048] Caches are synced for TTL controller | |
I1031 04:57:20.337032 1 controller_utils.go:1048] Caches are synced for namespace controller | |
I1031 04:57:20.346280 1 controller_utils.go:1048] Caches are synced for HPA controller | |
I1031 04:57:20.362067 1 controller_utils.go:1048] Caches are synced for resource quota controller | |
I1031 04:57:20.373952 1 controller_utils.go:1048] Caches are synced for replica set controller | |
I1031 04:57:20.375245 1 controller_utils.go:1048] Caches are synced for service account controller | |
I1031 04:57:20.379729 1 controller_utils.go:1048] Caches are synced for daemon sets controller | |
I1031 04:57:20.385317 1 controller_utils.go:1048] Caches are synced for persistent volume controller | |
I1031 04:57:20.386612 1 controller_utils.go:1048] Caches are synced for garbage collector controller | |
I1031 04:57:20.386626 1 garbagecollector.go:145] Garbage collector: all resource monitors have synced. Proceeding to collect garbage | |
I1031 04:57:20.389481 1 controller_utils.go:1048] Caches are synced for GC controller | |
I1031 04:57:20.416881 1 controller_utils.go:1048] Caches are synced for job controller | |
I1031 04:57:20.417040 1 controller_utils.go:1048] Caches are synced for disruption controller | |
I1031 04:57:20.417053 1 disruption.go:296] Sending events to api server. | |
I1031 04:57:20.431105 1 controller_utils.go:1048] Caches are synced for RC controller | |
I1031 04:57:20.432025 1 controller_utils.go:1048] Caches are synced for stateful set controller | |
I1031 04:57:20.435216 1 controller_utils.go:1048] Caches are synced for endpoint controller | |
I1031 04:57:20.439877 1 controller_utils.go:1048] Caches are synced for attach detach controller | |
I1031 04:57:20.448335 1 controller_utils.go:1048] Caches are synced for node controller | |
I1031 04:57:20.448513 1 controller_utils.go:1048] Caches are synced for deployment controller | |
I1031 04:57:20.448696 1 node_controller.go:563] Initializing eviction metric for zone: | |
W1031 04:57:20.448932 1 node_controller.go:916] Missing timestamp for Node c1-master. Assuming now as a timestamp. | |
I1031 04:57:20.449945 1 taint_controller.go:181] Starting NoExecuteTaintManager | |
I1031 04:57:20.450067 1 event.go:218] Event(v1.ObjectReference{Kind:"Node", Namespace:"", Name:"c1-master", UID:"cad68b48-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'RegisteredNode' Node c1-master event: Registered Node c1-master in Controller | |
I1031 04:57:20.513793 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"calico-node", UID:"cec07998-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"164", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-node-k9jqm | |
I1031 04:57:20.513831 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kubeadm-probe", UID:"cf10b2b7-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"179", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kubeadm-probe-pdw8c | |
I1031 04:57:20.609773 1 event.go:218] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"kube-dns", UID:"ce40df4a-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"146", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set kube-dns-545bc4bfd4 to 1 | |
I1031 04:57:20.610620 1 event.go:218] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"kube-system", Name:"calico-kube-controllers", UID:"cec37851-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"165", FieldPath:""}): type: 'Normal' reason: 'ScalingReplicaSet' Scaled up replica set calico-kube-controllers-6ff88bf6d4 to 1 | |
I1031 04:57:20.656222 1 event.go:218] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"kube-dns-545bc4bfd4", UID:"f9f08ca7-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"372", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-dns-545bc4bfd4-gvrlg | |
I1031 04:57:20.668020 1 event.go:218] Event(v1.ObjectReference{Kind:"ReplicaSet", Namespace:"kube-system", Name:"calico-kube-controllers-6ff88bf6d4", UID:"f9f10fae-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"371", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-kube-controllers-6ff88bf6d4-dp8gf | |
I1031 04:57:20.691482 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"kube-proxy", UID:"ce469024-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"151", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: kube-proxy-l86qx | |
I1031 04:57:20.698435 1 event.go:218] Event(v1.ObjectReference{Kind:"DaemonSet", Namespace:"kube-system", Name:"calico-etcd", UID:"cebba9e8-bdf7-11e7-a7f4-16aac5b0ea95", APIVersion:"extensions", ResourceVersion:"161", FieldPath:""}): type: 'Normal' reason: 'SuccessfulCreate' Created pod: calico-etcd-nj86x | |
E1031 04:57:20.759519 1 daemon_controller.go:263] kube-system/kube-proxy failed with : error storing status for daemon set &v1beta1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-proxy", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/extensions/v1beta1/namespaces/kube-system/daemonsets/kube-proxy", UID:"ce469024-bdf7-11e7-a7f4-16aac5b0ea95", ResourceVersion:"151", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{sec:63645022567, nsec:0, loc:(*time.Location)(0x4bb7dc0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1beta1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc4203fd2c0), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{sec:0, nsec:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"kube-proxy"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-proxy", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), 
RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(0xc4214953c0), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}, v1.Volume{Name:"xtables-lock", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc4203fd320), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), 
StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"kube-proxy", Image:"gcr.io/google_containers/kube-proxy-amd64:v1.8.0", Command:[]string{"/usr/local/bin/kube-proxy", "--kubeconfig=/var/lib/kube-proxy/kubeconfig.conf", "--cluster-cidr=192.168.0.0/16"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-proxy", ReadOnly:false, MountPath:"/var/lib/kube-proxy", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}, v1.VolumeMount{Name:"xtables-lock", ReadOnly:false, MountPath:"/run/xtables.lock", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(0xc421495480), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc4204025d8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"kube-proxy", DeprecatedServiceAccount:"kube-proxy", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, SecurityContext:(*v1.PodSecurityContext)(0xc4214954c0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node-role.kubernetes.io/master", Operator:"", Value:"", Effect:"NoSchedule", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"node.cloudprovider.kubernetes.io/uninitialized", Operator:"", Value:"true", Effect:"NoSchedule", 
TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil)}}, UpdateStrategy:v1beta1.DaemonSetUpdateStrategy{Type:"RollingUpdate", RollingUpdate:(*v1beta1.RollingUpdateDaemonSet)(0xc42000d3e8)}, MinReadySeconds:0, TemplateGeneration:1, RevisionHistoryLimit:(*int32)(0xc42040264c)}, Status:v1beta1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil)}}: Operation cannot be fulfilled on daemonsets.extensions "kube-proxy": the object has been modified; please apply your changes to the latest version and try again | |
E1031 04:57:20.761610 1 daemon_controller.go:263] kube-system/calico-etcd failed with : error storing status for daemon set &v1beta1.DaemonSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"calico-etcd", GenerateName:"", Namespace:"kube-system", SelfLink:"/apis/extensions/v1beta1/namespaces/kube-system/daemonsets/calico-etcd", UID:"cebba9e8-bdf7-11e7-a7f4-16aac5b0ea95", ResourceVersion:"161", Generation:1, CreationTimestamp:v1.Time{Time:time.Time{sec:63645022568, nsec:0, loc:(*time.Location)(0x4bb7dc0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"calico-etcd"}, Annotations:map[string]string{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"DaemonSet\",\"metadata\":{\"annotations\":{},\"labels\":{\"k8s-app\":\"calico-etcd\"},\"name\":\"calico-etcd\",\"namespace\":\"kube-system\"},\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"scheduler.alpha.kubernetes.io/critical-pod\":\"\"},\"labels\":{\"k8s-app\":\"calico-etcd\"}},\"spec\":{\"containers\":[{\"args\":[\"/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667\"],\"command\":[\"/bin/sh\",\"-c\"],\"env\":[{\"name\":\"CALICO_ETCD_IP\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"status.podIP\"}}}],\"image\":\"quay.io/coreos/etcd:v3.1.10\",\"name\":\"calico-etcd\",\"volumeMounts\":[{\"mountPath\":\"/var/etcd\",\"name\":\"var-etcd\"}]}],\"hostNetwork\":true,\"nodeSelector\":{\"node-role.kubernetes.io/master\":\"\"},\"tolerations\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"},{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"hostPath\":{\"path\":\"/var/etcd\"},\"name\":\"var-etcd\"}]}}}}\n"}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), 
Finalizers:[]string(nil), ClusterName:""}, Spec:v1beta1.DaemonSetSpec{Selector:(*v1.LabelSelector)(0xc4203fd680), Template:v1.PodTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{sec:0, nsec:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"calico-etcd"}, Annotations:map[string]string{"scheduler.alpha.kubernetes.io/critical-pod":""}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"var-etcd", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(0xc4203fd820), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, 
InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"calico-etcd", Image:"quay.io/coreos/etcd:v3.1.10", Command:[]string{"/bin/sh", "-c"}, Args:[]string{"/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"}, WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar{v1.EnvVar{Name:"CALICO_ETCD_IP", Value:"", ValueFrom:(*v1.EnvVarSource)(0xc4203fd920)}}, Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"var-etcd", ReadOnly:false, MountPath:"/var/etcd", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc4204027b8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string{"node-role.kubernetes.io/master":""}, ServiceAccountName:"", DeprecatedServiceAccount:"", AutomountServiceAccountToken:(*bool)(nil), NodeName:"", HostNetwork:true, HostPID:false, HostIPC:false, SecurityContext:(*v1.PodSecurityContext)(0xc421495540), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node-role.kubernetes.io/master", Operator:"", Value:"", Effect:"NoSchedule", TolerationSeconds:(*int64)(nil)}, v1.Toleration{Key:"CriticalAddonsOnly", Operator:"Exists", Value:"", Effect:"", TolerationSeconds:(*int64)(nil)}}, HostAliases:[]v1.HostAlias(nil), 
PriorityClassName:"", Priority:(*int32)(nil)}}, UpdateStrategy:v1beta1.DaemonSetUpdateStrategy{Type:"OnDelete", RollingUpdate:(*v1beta1.RollingUpdateDaemonSet)(nil)}, MinReadySeconds:0, TemplateGeneration:1, RevisionHistoryLimit:(*int32)(0xc420402848)}, Status:v1beta1.DaemonSetStatus{CurrentNumberScheduled:0, NumberMisscheduled:0, DesiredNumberScheduled:0, NumberReady:0, ObservedGeneration:0, UpdatedNumberScheduled:0, NumberAvailable:0, NumberUnavailable:0, CollisionCount:(*int32)(nil)}}: Operation cannot be fulfilled on daemonsets.extensions "calico-etcd": the object has been modified; please apply your changes to the latest version and try again | |
I1031 04:57:25.449476 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:30.449890 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:35.450183 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:40.450628 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:45.450994 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:50.451528 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:57:55.452267 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:00.452915 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:05.453614 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:10.454188 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:15.455089 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:20.455806 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:25.456408 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:30.456682 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:35.457045 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:40.457907 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:45.458364 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:50.459048 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:58:55.459451 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:00.459945 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:05.460578 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:10.463076 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:15.463960 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:20.464552 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:25.464987 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:30.465715 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:35.466082 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:40.466506 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:45.466934 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:50.467247 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 04:59:55.467889 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:00.468429 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:05.468778 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:10.469207 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:15.469676 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:20.470006 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:25.470571 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:30.471036 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:35.471400 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:40.472370 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:45.472908 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:50.473267 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:00:55.473698 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:00.474081 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:05.474457 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:10.475035 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:15.475451 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:20.475925 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:25.476579 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:30.477157 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:35.477820 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:40.478539 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:45.478933 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:50.479323 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:01:55.479622 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:00.480172 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:05.480938 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:10.481311 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:15.481907 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:20.482467 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:25.482990 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:30.483651 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:35.484056 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:40.484848 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:45.485460 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:50.485737 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:02:55.486070 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:00.486430 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:05.487765 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:10.488622 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:15.489092 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:20.489540 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:25.489938 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:30.491891 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:35.492186 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:40.492686 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:45.493485 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:50.493986 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:03:55.494623 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:00.495049 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:05.495581 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:10.496051 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:15.496549 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:20.496945 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:25.497482 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:30.498079 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:35.498557 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:40.498984 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:45.499461 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:50.499824 1 node_controller.go:563] Initializing eviction metric for zone: | |
I1031 05:04:55.500293 1 node_controller.go:563] Initializing eviction metric for zone: |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment