Having a crazy problem on the production cluster all day today: all of a sudden, kube-controller-manager started restarting constantly:
[root@k8s-n01 ~]# kubectl -n kube-system logs -f kube-controller-manager-k8s-n01
I0205 17:40:57.646786 1 feature_gate.go:206] feature gates: &{map[PersistentLocalVolumes:true VolumeScheduling:true]}
I0205 17:40:57.646988 1 flags.go:33] FLAG: --address="0.0.0.0"
I0205 17:40:57.647005 1 flags.go:33] FLAG: --allocate-node-cidrs="false"
I0205 17:40:57.647015 1 flags.go:33] FLAG: --allow-untagged-cloud="false"
I0205 17:40:57.647022 1 flags.go:33] FLAG: --allow-verification-with-non-compliant-keys="false"
I0205 17:40:57.647031 1 flags.go:33] FLAG: --alsologtostderr="false"
I0205 17:40:57.647038 1 flags.go:33] FLAG: --application-metrics-count-limit="100"
I0205 17:40:57.647046 1 flags.go:33] FLAG: --attach-detach-reconcile-sync-period="1m0s"
I0205 17:40:57.647069 1 flags.go:33] FLAG: --authentication-kubeconfig=""
I0205 17:40:57.647104 1 flags.go:33] FLAG: --authentication-skip-lookup="false"
I0205 17:40:57.647112 1 flags.go:33] FLAG: --authentication-token-webhook-cache-ttl="10s"
I0205 17:40:57.647119 1 flags.go:33] FLAG: --authorization-always-allow-paths="[/healthz]"
I0205 17:40:57.647156 1 flags.go:33] FLAG: --authorization-kubeconfig=""
I0205 17:40:57.647163 1 flags.go:33] FLAG: --authorization-webhook-cache-authorized-ttl="10s"
I0205 17:40:57.647171 1 flags.go:33] FLAG: --authorization-webhook-cache-unauthorized-ttl="10s"
I0205 17:40:57.647179 1 flags.go:33] FLAG: --azure-container-registry-config=""
I0205 17:40:57.647187 1 flags.go:33] FLAG: --bind-address="0.0.0.0"
I0205 17:40:57.647229 1 flags.go:33] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
I0205 17:40:57.647237 1 flags.go:33] FLAG: --cert-dir="/var/run/kubernetes"
I0205 17:40:57.647245 1 flags.go:33] FLAG: --cidr-allocator-type="RangeAllocator"
I0205 17:40:57.647252 1 flags.go:33] FLAG: --client-ca-file=""
I0205 17:40:57.647259 1 flags.go:33] FLAG: --cloud-config=""
I0205 17:40:57.647268 1 flags.go:33] FLAG: --cloud-provider=""
I0205 17:40:57.647281 1 flags.go:33] FLAG: --cloud-provider-gce-lb-src-cidrs="130.211.0.0/22,209.85.152.0/22,209.85.204.0/22,35.191.0.0/16"
I0205 17:40:57.647296 1 flags.go:33] FLAG: --cluster-cidr=""
I0205 17:40:57.647334 1 flags.go:33] FLAG: --cluster-name="kubernetes"
I0205 17:40:57.647348 1 flags.go:33] FLAG: --cluster-signing-cert-file="/etc/kubernetes/ssl/ca.pem"
I0205 17:40:57.647356 1 flags.go:33] FLAG: --cluster-signing-key-file="/etc/kubernetes/ssl/ca-key.pem"
I0205 17:40:57.647363 1 flags.go:33] FLAG: --concurrent-deployment-syncs="5"
I0205 17:40:57.647372 1 flags.go:33] FLAG: --concurrent-endpoint-syncs="5"
I0205 17:40:57.647379 1 flags.go:33] FLAG: --concurrent-gc-syncs="20"
I0205 17:40:57.647386 1 flags.go:33] FLAG: --concurrent-namespace-syncs="10"
I0205 17:40:57.647400 1 flags.go:33] FLAG: --concurrent-replicaset-syncs="5"
I0205 17:40:57.647412 1 flags.go:33] FLAG: --concurrent-resource-quota-syncs="5"
I0205 17:40:57.647418 1 flags.go:33] FLAG: --concurrent-service-syncs="1"
I0205 17:40:57.647425 1 flags.go:33] FLAG: --concurrent-serviceaccount-token-syncs="5"
I0205 17:40:57.647432 1 flags.go:33] FLAG: --concurrent-ttl-after-finished-syncs="5"
I0205 17:40:57.647438 1 flags.go:33] FLAG: --concurrent_rc_syncs="5"
I0205 17:40:57.647445 1 flags.go:33] FLAG: --configure-cloud-routes="true"
I0205 17:40:57.647452 1 flags.go:33] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
I0205 17:40:57.647460 1 flags.go:33] FLAG: --containerd="unix:///var/run/containerd.sock"
I0205 17:40:57.647469 1 flags.go:33] FLAG: --contention-profiling="false"
I0205 17:40:57.647476 1 flags.go:33] FLAG: --controller-start-interval="0s"
I0205 17:40:57.647483 1 flags.go:33] FLAG: --controllers="[*]"
I0205 17:40:57.647498 1 flags.go:33] FLAG: --default-not-ready-toleration-seconds="300"
I0205 17:40:57.647506 1 flags.go:33] FLAG: --default-unreachable-toleration-seconds="300"
I0205 17:40:57.647515 1 flags.go:33] FLAG: --deleting-pods-burst="0"
I0205 17:40:57.647522 1 flags.go:33] FLAG: --deleting-pods-qps="0.1"
I0205 17:40:57.647534 1 flags.go:33] FLAG: --deployment-controller-sync-period="30s"
I0205 17:40:57.647543 1 flags.go:33] FLAG: --disable-attach-detach-reconcile-sync="false"
I0205 17:40:57.647549 1 flags.go:33] FLAG: --docker="unix:///var/run/docker.sock"
I0205 17:40:57.647557 1 flags.go:33] FLAG: --docker-env-metadata-whitelist=""
I0205 17:40:57.647564 1 flags.go:33] FLAG: --docker-only="false"
I0205 17:40:57.647573 1 flags.go:33] FLAG: --docker-root="/var/lib/docker"
I0205 17:40:57.647580 1 flags.go:33] FLAG: --docker-tls="false"
I0205 17:40:57.647586 1 flags.go:33] FLAG: --docker-tls-ca="ca.pem"
I0205 17:40:57.647593 1 flags.go:33] FLAG: --docker-tls-cert="cert.pem"
I0205 17:40:57.647600 1 flags.go:33] FLAG: --docker-tls-key="key.pem"
I0205 17:40:57.647607 1 flags.go:33] FLAG: --enable-dynamic-provisioning="true"
I0205 17:40:57.647614 1 flags.go:33] FLAG: --enable-garbage-collector="true"
I0205 17:40:57.647621 1 flags.go:33] FLAG: --enable-hostpath-provisioner="false"
I0205 17:40:57.647627 1 flags.go:33] FLAG: --enable-load-reader="false"
I0205 17:40:57.647634 1 flags.go:33] FLAG: --enable-taint-manager="true"
I0205 17:40:57.647641 1 flags.go:33] FLAG: --event-storage-age-limit="default=0"
I0205 17:40:57.647649 1 flags.go:33] FLAG: --event-storage-event-limit="default=0"
I0205 17:40:57.647655 1 flags.go:33] FLAG: --experimental-cluster-signing-duration="8760h0m0s"
I0205 17:40:57.647664 1 flags.go:33] FLAG: --external-cloud-volume-plugin=""
I0205 17:40:57.647670 1 flags.go:33] FLAG: --feature-gates="PersistentLocalVolumes=true,VolumeScheduling=true"
I0205 17:40:57.647694 1 flags.go:33] FLAG: --flex-volume-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
I0205 17:40:57.647702 1 flags.go:33] FLAG: --global-housekeeping-interval="1m0s"
I0205 17:40:57.647710 1 flags.go:33] FLAG: --google-json-key=""
I0205 17:40:57.647717 1 flags.go:33] FLAG: --help="false"
I0205 17:40:57.647724 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-cpu-initialization-period="5m0s"
I0205 17:40:57.647731 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-downscale-delay="5m0s"
I0205 17:40:57.647738 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-downscale-stabilization="5m0s"
I0205 17:40:57.647747 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-initial-readiness-delay="30s"
I0205 17:40:57.647755 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-sync-period="15s"
I0205 17:40:57.647762 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-tolerance="0.1"
I0205 17:40:57.647772 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-upscale-delay="3m0s"
I0205 17:40:57.647780 1 flags.go:33] FLAG: --horizontal-pod-autoscaler-use-rest-clients="true"
I0205 17:40:57.647787 1 flags.go:33] FLAG: --housekeeping-interval="10s"
I0205 17:40:57.647795 1 flags.go:33] FLAG: --http2-max-streams-per-connection="0"
I0205 17:40:57.647807 1 flags.go:33] FLAG: --insecure-experimental-approve-all-kubelet-csrs-for-group=""
I0205 17:40:57.647814 1 flags.go:33] FLAG: --kube-api-burst="30"
I0205 17:40:57.647821 1 flags.go:33] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
I0205 17:40:57.647829 1 flags.go:33] FLAG: --kube-api-qps="20"
I0205 17:40:57.647837 1 flags.go:33] FLAG: --kubeconfig="/etc/kubernetes/kube-controller-manager-kubeconfig.yaml"
I0205 17:40:57.647844 1 flags.go:33] FLAG: --large-cluster-size-threshold="50"
I0205 17:40:57.647853 1 flags.go:33] FLAG: --leader-elect="true"
I0205 17:40:57.647860 1 flags.go:33] FLAG: --leader-elect-lease-duration="15s"
I0205 17:40:57.647866 1 flags.go:33] FLAG: --leader-elect-renew-deadline="10s"
I0205 17:40:57.647873 1 flags.go:33] FLAG: --leader-elect-resource-lock="endpoints"
I0205 17:40:57.647880 1 flags.go:33] FLAG: --leader-elect-retry-period="2s"
I0205 17:40:57.647887 1 flags.go:33] FLAG: --log-backtrace-at=":0"
I0205 17:40:57.647895 1 flags.go:33] FLAG: --log-cadvisor-usage="false"
I0205 17:40:57.647902 1 flags.go:33] FLAG: --log-dir=""
I0205 17:40:57.647908 1 flags.go:33] FLAG: --log-flush-frequency="5s"
I0205 17:40:57.647915 1 flags.go:33] FLAG: --logtostderr="true"
I0205 17:40:57.647922 1 flags.go:33] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
I0205 17:40:57.647931 1 flags.go:33] FLAG: --master=""
I0205 17:40:57.647938 1 flags.go:33] FLAG: --mesos-agent="127.0.0.1:5051"
I0205 17:40:57.647946 1 flags.go:33] FLAG: --mesos-agent-timeout="10s"
I0205 17:40:57.647953 1 flags.go:33] FLAG: --min-resync-period="12h0m0s"
I0205 17:40:57.647961 1 flags.go:33] FLAG: --namespace-sync-period="5m0s"
I0205 17:40:57.647969 1 flags.go:33] FLAG: --node-cidr-mask-size="24"
I0205 17:40:57.647975 1 flags.go:33] FLAG: --node-eviction-rate="0.1"
I0205 17:40:57.647983 1 flags.go:33] FLAG: --node-monitor-grace-period="40s"
I0205 17:40:57.647990 1 flags.go:33] FLAG: --node-monitor-period="5s"
I0205 17:40:57.647997 1 flags.go:33] FLAG: --node-startup-grace-period="1m0s"
I0205 17:40:57.648004 1 flags.go:33] FLAG: --node-sync-period="0s"
I0205 17:40:57.648010 1 flags.go:33] FLAG: --pod-eviction-timeout="5m0s"
I0205 17:40:57.648017 1 flags.go:33] FLAG: --port="10252"
I0205 17:40:57.648025 1 flags.go:33] FLAG: --profiling="false"
I0205 17:40:57.648032 1 flags.go:33] FLAG: --pv-recycler-increment-timeout-nfs="30"
I0205 17:40:57.648038 1 flags.go:33] FLAG: --pv-recycler-minimum-timeout-hostpath="60"
I0205 17:40:57.648045 1 flags.go:33] FLAG: --pv-recycler-minimum-timeout-nfs="300"
I0205 17:40:57.648052 1 flags.go:33] FLAG: --pv-recycler-pod-template-filepath-hostpath=""
I0205 17:40:57.648061 1 flags.go:33] FLAG: --pv-recycler-pod-template-filepath-nfs=""
I0205 17:40:57.648069 1 flags.go:33] FLAG: --pv-recycler-timeout-increment-hostpath="30"
I0205 17:40:57.648076 1 flags.go:33] FLAG: --pvclaimbinder-sync-period="15s"
I0205 17:40:57.648083 1 flags.go:33] FLAG: --register-retry-count="10"
I0205 17:40:57.648090 1 flags.go:33] FLAG: --requestheader-allowed-names="[]"
I0205 17:40:57.648123 1 flags.go:33] FLAG: --requestheader-client-ca-file=""
I0205 17:40:57.648130 1 flags.go:33] FLAG: --requestheader-extra-headers-prefix="[x-remote-extra-]"
I0205 17:40:57.648142 1 flags.go:33] FLAG: --requestheader-group-headers="[x-remote-group]"
I0205 17:40:57.648157 1 flags.go:33] FLAG: --requestheader-username-headers="[x-remote-user]"
I0205 17:40:57.648172 1 flags.go:33] FLAG: --resource-quota-sync-period="5m0s"
I0205 17:40:57.648180 1 flags.go:33] FLAG: --root-ca-file="/etc/kubernetes/ssl/ca.pem"
I0205 17:40:57.648189 1 flags.go:33] FLAG: --route-reconciliation-period="10s"
I0205 17:40:57.648196 1 flags.go:33] FLAG: --secondary-node-eviction-rate="0.01"
I0205 17:40:57.648203 1 flags.go:33] FLAG: --secure-port="10257"
I0205 17:40:57.648210 1 flags.go:33] FLAG: --service-account-private-key-file="/etc/kubernetes/ssl/service-account-key.pem"
I0205 17:40:57.648218 1 flags.go:33] FLAG: --service-cluster-ip-range=""
I0205 17:40:57.648225 1 flags.go:33] FLAG: --stderrthreshold="2"
I0205 17:40:57.648232 1 flags.go:33] FLAG: --storage-driver-buffer-duration="1m0s"
I0205 17:40:57.648239 1 flags.go:33] FLAG: --storage-driver-db="cadvisor"
I0205 17:40:57.648248 1 flags.go:33] FLAG: --storage-driver-host="localhost:8086"
I0205 17:40:57.648255 1 flags.go:33] FLAG: --storage-driver-password="root"
I0205 17:40:57.648262 1 flags.go:33] FLAG: --storage-driver-secure="false"
I0205 17:40:57.648268 1 flags.go:33] FLAG: --storage-driver-table="stats"
I0205 17:40:57.648277 1 flags.go:33] FLAG: --storage-driver-user="root"
I0205 17:40:57.648291 1 flags.go:33] FLAG: --terminated-pod-gc-threshold="12500"
I0205 17:40:57.648305 1 flags.go:33] FLAG: --tls-cert-file=""
I0205 17:40:57.648315 1 flags.go:33] FLAG: --tls-cipher-suites="[]"
I0205 17:40:57.648333 1 flags.go:33] FLAG: --tls-min-version=""
I0205 17:40:57.648341 1 flags.go:33] FLAG: --tls-private-key-file=""
I0205 17:40:57.648348 1 flags.go:33] FLAG: --tls-sni-cert-key="[]"
I0205 17:40:57.648358 1 flags.go:33] FLAG: --unhealthy-zone-threshold="0.55"
I0205 17:40:57.648366 1 flags.go:33] FLAG: --use-service-account-credentials="true"
I0205 17:40:57.648374 1 flags.go:33] FLAG: --v="2"
I0205 17:40:57.648381 1 flags.go:33] FLAG: --version="false"
I0205 17:40:57.648393 1 flags.go:33] FLAG: --vmodule=""
I0205 17:41:03.027555 1 serving.go:293] Generated self-signed cert (/var/run/kubernetes/kube-controller-manager.crt, /var/run/kubernetes/kube-controller-manager.key)
W0205 17:41:16.228175 1 authentication.go:371] failed to read in-cluster kubeconfig for delegated authentication: failed to read token file "/var/run/secrets/kubernetes.io/serviceaccount/token": open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0205 17:41:16.228278 1 authentication.go:233] No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/extension-apiserver-authentication in kube-system, so client certificate authentication won't work.
W0205 17:41:16.228320 1 authentication.go:236] No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/extension-apiserver-authentication in kube-system, so request-header client certificate authentication won't work.
W0205 17:41:16.228405 1 authorization.go:158] failed to read in-cluster kubeconfig for delegated authorization: failed to read token file "/var/run/secrets/kubernetes.io/serviceaccount/token": open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory
W0205 17:41:16.233602 1 authorization.go:127] No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.
I0205 17:41:16.233699 1 controllermanager.go:143] Version: v1.12.3
I0205 17:41:16.235114 1 secure_serving.go:116] Serving securely on [::]:10257
I0205 17:41:16.236428 1 deprecated_insecure_serving.go:50] Serving insecurely on [::]:10252
I0205 17:41:16.433802 1 leaderelection.go:187] attempting to acquire leader lease kube-system/kube-controller-manager...
I0205 17:41:58.755507 1 leaderelection.go:196] successfully acquired lease kube-system/kube-controller-manager
I0205 17:41:58.756498 1 event.go:221] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"kube-controller-manager", UID:"7ede931d-e846-11e8-97fd-549f3525ca50", APIVersion:"v1", ResourceVersion:"23680789", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' k8s-n01_3d12f32d-296d-11e9-bdf4-549f3525ca50 became leader
E0205 17:41:59.232867 1 memcache.go:134] couldn't get resource list for mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request
E0205 17:41:59.235731 1 memcache.go:134] couldn't get resource list for validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
E0205 17:42:01.335152 1 controllermanager.go:394] unable to get all supported resources from server: unable to retrieve the complete list of server APIs: mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request, validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
I0205 17:42:01.335544 1 plugins.go:99] No cloud provider specified.
I0205 17:42:01.339608 1 controllermanager.go:482] Starting "resourcequota"
I0205 17:42:01.339688 1 controller_utils.go:1027] Waiting for caches to sync for tokens controller
I0205 17:42:01.839926 1 controller_utils.go:1034] Caches are synced for tokens controller
E0205 17:42:02.534482 1 resource_quota_controller.go:167] initial discovery check failure, continuing and counting on future sync update: unable to retrieve the complete list of server APIs: mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request, validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
I0205 17:42:02.534691 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {apps deployments}
I0205 17:42:02.534805 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {apps controllerrevisions}
I0205 17:42:02.534924 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {extensions replicasets}
I0205 17:42:02.534980 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {apps statefulsets}
I0205 17:42:02.535024 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {batch jobs}
I0205 17:42:02.535064 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {networking.k8s.io networkpolicies}
I0205 17:42:02.535128 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {rbac.authorization.k8s.io rolebindings}
I0205 17:42:02.535203 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for { limitranges}
I0205 17:42:02.535249 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {extensions ingresses}
I0205 17:42:02.535319 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {autoscaling horizontalpodautoscalers}
I0205 17:42:02.535414 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for { endpoints}
I0205 17:42:02.535478 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {extensions deployments}
I0205 17:42:02.535531 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {events.k8s.io events}
W0205 17:42:02.535636 1 shared_informer.go:311] resyncPeriod 51195343521530 is smaller than resyncCheckPeriod 85362903697735 and the informer has already started. Changing it to 85362903697735
I0205 17:42:02.536440 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {policy poddisruptionbudgets}
I0205 17:42:02.536548 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {rbac.authorization.k8s.io roles}
W0205 17:42:02.536669 1 shared_informer.go:311] resyncPeriod 84290720619506 is smaller than resyncCheckPeriod 85362903697735 and the informer has already started. Changing it to 85362903697735
I0205 17:42:02.537155 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for { serviceaccounts}
I0205 17:42:02.537474 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {batch cronjobs}
I0205 17:42:02.537549 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {extensions daemonsets}
I0205 17:42:02.537594 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {apps daemonsets}
I0205 17:42:02.537816 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for { podtemplates}
I0205 17:42:02.537884 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {apps replicasets}
I0205 17:42:02.537944 1 resource_quota_monitor.go:228] QuotaMonitor created object count evaluator for {coordination.k8s.io leases}
E0205 17:42:02.538198 1 resource_quota_controller.go:173] initial monitor sync has error: [couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlrestores"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlrestores", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "servicemonitors"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=servicemonitors", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "dormantdatabases"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=dormantdatabases", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "prometheusrules"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=prometheusrules", couldn't start monitor for resource {"appcatalog.appscode.com" "v1alpha1" "appbindings"}: unable to monitor quota for resource "appcatalog.appscode.com/v1alpha1, Resource=appbindings", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "prometheuses"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=prometheuses", couldn't start monitor for resource {"extensions" "v1beta1" "networkpolicies"}: unable to monitor quota for resource "extensions/v1beta1, Resource=networkpolicies", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "memcacheds"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=memcacheds", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "postgreses"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=postgreses", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlbackups"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlbackups", couldn't start monitor for resource {"certmanager.k8s.io" "v1alpha1" "certificates"}: unable to monitor quota for resource "certmanager.k8s.io/v1alpha1, Resource=certificates", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "snapshots"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=snapshots", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "elasticsearches"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=elasticsearches", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlbackupschedules"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlbackupschedules", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "redises"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=redises", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "etcds"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=etcds", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlclusters"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlclusters", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "alertmanagers"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=alertmanagers", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "mysqls"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=mysqls", couldn't start monitor for resource {"certmanager.k8s.io" "v1alpha1" "issuers"}: unable to monitor quota for resource "certmanager.k8s.io/v1alpha1, Resource=issuers", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "mongodbs"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=mongodbs"]
I0205 17:42:02.538261 1 controllermanager.go:497] Started "resourcequota"
I0205 17:42:02.538287 1 controllermanager.go:482] Starting "podgc"
I0205 17:42:02.538320 1 resource_quota_controller.go:278] Starting resource quota controller
I0205 17:42:02.538373 1 controller_utils.go:1027] Waiting for caches to sync for resource quota controller
I0205 17:42:02.538427 1 resource_quota_monitor.go:301] QuotaMonitor running
I0205 17:42:02.650806 1 controllermanager.go:497] Started "podgc"
I0205 17:42:02.650871 1 controllermanager.go:482] Starting "csrsigning"
I0205 17:42:02.650934 1 gc_controller.go:76] Starting GC controller
I0205 17:42:02.650979 1 controller_utils.go:1027] Waiting for caches to sync for GC controller
I0205 17:42:02.729142 1 controllermanager.go:497] Started "csrsigning"
I0205 17:42:02.729224 1 controllermanager.go:482] Starting "clusterrole-aggregation"
I0205 17:42:02.729256 1 certificate_controller.go:113] Starting certificate controller
I0205 17:42:02.729337 1 controller_utils.go:1027] Waiting for caches to sync for certificate controller
I0205 17:42:02.767075 1 controllermanager.go:497] Started "clusterrole-aggregation"
I0205 17:42:02.767123 1 controllermanager.go:482] Starting "endpoint"
I0205 17:42:02.767253 1 clusterroleaggregation_controller.go:148] Starting ClusterRoleAggregator
I0205 17:42:02.767294 1 controller_utils.go:1027] Waiting for caches to sync for ClusterRoleAggregator controller
I0205 17:42:02.867319 1 controllermanager.go:497] Started "endpoint"
I0205 17:42:02.867374 1 controllermanager.go:482] Starting "horizontalpodautoscaling"
I0205 17:42:02.867423 1 endpoints_controller.go:149] Starting endpoint controller
I0205 17:42:02.867466 1 controller_utils.go:1027] Waiting for caches to sync for endpoint controller
I0205 17:42:03.051781 1 controllermanager.go:497] Started "horizontalpodautoscaling"
I0205 17:42:03.051840 1 horizontal.go:156] Starting HPA controller
I0205 17:42:03.051876 1 controller_utils.go:1027] Waiting for caches to sync for HPA controller
I0205 17:42:03.051845 1 controllermanager.go:482] Starting "csrapproving"
I0205 17:42:03.132499 1 controllermanager.go:497] Started "csrapproving"
W0205 17:42:03.132550 1 controllermanager.go:476] "bootstrapsigner" is disabled
I0205 17:42:03.132564 1 controllermanager.go:482] Starting "nodeipam"
W0205 17:42:03.132579 1 controllermanager.go:489] Skipping "nodeipam"
I0205 17:42:03.132587 1 controllermanager.go:482] Starting "namespace"
I0205 17:42:03.132669 1 certificate_controller.go:113] Starting certificate controller
I0205 17:42:03.132691 1 controller_utils.go:1027] Waiting for caches to sync for certificate controller
E0205 17:42:03.433962 1 namespaced_resources_deleter.go:169] unable to get all supported resources from server: unable to retrieve the complete list of server APIs: mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request, validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
I0205 17:42:03.631309 1 controllermanager.go:497] Started "namespace"
I0205 17:42:03.631389 1 controllermanager.go:482] Starting "replicaset"
I0205 17:42:03.631712 1 namespace_controller.go:186] Starting namespace controller
I0205 17:42:03.631735 1 controller_utils.go:1027] Waiting for caches to sync for namespace controller
I0205 17:42:04.429448 1 controllermanager.go:497] Started "replicaset"
I0205 17:42:04.429507 1 controllermanager.go:482] Starting "disruption"
I0205 17:42:04.429519 1 replica_set.go:182] Starting replicaset controller
I0205 17:42:04.429551 1 controller_utils.go:1027] Waiting for caches to sync for ReplicaSet controller
E0205 17:42:04.541632 1 resource_quota_controller.go:430] unable to retrieve the complete list of server APIs: mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request, validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
I0205 17:42:04.541722 1 resource_quota_controller.go:450] syncing resource quota controller with updated resources from discovery: map[{ v1 configmaps}:{} {extensions v1beta1 daemonsets}:{} {apps v1 deployments}:{} {rbac.authorization.k8s.io v1 roles}:{} {monitoring.coreos.com v1 prometheuses}:{} {mysql.oracle.com v1alpha1 mysqlbackupschedules}:{} { v1 endpoints}:{} {apps v1 statefulsets}:{} {mysql.oracle.com v1alpha1 mysqlclusters}:{} { v1 serviceaccounts}:{} {apps v1 daemonsets}:{} {batch v1 jobs}:{} {networking.k8s.io v1 networkpolicies}:{} {appcatalog.appscode.com v1alpha1 appbindings}:{} {kubedb.com v1alpha1 etcds}:{} {extensions v1beta1 deployments}:{} { v1 podtemplates}:{} {rbac.authorization.k8s.io v1 rolebindings}:{} {monitoring.coreos.com v1 alertmanagers}:{} { v1 events}:{} {extensions v1beta1 networkpolicies}:{} {apps v1 controllerrevisions}:{} {coordination.k8s.io v1beta1 leases}:{} {certmanager.k8s.io v1alpha1 certificates}:{} {kubedb.com v1alpha1 redises}:{} { v1 pods}:{} { v1 resourcequotas}:{} {apps v1 replicasets}:{} {autoscaling v1 horizontalpodautoscalers}:{} {monitoring.coreos.com v1 prometheusrules}:{} {kubedb.com v1alpha1 postgreses}:{} {kubedb.com v1alpha1 mongodbs}:{} { v1 secrets}:{} { v1 persistentvolumeclaims}:{} {extensions v1beta1 replicasets}:{} {batch v1beta1 cronjobs}:{} {policy v1beta1 poddisruptionbudgets}:{} {kubedb.com v1alpha1 dormantdatabases}:{} {kubedb.com v1alpha1 snapshots}:{} {mysql.oracle.com v1alpha1 mysqlrestores}:{} { v1 limitranges}:{} { v1 replicationcontrollers}:{} {extensions v1beta1 ingresses}:{} {events.k8s.io v1beta1 events}:{} {monitoring.coreos.com v1 servicemonitors}:{} {certmanager.k8s.io v1alpha1 issuers}:{} {kubedb.com v1alpha1 memcacheds}:{} {kubedb.com v1alpha1 elasticsearches}:{} { v1 services}:{} {mysql.oracle.com v1alpha1 mysqlbackups}:{} {kubedb.com v1alpha1 mysqls}:{}]
I0205 17:42:04.564552 1 controllermanager.go:497] Started "disruption"
I0205 17:42:04.564597 1 controllermanager.go:482] Starting "cronjob"
I0205 17:42:04.564707 1 disruption.go:288] Starting disruption controller
I0205 17:42:04.564764 1 controller_utils.go:1027] Waiting for caches to sync for disruption controller
I0205 17:42:04.646554 1 controllermanager.go:497] Started "cronjob"
I0205 17:42:04.646597 1 controllermanager.go:482] Starting "attachdetach"
I0205 17:42:04.646780 1 cronjob_controller.go:94] Starting CronJob Manager
W0205 17:42:04.751628 1 probe.go:270] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating.
I0205 17:42:04.834664 1 plugins.go:508] Loaded volume plugin "kubernetes.io/aws-ebs"
I0205 17:42:04.834719 1 plugins.go:508] Loaded volume plugin "kubernetes.io/gce-pd"
I0205 17:42:04.834733 1 plugins.go:508] Loaded volume plugin "kubernetes.io/cinder"
I0205 17:42:04.834747 1 plugins.go:508] Loaded volume plugin "kubernetes.io/portworx-volume"
I0205 17:42:04.834765 1 plugins.go:508] Loaded volume plugin "kubernetes.io/vsphere-volume"
I0205 17:42:04.834782 1 plugins.go:508] Loaded volume plugin "kubernetes.io/azure-disk"
I0205 17:42:04.834800 1 plugins.go:508] Loaded volume plugin "kubernetes.io/photon-pd"
I0205 17:42:04.834817 1 plugins.go:508] Loaded volume plugin "kubernetes.io/scaleio"
I0205 17:42:04.835963 1 plugins.go:508] Loaded volume plugin "kubernetes.io/storageos"
I0205 17:42:04.836003 1 plugins.go:508] Loaded volume plugin "kubernetes.io/fc"
I0205 17:42:04.836017 1 plugins.go:508] Loaded volume plugin "kubernetes.io/iscsi"
I0205 17:42:04.836031 1 plugins.go:508] Loaded volume plugin "kubernetes.io/rbd"
I0205 17:42:04.836053 1 plugins.go:508] Loaded volume plugin "kubernetes.io/csi"
I0205 17:42:04.836440 1 controllermanager.go:497] Started "attachdetach"
I0205 17:42:04.836844 1 controllermanager.go:482] Starting "pvc-protection"
I0205 17:42:04.836513 1 attach_detach_controller.go:315] Starting attach detach controller
I0205 17:42:04.837206 1 controller_utils.go:1027] Waiting for caches to sync for attach detach controller
I0205 17:42:05.029462 1 controllermanager.go:497] Started "pvc-protection"
I0205 17:42:05.029533 1 controllermanager.go:482] Starting "garbagecollector"
I0205 17:42:05.029561 1 pvc_protection_controller.go:99] Starting PVC protection controller
I0205 17:42:05.029583 1 controller_utils.go:1027] Waiting for caches to sync for PVC protection controller
W0205 17:42:05.132617 1 garbagecollector.go:649] failed to discover preferred resources: the cache has not been filled yet
I0205 17:42:05.134146 1 controllermanager.go:497] Started "garbagecollector"
I0205 17:42:05.134563 1 controllermanager.go:482] Starting "ttl"
I0205 17:42:05.134639 1 garbagecollector.go:133] Starting garbage collector controller
I0205 17:42:05.134673 1 controller_utils.go:1027] Waiting for caches to sync for garbage collector controller
I0205 17:42:05.134714 1 graph_builder.go:308] GraphBuilder running
I0205 17:42:05.352658 1 controllermanager.go:497] Started "ttl"
I0205 17:42:05.352710 1 controllermanager.go:482] Starting "persistentvolume-expander"
I0205 17:42:05.352997 1 ttl_controller.go:116] Starting TTL controller
I0205 17:42:05.353025 1 controller_utils.go:1027] Waiting for caches to sync for TTL controller
I0205 17:42:05.458733 1 plugins.go:508] Loaded volume plugin "kubernetes.io/aws-ebs"
I0205 17:42:05.458773 1 plugins.go:508] Loaded volume plugin "kubernetes.io/gce-pd"
I0205 17:42:05.458787 1 plugins.go:508] Loaded volume plugin "kubernetes.io/cinder"
I0205 17:42:05.458799 1 plugins.go:508] Loaded volume plugin "kubernetes.io/portworx-volume"
I0205 17:42:05.458811 1 plugins.go:508] Loaded volume plugin "kubernetes.io/vsphere-volume"
I0205 17:42:05.458822 1 plugins.go:508] Loaded volume plugin "kubernetes.io/glusterfs"
I0205 17:42:05.458833 1 plugins.go:508] Loaded volume plugin "kubernetes.io/rbd"
I0205 17:42:05.458843 1 plugins.go:508] Loaded volume plugin "kubernetes.io/azure-disk"
I0205 17:42:05.458871 1 plugins.go:508] Loaded volume plugin "kubernetes.io/azure-file"
I0205 17:42:05.458883 1 plugins.go:508] Loaded volume plugin "kubernetes.io/photon-pd"
I0205 17:42:05.458905 1 plugins.go:508] Loaded volume plugin "kubernetes.io/scaleio"
I0205 17:42:05.458918 1 plugins.go:508] Loaded volume plugin "kubernetes.io/storageos"
I0205 17:42:05.458929 1 plugins.go:508] Loaded volume plugin "kubernetes.io/fc"
I0205 17:42:05.459170 1 controllermanager.go:497] Started "persistentvolume-expander"
I0205 17:42:05.459191 1 controllermanager.go:482] Starting "pv-protection"
I0205 17:42:05.459427 1 expand_controller.go:153] Starting expand controller
I0205 17:42:05.459444 1 controller_utils.go:1027] Waiting for caches to sync for expand controller
I0205 17:42:05.528249 1 controllermanager.go:497] Started "pv-protection"
I0205 17:42:05.528291 1 controllermanager.go:482] Starting "ttl-after-finished"
W0205 17:42:05.528321 1 controllermanager.go:489] Skipping "ttl-after-finished"
I0205 17:42:05.528338 1 controllermanager.go:482] Starting "daemonset"
I0205 17:42:05.528418 1 pv_protection_controller.go:81] Starting PV protection controller
I0205 17:42:05.528434 1 controller_utils.go:1027] Waiting for caches to sync for PV protection controller
I0205 17:42:05.627942 1 controllermanager.go:497] Started "daemonset"
I0205 17:42:05.628008 1 controllermanager.go:482] Starting "deployment"
I0205 17:42:05.628052 1 daemon_controller.go:270] Starting daemon sets controller
I0205 17:42:05.628093 1 controller_utils.go:1027] Waiting for caches to sync for daemon sets controller
I0205 17:42:05.661922 1 controllermanager.go:497] Started "deployment"
I0205 17:42:05.661995 1 controllermanager.go:482] Starting "statefulset"
I0205 17:42:05.665175 1 deployment_controller.go:152] Starting deployment controller
I0205 17:42:05.665230 1 controller_utils.go:1027] Waiting for caches to sync for deployment controller
I0205 17:42:10.029421 1 controllermanager.go:497] Started "statefulset"
I0205 17:42:10.029567 1 controllermanager.go:482] Starting "csrcleaner"
I0205 17:42:10.030109 1 stateful_set.go:151] Starting stateful set controller
I0205 17:42:10.030142 1 controller_utils.go:1027] Waiting for caches to sync for stateful set controller
I0205 17:42:10.329411 1 controllermanager.go:497] Started "csrcleaner"
W0205 17:42:10.329479 1 controllermanager.go:476] "tokencleaner" is disabled
I0205 17:42:10.329504 1 controllermanager.go:482] Starting "replicationcontroller"
I0205 17:42:10.329503 1 cleaner.go:81] Starting CSR cleaner controller
W0205 17:42:11.739514 1 garbagecollector.go:647] failed to discover some groups: map[validators.kubedb.com/v1alpha1:the server is currently unable to handle the request mutators.kubedb.com/v1alpha1:the server is currently unable to handle the request]
I0205 17:42:11.740069 1 garbagecollector.go:204] syncing garbage collector with updated resources from discovery (attempt 1): added: [{Group: Version:v1 Resource:configmaps} {Group: Version:v1 Resource:endpoints} {Group: Version:v1 Resource:events} {Group: Version:v1 Resource:limitranges} {Group: Version:v1 Resource:namespaces} {Group: Version:v1 Resource:nodes} {Group: Version:v1 Resource:persistentvolumeclaims} {Group: Version:v1 Resource:persistentvolumes} {Group: Version:v1 Resource:pods} {Group: Version:v1 Resource:podtemplates} {Group: Version:v1 Resource:replicationcontrollers} {Group: Version:v1 Resource:resourcequotas} {Group: Version:v1 Resource:secrets} {Group: Version:v1 Resource:serviceaccounts} {Group: Version:v1 Resource:services} {Group:admissionregistration.k8s.io Version:v1alpha1 Resource:initializerconfigurations} {Group:admissionregistration.k8s.io Version:v1beta1 Resource:mutatingwebhookconfigurations} {Group:admissionregistration.k8s.io Version:v1beta1 Resource:validatingwebhookconfigurations} {Group:apiextensions.k8s.io Version:v1beta1 Resource:customresourcedefinitions} {Group:apiregistration.k8s.io Version:v1 Resource:apiservices} {Group:appcatalog.appscode.com Version:v1alpha1 Resource:appbindings} {Group:apps Version:v1 Resource:controllerrevisions} {Group:apps Version:v1 Resource:daemonsets} {Group:apps Version:v1 Resource:deployments} {Group:apps Version:v1 Resource:replicasets} {Group:apps Version:v1 Resource:statefulsets} {Group:autoscaling Version:v1 Resource:horizontalpodautoscalers} {Group:batch Version:v1 Resource:jobs} {Group:batch Version:v1beta1 Resource:cronjobs} {Group:catalog.kubedb.com Version:v1alpha1 Resource:elasticsearchversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:etcdversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:memcachedversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:mongodbversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:mysqlversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:postgresversions} {Group:catalog.kubedb.com Version:v1alpha1 Resource:redisversions} {Group:certificates.k8s.io Version:v1beta1 Resource:certificatesigningrequests} {Group:certmanager.k8s.io Version:v1alpha1 Resource:certificates} {Group:certmanager.k8s.io Version:v1alpha1 Resource:clusterissuers} {Group:certmanager.k8s.io Version:v1alpha1 Resource:issuers} {Group:coordination.k8s.io Version:v1beta1 Resource:leases} {Group:events.k8s.io Version:v1beta1 Resource:events} {Group:extensions Version:v1beta1 Resource:daemonsets} {Group:extensions Version:v1beta1 Resource:deployments} {Group:extensions Version:v1beta1 Resource:ingresses} {Group:extensions Version:v1beta1 Resource:networkpolicies} {Group:extensions Version:v1beta1 Resource:podsecuritypolicies} {Group:extensions Version:v1beta1 Resource:replicasets} {Group:kubedb.com Version:v1alpha1 Resource:dormantdatabases} {Group:kubedb.com Version:v1alpha1 Resource:elasticsearches} {Group:kubedb.com Version:v1alpha1 Resource:etcds} {Group:kubedb.com Version:v1alpha1 Resource:memcacheds} {Group:kubedb.com Version:v1alpha1 Resource:mongodbs} {Group:kubedb.com Version:v1alpha1 Resource:mysqls} {Group:kubedb.com Version:v1alpha1 Resource:postgreses} {Group:kubedb.com Version:v1alpha1 Resource:redises} {Group:kubedb.com Version:v1alpha1 Resource:snapshots} {Group:monitoring.coreos.com Version:v1 Resource:alertmanagers} {Group:monitoring.coreos.com Version:v1 Resource:prometheuses} {Group:monitoring.coreos.com Version:v1 Resource:prometheusrules} {Group:monitoring.coreos.com Version:v1 Resource:servicemonitors} {Group:mysql.oracle.com Version:v1alpha1 Resource:mysqlbackupschedules} {Group:mysql.oracle.com Version:v1alpha1 Resource:mysqlbackups} {Group:mysql.oracle.com Version:v1alpha1 Resource:mysqlclusters} {Group:mysql.oracle.com Version:v1alpha1 Resource:mysqlrestores} {Group:networking.k8s.io Version:v1 Resource:networkpolicies} {Group:policy Version:v1beta1 Resource:poddisruptionbudgets} {Group:policy Version:v1beta1 Resource:podsecuritypolicies} {Group:rbac.authorization.k8s.io Version:v1 Resource:clusterrolebindings} {Group:rbac.authorization.k8s.io Version:v1 Resource:clusterroles} {Group:rbac.authorization.k8s.io Version:v1 Resource:rolebindings} {Group:rbac.authorization.k8s.io Version:v1 Resource:roles} {Group:scheduling.k8s.io Version:v1beta1 Resource:priorityclasses} {Group:storage.k8s.io Version:v1 Resource:storageclasses} {Group:storage.k8s.io Version:v1beta1 Resource:volumeattachments}], removed: []
I0205 17:42:11.852501 1 controllermanager.go:497] Started "replicationcontroller"
I0205 17:42:11.852634 1 replica_set.go:182] Starting replicationcontroller controller
I0205 17:42:11.852685 1 controller_utils.go:1027] Waiting for caches to sync for ReplicationController controller
I0205 17:42:11.852639 1 controllermanager.go:482] Starting "serviceaccount"
I0205 17:42:12.229765 1 controllermanager.go:497] Started "serviceaccount"
I0205 17:42:12.229829 1 controllermanager.go:482] Starting "persistentvolume-binder"
I0205 17:42:12.230185 1 serviceaccounts_controller.go:115] Starting service account controller
I0205 17:42:12.230229 1 controller_utils.go:1027] Waiting for caches to sync for service account controller
I0205 17:42:12.433668 1 plugins.go:508] Loaded volume plugin "kubernetes.io/host-path"
I0205 17:42:12.433737 1 plugins.go:508] Loaded volume plugin "kubernetes.io/nfs"
I0205 17:42:12.433768 1 plugins.go:508] Loaded volume plugin "kubernetes.io/glusterfs"
I0205 17:42:12.433797 1 plugins.go:508] Loaded volume plugin "kubernetes.io/rbd"
I0205 17:42:12.433819 1 plugins.go:508] Loaded volume plugin "kubernetes.io/quobyte"
I0205 17:42:12.433845 1 plugins.go:508] Loaded volume plugin "kubernetes.io/azure-file"
I0205 17:42:12.433879 1 plugins.go:508] Loaded volume plugin "kubernetes.io/flocker"
I0205 17:42:12.433903 1 plugins.go:508] Loaded volume plugin "kubernetes.io/portworx-volume"
I0205 17:42:12.433921 1 plugins.go:508] Loaded volume plugin "kubernetes.io/scaleio"
I0205 17:42:12.433936 1 plugins.go:508] Loaded volume plugin "kubernetes.io/local-volume"
I0205 17:42:12.433968 1 plugins.go:508] Loaded volume plugin "kubernetes.io/storageos"
I0205 17:42:12.433983 1 plugins.go:508] Loaded volume plugin "kubernetes.io/aws-ebs"
I0205 17:42:12.433996 1 plugins.go:508] Loaded volume plugin "kubernetes.io/gce-pd"
I0205 17:42:12.434015 1 plugins.go:508] Loaded volume plugin "kubernetes.io/cinder"
I0205 17:42:12.434044 1 plugins.go:508] Loaded volume plugin "kubernetes.io/vsphere-volume"
I0205 17:42:12.434057 1 plugins.go:508] Loaded volume plugin "kubernetes.io/azure-disk"
I0205 17:42:12.434072 1 plugins.go:508] Loaded volume plugin "kubernetes.io/photon-pd"
I0205 17:42:12.434278 1 controllermanager.go:497] Started "persistentvolume-binder"
I0205 17:42:12.435198 1 pv_controller_base.go:271] Starting persistent volume controller
I0205 17:42:12.527407 1 controller_utils.go:1027] Waiting for caches to sync for persistent volume controller
I0205 17:42:12.527419 1 controllermanager.go:482] Starting "job"
E0205 17:42:12.532641 1 memcache.go:134] couldn't get resource list for mutators.kubedb.com/v1alpha1: the server is currently unable to handle the request
E0205 17:42:12.534274 1 memcache.go:134] couldn't get resource list for validators.kubedb.com/v1alpha1: the server is currently unable to handle the request
I0205 17:42:12.829857 1 controllermanager.go:497] Started "job"
I0205 17:42:12.829920 1 controllermanager.go:482] Starting "service"
I0205 17:42:12.830093 1 job_controller.go:143] Starting job controller
I0205 17:42:12.830266 1 controller_utils.go:1027] Waiting for caches to sync for job controller
E0205 17:42:13.035107 1 core.go:76] Failed to start service controller: WARNING: no cloud provider provided, services of type LoadBalancer will fail
W0205 17:42:13.035147 1 controllermanager.go:489] Skipping "service"
I0205 17:42:13.035165 1 controllermanager.go:482] Starting "route"
I0205 17:42:13.035175 1 core.go:150] Will not configure cloud provider routes for allocate-node-cidrs: false, configure-cloud-routes: true.
W0205 17:42:13.035183 1 controllermanager.go:489] Skipping "route"
I0205 17:42:13.035189 1 controllermanager.go:482] Starting "nodelifecycle"
I0205 17:42:13.232027 1 taint_manager.go:190] Sending events to api server.
I0205 17:42:13.232335 1 node_lifecycle_controller.go:349] Controller will taint node by condition.
I0205 17:42:13.232594 1 controllermanager.go:497] Started "nodelifecycle"
I0205 17:42:13.233496 1 node_lifecycle_controller.go:386] Starting node controller
I0205 17:42:13.233553 1 controller_utils.go:1027] Waiting for caches to sync for taint controller
E0205 17:42:13.329885 1 resource_quota_controller.go:460] failed to sync resource monitors: [couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlclusters"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlclusters", couldn't start monitor for resource {"appcatalog.appscode.com" "v1alpha1" "appbindings"}: unable to monitor quota for resource "appcatalog.appscode.com/v1alpha1, Resource=appbindings", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "etcds"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=etcds", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "alertmanagers"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=alertmanagers", couldn't start monitor for resource {"extensions" "v1beta1" "networkpolicies"}: unable to monitor quota for resource "extensions/v1beta1, Resource=networkpolicies", couldn't start monitor for resource {"certmanager.k8s.io" "v1alpha1" "certificates"}: unable to monitor quota for resource "certmanager.k8s.io/v1alpha1, Resource=certificates", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "redises"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=redises", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "prometheusrules"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=prometheusrules", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "postgreses"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=postgreses", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "mongodbs"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=mongodbs", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "dormantdatabases"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=dormantdatabases", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "snapshots"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=snapshots", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlrestores"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlrestores", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "servicemonitors"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=servicemonitors", couldn't start monitor for resource {"certmanager.k8s.io" "v1alpha1" "issuers"}: unable to monitor quota for resource "certmanager.k8s.io/v1alpha1, Resource=issuers", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "memcacheds"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=memcacheds", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "elasticsearches"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=elasticsearches", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlbackups"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlbackups", couldn't start monitor for resource {"kubedb.com" "v1alpha1" "mysqls"}: unable to monitor quota for resource "kubedb.com/v1alpha1, Resource=mysqls", couldn't start monitor for resource {"monitoring.coreos.com" "v1" "prometheuses"}: unable to monitor quota for resource "monitoring.coreos.com/v1, Resource=prometheuses", couldn't start monitor for resource {"mysql.oracle.com" "v1alpha1" "mysqlbackupschedules"}: unable to monitor quota for resource "mysql.oracle.com/v1alpha1, Resource=mysqlbackupschedules"]
I0205 17:42:15.627545 1 controller_utils.go:1034] Caches are synced for certificate controller
I0205 17:42:15.628185 1 controller_utils.go:1034] Caches are synced for certificate controller
I0205 17:42:17.239577 1 controller_utils.go:1027] Waiting for caches to sync for garbage collector controller
W0205 17:42:18.035115 1 actual_state_of_world.go:491] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="k8s-n01" does not exist
W0205 17:42:18.035163 1 actual_state_of_world.go:491] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="k8s-n02" does not exist
W0205 17:42:18.035469 1 actual_state_of_world.go:491] Failed to update statusUpdateNeeded field in actual state of world: Failed to set statusUpdateNeeded to needed true, because nodeName="k8s-n03" does not exist
I0205 17:42:18.127662 1 controller_utils.go:1034] Caches are synced for TTL controller
I0205 17:42:22.827625 1 controller_utils.go:1034] Caches are synced for service account controller
I0205 17:42:22.944184 1 controller_utils.go:1034] Caches are synced for namespace controller
I0205 17:42:23.438117 1 controller_utils.go:1034] Caches are synced for ClusterRoleAggregator controller
I0205 17:42:23.627491 1 controller_utils.go:1034] Caches are synced for PV protection controller
I0205 17:42:23.627548 1 controller_utils.go:1034] Caches are synced for expand controller
I0205 17:42:25.627688 1 leaderelection.go:231] failed to renew lease kube-system/kube-controller-manager: failed to tryAcquireOrRenew context deadline exceeded
F0205 17:42:25.627841 1 controllermanager.go:238] leaderelection lost
I0205 17:42:25.628689 1 pv_protection_controller.go:93] Shutting down PV protection controller
I0205 17:42:25.628742 1 shared_informer.go:119] stop requested
E0205 17:42:29.128180 1 controller_utils.go:1030] Unable to sync caches for GC controller
I0205 17:42:29.128207 1 gc_controller.go:80] Shutting down GC controller
I0205 17:42:25.628770 1 shared_informer.go:119] stop requested
E0205 17:42:29.128235 1 controller_utils.go:1030] Unable to sync caches for attach detach controller
I0205 17:42:25.628797 1 shared_informer.go:119] stop requested
E0205 17:42:29.128277 1 controller_utils.go:1030] Unable to sync caches for garbage collector controller
I0205 17:42:25.628774 1 shared_informer.go:119] stop requested
E0205 17:42:29.128295 1 controller_utils.go:1030] Unable to sync caches for ReplicationController controller
I0205 17:42:25.628814 1 shared_informer.go:119] stop requested
E0205 17:42:29.128324 1 controller_utils.go:1030] Unable to sync caches for ReplicaSet controller
I0205 17:42:25.628813 1 shared_informer.go:119] stop requested
E0205 17:42:29.128338 1 controller_utils.go:1030] Unable to sync caches for taint controller
I0205 17:42:25.628835 1 shared_informer.go:119] stop requested
E0205 17:42:29.128353 1 controller_utils.go:1030] Unable to sync caches for daemon sets controller
I0205 17:42:25.628893 1 certificate_controller.go:125] Shutting down certificate controller
I0205 17:42:25.628914 1 certificate_controller.go:125] Shutting down certificate controller
I0205 17:42:25.629009 1 shared_informer.go:119] stop requested
E0205 17:42:29.128417 1 controller_utils.go:1030] Unable to sync caches for disruption controller
I0205 17:42:25.629029 1 shared_informer.go:119] stop requested
E0205 17:42:29.128434 1 controller_utils.go:1030] Unable to sync caches for endpoint controller
I0205 17:42:25.629030 1 shared_informer.go:119] stop requested
E0205 17:42:29.128461 1 controller_utils.go:1030] Unable to sync caches for job controller
I0205 17:42:25.629167 1 expand_controller.go:165] Shutting down expand controller
I0205 17:42:25.629169 1 shared_informer.go:119] stop requested
E0205 17:42:29.128481 1 controller_utils.go:1030] Unable to sync caches for HPA controller
I0205 17:42:25.629183 1 clusterroleaggregation_controller.go:160] Shutting down ClusterRoleAggregator
I0205 17:42:25.629181 1 shared_informer.go:119] stop requested
E0205 17:42:29.128538 1 controller_utils.go:1030] Unable to sync caches for resource quota controller
I0205 17:42:25.629201 1 shared_informer.go:119] stop requested
E0205 17:42:29.128552 1 controller_utils.go:1030] Unable to sync caches for stateful set controller
I0205 17:42:25.629209 1 namespace_controller.go:198] Shutting down namespace controller
I0205 17:42:25.629215 1 ttl_controller.go:128] Shutting down TTL controller
I0205 17:42:25.629239 1 tokens_controller.go:182] Shutting down
I0205 17:42:25.628828 1 shared_informer.go:119] stop requested
E0205 17:42:29.128942 1 controller_utils.go:1030] Unable to sync caches for PVC protection controller
I0205 17:42:25.733914 1 cleaner.go:89] Shutting down CSR cleaner controller
I0205 17:42:25.733933 1 cronjob_controller.go:98] Shutting down CronJob Manager
I0205 17:42:25.629219 1 serviceaccounts_controller.go:127] Shutting down service account controller
I0205 17:42:25.930330 1 event.go:221] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"kube-controller-manager", UID:"7ede931d-e846-11e8-97fd-549f3525ca50", APIVersion:"v1", ResourceVersion:"23680859", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' k8s-n01_3d12f32d-296d-11e9-bdf4-549f3525ca50 stopped leading
I0205 17:42:25.937005 1 shared_informer.go:119] stop requested
E0205 17:42:29.129128 1 controller_utils.go:1030] Unable to sync caches for persistent volume controller
I0205 17:42:25.937041 1 shared_informer.go:119] stop requested
E0205 17:42:29.129152 1 controller_utils.go:1030] Unable to sync caches for garbage collector controller
I0205 17:42:25.937318 1 shared_informer.go:119] stop requested
E0205 17:42:29.129178 1 controller_utils.go:1030] Unable to sync caches for deployment controller
I0205 17:42:29.227427 1 attach_detach_controller.go:319] Shutting down attach detach controller
[root@k8s-n01 ~]#
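What stands out in the log: discovery keeps failing for mutators.kubedb.com/v1alpha1 and validators.kubedb.com/v1alpha1 ("the server is currently unable to handle the request"), which means the aggregated APIServices registered by KubeDB have no healthy backend, and at the end the leader-election renewal times out ("failed to renew lease kube-system/kube-controller-manager ... context deadline exceeded"), so the process exits fatally with "leaderelection lost" and restarts. A minimal diagnostic sketch, assuming kubectl access to this cluster; the APIService object names below follow the standard <version>.<group> naming convention and are inferred from the log, not copied from the cluster:

# List aggregated APIServices; any reporting AVAILABLE=False are suspects
kubectl get apiservices

# Inspect the two KubeDB APIServices the log complains about
kubectl get apiservice v1alpha1.mutators.kubedb.com -o yaml
kubectl get apiservice v1alpha1.validators.kubedb.com -o yaml

# Check whether the KubeDB operator pods backing those services are healthy
kubectl get pods --all-namespaces | grep -i kubedb

# If the operator has been removed for good, deleting the stale APIServices
# unblocks discovery (destructive; only if KubeDB is no longer in use):
# kubectl delete apiservice v1alpha1.mutators.kubedb.com v1alpha1.validators.kubedb.com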
From the kubelet log on k8s-n03:

Feb 05 17:22:02 k8s-n03 kubelet[10525]: W0205 17:22:02.820128 10525 reflector.go:270] object-"kubedb-postgres"/"pg-custom-config": watch of *v1.ConfigMap ended with: too old resource version: 23674758 (23676539)
Pod status while this was going on; kube-controller-manager kept crash-looping across nodes:

kube-system kube-proxy-k8s-n02 0/1 Pending 0 1s
kube-system kube-proxy-k8s-n02 1/1 Running 0 2s
kube-system kube-controller-manager-k8s-n02 0/1 Error 103 170m
kube-system kube-controller-manager-k8s-n02 0/1 CrashLoopBackOff 103 170m
kube-system kube-controller-manager-k8s-n01 1/1 Running 36 171m
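The listing above shows the same pods transitioning through states, i.e. it came from a pod watch; something like this reproduces it (assuming a kubeconfig pointed at the cluster):

# Watch pod state changes cluster-wide, filtered to the pods of interest
kubectl get pods --all-namespaces --watch | grep -E 'kube-(proxy|controller-manager)'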