kubelet startup log, gist cesarkawakami/c116d4fe11dfbfa5a6dfab70e3617de1 (created August 23, 2016 22:31)
I0823 20:36:36.834892 1778 docker.go:327] Start docker client with request timeout=2m0s
I0823 20:36:36.867905 1778 aws.go:604] Zone not specified in configuration file; querying AWS metadata service
I0823 20:36:37.112241 1778 aws.go:726] AWS cloud filtering on tags: map[KubernetesCluster:pndtest]
I0823 20:36:37.112289 1778 server.go:349] Successfully initialized cloud provider: "aws" from the config file: ""
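The aws.go lines above show the kubelet falling back to the EC2 instance metadata service because no zone was set in the cloud config. A minimal Go sketch of that fallback, assuming only the well-known link-local metadata endpoint (the real aws.go code path differs):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The EC2 instance metadata service is reachable at this
	// link-local address from inside any instance.
	resp, err := http.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	az, _ := io.ReadAll(resp.Body)
	fmt.Printf("Zone from metadata service: %s\n", az) // e.g. us-east-1c
}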
I0823 20:36:37.113350 1778 manager.go:138] cAdvisor running in container: "/"
W0823 20:36:37.222328 1778 manager.go:146] unable to connect to Rkt api service: rkt: cannot tcp Dial rkt api service: dial tcp 127.0.0.1:15441: getsockopt: connection refused
I0823 20:36:37.328437 1778 fs.go:139] Filesystem partitions: map[/dev/xvda1:{mountpoint:/var/lib/docker/aufs major:202 minor:1 fsType:ext4 blockSize:0}]
I0823 20:36:37.333430 1778 manager.go:192] Machine: {NumCores:2 CpuFrequency:2394478 MemoryCapacity:4143976448 MachineID:9da75450b5baeda6ab8af33757aa419a SystemUUID:EC26648B-0807-6DB4-CDCC-7360CC9CD5E7 BootID:e1cd3e19-56fa-45c3-9410-2e54d4a2ad04 Filesystems:[{Device:/dev/xvda1 Capacity:10423046144 Type:vfs Inodes:655360}] DiskMap:map[202:0:{Name:xvda Major:202 Minor:0 Size:10737418240 Scheduler:deadline}] NetworkDevices:[{Name:cbr0 MacAddress:ce:21:6c:c1:a7:68 Speed:0 Mtu:1500} {Name:eth0 MacAddress:0a:d8:eb:64:76:f7 Speed:0 Mtu:9001}] Topology:[{Id:0 Memory:4143976448 Cores:[{Id:0 Threads:[0] Caches:[{Size:32768 Type:Data Level:1} {Size:32768 Type:Instruction Level:1} {Size:262144 Type:Unified Level:2}]} {Id:1 Threads:[1] Caches:[{Size:32768 Type:Data Level:1} {Size:32768 Type:Instruction Level:1} {Size:262144 Type:Unified Level:2}]}] Caches:[{Size:31457280 Type:Unified Level:3}]}] CloudProvider:AWS InstanceType:t2.medium InstanceID:i-221d172c}
I0823 20:36:37.333814 1778 manager.go:198] Version: {KernelVersion:3.13.0-93-generic ContainerOsVersion:Ubuntu 14.04.5 LTS DockerVersion:1.10.3 CadvisorVersion: CadvisorRevision:}
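cAdvisor's filesystem inventory above (Capacity:10423046144 bytes and Inodes:655360 for /dev/xvda1) comes from the kernel. A hedged sketch of how such capacity numbers can be read on Linux, using syscall.Statfs rather than cAdvisor's actual code:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var st syscall.Statfs_t
	// Same mountpoint the log reports for /dev/xvda1.
	if err := syscall.Statfs("/var/lib/docker/aufs", &st); err != nil {
		panic(err)
	}
	capacity := st.Blocks * uint64(st.Bsize) // total bytes on the filesystem
	fmt.Printf("capacity=%d inodes=%d\n", capacity, st.Files)
}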
I0823 20:36:37.334263 1778 server.go:382] Using root directory: /var/lib/kubelet
I0823 20:36:37.335799 1778 server.go:647] cloud provider determined current node name to be ip-172-24-18-196.ec2.internal
I0823 20:36:37.335890 1778 server.go:758] Adding manifest file: /etc/kubernetes/manifests
I0823 20:36:37.335916 1778 file.go:47] Watching path "/etc/kubernetes/manifests"
I0823 20:36:37.335940 1778 server.go:768] Watching apiserver
I0823 20:36:37.336062 1778 kubelet.go:384] Hairpin mode set to "promiscuous-bridge"
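"promiscuous-bridge" hairpin mode means the kubelet puts the cbr0 bridge into promiscuous mode (instead of setting the hairpin flag on each veth) so a pod can reach itself through a service VIP. A sketch of the underlying operation, shelling out to iproute2; the exact call the kubelet makes is an assumption here:

package main

import (
	"log"
	"os/exec"
)

func main() {
	// Equivalent of `ip link set cbr0 promisc on`.
	out, err := exec.Command("ip", "link", "set", "cbr0", "promisc", "on").CombinedOutput()
	if err != nil {
		log.Fatalf("enabling promiscuous mode on cbr0 failed: %v: %s", err, out)
	}
}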
I0823 20:36:37.443396 1778 docker_manager.go:235] Setting dockerRoot to /var/lib/docker
I0823 20:36:37.443516 1778 kubelet.go:3871] Setting Pod CIDR:  -> 172.18.196.0/24
I0823 20:36:37.443640 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/aws-ebs"
I0823 20:36:37.443650 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/empty-dir"
I0823 20:36:37.443658 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/gce-pd"
I0823 20:36:37.443668 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/git-repo"
I0823 20:36:37.443675 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/host-path"
I0823 20:36:37.443682 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/nfs"
I0823 20:36:37.443690 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/secret"
I0823 20:36:37.443699 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/iscsi"
I0823 20:36:37.443707 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/glusterfs"
I0823 20:36:37.443715 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/rbd"
I0823 20:36:37.443723 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/cinder"
I0823 20:36:37.443731 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/cephfs"
I0823 20:36:37.443739 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/downward-api"
I0823 20:36:37.443750 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/fc"
I0823 20:36:37.443757 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/flocker"
I0823 20:36:37.443764 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/azure-file"
I0823 20:36:37.443772 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/configmap"
I0823 20:36:37.443780 1778 plugins.go:333] Loaded volume plugin "kubernetes.io/vsphere-volume"
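Each plugins.go:333 line above is one entry landing in the kubelet's volume plugin registry, keyed by its "kubernetes.io/..." name. A minimal sketch of that registration pattern (hypothetical types; the real VolumePlugin interface is much larger):

package main

import "fmt"

// VolumePlugin is a stand-in for the real, much larger interface.
type VolumePlugin interface {
	GetPluginName() string
}

type awsEBSPlugin struct{}

func (p *awsEBSPlugin) GetPluginName() string { return "kubernetes.io/aws-ebs" }

type pluginRegistry map[string]VolumePlugin

func (r pluginRegistry) load(p VolumePlugin) {
	r[p.GetPluginName()] = p
	fmt.Printf("Loaded volume plugin %q\n", p.GetPluginName())
}

func main() {
	registry := pluginRegistry{}
	registry.load(&awsEBSPlugin{}) // one of the 18 plugins listed above
}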
I0823 20:36:37.444756 1778 server.go:698] Setting keys quota in /proc/sys/kernel/keys/root_maxkeys to 1000000
I0823 20:36:37.444787 1778 server.go:714] Setting keys bytes in /proc/sys/kernel/keys/root_maxbytes to 25000000
I0823 20:36:37.444801 1778 server.go:730] Started kubelet v1.3.5-dirty
E0823 20:36:37.445080 1778 kubelet.go:933] Image garbage collection failed: unable to find data for container /
I0823 20:36:37.445481 1778 container_manager_linux.go:220] Updating kernel flag: kernel/panic, expected value: 10, actual value: 0
I0823 20:36:37.445503 1778 container_manager_linux.go:220] Updating kernel flag: kernel/panic_on_oops, expected value: 1, actual value: 0
I0823 20:36:37.445520 1778 container_manager_linux.go:220] Updating kernel flag: vm/overcommit_memory, expected value: 1, actual value: 0
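The keyring quotas (server.go:698/714) and the three kernel flags are all plain writes under /proc/sys. A hedged sketch of that mechanism, using the same keys and values the log shows (the kubelet's real code goes through its own sysctl helpers, and root is required):

package main

import (
	"fmt"
	"os"
)

// setSysctl writes one value under /proc/sys, e.g. kernel/panic -> 10.
func setSysctl(key, value string) error {
	return os.WriteFile("/proc/sys/"+key, []byte(value), 0644)
}

func main() {
	for key, value := range map[string]string{
		"kernel/panic":              "10",
		"kernel/panic_on_oops":      "1",
		"vm/overcommit_memory":      "1",
		"kernel/keys/root_maxkeys":  "1000000",
		"kernel/keys/root_maxbytes": "25000000",
	} {
		if err := setSysctl(key, value); err != nil {
			fmt.Fprintf(os.Stderr, "sysctl %s: %v\n", key, err)
		}
	}
}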
I0823 20:36:37.445549 1778 fs_resource_analyzer.go:66] Starting FS ResourceAnalyzer
I0823 20:36:37.445565 1778 status_manager.go:123] Starting to sync pod status with apiserver
I0823 20:36:37.445577 1778 kubelet.go:2470] Starting kubelet main sync loop.
I0823 20:36:37.445605 1778 kubelet.go:2479] skipping pod synchronization - [network state unknown container runtime is down]
I0823 20:36:37.445767 1778 server.go:117] Starting to listen on 0.0.0.0:10250
I0823 20:36:37.447430 1778 server.go:134] Starting to listen read-only on 0.0.0.0:10255
I0823 20:36:37.452409 1778 container_manager_linux.go:284] Discovered runtime cgroups name: /
I0823 20:36:37.452479 1778 volume_manager.go:210] Starting Kubelet Volume Manager
I0823 20:36:37.455730 1778 container_bridge.go:157] MASQUERADE rule doesn't exist, recreate it (with nonMasqueradeCIDR 172.16.0.0/14)
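container_bridge.go:157 is the kubelet ensuring that traffic leaving the node for destinations outside the non-masquerade CIDR gets SNATed. A sketch of the check-then-append pattern with iptables; the rule spec below is an assumption, not a copy of kubelet's exact rule:

package main

import (
	"log"
	"os/exec"
)

// ensureMasqueradeRule appends a POSTROUTING MASQUERADE rule unless an
// identical one already exists (`iptables -C` exits non-zero if missing).
func ensureMasqueradeRule(nonMasqCIDR string) error {
	spec := []string{"POSTROUTING", "!", "-d", nonMasqCIDR, "-j", "MASQUERADE"}
	if exec.Command("iptables", append([]string{"-t", "nat", "-C"}, spec...)...).Run() == nil {
		return nil // rule already present
	}
	return exec.Command("iptables", append([]string{"-t", "nat", "-A"}, spec...)...).Run()
}

func main() {
	// Same nonMasqueradeCIDR the log reports.
	if err := ensureMasqueradeRule("172.16.0.0/14"); err != nil {
		log.Fatal(err)
	}
}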
I0823 20:36:37.463437 1778 container_bridge.go:81] Attempting to recreate cbr0 with address range: 172.18.196.1/24
I0823 20:36:37.547595 1778 kubelet.go:1102] Adding node label from cloud provider: beta.kubernetes.io/instance-type=t2.medium
I0823 20:36:37.547627 1778 kubelet.go:1113] Adding node label from cloud provider: failure-domain.beta.kubernetes.io/zone=us-east-1c
I0823 20:36:37.547637 1778 kubelet.go:1117] Adding node label from cloud provider: failure-domain.beta.kubernetes.io/region=us-east-1
E0823 20:36:37.575487 1778 kubelet.go:2366] Failed to check if disk space is available for the runtime: failed to get fs info for "runtime": unable to find data for container /
E0823 20:36:37.575991 1778 kubelet.go:2374] Failed to check if disk space is available on the root partition: failed to get fs info for "root": unable to find data for container /
I0823 20:36:37.576008 1778 kubelet.go:2888] Recording NodeHasSufficientDisk event message for node ip-172-24-18-196.ec2.internal
I0823 20:36:37.576031 1778 kubelet.go:2888] Recording NodeHasSufficientMemory event message for node ip-172-24-18-196.ec2.internal
I0823 20:36:37.576061 1778 kubelet.go:1171] Attempting to register node ip-172-24-18-196.ec2.internal
E0823 20:36:37.576984 1778 docker_manager.go:2410] Unable to inspect container "a72df388ca7b3a40f4672c4f8e94d103021e120738d3b013ec0be9731cfe5e72": no such container: "a72df388ca7b3a40f4672c4f8e94d103021e120738d3b013ec0be9731cfe5e72"
E0823 20:36:37.581432 1778 generic.go:236] PLEG: Ignoring events for pod httphello-4180909509-a8k54/default: Cannot connect to the Docker daemon. Is the docker daemon running on this host?
W0823 20:36:37.581513 1778 container_gc.go:116] Failed to remove dead container "/k8s_influxdb.5de47df6_monitoring-influxdb-grafana-v3-dbemr_default_3f926b6c-695e-11e6-9a99-0a42cc394ec9_a0866f72": An error occurred trying to connect: EOF
W0823 20:36:37.581626 1778 container_gc.go:116] Failed to remove dead container "/k8s_POD.8ef22685_monitoring-influxdb-grafana-v3-dbemr_default_3f926b6c-695e-11e6-9a99-0a42cc394ec9_16d51521": Cannot connect to the Docker daemon. Is the docker daemon running on this host?
W0823 20:36:37.581728 1778 container_gc.go:116] Failed to remove dead container "/k8s_POD.d8dbe16c_heapster-v1.1.0-794996294-o4cx9_default_3f84e138-695e-11e6-9a99-0a42cc394ec9_03f46891": Cannot connect to the Docker daemon. Is the docker daemon running on this host?
W0823 20:36:37.581823 1778 container_gc.go:116] Failed to remove dead container "/k8s_POD.d8dbe16c_httphello-4180909509-a8k54_default_3f8fa200-695e-11e6-9a99-0a42cc394ec9_aceabe8a": Cannot connect to the Docker daemon. Is the docker daemon running on this host?
W0823 20:36:37.581918 1778 container_gc.go:116] Failed to remove dead container "/k8s_grafana.2c2cefc4_monitoring-influxdb-grafana-v3-dbemr_default_3f926b6c-695e-11e6-9a99-0a42cc394ec9_1af16d24": Cannot connect to the Docker daemon. Is the docker daemon running on this host?
W0823 20:36:37.582012 1778 container_gc.go:116] Failed to remove dead container "/k8s_eventer.7f6125e5_heapster-v1.1.0-794996294-o4cx9_default_3f84e138-695e-11e6-9a99-0a42cc394ec9_ead54e8d": Cannot connect to the Docker daemon. Is the docker daemon running on this host?
E0823 20:36:37.582317 1778 manager.go:235] Docker container factory registration failed: failed to validate Docker info: failed to detect Docker info: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
E0823 20:36:37.582324 1778 manager.go:240] Registration of the rkt container factory failed: unable to communicate with Rkt api service: rkt: cannot tcp Dial rkt api service: dial tcp 127.0.0.1:15441: getsockopt: connection refused
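Both factory registrations fail the same way: cAdvisor probes a local endpoint and nobody answers — Docker's unix socket while the daemon is being restarted, and rkt's API on tcp/15441, which was never running. A sketch of those two probes, assuming the standard socket paths:

package main

import (
	"fmt"
	"net"
	"time"
)

func probe(network, addr string) {
	conn, err := net.DialTimeout(network, addr, 2*time.Second)
	if err != nil {
		fmt.Printf("%s %s: %v\n", network, addr, err) // e.g. connection refused
		return
	}
	conn.Close()
	fmt.Printf("%s %s: up\n", network, addr)
}

func main() {
	probe("unix", "/var/run/docker.sock") // Docker daemon
	probe("tcp", "127.0.0.1:15441")       // rkt API service
}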
I0823 20:36:37.582333 1778 factory.go:54] Registering systemd factory
I0823 20:36:37.585498 1778 factory.go:86] Registering Raw factory
I0823 20:36:37.585779 1778 manager.go:1072] Started watching for new ooms in manager
I0823 20:36:37.585843 1778 oomparser.go:200] OOM parser using kernel log file: "/var/log/kern.log"
I0823 20:36:37.586168 1778 manager.go:281] Starting recovery of all containers
I0823 20:36:37.586469 1778 manager.go:286] Recovery completed
I0823 20:36:37.597515 1778 kubelet.go:1187] Node ip-172-24-18-196.ec2.internal was previously registered
E0823 20:36:37.605235 1778 kubelet.go:3100] Error getting image list: Cannot connect to the Docker daemon. Is the docker daemon running on this host?
I0823 20:36:37.605327 1778 kubelet.go:2888] Recording NodeHasSufficientDisk event message for node ip-172-24-18-196.ec2.internal
I0823 20:36:37.605345 1778 kubelet.go:2888] Recording NodeNotReady event message for node ip-172-24-18-196.ec2.internal
I0823 20:36:37.725580 1778 container_bridge.go:67] Recreated cbr0 and restarted docker
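"Recreated cbr0 and restarted docker" closes the sequence started at container_bridge.go:81: the kubelet rebuilds the bridge with the first address of the pod CIDR and bounces Docker so containers attach to the new bridge. Roughly the following iproute2 calls, sketched as shell-outs; the exact command sequence is an assumption:

package main

import (
	"log"
	"os/exec"
)

func run(name string, args ...string) {
	if out, err := exec.Command(name, args...).CombinedOutput(); err != nil {
		log.Fatalf("%s %v: %v: %s", name, args, err, out)
	}
}

func main() {
	run("ip", "link", "set", "cbr0", "down")
	run("ip", "link", "del", "cbr0")
	run("ip", "link", "add", "name", "cbr0", "type", "bridge")
	// First address of the pod CIDR from the log: 172.18.196.1/24.
	run("ip", "addr", "add", "172.18.196.1/24", "dev", "cbr0")
	run("ip", "link", "set", "cbr0", "up")
	run("service", "docker", "restart") // containers rejoin the new bridge
}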
I0823 20:36:42.445828 1778 kubelet.go:2536] SyncLoop (ADD, "file"): ""
I0823 20:36:42.445925 1778 kubelet.go:2536] SyncLoop (ADD, "api"): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9), monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9), heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)"
I0823 20:36:42.446034 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"2cfb6fddd143121599b60e11ef251b174a8457cd4f365708fad352d83558e51a"}
I0823 20:36:42.446080 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"8d06303036441c46c0a14265366dc0212353a1e74c955b0c87731866fd7ac354"}
I0823 20:36:42.446110 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"fc3019d17732a41d01f1011d4cd5db6da253dfd0ff1b2b742d199f3719708148"}
I0823 20:36:42.446136 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"7fddefee7005a43857948407218fcc528059d4eb5a3bec0b12bd4b0c64af7e63"}
I0823 20:36:42.446761 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"93b1b0766302cd6fba0cb6a04ed3374f567098f4608c97680cfb41a0b99ed459"}
I0823 20:36:42.446782 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"4b1f3873d0edb417de121dae81aa496208a0fe3b5aef3c7e598fdadc6e1dce65"}
I0823 20:36:42.446802 1778 kubelet.go:2563] SyncLoop (PLEG): "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f926b6c-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"b1ebc8d014ed84082920c5e097e5934fb441e9aeb5769061c605aec7cc14692d"}
I0823 20:36:42.446827 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"b454e856ac604c2820ab71ea8fc5cf29bd93466bb0966b5bed29c8ba9a525e72"}
I0823 20:36:42.446847 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"c2ce159f450fb430429c34f949d31205a29196a0ffbbf40eb4488ace7ea8531c"}
I0823 20:36:42.447080 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"b000c5ff181d94506b401900f16cb54d908716284cc6ba5ffda466fab31afa08"}
I0823 20:36:42.447111 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"5966d95918974331deabf98b497b697884da7c61fb95bb610bb2cf584af32d01"}
I0823 20:36:42.447131 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"441d9b248eb029330dd937b68a12241a0e233aba111b6d13d19594ac5633b960"}
I0823 20:36:42.447154 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"f51381a8e512a57ce8fa1e39f296ee3df44d049c133fb262cb733c91ade3fd45"}
I0823 20:36:42.447178 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"bbd859029985ea0376fb217c1ad0221c25dd4a3bc684ff2115f9231813c1bc3a"}
I0823 20:36:42.447197 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"a72df388ca7b3a40f4672c4f8e94d103021e120738d3b013ec0be9731cfe5e72"}
I0823 20:36:42.447218 1778 kubelet.go:2563] SyncLoop (PLEG): "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f84e138-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"665fec3e0cb51826559dde0bdc337d6e99a5afe3507d165c9501487ec4725a1e"}
I0823 20:36:42.447236 1778 kubelet.go:2563] SyncLoop (PLEG): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f8fa200-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"a467447d87bf0d48533eb557f6fc2526983817dbb219596651081ecdc3ba0779"}
I0823 20:36:42.447255 1778 kubelet.go:2563] SyncLoop (PLEG): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f8fa200-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"480ebe81aba5a05ac76669c256d24c63f842e97f3d6e7b1fd91e032e2a08496e"}
I0823 20:36:42.447273 1778 kubelet.go:2563] SyncLoop (PLEG): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f8fa200-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"919060cbcd35936ceace6116f10273601533654c306ef1666c1250cefba14c66"}
I0823 20:36:42.447296 1778 kubelet.go:2563] SyncLoop (PLEG): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f8fa200-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"b4b8e3a0b6c9032497ccac4c9d85e86fd6c3b75d91f2583fa83df287862fde2e"}
I0823 20:36:42.447315 1778 kubelet.go:2563] SyncLoop (PLEG): "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)", event: &pleg.PodLifecycleEvent{ID:"3f8fa200-695e-11e6-9a99-0a42cc394ec9", Type:"ContainerDied", Data:"b60b22930908e41e77dddf27f528fa5615abb270ca77c014eddfa0a1bb4a767d"}
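Every SyncLoop (PLEG) line prints a pleg.PodLifecycleEvent literal, so the shape of the event type can be read straight off the log. A stand-alone sketch reproducing that shape (field types simplified to strings; the real type uses a UID type and interface{} data):

package main

import "fmt"

// PodLifecycleEvent mirrors the literal printed in the log:
// &pleg.PodLifecycleEvent{ID:"...", Type:"ContainerDied", Data:"..."}
type PodLifecycleEvent struct {
	ID   string // pod UID
	Type string // e.g. "ContainerDied"
	Data string // container ID the event refers to
}

func main() {
	e := &PodLifecycleEvent{
		ID:   "3f926b6c-695e-11e6-9a99-0a42cc394ec9",
		Type: "ContainerDied",
		Data: "2cfb6fddd143121599b60e11ef251b174a8457cd4f365708fad352d83558e51a",
	}
	fmt.Printf("event: %+v\n", e)
}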
I0823 20:36:42.564284 1778 reconciler.go:180] VerifyControllerAttachedVolume operation started for volume "kubernetes.io/secret/3f8fa200-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f8fa200-695e-11e6-9a99-0a42cc394ec9" (UID: "3f8fa200-695e-11e6-9a99-0a42cc394ec9")
I0823 20:36:42.564344 1778 reconciler.go:180] VerifyControllerAttachedVolume operation started for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-influxdb-persistent-storage" (spec.Name: "influxdb-persistent-storage") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9")
I0823 20:36:42.564362 1778 reconciler.go:180] VerifyControllerAttachedVolume operation started for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-grafana-persistent-storage" (spec.Name: "grafana-persistent-storage") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9")
I0823 20:36:42.564383 1778 reconciler.go:180] VerifyControllerAttachedVolume operation started for volume "kubernetes.io/secret/3f926b6c-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9")
I0823 20:36:42.564399 1778 reconciler.go:180] VerifyControllerAttachedVolume operation started for volume "kubernetes.io/secret/3f84e138-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f84e138-695e-11e6-9a99-0a42cc394ec9" (UID: "3f84e138-695e-11e6-9a99-0a42cc394ec9")
I0823 20:36:42.664921 1778 reconciler.go:254] MountVolume operation started for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-grafana-persistent-storage" (spec.Name: "grafana-persistent-storage") to pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.665042 1778 reconciler.go:254] MountVolume operation started for volume "kubernetes.io/secret/3f926b6c-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") to pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.665096 1778 reconciler.go:254] MountVolume operation started for volume "kubernetes.io/secret/3f84e138-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") to pod "3f84e138-695e-11e6-9a99-0a42cc394ec9" (UID: "3f84e138-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.665134 1778 reconciler.go:254] MountVolume operation started for volume "kubernetes.io/secret/3f8fa200-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") to pod "3f8fa200-695e-11e6-9a99-0a42cc394ec9" (UID: "3f8fa200-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.665166 1778 reconciler.go:254] MountVolume operation started for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-influxdb-persistent-storage" (spec.Name: "influxdb-persistent-storage") to pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.669063 1778 operation_executor.go:740] MountVolume.SetUp succeeded for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-grafana-persistent-storage" (spec.Name: "grafana-persistent-storage") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.669726 1778 operation_executor.go:740] MountVolume.SetUp succeeded for volume "kubernetes.io/empty-dir/3f926b6c-695e-11e6-9a99-0a42cc394ec9-influxdb-persistent-storage" (spec.Name: "influxdb-persistent-storage") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.676109 1778 operation_executor.go:740] MountVolume.SetUp succeeded for volume "kubernetes.io/secret/3f8fa200-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f8fa200-695e-11e6-9a99-0a42cc394ec9" (UID: "3f8fa200-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.680897 1778 operation_executor.go:740] MountVolume.SetUp succeeded for volume "kubernetes.io/secret/3f926b6c-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f926b6c-695e-11e6-9a99-0a42cc394ec9" (UID: "3f926b6c-695e-11e6-9a99-0a42cc394ec9").
I0823 20:36:42.685983 1778 operation_executor.go:740] MountVolume.SetUp succeeded for volume "kubernetes.io/secret/3f84e138-695e-11e6-9a99-0a42cc394ec9-default-token-tw753" (spec.Name: "default-token-tw753") pod "3f84e138-695e-11e6-9a99-0a42cc394ec9" (UID: "3f84e138-695e-11e6-9a99-0a42cc394ec9").
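The reconciler lines follow a fixed two-phase pattern: VerifyControllerAttachedVolume for every volume in the desired state, then MountVolume for each, confirmed by MountVolume.SetUp succeeded. A compressed sketch of that desired-vs-actual loop (hypothetical types, not the real reconciler API):

package main

import "fmt"

type volume struct{ plugin, name, podUID string }

// reconcile walks the desired state and mounts anything not yet in the
// actual state, mimicking the verify-then-mount order in the log.
func reconcile(desired []volume, actual map[string]bool) {
	for _, v := range desired {
		fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.name)
	}
	for _, v := range desired {
		if actual[v.name] {
			continue // already mounted
		}
		fmt.Printf("MountVolume started for %q (pod %s)\n", v.name, v.podUID)
		actual[v.name] = true
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
	}
}

func main() {
	desired := []volume{
		{"kubernetes.io/secret", "default-token-tw753", "3f8fa200-695e-11e6-9a99-0a42cc394ec9"},
		{"kubernetes.io/empty-dir", "influxdb-persistent-storage", "3f926b6c-695e-11e6-9a99-0a42cc394ec9"},
	}
	reconcile(desired, map[string]bool{})
}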
I0823 20:36:42.746691 1778 docker_manager.go:1750] Need to restart pod infra container for "httphello-4180909509-a8k54_default(3f8fa200-695e-11e6-9a99-0a42cc394ec9)" because it is not found
I0823 20:36:42.746844 1778 docker_manager.go:1750] Need to restart pod infra container for "monitoring-influxdb-grafana-v3-dbemr_default(3f926b6c-695e-11e6-9a99-0a42cc394ec9)" because it is not found
I0823 20:36:42.747539 1778 docker_manager.go:1750] Need to restart pod infra container for "heapster-v1.1.0-794996294-o4cx9_default(3f84e138-695e-11e6-9a99-0a42cc394ec9)" because it is not found
I0823 20:36:42.912690 1778 provider.go:119] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
I0823 20:36:42.913055 1778 docker.go:185] Pulling image dockercloud/hello-world:latest without credentials
I0823 20:36:43.055628 1778 kube_docker_client.go:295] Stop pulling image "dockercloud/hello-world:latest": "Status: Image is up to date for dockercloud/hello-world:latest"
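The last two lines are the credential-less image pull: provider.go refreshes the (empty) default Docker credential cache, docker.go pulls without auth, and kube_docker_client.go reports the daemon's final status string. The same operation sketched via os/exec from the node (the kubelet itself talks to the Docker API directly, not the CLI):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// No credentials passed, matching "without credentials" in the log.
	out, err := exec.Command("docker", "pull", "dockercloud/hello-world:latest").CombinedOutput()
	if err != nil {
		log.Fatalf("pull failed: %v: %s", err, out)
	}
	fmt.Printf("%s", out) // ends with "Image is up to date for ..." when cached
}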