cilium clustermesh cluster2 logs
level=info msg=" --access-log=''" subsys=daemon | |
level=info msg=" --agent-labels=''" subsys=daemon | |
level=info msg=" --allow-localhost='auto'" subsys=daemon | |
level=info msg=" --auto-direct-node-routes='false'" subsys=daemon | |
level=info msg=" --auto-ipv6-node-routes='false'" subsys=daemon | |
level=info msg=" --bpf-compile-debug='false'" subsys=daemon | |
level=info msg=" --bpf-ct-global-any-max='262144'" subsys=daemon | |
level=info msg=" --bpf-ct-global-tcp-max='1000000'" subsys=daemon | |
level=info msg=" --bpf-root=''" subsys=daemon | |
level=info msg=" --cgroup-root=''" subsys=daemon | |
level=info msg=" --cluster-id='2'" subsys=daemon | |
level=info msg=" --cluster-name='cluster2'" subsys=daemon | |
level=info msg=" --clustermesh-config='/var/lib/cilium/clustermesh/'" subsys=daemon | |
level=info msg=" --cmdref=''" subsys=daemon | |
level=info msg=" --config=''" subsys=daemon | |
level=info msg=" --conntrack-garbage-collector-interval='60'" subsys=daemon | |
level=info msg=" --container-runtime=''" subsys=daemon | |
level=info msg=" --container-runtime-endpoint='map[]'" subsys=daemon | |
level=info msg=" --datapath-mode='veth'" subsys=daemon | |
level=info msg=" --debug='false'" subsys=daemon | |
level=info msg=" --debug-verbose=''" subsys=daemon | |
level=info msg=" --device='undefined'" subsys=daemon | |
level=info msg=" --disable-conntrack='false'" subsys=daemon | |
level=info msg=" --disable-endpoint-crd='false'" subsys=daemon | |
level=info msg=" --disable-envoy-version-check='false'" subsys=daemon | |
level=info msg=" --disable-ipv4='false'" subsys=daemon | |
level=info msg=" --disable-k8s-services='false'" subsys=daemon | |
level=info msg=" --docker='unix:///var/run/docker.sock'" subsys=daemon | |
level=info msg=" --enable-ipsec='false'" subsys=daemon | |
level=info msg=" --enable-ipv4='true'" subsys=daemon | |
level=info msg=" --enable-ipv6='false'" subsys=daemon | |
level=info msg=" --enable-policy='default'" subsys=daemon | |
level=info msg=" --enable-tracing='false'" subsys=daemon | |
level=info msg=" --envoy-log=''" subsys=daemon | |
level=info msg=" --fixed-identity-mapping='map[]'" subsys=daemon | |
level=info msg=" --flannel-manage-existing-containers='false'" subsys=daemon | |
level=info msg=" --flannel-master-device=''" subsys=daemon | |
level=info msg=" --flannel-uninstall-on-exit='false'" subsys=daemon | |
level=info msg=" --http-403-msg=''" subsys=daemon | |
level=info msg=" --http-idle-timeout='0'" subsys=daemon | |
level=info msg=" --http-max-grpc-timeout='0'" subsys=daemon | |
level=info msg=" --http-request-timeout='3600'" subsys=daemon | |
level=info msg=" --http-retry-count='3'" subsys=daemon | |
level=info msg=" --http-retry-timeout='0'" subsys=daemon | |
level=info msg=" --install-iptables-rules='true'" subsys=daemon | |
level=info msg=" --ipv4-cluster-cidr-mask-size='8'" subsys=daemon | |
level=info msg=" --ipv4-node='auto'" subsys=daemon | |
level=info msg=" --ipv4-range='auto'" subsys=daemon | |
level=info msg=" --ipv4-service-range='auto'" subsys=daemon | |
level=info msg=" --ipv6-cluster-alloc-cidr='f00d::/64'" subsys=daemon | |
level=info msg=" --ipv6-node='auto'" subsys=daemon | |
level=info msg=" --ipv6-range='auto'" subsys=daemon | |
level=info msg=" --ipv6-service-range='auto'" subsys=daemon | |
level=info msg=" --ipvlan-master-device='undefined'" subsys=daemon | |
level=info msg=" --k8s-api-server=''" subsys=daemon | |
level=info msg=" --k8s-kubeconfig-path=''" subsys=daemon | |
level=info msg=" --k8s-legacy-host-allows-world=''" subsys=daemon | |
level=info msg=" --k8s-namespace='kube-system'" subsys=daemon | |
level=info msg=" --k8s-require-ipv4-pod-cidr='false'" subsys=daemon | |
level=info msg=" --k8s-require-ipv6-pod-cidr='false'" subsys=daemon | |
level=info msg=" --keep-bpf-templates='false'" subsys=daemon | |
level=info msg=" --keep-config='false'" subsys=daemon | |
level=info msg=" --kvstore='etcd'" subsys=daemon | |
level=info msg=" --kvstore-opt='map[etcd.config:/var/lib/etcd-config/etcd.config]'" subsys=daemon | |
level=info msg=" --label-prefix-file=''" subsys=daemon | |
level=info msg=" --labels=''" subsys=daemon | |
level=info msg=" --lb=''" subsys=daemon | |
level=info msg=" --lib-dir='/var/lib/cilium'" subsys=daemon | |
level=info msg=" --log-driver=''" subsys=daemon | |
level=info msg=" --log-opt='map[]'" subsys=daemon | |
level=info msg=" --log-system-load='false'" subsys=daemon | |
level=info msg=" --masquerade='true'" subsys=daemon | |
level=info msg=" --max-controller-interval='0'" subsys=daemon | |
level=info msg=" --monitor-aggregation='none'" subsys=daemon | |
level=info msg=" --monitor-queue-size='32768'" subsys=daemon | |
level=info msg=" --mtu='0'" subsys=daemon | |
level=info msg=" --nat46-range='0:0:0:0:0:FFFF::/96'" subsys=daemon | |
level=info msg=" --pprof='false'" subsys=daemon | |
level=info msg=" --preallocate-bpf-maps='false'" subsys=daemon | |
level=info msg=" --prefilter-device='undefined'" subsys=daemon | |
level=info msg=" --prefilter-mode='native'" subsys=daemon | |
level=info msg=" --prepend-iptables-chains='true'" subsys=daemon | |
level=info msg=" --prometheus-serve-addr=''" subsys=daemon | |
level=info msg=" --proxy-connect-timeout='1'" subsys=daemon | |
level=info msg=" --restore='true'" subsys=daemon | |
level=info msg=" --sidecar-http-proxy='false'" subsys=daemon | |
level=info msg=" --sidecar-istio-proxy-image='cilium/istio_proxy'" subsys=daemon | |
level=info msg=" --single-cluster-route='false'" subsys=daemon | |
level=info msg=" --socket-path='/var/run/cilium/cilium.sock'" subsys=daemon | |
level=info msg=" --sockops-enable='false'" subsys=daemon | |
level=info msg=" --state-dir='/var/run/cilium'" subsys=daemon | |
level=info msg=" --tofqdns-dns-reject-response-code='refused'" subsys=daemon | |
level=info msg=" --tofqdns-enable-poller='false'" subsys=daemon | |
level=info msg=" --tofqdns-enable-poller-events='true'" subsys=daemon | |
level=info msg=" --tofqdns-endpoint-max-ip-per-hostname='50'" subsys=daemon | |
level=info msg=" --tofqdns-min-ttl='0'" subsys=daemon | |
level=info msg=" --tofqdns-proxy-port='0'" subsys=daemon | |
level=info msg=" --trace-payloadlen='128'" subsys=daemon | |
level=info msg=" --tunnel='vxlan'" subsys=daemon | |
level=info msg=" --version='false'" subsys=daemon | |
level=info msg=" _ _ _" subsys=daemon | |
level=info msg=" ___|_| |_|_ _ _____" subsys=daemon | |
level=info msg="| _| | | | | | |" subsys=daemon | |
level=info msg="|___|_|_|_|___|_|_|_|" subsys=daemon | |
level=info msg="Cilium 1.4.90 b2ed853 2019-02-06T14:45:37+01:00 go version go1.11.1 linux/amd64" subsys=daemon | |
level=info msg="cilium-envoy version: 84ee839e1d78ef858a39e390288ad417d35bb1d4/1.9.0-dev/Modified/RELEASE" subsys=daemon | |
level=info msg="clang (3.8.1) and kernel (4.14.96) versions: OK!" subsys=daemon | |
level=info msg="linking environment: OK!" subsys=daemon | |
level=info msg="bpf_requirements check: OK!" subsys=daemon | |
level=info msg="Mounted BPF filesystem /sys/fs/bpf" subsys=bpf | |
level=info msg="Connecting to etcd server..." config=/var/lib/etcd-config/etcd.config endpoints="[https://cilium-etcd-client.kube-system.svc:2379]" subsys=kvstore | |
level=info msg="Valid label prefix configuration:" subsys=labels-filter | |
level=info msg=" - :io.kubernetes.pod.namespace" subsys=labels-filter | |
level=info msg=" - :io.cilium.k8s.namespace.labels" subsys=labels-filter | |
level=info msg=" - !:io.kubernetes" subsys=labels-filter | |
level=info msg=" - !:.*kubernetes.io" subsys=labels-filter | |
level=info msg=" - !:pod-template-generation" subsys=labels-filter | |
level=info msg=" - !:pod-template-hash" subsys=labels-filter | |
level=info msg=" - !:controller-revision-hash" subsys=labels-filter | |
level=info msg=" - !:annotation.cilium.io/" subsys=labels-filter | |
level=info msg=" - !:annotation.cilium-identity" subsys=labels-filter | |
level=info msg=" - !:annotation.sidecar.istio.io" subsys=labels-filter | |
level=info msg=" - !:annotation.etcd.version" subsys=labels-filter | |
level=info msg=" - !:etcd_node" subsys=labels-filter | |
level=info msg="Initializing daemon" subsys=daemon | |
level=info msg="Detected MTU 9001" subsys=mtu | |
level=info msg="Restored service ID from datapath" serviceID=2 serviceIP="100.65.0.10:53" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=1 serviceIP="100.65.0.1:443" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=4 serviceIP="100.65.13.71:2379" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=3 serviceIP="100.65.14.98:2379" subsys=daemon | |
level=info msg="Clearing leftover Cilium veths" subsys=daemon | |
level=info msg="Waiting for k8s api-server to be ready..." subsys=k8s | |
level=info msg="Connected to k8s api-server" ipAddr="https://100.65.0.1:443" subsys=k8s | |
level=info msg="Retrieved node information from kubernetes" nodeName=ip-10-0-1-211.ap-south-1.compute.internal subsys=k8s | |
level=info msg="Received own node information from API server" ipAddr.ipv4=10.0.1.211 ipAddr.ipv6="<nil>" nodeName=ip-10-0-1-211.ap-south-1.compute.internal subsys=k8s | |
level=info msg="Retrieved IPv4 allocation range for node. Using it for ipv4-range" node=ip-10-0-1-211.ap-south-1.compute.internal subsys=node v4Prefix=100.65.128.0/24 | |
level=info msg="Automatically retrieved IP for node. Using it for ipv4-node" ipAddr=10.0.1.211 node=ip-10-0-1-211.ap-south-1.compute.internal subsys=node | |
level=info msg="Kubernetes information:" subsys=daemon | |
level=info msg=" Namespace: kube-system" subsys=daemon | |
level=info msg="k8s mode: Allowing localhost to reach local endpoints" subsys=daemon | |
level=info msg="Initializing node addressing" subsys=daemon | |
level=info msg="Restored IPv4 internal node IP: 100.65.128.1" subsys=node | |
level=info msg="Initializing IPAM" subsys=daemon | |
level=info msg="Container runtime options set: endpoint=/var/run/containerd/containerd.sock,endpoint=/var/run/crio.sock,datapath-mode=veth,endpoint=unix:///var/run/docker.sock" subsys=daemon | |
level=info msg="Container runtime options set: endpoint=/var/run/containerd/containerd.sock,endpoint=/var/run/crio.sock,datapath-mode=veth,endpoint=unix:///var/run/docker.sock" subsys=daemon | |
level=info msg="Restoring endpoints from former life..." subsys=daemon | |
level=info msg="Endpoints restored" count.restored=0 count.total=1 subsys=daemon | |
level=info msg="Addressing information:" subsys=daemon | |
level=info msg=" Cluster-Name: cluster2" subsys=daemon | |
level=info msg=" Cluster-ID: 2" subsys=daemon | |
level=info msg=" Local node-name: ip-10-0-1-211.ap-south-1.compute.internal" subsys=daemon | |
level=info msg=" External-Node IPv4: 10.0.1.211" subsys=daemon | |
level=info msg=" Internal-Node IPv4: 100.65.128.1" subsys=daemon | |
level=info msg=" Cluster IPv4 prefix: 100.0.0.0/8" subsys=daemon | |
level=info msg=" IPv4 allocation prefix: 100.65.128.0/24" subsys=daemon | |
level=info msg=" Loopback IPv4: 100.65.128.236" subsys=daemon | |
level=info msg="Annotating k8s node with CIDR ranges" subsys=daemon | |
level=info msg="Initializing identity allocator" subsys=identity-cache | |
level=info msg="Adding local node to cluster" subsys=daemon | |
level=info msg="Starting to watch allocation changes" kvstoreErr="<nil>" kvstoreStatus="etcd: 1/1 connected: https://cilium-etcd-client.kube-system.svc:2379 - 3.3.11" prefix=cilium/state/identities/v1/id subsys=allocator | |
level=info msg="Initializing ClusterMesh routing" path=/var/lib/cilium/clustermesh/ subsys=daemon | |
level=info msg="Sockmap disabled." subsys=sockops | |
level=info msg="Sockmsg Disabled." subsys=sockops | |
level=info msg="Successfully verified version of etcd endpoint" config=/var/lib/etcd-config/etcd.config endpoints="[https://cilium-etcd-client.kube-system.svc:2379]" etcdEndpoint="https://cilium-etcd-client.kube-system.svc:2379" subsys=kvstore version=3.3.11 | |
level=info msg="Setting sysctl net.core.bpf_jit_enable=1" subsys=daemon | |
level=info msg="Setting sysctl net.ipv4.conf.all.rp_filter=0" subsys=daemon | |
level=info msg="Setting sysctl net.ipv6.conf.all.disable_ipv6=0" subsys=daemon | |
level=info msg="Starting IP identity watcher" subsys=ipcache | |
level=info msg="Envoy: Starting xDS gRPC server listening on /var/run/cilium/xds.sock" subsys=envoy-manager | |
level=info msg="Validating configured node address ranges" subsys=daemon | |
level=info msg="Starting connection tracking garbage collector" subsys=daemon | |
level=info msg="Periodic IPCache map swap will occur due to lack of kernel support for LPM delete operation. Upgrade to Linux 4.15 or higher to avoid this." subsys=map-ipcache | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=2.472227ms family=ipv4 maxEntries=1000000 numDeleted=3 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:30:20.343260308 +0000 UTC m=+2.103341491" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="826.387µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:30:20.345833695 +0000 UTC m=+2.105914930" subsys=map-ct | |
level=info msg="Initial scan of connection tracking completed" subsys=endpoint-manager | |
level=info msg="Launching node monitor daemon" subsys=daemon | |
level=info msg="Enabling k8s event listener" subsys=daemon | |
level=info msg="Serving cilium node monitor v1.0 API at unix:///var/run/cilium/monitor.sock" subsys=cilium-node-monitor | |
level=info msg="Serving cilium node monitor v1.2 API at unix:///var/run/cilium/monitor1_2.sock" subsys=cilium-node-monitor | |
level=info msg="Beginning to read cilium agent events" subsys=cilium-node-monitor | |
level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=CiliumNetworkPolicy/v2 subsys=k8s | |
level=info msg="Updating CRD (CustomResourceDefinition)..." name=v2.CiliumEndpoint subsys=k8s | |
level=info msg="Setting sysctl net.core.bpf_jit_enable=1" subsys=daemon | |
level=info msg="Setting sysctl net.ipv4.conf.all.rp_filter=0" subsys=daemon | |
level=info msg="Setting sysctl net.ipv6.conf.all.disable_ipv6=0" subsys=daemon | |
level=info msg="regenerating all endpoints due to datapath ipcache" subsys=endpoint-manager | |
level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=v2.CiliumEndpoint subsys=k8s | |
level=info msg="Waiting until all pre-existing resources related to policy have been received" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.65.129.200:2379/TCP,100.65.129.210:2379/TCP,100.65.129.58:2379/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd-client service="frontend:100.65.14.98/ports=[client]/selector=map[app:etcd etcd_cluster:cilium-etcd]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.65.129.200:2379/TCP,100.65.129.210:2379/TCP,100.65.129.58:2379/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd-external service="frontend:100.65.13.71/ports=[]/selector=map[etcd_cluster:cilium-etcd io.cilium/app:etcd-operator app:etcd]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.65.129.117:53/TCP,100.65.129.117:53/UDP,100.65.129.69:53/TCP,100.65.129.69:53/UDP" k8sNamespace=kube-system k8sSvcName=kube-dns service="frontend:100.65.0.10/ports=[dns dns-tcp]/selector=map[k8s-app:kube-dns]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="10.0.1.211:443/TCP" k8sNamespace=default k8sSvcName=kubernetes service="frontend:100.65.0.1/ports=[https]/selector=map[]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.65.129.200:2379/TCP,100.65.129.200:2380/TCP,100.65.129.210:2379/TCP,100.65.129.210:2380/TCP,100.65.129.58:2379/TCP,100.65.129.58:2380/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd service="frontend:<nil>/ports=[client peer]/selector=map[app:etcd etcd_cluster:cilium-etcd]" subsys=daemon | |
level=info msg="All pre-existing resources related to policy have been received; continuing" subsys=daemon | |
level=info msg="Regenerating 0 restored endpoints" subsys=daemon | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_calls_1763 subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_ct4_global subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_ct_any4_global subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_policy_1763 subsys=datapath-maps | |
level=info msg="Enabling docker event listener" subsys=workload-watcher | |
level=info msg="Removed endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1763 identity=4 ipv4=100.65.128.154 ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Finished regenerating restored endpoints" regenerated=0 subsys=daemon total=0 | |
level=info msg="Building health endpoint" subsys=daemon | |
level=info msg="Launching Cilium health daemon" subsys=daemon | |
level=info msg="Launching Cilium health endpoint" subsys=daemon | |
level=info msg="Spawning health endpoint with arguments []string{\"cilium-health\", \"cilium_health\", \"cilium\", \"\", \"100.65.128.47/32\", \"cilium-health\", \"-d --admin=unix --passive --pidfile /var/run/cilium/state/health-endpoint.pid\"}" subsys=cilium-health-launcher | |
level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=0 identity=4 identityLabels="reserved:health" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint | |
level=info msg="No request received to manage networking for container" containerID=c5543d497a maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="Initializing Cilium API" subsys=daemon | |
level=info msg="Daemon initialization completed" bootstrapTime=3.844425772s subsys=daemon | |
level=info msg="Serving cilium at unix:///var/run/cilium/cilium.sock" subsys=daemon | |
level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server | |
level=info msg="Adding route" command="ip route add 100.65.128.1/32 dev cilium" netns=cilium-health subsys=cilium-health-launcher | |
level=info msg="Adding route" command="ip route add 0.0.0.0/0 via 100.65.128.1 mtu 8951 dev cilium" netns=cilium-health subsys=cilium-health-launcher | |
level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1798 identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Regenerating endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=1798 identity=4 ipv4= ipv6= k8sPodName=/ reason="health daemon bootstrap" startTime="2019-02-06 17:30:23.116488333 +0000 UTC m=+4.876569484" subsys=endpoint | |
level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server | |
level=info msg="Recompiled endpoint BPF program" BPFCompilationTime=676.116943ms containerID= datapathPolicyRevision=0 desiredPolicyRevision=1 endpointID=1798 error="<nil>" identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Completed endpoint regeneration" bpfCompilation=676.116943ms buildDuration=677.36286ms containerID= datapathPolicyRevision=1 desiredPolicyRevision=1 endpointID=1798 identity=4 ipv4= ipv6= k8sPodName=/ mapSync="163.91µs" policyCalculation="53.871µs" prepareBuild="441.461µs" proxyConfiguration="7.171µs" proxyPolicyCalculation="34.758µs" proxyWaitForAck="3.83µs" reason="health daemon bootstrap" subsys=endpoint waitingForCTClean="3.728µs" waitingForLock="5.722µs" | |
level=info msg="Serving cilium health at unix:///var/run/cilium/health.sock" subsys=health-server | |
level=info msg="No request received to manage networking for container" containerID=d879e72cb7 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=c2aa1ed0f1 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=133fb6a0d5 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=aebc4de253 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=98485ad148 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=460d21b97f maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=1537a4d6fa maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=caeb1aad65 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=4739d848f0 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=da0c9fb973 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=f45b0f94ed maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=c14d3eca08 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=bf39a6abb5 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=b962a48dab maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=25fecdd68d maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=a0b4391b71 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=2.126161ms family=ipv4 maxEntries=1000000 numDeleted=1 numKeyFallbacks=0 numLookups=2 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:31:20.346961227 +0000 UTC m=+62.107042532" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="568.22µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:31:20.34918169 +0000 UTC m=+62.109415758" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=2.152435ms family=ipv4 maxEntries=1000000 numDeleted=2 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:32:20.350182321 +0000 UTC m=+122.110263603" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="570.024µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:32:20.352604911 +0000 UTC m=+122.112686166" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=1.992394ms family=ipv4 maxEntries=1000000 numDeleted=2 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:33:20.353352684 +0000 UTC m=+182.113433928" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="572.649µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:33:20.35548327 +0000 UTC m=+182.115564471" subsys=map-ct |