cilium clustermesh cluster1 logs
level=info msg=" --access-log=''" subsys=daemon | |
level=info msg=" --agent-labels=''" subsys=daemon | |
level=info msg=" --allow-localhost='auto'" subsys=daemon | |
level=info msg=" --auto-direct-node-routes='false'" subsys=daemon | |
level=info msg=" --auto-ipv6-node-routes='false'" subsys=daemon | |
level=info msg=" --bpf-compile-debug='false'" subsys=daemon | |
level=info msg=" --bpf-ct-global-any-max='262144'" subsys=daemon | |
level=info msg=" --bpf-ct-global-tcp-max='1000000'" subsys=daemon | |
level=info msg=" --bpf-root=''" subsys=daemon | |
level=info msg=" --cgroup-root=''" subsys=daemon | |
level=info msg=" --cluster-id='1'" subsys=daemon | |
level=info msg=" --cluster-name='cluster1'" subsys=daemon | |
level=info msg=" --clustermesh-config='/var/lib/cilium/clustermesh/'" subsys=daemon | |
level=info msg=" --cmdref=''" subsys=daemon | |
level=info msg=" --config=''" subsys=daemon | |
level=info msg=" --conntrack-garbage-collector-interval='60'" subsys=daemon | |
level=info msg=" --container-runtime=''" subsys=daemon | |
level=info msg=" --container-runtime-endpoint='map[]'" subsys=daemon | |
level=info msg=" --datapath-mode='veth'" subsys=daemon | |
level=info msg=" --debug='false'" subsys=daemon | |
level=info msg=" --debug-verbose=''" subsys=daemon | |
level=info msg=" --device='undefined'" subsys=daemon | |
level=info msg=" --disable-conntrack='false'" subsys=daemon | |
level=info msg=" --disable-endpoint-crd='false'" subsys=daemon | |
level=info msg=" --disable-envoy-version-check='false'" subsys=daemon | |
level=info msg=" --disable-ipv4='false'" subsys=daemon | |
level=info msg=" --disable-k8s-services='false'" subsys=daemon | |
level=info msg=" --docker='unix:///var/run/docker.sock'" subsys=daemon | |
level=info msg=" --enable-ipsec='false'" subsys=daemon | |
level=info msg=" --enable-ipv4='true'" subsys=daemon | |
level=info msg=" --enable-ipv6='false'" subsys=daemon | |
level=info msg=" --enable-policy='default'" subsys=daemon | |
level=info msg=" --enable-tracing='false'" subsys=daemon | |
level=info msg=" --envoy-log=''" subsys=daemon | |
level=info msg=" --fixed-identity-mapping='map[]'" subsys=daemon | |
level=info msg=" --flannel-manage-existing-containers='false'" subsys=daemon | |
level=info msg=" --flannel-master-device=''" subsys=daemon | |
level=info msg=" --flannel-uninstall-on-exit='false'" subsys=daemon | |
level=info msg=" --http-403-msg=''" subsys=daemon | |
level=info msg=" --http-idle-timeout='0'" subsys=daemon | |
level=info msg=" --http-max-grpc-timeout='0'" subsys=daemon | |
level=info msg=" --http-request-timeout='3600'" subsys=daemon | |
level=info msg=" --http-retry-count='3'" subsys=daemon | |
level=info msg=" --http-retry-timeout='0'" subsys=daemon | |
level=info msg=" --install-iptables-rules='true'" subsys=daemon | |
level=info msg=" --ipv4-cluster-cidr-mask-size='8'" subsys=daemon | |
level=info msg=" --ipv4-node='auto'" subsys=daemon | |
level=info msg=" --ipv4-range='auto'" subsys=daemon | |
level=info msg=" --ipv4-service-range='auto'" subsys=daemon | |
level=info msg=" --ipv6-cluster-alloc-cidr='f00d::/64'" subsys=daemon | |
level=info msg=" --ipv6-node='auto'" subsys=daemon | |
level=info msg=" --ipv6-range='auto'" subsys=daemon | |
level=info msg=" --ipv6-service-range='auto'" subsys=daemon | |
level=info msg=" --ipvlan-master-device='undefined'" subsys=daemon | |
level=info msg=" --k8s-api-server=''" subsys=daemon | |
level=info msg=" --k8s-kubeconfig-path=''" subsys=daemon | |
level=info msg=" --k8s-legacy-host-allows-world=''" subsys=daemon | |
level=info msg=" --k8s-namespace='kube-system'" subsys=daemon | |
level=info msg=" --k8s-require-ipv4-pod-cidr='false'" subsys=daemon | |
level=info msg=" --k8s-require-ipv6-pod-cidr='false'" subsys=daemon | |
level=info msg=" --keep-bpf-templates='false'" subsys=daemon | |
level=info msg=" --keep-config='false'" subsys=daemon | |
level=info msg=" --kvstore='etcd'" subsys=daemon | |
level=info msg=" --kvstore-opt='map[etcd.config:/var/lib/etcd-config/etcd.config]'" subsys=daemon | |
level=info msg=" --label-prefix-file=''" subsys=daemon | |
level=info msg=" --labels=''" subsys=daemon | |
level=info msg=" --lb=''" subsys=daemon | |
level=info msg=" --lib-dir='/var/lib/cilium'" subsys=daemon | |
level=info msg=" --log-driver=''" subsys=daemon | |
level=info msg=" --log-opt='map[]'" subsys=daemon | |
level=info msg=" --log-system-load='false'" subsys=daemon | |
level=info msg=" --masquerade='true'" subsys=daemon | |
level=info msg=" --max-controller-interval='0'" subsys=daemon | |
level=info msg=" --monitor-aggregation='none'" subsys=daemon | |
level=info msg=" --monitor-queue-size='32768'" subsys=daemon | |
level=info msg=" --mtu='0'" subsys=daemon | |
level=info msg=" --nat46-range='0:0:0:0:0:FFFF::/96'" subsys=daemon | |
level=info msg=" --pprof='false'" subsys=daemon | |
level=info msg=" --preallocate-bpf-maps='false'" subsys=daemon | |
level=info msg=" --prefilter-device='undefined'" subsys=daemon | |
level=info msg=" --prefilter-mode='native'" subsys=daemon | |
level=info msg=" --prepend-iptables-chains='true'" subsys=daemon | |
level=info msg=" --prometheus-serve-addr=''" subsys=daemon | |
level=info msg=" --proxy-connect-timeout='1'" subsys=daemon | |
level=info msg=" --restore='true'" subsys=daemon | |
level=info msg=" --sidecar-http-proxy='false'" subsys=daemon | |
level=info msg=" --sidecar-istio-proxy-image='cilium/istio_proxy'" subsys=daemon | |
level=info msg=" --single-cluster-route='false'" subsys=daemon | |
level=info msg=" --socket-path='/var/run/cilium/cilium.sock'" subsys=daemon | |
level=info msg=" --sockops-enable='false'" subsys=daemon | |
level=info msg=" --state-dir='/var/run/cilium'" subsys=daemon | |
level=info msg=" --tofqdns-dns-reject-response-code='refused'" subsys=daemon | |
level=info msg=" --tofqdns-enable-poller='false'" subsys=daemon | |
level=info msg=" --tofqdns-enable-poller-events='true'" subsys=daemon | |
level=info msg=" --tofqdns-endpoint-max-ip-per-hostname='50'" subsys=daemon | |
level=info msg=" --tofqdns-min-ttl='0'" subsys=daemon | |
level=info msg=" --tofqdns-proxy-port='0'" subsys=daemon | |
level=info msg=" --trace-payloadlen='128'" subsys=daemon | |
level=info msg=" --tunnel='vxlan'" subsys=daemon | |
level=info msg=" --version='false'" subsys=daemon | |
level=info msg=" _ _ _" subsys=daemon | |
level=info msg=" ___|_| |_|_ _ _____" subsys=daemon | |
level=info msg="| _| | | | | | |" subsys=daemon | |
level=info msg="|___|_|_|_|___|_|_|_|" subsys=daemon | |
level=info msg="Cilium 1.4.90 19de68a 2019-02-06T00:11:07+01:00 go version go1.11.1 linux/amd64" subsys=daemon | |
level=info msg="cilium-envoy version: 84ee839e1d78ef858a39e390288ad417d35bb1d4/1.9.0-dev/Modified/RELEASE" subsys=daemon | |
level=info msg="clang (3.8.1) and kernel (4.14.96) versions: OK!" subsys=daemon | |
level=info msg="linking environment: OK!" subsys=daemon | |
level=info msg="bpf_requirements check: OK!" subsys=daemon | |
level=info msg="Mounted BPF filesystem /sys/fs/bpf" subsys=bpf | |
level=info msg="Connecting to etcd server..." config=/var/lib/etcd-config/etcd.config endpoints="[https://cilium-etcd-client.kube-system.svc:2379]" subsys=kvstore | |
level=info msg="Valid label prefix configuration:" subsys=labels-filter | |
level=info msg=" - :io.kubernetes.pod.namespace" subsys=labels-filter | |
level=info msg=" - :io.cilium.k8s.namespace.labels" subsys=labels-filter | |
level=info msg=" - !:io.kubernetes" subsys=labels-filter | |
level=info msg=" - !:.*kubernetes.io" subsys=labels-filter | |
level=info msg=" - !:pod-template-generation" subsys=labels-filter | |
level=info msg=" - !:pod-template-hash" subsys=labels-filter | |
level=info msg=" - !:controller-revision-hash" subsys=labels-filter | |
level=info msg=" - !:annotation.cilium.io/" subsys=labels-filter | |
level=info msg=" - !:annotation.cilium-identity" subsys=labels-filter | |
level=info msg=" - !:annotation.sidecar.istio.io" subsys=labels-filter | |
level=info msg=" - !:annotation.etcd.version" subsys=labels-filter | |
level=info msg=" - !:etcd_node" subsys=labels-filter | |
level=info msg="Initializing daemon" subsys=daemon | |
level=info msg="Detected MTU 9001" subsys=mtu | |
level=info msg="Restored service ID from datapath" serviceID=3 serviceIP="100.64.2.158:2379" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=2 serviceIP="100.64.0.1:443" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=4 serviceIP="100.64.19.16:2379" subsys=daemon | |
level=info msg="Restored service ID from datapath" serviceID=1 serviceIP="100.64.0.10:53" subsys=daemon | |
level=info msg="Clearing leftover Cilium veths" subsys=daemon | |
level=info msg="Waiting for k8s api-server to be ready..." subsys=k8s | |
level=info msg="Connected to k8s api-server" ipAddr="https://100.64.0.1:443" subsys=k8s | |
level=info msg="Retrieved node information from kubernetes" nodeName=ip-10-0-1-235.ap-south-1.compute.internal subsys=k8s | |
level=info msg="Received own node information from API server" ipAddr.ipv4=10.0.1.235 ipAddr.ipv6="<nil>" nodeName=ip-10-0-1-235.ap-south-1.compute.internal subsys=k8s | |
level=info msg="Retrieved IPv4 allocation range for node. Using it for ipv4-range" node=ip-10-0-1-235.ap-south-1.compute.internal subsys=node v4Prefix=100.64.128.0/24 | |
level=info msg="Automatically retrieved IP for node. Using it for ipv4-node" ipAddr=10.0.1.235 node=ip-10-0-1-235.ap-south-1.compute.internal subsys=node | |
level=info msg="Kubernetes information:" subsys=daemon | |
level=info msg=" Namespace: kube-system" subsys=daemon | |
level=info msg="k8s mode: Allowing localhost to reach local endpoints" subsys=daemon | |
level=info msg="Initializing node addressing" subsys=daemon | |
level=info msg="Restored IPv4 internal node IP: 100.64.128.1" subsys=node | |
level=info msg="Initializing IPAM" subsys=daemon | |
level=info msg="Container runtime options set: endpoint=/var/run/containerd/containerd.sock,endpoint=/var/run/crio.sock,datapath-mode=veth,endpoint=unix:///var/run/docker.sock" subsys=daemon | |
level=info msg="Container runtime options set: endpoint=/var/run/containerd/containerd.sock,endpoint=/var/run/crio.sock,datapath-mode=veth,endpoint=unix:///var/run/docker.sock" subsys=daemon | |
level=info msg="Restoring endpoints from former life..." subsys=daemon | |
level=info msg="Endpoints restored" count.restored=0 count.total=1 subsys=daemon | |
level=info msg="Addressing information:" subsys=daemon | |
level=info msg=" Cluster-Name: cluster1" subsys=daemon | |
level=info msg=" Cluster-ID: 1" subsys=daemon | |
level=info msg=" Local node-name: ip-10-0-1-235.ap-south-1.compute.internal" subsys=daemon | |
level=info msg=" External-Node IPv4: 10.0.1.235" subsys=daemon | |
level=info msg=" Internal-Node IPv4: 100.64.128.1" subsys=daemon | |
level=info msg=" Cluster IPv4 prefix: 100.0.0.0/8" subsys=daemon | |
level=info msg=" IPv4 allocation prefix: 100.64.128.0/24" subsys=daemon | |
level=info msg=" Loopback IPv4: 100.64.128.48" subsys=daemon | |
level=info msg="Annotating k8s node with CIDR ranges" subsys=daemon | |
level=info msg="Initializing identity allocator" subsys=identity-cache | |
level=info msg="Adding local node to cluster" subsys=daemon | |
level=info msg="Successfully verified version of etcd endpoint" config=/var/lib/etcd-config/etcd.config endpoints="[https://cilium-etcd-client.kube-system.svc:2379]" etcdEndpoint="https://cilium-etcd-client.kube-system.svc:2379" subsys=kvstore version=3.3.11 | |
level=info msg="Starting to watch allocation changes" kvstoreErr="<nil>" kvstoreStatus="etcd: 1/1 connected: https://cilium-etcd-client.kube-system.svc:2379 - 3.3.11" prefix=cilium/state/identities/v1/id subsys=allocator | |
level=info msg="Initializing ClusterMesh routing" path=/var/lib/cilium/clustermesh/ subsys=daemon | |
level=info msg="Sockmap disabled." subsys=sockops | |
level=info msg="Sockmsg Disabled." subsys=sockops | |
level=info msg="Setting sysctl net.core.bpf_jit_enable=1" subsys=daemon | |
level=info msg="Setting sysctl net.ipv4.conf.all.rp_filter=0" subsys=daemon | |
level=info msg="Setting sysctl net.ipv6.conf.all.disable_ipv6=0" subsys=daemon | |
level=info msg="Starting IP identity watcher" subsys=ipcache | |
level=info msg="Validating configured node address ranges" subsys=daemon | |
level=info msg="Starting connection tracking garbage collector" subsys=daemon | |
level=info msg="Envoy: Starting xDS gRPC server listening on /var/run/cilium/xds.sock" subsys=envoy-manager | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=3.290106ms family=ipv4 maxEntries=1000000 numDeleted=3 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:29:48.172122728 +0000 UTC m=+2.099966331" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="541.332µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:29:48.175674507 +0000 UTC m=+2.103518183" subsys=map-ct | |
level=info msg="Initial scan of connection tracking completed" subsys=endpoint-manager | |
level=info msg="Launching node monitor daemon" subsys=daemon | |
level=info msg="Enabling k8s event listener" subsys=daemon | |
level=info msg="Periodic IPCache map swap will occur due to lack of kernel support for LPM delete operation. Upgrade to Linux 4.15 or higher to avoid this." subsys=map-ipcache | |
level=info msg="Serving cilium node monitor v1.0 API at unix:///var/run/cilium/monitor.sock" subsys=cilium-node-monitor | |
level=info msg="Serving cilium node monitor v1.2 API at unix:///var/run/cilium/monitor1_2.sock" subsys=cilium-node-monitor | |
level=info msg="Beginning to read cilium agent events" subsys=cilium-node-monitor | |
level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=CiliumNetworkPolicy/v2 subsys=k8s | |
level=info msg="Updating CRD (CustomResourceDefinition)..." name=v2.CiliumEndpoint subsys=k8s | |
level=info msg="Setting sysctl net.core.bpf_jit_enable=1" subsys=daemon | |
level=info msg="Setting sysctl net.ipv4.conf.all.rp_filter=0" subsys=daemon | |
level=info msg="Setting sysctl net.ipv6.conf.all.disable_ipv6=0" subsys=daemon | |
level=info msg="regenerating all endpoints due to datapath ipcache" subsys=endpoint-manager | |
level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=v2.CiliumEndpoint subsys=k8s | |
level=info msg="Waiting until all pre-existing resources related to policy have been received" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.64.129.110:2379/TCP,100.64.129.179:2379/TCP,100.64.129.224:2379/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd-client service="frontend:100.64.2.158/ports=[client]/selector=map[app:etcd etcd_cluster:cilium-etcd]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.64.129.110:2379/TCP,100.64.129.179:2379/TCP,100.64.129.224:2379/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd-external service="frontend:100.64.19.16/ports=[]/selector=map[etcd_cluster:cilium-etcd io.cilium/app:etcd-operator app:etcd]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.64.129.25:53/TCP,100.64.129.25:53/UDP,100.64.129.63:53/TCP,100.64.129.63:53/UDP" k8sNamespace=kube-system k8sSvcName=kube-dns service="frontend:100.64.0.10/ports=[dns dns-tcp]/selector=map[k8s-app:kube-dns]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="10.0.1.235:443/TCP" k8sNamespace=default k8sSvcName=kubernetes service="frontend:100.64.0.1/ports=[https]/selector=map[]" subsys=daemon | |
level=info msg="Kubernetes service definition changed" action=service-updated endpoints="100.64.129.110:2379/TCP,100.64.129.110:2380/TCP,100.64.129.179:2379/TCP,100.64.129.179:2380/TCP,100.64.129.224:2379/TCP,100.64.129.224:2380/TCP" k8sNamespace=kube-system k8sSvcName=cilium-etcd service="frontend:<nil>/ports=[client peer]/selector=map[app:etcd etcd_cluster:cilium-etcd]" subsys=daemon | |
level=info msg="All pre-existing resources related to policy have been received; continuing" subsys=daemon | |
level=info msg="Regenerating 0 restored endpoints" subsys=daemon | |
level=info msg="Finished regenerating restored endpoints" regenerated=0 subsys=daemon total=0 | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_calls_2183 subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_ct4_global subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_ct_any4_global subsys=datapath-maps | |
level=info msg="Removed stale bpf map" file-path=/sys/fs/bpf/tc/globals/cilium_policy_2183 subsys=datapath-maps | |
level=info msg="Enabling docker event listener" subsys=workload-watcher | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=2.133309ms family=ipv4 maxEntries=1000000 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:29:49.877760467 +0000 UTC m=+3.805604114" subsys=map-ct | |
level=info msg="Removed endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=2183 identity=4 ipv4=100.64.128.235 ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Building health endpoint" subsys=daemon | |
level=info msg="Launching Cilium health daemon" subsys=daemon | |
level=info msg="Launching Cilium health endpoint" subsys=daemon | |
level=info msg="Spawning health endpoint with arguments []string{\"cilium-health\", \"cilium_health\", \"cilium\", \"\", \"100.64.128.37/32\", \"cilium-health\", \"-d --admin=unix --passive --pidfile /var/run/cilium/state/health-endpoint.pid\"}" subsys=cilium-health-launcher | |
level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=0 identity=4 identityLabels="reserved:health" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint | |
level=info msg="No request received to manage networking for container" containerID=da3dd169f7 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="Initializing Cilium API" subsys=daemon | |
level=info msg="Daemon initialization completed" bootstrapTime=3.819803627s subsys=daemon | |
level=info msg="Serving cilium at unix:///var/run/cilium/cilium.sock" subsys=daemon | |
level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server | |
level=info msg="Adding route" command="ip route add 100.64.128.1/32 dev cilium" netns=cilium-health subsys=cilium-health-launcher | |
level=info msg="Adding route" command="ip route add 0.0.0.0/0 via 100.64.128.1 mtu 8951 dev cilium" netns=cilium-health subsys=cilium-health-launcher | |
level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3601 identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Regenerating endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3601 identity=4 ipv4= ipv6= k8sPodName=/ reason="health daemon bootstrap" startTime="2019-02-06 17:29:50.929689145 +0000 UTC m=+4.857532756" subsys=endpoint | |
level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server | |
level=info msg="Recompiled endpoint BPF program" BPFCompilationTime=642.530769ms containerID= datapathPolicyRevision=0 desiredPolicyRevision=1 endpointID=3601 error="<nil>" identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint | |
level=info msg="Completed endpoint regeneration" bpfCompilation=642.530769ms buildDuration=643.890053ms containerID= datapathPolicyRevision=1 desiredPolicyRevision=1 endpointID=3601 identity=4 ipv4= ipv6= k8sPodName=/ mapSync="225.469µs" policyCalculation="44.978µs" prepareBuild="508.722µs" proxyConfiguration="21.32µs" proxyPolicyCalculation="18.447µs" proxyWaitForAck="4.287µs" reason="health daemon bootstrap" subsys=endpoint waitingForCTClean="4.239µs" waitingForLock="6.71µs" | |
level=info msg="Serving cilium health at unix:///var/run/cilium/health.sock" subsys=health-server | |
level=info msg="No request received to manage networking for container" containerID=a5417eef0d maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=f61654a1dd maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=a31df6c680 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=43b241b5b9 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=7fe460476e maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=1020a0067c maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=523007fc25 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=b82b34d9f1 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=4ba78d32d5 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=65b83ac3e6 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=9831131bb7 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=13a6b9044e maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=f93ed1dad2 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=23996554a5 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=7d11760146 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="No request received to manage networking for container" containerID=3694510e55 maxRetry=20 subsys=workload-watcher willRetry=false | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=1.960279ms family=ipv4 maxEntries=1000000 numDeleted=1 numKeyFallbacks=0 numLookups=2 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:30:48.176559712 +0000 UTC m=+62.104403305" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="543.364µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:30:48.178624254 +0000 UTC m=+62.106467857" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=2.010932ms family=ipv4 maxEntries=1000000 numDeleted=2 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:31:48.179471834 +0000 UTC m=+122.107315428" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="499.111µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:31:48.181592098 +0000 UTC m=+122.109564048" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration=1.955616ms family=ipv4 maxEntries=1000000 numDeleted=2 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-02-06 17:32:48.182515821 +0000 UTC m=+182.110359394" subsys=map-ct | |
level=info msg="Conntrack garbage collection statistics" completed=true duration="528.399µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-02-06 17:32:48.184615364 +0000 UTC m=+182.112458957" subsys=map-ct |