Created November 19, 2024 19:14
cpu manager (static)
STEP 1/3 — Building Dockerfile: [ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware]
Building Dockerfile for platform linux/amd64:
ARG GO_VERSION=1.22
FROM golang:${GO_VERSION}-bullseye AS builder
ARG IMAGE_VERSION
ARG BUILD_VERSION
ARG BUILD_BUILDID
ARG DEBUG=0
ARG NORACE=0
WORKDIR /go/builder
RUN mkdir -p /debug-extras; \
    if [ "$DEBUG" = 1 ]; then \
        mkdir -p /debug-extras/bin; \
        GOBIN=/debug-extras/bin go install -tags osusergo,netgo -ldflags "-extldflags=-static" github.com/go-delve/delve/cmd/dlv@latest; \
    fi
# Fetch go dependencies in a separate layer for caching
COPY go.mod go.sum ./
COPY pkg/topology/ pkg/topology/
RUN go mod download
# Build nri-resource-policy
COPY . .
RUN make clean
RUN make IMAGE_VERSION=${IMAGE_VERSION} BUILD_VERSION=${BUILD_VERSION} BUILD_BUILDID=${BUILD_BUILDID} PLUGINS=nri-resource-policy-topology-aware DEBUG=$DEBUG NORACE=$NORACE V=$DEBUG build-plugins-static
RUN cpgodir() { \
        mkdir -p $2; \
        find $1 -name '*.s' -o -name '*.go' | grep -v -E '/test/|/testdata/|_test.go' \
            | xargs -I {} cp --parents {} $2; \
    }; \
    if [ "$DEBUG" = 1 ]; then \
        cpgodir /go/pkg /debug-extras; \
        cpgodir /go/builder/pkg /debug-extras; \
        cpgodir /go/builder/cmd /debug-extras; \
        cpgodir /go/builder/vendor /debug-extras; \
        cpgodir /usr/local/go /debug-extras; \
    fi
FROM gcr.io/distroless/static
COPY --from=builder /go/builder/build/bin/nri-resource-policy-topology-aware /bin/nri-resource-policy-topology-aware
COPY --from=builder /debug-extras /
ENTRYPOINT ["/bin/nri-resource-policy-topology-aware"]
Building image
[background] read source files 110.70MB [done: 262ms]
[builder 1/10] FROM docker.io/library/golang:1.22-bullseye@sha256:05a23dcbf30f718d67e3dfe3483da0e4be465bc38acd9ded39b11f529336e744
[stage-1 1/3] FROM gcr.io/distroless/static:latest@sha256:f4a57e8ffd7ba407bdd0eb315bb54ef1f21a2100a7f032e9102e4da34fe7c196
[builder 7/10] COPY . . [cached]
[builder 6/10] RUN go mod download [cached]
[builder 5/10] COPY pkg/topology/ pkg/topology/ [cached]
[builder 4/10] COPY go.mod go.sum ./ [cached]
[builder 3/10] RUN mkdir -p /debug-extras; if [ "0" = 1 ]; then mkdir -p /debug-extras/bin; GOBIN=/debug-extras/bin go install -tags osusergo,netgo -ldflags "-extldflags=-static" github.com/go-delve/delve/cmd/dlv@latest; fi [cached]
[builder 2/10] WORKDIR /go/builder [cached]
[stage-1 3/3] COPY --from=builder /debug-extras / [cached]
[stage-1 2/3] COPY --from=builder /go/builder/build/bin/nri-resource-policy-topology-aware /bin/nri-resource-policy-topology-aware [cached]
[builder 10/10] RUN cpgodir() { mkdir -p $2; find $1 -name '*.s' -o -name '*.go' | grep -v -E '/test/|/testdata/|_test.go' | xargs -I {} cp --parents {} $2; }; if [ "0" = 1 ]; then cpgodir /go/pkg /debug-extras; cpgodir /go/builder/pkg /debug-extras; cpgodir /go/builder/cmd /debug-extras; cpgodir /go/builder/vendor /debug-extras; cpgodir /usr/local/go /debug-extras; fi [cached]
[builder 9/10] RUN make IMAGE_VERSION=${IMAGE_VERSION} BUILD_VERSION=${BUILD_VERSION} BUILD_BUILDID=${BUILD_BUILDID} PLUGINS=nri-resource-policy-topology-aware DEBUG=0 NORACE=0 V=0 build-plugins-static [cached]
[builder 8/10] RUN make clean [cached]
exporting to image
STEP 2/3 — Pushing ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2
Pushing with Docker client
Authenticating to image repo: ttl.sh
Sending image data
f4aee9e53c42: Layer already exists
218cd7e6d3ad: Layer already exists
1a73b54f556b: Layer already exists
b336e209998f: Layer already exists
5f70bf18a086: Layer already exists
bbb6cacb8c82: Layer already exists
af5aa97ebe6c: Layer already exists
6f1cdceb6a31: Layer already exists
4d049f83d9cf: Layer already exists
2a92d6ac9e4f: Layer already exists
8fa10c0194df: Layer already exists
03af25190641: Layer already exists
ddc6e550070c: Layer already exists
STEP 3/3 — Deploying
Running cmd: python3 /home/fmuyassarov/.local/share/tilt-dev/tilt_modules/github.com/tilt-dev/tilt-extensions/helm_resource/helm-apply-helper.py --set=config.instrumentation.prometheusExport=true --set=ports[0].name=metrics --set=ports[0].container=8891 --set=image.name=ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware
Running cmd: ['helm', 'upgrade', '--install', '--set=config.instrumentation.prometheusExport=true', '--set=ports[0].name=metrics', '--set=ports[0].container=8891', '--set=image.name=ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware', '--set', 'image.registry=ttl.sh', '--set', 'image.repository=ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware', '--set', 'image.tag=tilt-83a930344beac5f2', '--namespace', 'kube-system', 'controller-logs', './deployment/helm/topology-aware']
Release "controller-logs" does not exist. Installing it now.
NAME: controller-logs
LAST DEPLOYED: Tue Nov 19 21:11:38 2024
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
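
Note: the --set flags Tilt passes to helm above are equivalent to this values file (a sketch; the key paths are copied verbatim from the flags, not from the chart's documented values schema):

# values.yaml equivalent of the --set flags used in the helm install above
config:
  instrumentation:
    prometheusExport: true
ports:
  - name: metrics
    container: 8891
image:
  name: ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware
  registry: ttl.sh
  repository: ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware
  tag: tilt-83a930344beac5f2
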
Running cmd: ['helm', 'get', 'manifest', '--namespace', 'kube-system', 'controller-logs']
Running cmd: ['kubectl', 'get', '-oyaml', '-f', '-']
Objects applied to cluster:
→ nri-resource-policy-topology-aware:serviceaccount
→ nri-resource-policy-topology-aware:clusterrole
→ nri-resource-policy-topology-aware:clusterrolebinding
→ nri-resource-policy-topology-aware:role
→ nri-resource-policy-topology-aware:rolebinding
→ nri-resource-policy-topology-aware:daemonset
→ default:topologyawarepolicy
Step 1 - 1.22s (Building Dockerfile: [ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware])
Step 2 - 5.87s (Pushing ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2)
Step 3 - 2.38s (Deploying)
DONE IN: 9.48s
Tracking new pod rollout (nri-resource-policy-topology-aware-r8vld):
┊ Scheduled - <1s
┊ Initialized - <1s
┊ Ready - 1s
[event: pod kube-system/nri-resource-policy-topology-aware-r8vld] Pulling image "ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2"
[event: pod kube-system/nri-resource-policy-topology-aware-r8vld] Successfully pulled image "ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2" in 534ms (534ms including waiting). Image size: 30682892 bytes.
I1119 19:11:40.124885 1 config.go:125] logger configuration update &{Debug:[] LogSource:false Klog:{Add_dir_header:<nil> Alsologtostderr:<nil> Log_backtrace_at:<nil> Log_dir:<nil> Log_file:<nil> Log_file_max_size:<nil> Logtostderr:<nil> One_output:<nil> Skip_headers:<nil> Skip_log_headers:<nil> Stderrthreshold:<nil> V:<nil> Vmodule:<nil>}}
I1119 19:11:40.126463 1 metrics.go:21] registering collector cgroupstats...
I1119 19:11:40.129911 1 control.go:236] registering controller cpu...
I1119 19:11:40.129919 1 control.go:236] registering controller e2e-test...
W1119 19:11:40.130168 1 cache.go:983] existing cache directory "/var/lib/nri-resource-policy" has less strict permissions -rwxr-xr-x than expected -rwx--x---
I1119 19:11:40.134581 1 resource-manager.go:95] running as an NRI plugin...
I1119 19:11:40.134597 1 nri.go:50] creating plugin...
W1119 19:11:40.134606 1 cache.go:515] clearing all data for active policy ("topology-aware") from cache...
I1119 19:11:40.136907 1 policy.go:226] creating 'topology-aware' policy...
I1119 19:11:40.136915 1 resource-manager.go:290] topology-aware policy has no policy-specific metrics.
I1119 19:11:40.137014 1 log.go:470] starting 'topology-aware' policy version /build ...
I1119 19:11:40.137023 1 instrumentation.go:72] starting instrumentation services...
I1119 19:11:40.137029 1 http.go:143] HTTP server is disabled
I1119 19:11:40.137033 1 tracing.go:119] starting tracing exporter...
I1119 19:11:40.137044 1 tracing.go:133] tracing effectively disabled, no endpoint set
I1119 19:11:40.137049 1 metrics.go:101] metrics exporter disabled
I1119 19:11:40.137052 1 log.go:470] starting agent, waiting for initial configuration...
I1119 19:11:40.141683 1 log.go:470] node removed from config group ''
I1119 19:11:40.142640 1 agent.go:512] group-specific config updated
I1119 19:11:40.143295 1 log.go:470] acquired initial configuration default (generation 1):
I1119 19:11:40.143317 1 resource-manager.go:153] <initial config> metadata:
I1119 19:11:40.143322 1 resource-manager.go:153] <initial config> annotations:
I1119 19:11:40.143326 1 resource-manager.go:153] <initial config> meta.helm.sh/release-name: controller-logs
I1119 19:11:40.143339 1 resource-manager.go:153] <initial config> meta.helm.sh/release-namespace: kube-system
I1119 19:11:40.143348 1 resource-manager.go:153] <initial config> creationTimestamp: "2024-11-19T19:11:39Z"
I1119 19:11:40.143351 1 resource-manager.go:153] <initial config> generation: 1
I1119 19:11:40.143355 1 resource-manager.go:153] <initial config> labels:
I1119 19:11:40.143357 1 resource-manager.go:153] <initial config> app.kubernetes.io/instance: controller-logs
I1119 19:11:40.143360 1 resource-manager.go:153] <initial config> app.kubernetes.io/managed-by: Helm
I1119 19:11:40.143363 1 resource-manager.go:153] <initial config> app.kubernetes.io/name: nri-resource-policy-topology-aware
I1119 19:11:40.143366 1 resource-manager.go:153] <initial config> helm.sh/chart: nri-resource-policy-topology-aware-v0.0.0
I1119 19:11:40.143369 1 resource-manager.go:153] <initial config> managedFields:
I1119 19:11:40.143372 1 resource-manager.go:153] <initial config> - apiVersion: config.nri/v1alpha1
I1119 19:11:40.143376 1 resource-manager.go:153] <initial config> fieldsType: FieldsV1
I1119 19:11:40.143379 1 resource-manager.go:153] <initial config> fieldsV1:
I1119 19:11:40.143381 1 resource-manager.go:153] <initial config> f:metadata:
I1119 19:11:40.143385 1 resource-manager.go:153] <initial config> f:annotations:
I1119 19:11:40.143387 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143390 1 resource-manager.go:153] <initial config> f:meta.helm.sh/release-name: {}
I1119 19:11:40.143393 1 resource-manager.go:153] <initial config> f:meta.helm.sh/release-namespace: {}
I1119 19:11:40.143395 1 resource-manager.go:153] <initial config> f:labels:
I1119 19:11:40.143398 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143402 1 resource-manager.go:153] <initial config> f:app.kubernetes.io/instance: {}
I1119 19:11:40.143404 1 resource-manager.go:153] <initial config> f:app.kubernetes.io/managed-by: {}
I1119 19:11:40.143407 1 resource-manager.go:153] <initial config> f:app.kubernetes.io/name: {}
I1119 19:11:40.143409 1 resource-manager.go:153] <initial config> f:helm.sh/chart: {}
I1119 19:11:40.143412 1 resource-manager.go:153] <initial config> f:spec:
I1119 19:11:40.143414 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143418 1 resource-manager.go:153] <initial config> f:defaultCPUPriority: {}
I1119 19:11:40.143421 1 resource-manager.go:153] <initial config> f:instrumentation:
I1119 19:11:40.143424 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143426 1 resource-manager.go:153] <initial config> f:httpEndpoint: {}
I1119 19:11:40.143429 1 resource-manager.go:153] <initial config> f:prometheusExport: {}
I1119 19:11:40.143432 1 resource-manager.go:153] <initial config> f:reportPeriod: {}
I1119 19:11:40.143435 1 resource-manager.go:153] <initial config> f:samplingRatePerMillion: {}
I1119 19:11:40.143438 1 resource-manager.go:153] <initial config> f:log:
I1119 19:11:40.143440 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143443 1 resource-manager.go:153] <initial config> f:klog:
I1119 19:11:40.143446 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143452 1 resource-manager.go:153] <initial config> f:skip_headers: {}
I1119 19:11:40.143455 1 resource-manager.go:153] <initial config> f:source: {}
I1119 19:11:40.143458 1 resource-manager.go:153] <initial config> f:pinCPU: {}
I1119 19:11:40.143461 1 resource-manager.go:153] <initial config> f:pinMemory: {}
I1119 19:11:40.143464 1 resource-manager.go:153] <initial config> f:preferIsolatedCPUs: {}
I1119 19:11:40.143467 1 resource-manager.go:153] <initial config> f:reservedResources:
I1119 19:11:40.143470 1 resource-manager.go:153] <initial config> .: {}
I1119 19:11:40.143473 1 resource-manager.go:153] <initial config> f:cpu: {}
I1119 19:11:40.143476 1 resource-manager.go:153] <initial config> manager: helm
I1119 19:11:40.143479 1 resource-manager.go:153] <initial config> operation: Update
I1119 19:11:40.143482 1 resource-manager.go:153] <initial config> time: "2024-11-19T19:11:39Z"
I1119 19:11:40.143485 1 resource-manager.go:153] <initial config> name: default
I1119 19:11:40.143488 1 resource-manager.go:153] <initial config> namespace: kube-system
I1119 19:11:40.143490 1 resource-manager.go:153] <initial config> resourceVersion: "3702"
I1119 19:11:40.143493 1 resource-manager.go:153] <initial config> uid: b53e7104-f526-49dd-8a05-acc592c06276
I1119 19:11:40.143496 1 resource-manager.go:153] <initial config> spec:
I1119 19:11:40.143499 1 resource-manager.go:153] <initial config> control:
I1119 19:11:40.143502 1 resource-manager.go:153] <initial config> cpu: null
I1119 19:11:40.143506 1 resource-manager.go:153] <initial config> defaultCPUPriority: none
I1119 19:11:40.143508 1 resource-manager.go:153] <initial config> instrumentation:
I1119 19:11:40.143512 1 resource-manager.go:153] <initial config> httpEndpoint: :8891
I1119 19:11:40.143515 1 resource-manager.go:153] <initial config> prometheusExport: true
I1119 19:11:40.143518 1 resource-manager.go:153] <initial config> reportPeriod: 1m0s
I1119 19:11:40.143520 1 resource-manager.go:153] <initial config> log:
I1119 19:11:40.143523 1 resource-manager.go:153] <initial config> klog:
I1119 19:11:40.143526 1 resource-manager.go:153] <initial config> skip_headers: true
I1119 19:11:40.143529 1 resource-manager.go:153] <initial config> source: true
I1119 19:11:40.143532 1 resource-manager.go:153] <initial config> pinCPU: true
I1119 19:11:40.143534 1 resource-manager.go:153] <initial config> pinMemory: true
I1119 19:11:40.143537 1 resource-manager.go:153] <initial config> preferIsolatedCPUs: true
I1119 19:11:40.143539 1 resource-manager.go:153] <initial config> reservedResources:
I1119 19:11:40.143542 1 resource-manager.go:153] <initial config> cpu: 750m
I1119 19:11:40.143545 1 resource-manager.go:153] <initial config> status:
I1119 19:11:40.143548 1 resource-manager.go:153] <initial config> nodes: null
I1119 19:11:40.143551 1 resource-manager.go:153] <initial config>
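
Note: for readability, the spec of the acquired configuration above, restated as plain YAML (content taken line-for-line from the <initial config> dump; indentation reconstructed per the CR's standard layout):

# TopologyAwarePolicy "default" (generation 1), spec and status as acquired
spec:
  control:
    cpu: null
  defaultCPUPriority: none
  instrumentation:
    httpEndpoint: ":8891"
    prometheusExport: true
    reportPeriod: 1m0s
  log:
    klog:
      skip_headers: true
    source: true
  pinCPU: true
  pinMemory: true
  preferIsolatedCPUs: true
  reservedResources:
    cpu: 750m
status:
  nodes: null
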
I1119 19:11:40.143556 1 resource-manager.go:171] starting resource manager...
I1119 19:11:40.143578 1 config.go:125] logger configuration update &{Debug:[] LogSource:true Klog:{Add_dir_header:<nil> Alsologtostderr:<nil> Log_backtrace_at:<nil> Log_dir:<nil> Log_file:<nil> Log_file_max_size:<nil> Logtostderr:<nil> One_output:<nil> Skip_headers:0xc0004b0f7c Skip_log_headers:<nil> Stderrthreshold:<nil> V:<nil> Vmodule:<nil>}}
I: [ http ] stopping HTTP server...
I: [ http ] starting HTTP server...
I: [ tracing ] starting tracing exporter...
I: [ tracing ] tracing effectively disabled, no endpoint set
I: [ metrics ] starting metrics exporter...
I: [ sysfs ] NUMA nodes with CPUs: 0
I: [ sysfs ] NUMA nodes with (any) memory: 0
I: [ sysfs ] NUMA nodes with normal memory: 0
I: [ sysfs ] node 0 has DRAM memory
[ sst ] DEBUG: sst device "/host/dev/isst_interface" does not exist
I: [ sysfs ] Speed Select Technology (SST) support not detected
I: [ policy ] activating 'topology-aware' policy...
I: [ policy ] initial configuration: &{PinCPU:true PinMemory:true PreferIsolated:true PreferShared:false ColocatePods:false ColocateNamespaces:false ReservedPoolNamespaces:[] AvailableResources:map[] ReservedResources:map[cpu:750m] DefaultCPUPriority:none}
I: [ cpuallocator ] picked cache level 2 for CPU grouping
I: [ libmem ] memory allocator configuration
I: [ libmem ] DRAM node #0 with 33345736704 memory (31.056G)
I: [ libmem ] distance vector [10]
I: [ libmem ] close CPUs: 0-15
I: [ policy ] implicit affinity colocate-pods is disabled
I: [ policy ] implicit affinity colocate-namespaces is disabled
I: [ policy ] ***** default CPU priority is none
I: [ policy ] <post-start> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ nri-plugin ] starting plugin...
I: [ nri-plugin ] creating plugin stub...
time="2024-11-19T19:11:40Z" level=info msg="Created plugin 90-resource-manager (nri-resource-policy-topology-aware, handles RunPodSandbox,StopPodSandbox,RemovePodSandbox,CreateContainer,StartContainer,UpdateContainer,StopContainer,RemoveContainer)"
time="2024-11-19T19:11:40Z" level=info msg="Registering plugin 90-resource-manager..."
time="2024-11-19T19:11:40Z" level=info msg="Configuring plugin 90-resource-manager for runtime containerd/v1.7.17..."
I: [ nri-plugin ] => Configure, runtime containerd v1.7.17
time="2024-11-19T19:11:40Z" level=info msg="Subscribing plugin 90-resource-manager (nri-resource-policy-topology-aware) for events RunPodSandbox,StopPodSandbox,RemovePodSandbox,CreateContainer,StartContainer,UpdateContainer,StopContainer,RemoveContainer"
time="2024-11-19T19:11:40Z" level=info msg="Started plugin 90-resource-manager..."
I: [ resource-control ] syncing controllers with configuration...
I: [ resource-control ] starting controller cpu
I: [ cpu ] empty configuration, disabling controller
I: [ resource-control ] controller cpu is disabled
I: [ resource-control ] starting controller e2e-test
I: [ resource-control ] controller e2e-test is enabled and running
I: [ resource-manager ] up and running
I: [ nri-plugin ] => Synchronize
I: [ nri-plugin ] synchronizing cache state with NRI runtime...
I: [ nri-plugin ] discovered stale container <unknown-pod 43ac1c42499962de53d0f313e9255c3ad25111d4f1fcb3773fdafb7aa685c976>/kube-apiserver (3f0c6d1b2abc738abf8586184af5c85884cc2261d9523907728ea1e6f0d081b9)...
I: [ nri-plugin ] discovered stale container <unknown-pod b8afa36c7d2b394312cc688b01d56921d43a99c0ee1fffaef1fbedebf79ab1f0>/calico-node (8b2adaaa98d741b20d3fc9640bb678ff78cc754e7b633203d865db6e55282c72)...
I: [ nri-plugin ] discovered stale container <unknown-pod 901cdd7f242be2d9489e7e307aa7dbfe236debe49846d2a9b0d560d80e7484ba>/calico-apiserver (b7a85ba2dc2e1c3a0c8b1ba89e4a7bad89f413ae3cdfb559521c9c5e6bae7d21)...
I: [ nri-plugin ] discovered stale container <unknown-pod a751401c037ea738d16cfde43bcb71706c9c0043b4d6be52ee8e7f527a87aef5>/calico-typha (f0151247d9b56a31eae3af53e2937d09ad9763824b73755782b66b675c5722d5)...
I: [ nri-plugin ] discovered stale container <unknown-pod d7017096cc698fc692a60249af582ffd28d4269561c199a141933ea350c8383c>/coredns (024e03cb5cc86c4a1580b60089ec95201190553b8c419a127a6fbcdff3db88a3)...
I: [ nri-plugin ] discovered stale container <unknown-pod e9226b79197e037d2078ac4543d53a6c80b7cb655977095a23ea7c7b4316b236>/kube-proxy (f8d818854b77daa40ff8c471d669bb5fd075243163c2e522d9ac2f83524787af)...
I: [ nri-plugin ] discovered stale container <unknown-pod 7ba58ade812d88482be531fb18949fa36060c101c9bfe9d25f267bb226c0e29a>/nri-resource-policy-topology-aware (06b8fc282724b49f713719b5ba32bbc0fad7c8b285b361d4781b674a14df78f7)...
I: [ nri-plugin ] discovered stale container <unknown-pod c4317eb75a8cdaaf427ffe64cfb3fa1900b3ce0e3062f80eea782cfe918a5dd8>/etcd (49d7beae2a6c2d53d711211859fd335b641bfc409f50a51945db94a8a766d1f2)...
I: [ nri-plugin ] discovered stale container <unknown-pod 8e5f4bd4bd0b7ce85a51df364ef4c3c1616ae77ff0706dbf1558b2f5ece47c23>/tigera-operator (c34c3a55c69ad4286b8acd7570f5e63e0614d6fe92e94216385d5917d842f1f5)...
I: [ nri-plugin ] discovered stale container <unknown-pod 6dfc65fb2453d9a4861ecd9244b2339b1d9993de1ce65c9003ef6db44a24b10c>/coredns (2ee092fe6290337aaae9452f949d1c05664d45fdd22302d3da6bc8679dd326e0)...
I: [ nri-plugin ] discovered stale container <unknown-pod 01611531158fe9773ee23540c969589d9bbba9bb001ced91b37f0aad680bccca>/kube-controller-manager (4f74a640fc508fed4e40c15510653a417579b27c2bc6e5213034d0ccc399bd89)...
I: [ nri-plugin ] discovered stale container <unknown-pod a5d4ea86dd3cbcb168c4cf064b75ab21f787bcf64601375d36b7a423643c5c09>/calico-apiserver (63ab036ce068d47dd4a1dedfe6925f4db8651e667be6a9d9a4bb412aa4304002)...
I: [ nri-plugin ] discovered stale container <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/csi-node-driver-registrar (947f4ae06259735a2fd5bf899f3b0ab20a0a5d2746dfc4e066be33ad37cdf2ca)...
I: [ nri-plugin ] discovered stale container <unknown-pod c8213c2bcc5cc9d25809b1ec2ee1e4c829dd2adf02ce278f2ffbe0695c5a4c46>/kube-scheduler (9a5b9505bbccbf9083f4aad7af913cc5fdde68cff33e36a4c649e43a4218f9aa)...
I: [ nri-plugin ] discovered stale container <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/calico-csi (a2d8cbea258b703d2f7078c3d4646a51816cf935b40d77c25e5bfef98dfe037a)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver (a0585b7210654108ed24f58bb1b8a41e7c09aa1aeab2dca15475fd83a5e0c2cb)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager (e24cbfe2c5a96e354d07ae3700b96202f756b001ef97868a485ccab7a4c391e7)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-typha-84bcd45767-8jmds/calico-typha (5af6afbd305f7e677948f5213dde3ff01143864c04ea5301846017eaba10bb3b)...
I: [ nri-plugin ] discovered created/running container calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar (e8b9251c7778296c593759b023c147c07e59b551d3e9130c1b35f25801bc9b7c)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler (c111a8b93e58e048e5e82258336fb6bffe18e8b18a165a677a4a8bd4162bed51)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-proxy-ptvqv/kube-proxy (be06783ab70396cb46542d249a8dc67405a09307b59597dcd112f857641552ed)...
I: [ nri-plugin ] discovered created/running container calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver (95ef4f0691dd7bbef6dcff9ae1e3bc37c73571bb0621fe27fcd8e1d5f6174832)...
I: [ nri-plugin ] discovered created/running container kube-system/coredns-7c65d6cfc9-kmlzs/coredns (e4309ba59a19a8b306754322064b8fbf8fed8ee9f0d640a8fd5d6d1051fe9194)...
I: [ nri-plugin ] discovered created/running container kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware (0cc3da00ad26d05065a985a7462112d8e6978f68230b50ccf278918ebd8e63da)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-node-p9gtd/calico-node (de93c20876e58db5ce0fb260bc895359ec8870d93584e49d2fc95650bb67bfaa)...
I: [ nri-plugin ] discovered created/running container calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver (e8838526372d173c8f136425868fce24a1526d57dcafcee9aaf38e24ff83185e)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers (08d804a6f705b2c1610ea0b99dfd18d6b9555e64c8eb02417e30b59b925d9413)...
I: [ nri-plugin ] discovered created/running container kube-system/etcd-fmuyassa-mobl3/etcd (b9b45260d8e733b7d247b1345dc20a43aa2d8e8f043a3d3024cd70bef279199e)...
I: [ nri-plugin ] discovered created/running container tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator (f90472b17da770daa63ee6758141098d3ff5912a06201caf2d637a87fe70f892)...
I: [ nri-plugin ] discovered created/running container kube-system/coredns-7c65d6cfc9-9zrr4/coredns (aab02f67c22a11954189b9b947a48d0f1b8a368b67b2c77e8801f4ad09f2f0fb)...
I: [ nri-plugin ] discovered created/running container calico-system/csi-node-driver-6bdjw/calico-csi (253f6c9aca8a7c0d772ebdfb3dd9538ac0fb87f99ced7d65cd421153aba9acbd)...
I: [ nri-plugin ] kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver: mapped container to a0585b7210654108ed24f58bb1b8a41e7c09aa1aeab2dca15475fd83a5e0c2cb
I: [ nri-plugin ] kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager: mapped container to e24cbfe2c5a96e354d07ae3700b96202f756b001ef97868a485ccab7a4c391e7
I: [ nri-plugin ] calico-system/calico-typha-84bcd45767-8jmds/calico-typha: mapped container to 5af6afbd305f7e677948f5213dde3ff01143864c04ea5301846017eaba10bb3b
I: [ nri-plugin ] calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar: mapped container to e8b9251c7778296c593759b023c147c07e59b551d3e9130c1b35f25801bc9b7c
I: [ nri-plugin ] kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler: mapped container to c111a8b93e58e048e5e82258336fb6bffe18e8b18a165a677a4a8bd4162bed51
I: [ nri-plugin ] kube-system/kube-proxy-ptvqv/kube-proxy: mapped container to be06783ab70396cb46542d249a8dc67405a09307b59597dcd112f857641552ed
I: [ nri-plugin ] calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver: mapped container to 95ef4f0691dd7bbef6dcff9ae1e3bc37c73571bb0621fe27fcd8e1d5f6174832
I: [ nri-plugin ] kube-system/coredns-7c65d6cfc9-kmlzs/coredns: mapped container to e4309ba59a19a8b306754322064b8fbf8fed8ee9f0d640a8fd5d6d1051fe9194
I: [ nri-plugin ] kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware: mapped container to 0cc3da00ad26d05065a985a7462112d8e6978f68230b50ccf278918ebd8e63da
I: [ nri-plugin ] calico-system/calico-node-p9gtd/calico-node: mapped container to de93c20876e58db5ce0fb260bc895359ec8870d93584e49d2fc95650bb67bfaa
I: [ nri-plugin ] calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver: mapped container to e8838526372d173c8f136425868fce24a1526d57dcafcee9aaf38e24ff83185e
I: [ nri-plugin ] calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers: mapped container to 08d804a6f705b2c1610ea0b99dfd18d6b9555e64c8eb02417e30b59b925d9413
I: [ nri-plugin ] kube-system/etcd-fmuyassa-mobl3/etcd: mapped container to b9b45260d8e733b7d247b1345dc20a43aa2d8e8f043a3d3024cd70bef279199e
I: [ nri-plugin ] tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator: mapped container to f90472b17da770daa63ee6758141098d3ff5912a06201caf2d637a87fe70f892
I: [ nri-plugin ] kube-system/coredns-7c65d6cfc9-9zrr4/coredns: mapped container to aab02f67c22a11954189b9b947a48d0f1b8a368b67b2c77e8801f4ad09f2f0fb
I: [ nri-plugin ] calico-system/csi-node-driver-6bdjw/calico-csi: mapped container to 253f6c9aca8a7c0d772ebdfb3dd9538ac0fb87f99ced7d65cd421153aba9acbd
I: [ policy ] * releasing resources allocated to <unknown-pod 43ac1c42499962de53d0f313e9255c3ad25111d4f1fcb3773fdafb7aa685c976>/kube-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 43ac1c42499962de53d0f313e9255c3ad25111d4f1fcb3773fdafb7aa685c976>/kube-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod b8afa36c7d2b394312cc688b01d56921d43a99c0ee1fffaef1fbedebf79ab1f0>/calico-node
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod b8afa36c7d2b394312cc688b01d56921d43a99c0ee1fffaef1fbedebf79ab1f0>/calico-node> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 901cdd7f242be2d9489e7e307aa7dbfe236debe49846d2a9b0d560d80e7484ba>/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 901cdd7f242be2d9489e7e307aa7dbfe236debe49846d2a9b0d560d80e7484ba>/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod a751401c037ea738d16cfde43bcb71706c9c0043b4d6be52ee8e7f527a87aef5>/calico-typha
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod a751401c037ea738d16cfde43bcb71706c9c0043b4d6be52ee8e7f527a87aef5>/calico-typha> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod d7017096cc698fc692a60249af582ffd28d4269561c199a141933ea350c8383c>/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod d7017096cc698fc692a60249af582ffd28d4269561c199a141933ea350c8383c>/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod e9226b79197e037d2078ac4543d53a6c80b7cb655977095a23ea7c7b4316b236>/kube-proxy
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod e9226b79197e037d2078ac4543d53a6c80b7cb655977095a23ea7c7b4316b236>/kube-proxy> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 7ba58ade812d88482be531fb18949fa36060c101c9bfe9d25f267bb226c0e29a>/nri-resource-policy-topology-aware
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 7ba58ade812d88482be531fb18949fa36060c101c9bfe9d25f267bb226c0e29a>/nri-resource-policy-topology-aware> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod c4317eb75a8cdaaf427ffe64cfb3fa1900b3ce0e3062f80eea782cfe918a5dd8>/etcd
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod c4317eb75a8cdaaf427ffe64cfb3fa1900b3ce0e3062f80eea782cfe918a5dd8>/etcd> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 8e5f4bd4bd0b7ce85a51df364ef4c3c1616ae77ff0706dbf1558b2f5ece47c23>/tigera-operator
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 8e5f4bd4bd0b7ce85a51df364ef4c3c1616ae77ff0706dbf1558b2f5ece47c23>/tigera-operator> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 6dfc65fb2453d9a4861ecd9244b2339b1d9993de1ce65c9003ef6db44a24b10c>/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 6dfc65fb2453d9a4861ecd9244b2339b1d9993de1ce65c9003ef6db44a24b10c>/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 01611531158fe9773ee23540c969589d9bbba9bb001ced91b37f0aad680bccca>/kube-controller-manager
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 01611531158fe9773ee23540c969589d9bbba9bb001ced91b37f0aad680bccca>/kube-controller-manager> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod a5d4ea86dd3cbcb168c4cf064b75ab21f787bcf64601375d36b7a423643c5c09>/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod a5d4ea86dd3cbcb168c4cf064b75ab21f787bcf64601375d36b7a423643c5c09>/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/csi-node-driver-registrar
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/csi-node-driver-registrar> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod c8213c2bcc5cc9d25809b1ec2ee1e4c829dd2adf02ce278f2ffbe0695c5a4c46>/kube-scheduler
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod c8213c2bcc5cc9d25809b1ec2ee1e4c829dd2adf02ce278f2ffbe0695c5a4c46>/kube-scheduler> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/calico-csi
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 46f9c2cc0e210fd3a9d8eff9f8f40b7aa2db860b3412f35ff368da0c0292cd5c>/calico-csi> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-typha-84bcd45767-8jmds/calico-typha
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-typha-84bcd45767-8jmds/calico-typha> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-proxy-ptvqv/kube-proxy
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-proxy-ptvqv/kube-proxy> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/coredns-7c65d6cfc9-kmlzs/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/coredns-7c65d6cfc9-kmlzs/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-node-p9gtd/calico-node
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-node-p9gtd/calico-node> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/etcd-fmuyassa-mobl3/etcd
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/etcd-fmuyassa-mobl3/etcd> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/coredns-7c65d6cfc9-9zrr4/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/coredns-7c65d6cfc9-9zrr4/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/csi-node-driver-6bdjw/calico-csi
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/csi-node-driver-6bdjw/calico-csi> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver> total CPU granted: 250m (0 exclusive + 250m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager> total CPU granted: 449m (0 exclusive + 449m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-typha-84bcd45767-8jmds/calico-typha to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] <post-alloc calico-system/calico-typha-84bcd45767-8jmds/calico-typha> total CPU granted: 449m (0 exclusive + 449m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar> total CPU granted: 449m (0 exclusive + 449m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler> total CPU granted: 549m (0 exclusive + 549m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-proxy-ptvqv/kube-proxy to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-proxy-ptvqv/kube-proxy> total CPU granted: 549m (0 exclusive + 549m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] <post-alloc calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver> total CPU granted: 549m (0 exclusive + 549m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)>
I: [ policy ] => pinning kube-system/coredns-7c65d6cfc9-kmlzs/coredns to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/coredns-7c65d6cfc9-kmlzs/coredns> total CPU granted: 649m (0 exclusive + 649m shared), total memory granted: 170.00M
W: [ policy ] possible misconfiguration of reserved resources:
W: [ policy ] socket #0: allocatable <socket #0 allocatable: CPU: isolated:12-15, reserved:8 (allocatable: 351m), grantedReserved:649m, sharable:0-7,9-11 (allocatable:11000m), MemLimit: 30.89G>
W: [ policy ] kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware: needs 500 reserved, only 351 available
W: [ policy ] falling back to using normal unreserved CPUs instead...
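
Note: the warning above is plain pool arithmetic. The reserved cpuset {8} holds 1000m of capacity, of which 649m is already granted (250m kube-apiserver + 199m kube-controller-manager + 100m kube-scheduler + 100m coredns), leaving 351m; the plugin's own 500m request does not fit, so the next grant falls back to the shared CPUs. One plausible remedy, assuming the chart forwards config.* values into the policy spec the same way config.instrumentation.prometheusExport was forwarded above, is to grow the reserved pool:

# Sketch only: the key path mirrors the --set flags above; 1500m is an
# illustrative value chosen to cover 649m + 500m, not taken from the log.
config:
  reservedResources:
    cpu: 1500m
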
I: [ policy ] * applying grant <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware> total CPU granted: 1149m (0 exclusive + 1149m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-node-p9gtd/calico-node to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/calico-node-p9gtd/calico-node> total CPU granted: 1149m (0 exclusive + 1149m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver> total CPU granted: 1149m (0 exclusive + 1149m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers> total CPU granted: 1149m (0 exclusive + 1149m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/etcd-fmuyassa-mobl3/etcd to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/etcd-fmuyassa-mobl3/etcd> total CPU granted: 1249m (0 exclusive + 1249m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator> total CPU granted: 1249m (0 exclusive + 1249m shared), total memory granted: 170.00M | |
I: [ policy ] * applying grant <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> | |
I: [ policy ] => pinning kube-system/coredns-7c65d6cfc9-9zrr4/coredns to (reserved) cpuset 8 | |
I: [ policy ] * updating shared allocations affected by <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> | |
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations | |
I: [ policy ] <post-alloc kube-system/coredns-7c65d6cfc9-9zrr4/coredns> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
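[editor's note] The running totals in the <post-alloc> lines can be reproduced from the per-grant numbers in the trace: each reserved or shared grant adds its millicpu and memory to the node-wide sums (etcd adds 100m, this coredns adds 100m plus 170M). A minimal accounting sketch in Go; the types are hypothetical stand-ins, the figures come straight from the log above.

package main

import "fmt"

// grant is a hypothetical stand-in for the policy's per-container grant;
// the numbers below are taken from the trace (etcd: 100m reserved,
// coredns: 100m reserved + 170M memory).
type grant struct {
	name     string
	milliCPU int64 // reserved or shared CPU, in millicores
	memBytes int64
}

func main() {
	totalCPU, totalMem := int64(1149), int64(170_000_000) // state after <post-alloc calico-apiserver>
	for _, g := range []grant{
		{"kube-system/etcd", 100, 0},
		{"kube-system/coredns-9zrr4", 100, 170_000_000},
	} {
		totalCPU += g.milliCPU
		totalMem += g.memBytes
		fmt.Printf("<post-alloc %s> total CPU granted: %dm, total memory granted: %.2fM\n",
			g.name, totalCPU, float64(totalMem)/1e6)
	}
	// prints 1249m/170.00M, then 1349m/340.00M — matching the trace
}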
I: [ policy ] * applying grant <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning calico-system/csi-node-driver-6bdjw/calico-csi to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-alloc calico-system/csi-node-driver-6bdjw/calico-csi> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ policy ] <post-sync> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= Synchronize | |
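[editor's note] The => Synchronize ... <= Synchronize exchange above is the NRI handshake in which the runtime replays all pre-existing pods and containers so the plugin can rebuild its grants before new events arrive. A minimal plugin skeleton sketched against github.com/containerd/nri pkg/stub; handler signatures have changed across nri releases, so treat the Synchronize signature as an assumption and check it against the version you vendor.

package main

import (
	"context"
	"log"

	"github.com/containerd/nri/pkg/api"
	"github.com/containerd/nri/pkg/stub"
)

type plugin struct{}

// Synchronize receives the full pod/container state on (re)connection;
// a real policy would re-derive grants here and return repinning updates.
func (p *plugin) Synchronize(ctx context.Context, pods []*api.PodSandbox, ctrs []*api.Container) ([]*api.ContainerUpdate, error) {
	log.Printf("synchronizing %d pods, %d containers", len(pods), len(ctrs))
	return nil, nil // no repinning in this skeleton
}

func main() {
	s, err := stub.New(&plugin{}, stub.WithPluginName("demo-policy"), stub.WithPluginIdx("90"))
	if err != nil {
		log.Fatal(err)
	}
	if err := s.Run(context.Background()); err != nil {
		log.Fatal(err)
	}
}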
I: [ nri-plugin ] => UpdateContainer kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware (0cc3da00ad26d05065a985a7462112d8e6978f68230b50ccf278918ebd8e63da) | |
W: [ nri-plugin ] UpdateContainer with identical resources, short-circuiting it... | |
I: [ nri-plugin ] <= UpdateContainer | |
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-96x45 | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-96x45/nginx (1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-96x45/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-96x45/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-96x45/nginx: mapped container to 1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614 | |
I: [ nri-plugin ] <= CreateContainer | |
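[editor's note] Every CreateContainer above follows the same two-phase pattern: pin the new container to its granted cpuset, then walk all existing grants and refresh the shared ones, while grants on the reserved CPU (8) are reported as "not affected (only reserved CPUs)". A sketch of that update walk, using hypothetical local types rather than the plugin's real ones.

package main

import "fmt"

type CPUType int

const (
	Normal CPUType = iota
	Reserved
)

type Grant struct {
	Workload string
	Type     CPUType
	Cpuset   string
}

// updateSharedAllocations refreshes every shared grant with the current
// shared cpuset of the socket; reserved grants are skipped, mirroring the
// "not affected (only reserved CPUs)" lines in the trace.
func updateSharedAllocations(grants []Grant, shared string) {
	for i := range grants {
		if grants[i].Type == Reserved {
			fmt.Printf("%s not affected (only reserved CPUs)\n", grants[i].Workload)
			continue
		}
		grants[i].Cpuset = shared
		fmt.Printf("updating %s with shared CPUs: %s\n", grants[i].Workload, shared)
	}
}

func main() {
	grants := []Grant{
		{"kube-system/coredns", Reserved, "8"},
		{"default/nginx", Normal, ""},
	}
	updateSharedAllocations(grants, "0-7,9-11") // CPU 8 stays reserved
}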
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-rrs9z | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-rrs9z/nginx (465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-rrs9z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-rrs9z/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-rrs9z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-rrs9z/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-rrs9z/nginx: mapped container to 465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-96x45/nginx (1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-96x45/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-96x45/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
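[editor's note] The coldstart lines above are a no-op here: as I understand the topology-aware policy's cold-start feature, a container that requests it starts with a restricted memory set which is widened once the configured period elapses, and none of these pods request one. A sketch of that gating with a one-shot timer; the function and callback are hypothetical.

package main

import (
	"fmt"
	"time"
)

// triggerColdstart arms a one-shot timer when a coldstart period was
// requested for the container; with a zero period it is a no-op, which
// is the "no coldstart, nothing to do" case in the trace.
func triggerColdstart(name string, period time.Duration, expand func(string)) {
	if period <= 0 {
		fmt.Printf("coldstart: no coldstart for %s, nothing to do\n", name)
		return
	}
	fmt.Printf("coldstart: %s for %s\n", period, name)
	time.AfterFunc(period, func() { expand(name) })
}

func main() {
	triggerColdstart("default/nginx", 0, nil) // no-op, as in the trace
	done := make(chan struct{})
	triggerColdstart("default/annotated", 50*time.Millisecond, func(n string) {
		fmt.Printf("coldstart over, widening memory set for %s\n", n)
		close(done)
	})
	<-done
}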
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-rrs9z/nginx (465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-rrs9z/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-rrs9z/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => UpdateContainer default/nginx-deployment-d556bf558-96x45/nginx (1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614) | |
W: [ nri-plugin ] UpdateContainer with identical resources, short-circuiting it... | |
I: [ nri-plugin ] <= UpdateContainer | |
I: [ nri-plugin ] => UpdateContainer default/nginx-deployment-d556bf558-rrs9z/nginx (465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c) | |
W: [ nri-plugin ] UpdateContainer with identical resources, short-circuiting it... | |
I: [ nri-plugin ] <= UpdateContainer | |
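[editor's note] The repeated "UpdateContainer with identical resources, short-circuiting it..." warnings show the plugin comparing the runtime's requested resources against what it already granted and skipping the reallocation when nothing changed (the kubelet periodically re-sends unchanged resource specs). A minimal sketch of that comparison; Resources is a hypothetical stand-in for the plugin's per-container resource view.

package main

import (
	"fmt"
	"reflect"
)

// Resources is a hypothetical per-container resource view
// (cpuset plus CPU/memory amounts).
type Resources struct {
	Cpuset   string
	MilliCPU int64
	Memory   int64
}

// updateContainer short-circuits when the requested resources are
// identical to the current grant; only real changes re-run allocation.
func updateContainer(current, requested Resources) (changed bool) {
	if reflect.DeepEqual(current, requested) {
		fmt.Println("identical resources, short-circuiting")
		return false
	}
	// ...otherwise re-run the allocation and emit an update...
	return true
}

func main() {
	r := Resources{Cpuset: "0-7,9-11", MilliCPU: 500}
	updateContainer(r, r) // prints the short-circuit message
}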
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-rrs9z/nginx (465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-rrs9z/nginx: unmapped container (465506910afa0668b8e42ae12aa2f6eb4d1010dc68500a1d5893716b0f23d43c) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-rrs9z/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-rrs9z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-rrs9z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-rrs9z/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-96x45/nginx (1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-96x45/nginx: unmapped container (1641d0067909f4f1411736d9df2a1f4ca8974fc7f653127da6125c3cdfbec614) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-96x45/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-96x45/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-96x45/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
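[editor's note] On StopContainer the grant is released and the shared allocations are walked again, yet the <post-release> totals stay at 1349m: the nginx containers made no CPU request, so their grants contribute 0m to the books. A sketch of that release-side accounting; the types and helper are hypothetical.

package main

import "fmt"

type Grant struct {
	MilliCPU int64
}

// releaseGrant drops a container's grant and updates the running total,
// mirroring the <post-release> lines: a container with no CPU request
// holds a 0m grant, so the total is unchanged when it goes away.
func releaseGrant(grants map[string]Grant, name string, totalMilli *int64) {
	g, ok := grants[name]
	if !ok {
		return
	}
	*totalMilli -= g.MilliCPU
	delete(grants, name)
	fmt.Printf("<post-release %s> total CPU granted: %dm\n", name, *totalMilli)
}

func main() {
	total := int64(1349)
	grants := map[string]Grant{
		"default/nginx": {MilliCPU: 0}, // no CPU request
	}
	releaseGrant(grants, "default/nginx", &total) // total stays at 1349m
}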
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-rrs9z | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-96x45 | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-8qp4b | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-8qp4b/nginx (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-8qp4b/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-8qp4b/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-8qp4b/nginx: mapped container to d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-8qp4b/nginx (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-8qp4b/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-8qp4b/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-54cfz | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-54cfz/nginx (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-54cfz/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-54cfz/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-54cfz/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-54cfz/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-54cfz/nginx: mapped container to fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8 | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-54cfz/nginx (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-54cfz/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-54cfz/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => UpdateContainer default/nginx-deployment-d556bf558-54cfz/nginx (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
W: [ nri-plugin ] UpdateContainer with identical resources, short-circuiting it... | |
I: [ nri-plugin ] <= UpdateContainer | |
I: [ nri-plugin ] => UpdateContainer default/nginx-deployment-d556bf558-8qp4b/nginx (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
W: [ nri-plugin ] UpdateContainer with identical resources, short-circuiting it... | |
I: [ nri-plugin ] <= UpdateContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-54cfz/nginx (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-54cfz/nginx: unmapped container (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-54cfz/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-54cfz/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-54cfz/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-54cfz/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-8qp4b/nginx (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-8qp4b/nginx: unmapped container (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-8qp4b/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-8qp4b/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-9zrr4/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-kmlzs/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-node-p9gtd/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-proxy-ptvqv/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-6bdjw/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-vc6t9/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-66c57f9c4-hd5f4/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-5mbcj/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: reserved, reserved: 8 (250m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/nri-resource-policy-topology-aware-r8vld/nri-resource-policy-topology-aware from socket #0: cputype: normal, shared: 0-7,9-11 (500m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-56cd955578-4sv8l/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-84bcd45767-8jmds/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-8qp4b/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-8qp4b | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-54cfz | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-8qp4b/nginx (d6a18da9cb33359f6dc7139f0a414dff357b56a2b4681caefb301d80464d027d) | |
I: [ nri-plugin ] <= RemoveContainer | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-54cfz/nginx (fee345c6bb116311b119de55f2984acc3a8084aac05acb0c2ecd272b00ebc3d8) | |
I: [ nri-plugin ] <= RemoveContainer |