no cpu manager
STEP 1/3 — Building Dockerfile: [ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware]
Building Dockerfile for platform linux/amd64:
ARG GO_VERSION=1.22
FROM golang:${GO_VERSION}-bullseye AS builder
ARG IMAGE_VERSION
ARG BUILD_VERSION
ARG BUILD_BUILDID
ARG DEBUG=0
ARG NORACE=0
WORKDIR /go/builder
RUN mkdir -p /debug-extras; \
    if [ "$DEBUG" = 1 ]; then \
        mkdir -p /debug-extras/bin; \
        GOBIN=/debug-extras/bin go install -tags osusergo,netgo -ldflags "-extldflags=-static" github.com/go-delve/delve/cmd/dlv@latest; \
    fi
# Fetch go dependencies in a separate layer for caching
COPY go.mod go.sum ./
COPY pkg/topology/ pkg/topology/
RUN go mod download
# Build nri-resource-policy
COPY . .
RUN make clean
RUN make IMAGE_VERSION=${IMAGE_VERSION} BUILD_VERSION=${BUILD_VERSION} BUILD_BUILDID=${BUILD_BUILDID} PLUGINS=nri-resource-policy-topology-aware DEBUG=$DEBUG NORACE=$NORACE V=$DEBUG build-plugins-static
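# Collect Go sources for source-level debugging (DEBUG=1 builds only): cpgodir
# copies the *.go/*.s files under $1 into $2 with paths preserved, skipping
# test files, so dlv in the debug image can resolve source listings.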
RUN cpgodir() { \
        mkdir -p $2; \
        find $1 -name '*.s' -o -name '*.go' | grep -v -E '/test/|/testdata/|_test.go' \
            | xargs -I {} cp --parents {} $2; \
    }; \
    if [ "$DEBUG" = 1 ]; then \
        cpgodir /go/pkg /debug-extras; \
        cpgodir /go/builder/pkg /debug-extras; \
        cpgodir /go/builder/cmd /debug-extras; \
        cpgodir /go/builder/vendor /debug-extras; \
        cpgodir /usr/local/go /debug-extras; \
    fi
FROM gcr.io/distroless/static
COPY --from=builder /go/builder/build/bin/nri-resource-policy-topology-aware /bin/nri-resource-policy-topology-aware
COPY --from=builder /debug-extras /
ENTRYPOINT ["/bin/nri-resource-policy-topology-aware"]
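The DEBUG build argument gates an optional debug variant of this image: with DEBUG=1 the builder stage installs a statically linked dlv and ships the Go sources through /debug-extras, so the distroless image can be debugged at source level. A minimal sketch of building that variant by hand, assuming the nri-plugins source tree as the build context (Tilt normally drives this build; the tag here is hypothetical):

docker build -f <path to this Dockerfile> --build-arg DEBUG=1 \
    -t nri-resource-policy-topology-aware:debug .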
Building image
[builder 1/10] FROM docker.io/library/golang:1.22-bullseye@sha256:05a23dcbf30f718d67e3dfe3483da0e4be465bc38acd9ded39b11f529336e744
[stage-1 1/3] FROM gcr.io/distroless/static:latest@sha256:f4a57e8ffd7ba407bdd0eb315bb54ef1f21a2100a7f032e9102e4da34fe7c196
[background] read source files 110.70MB [done: 237ms]
[stage-1 3/3] COPY --from=builder /debug-extras / [cached]
[stage-1 2/3] COPY --from=builder /go/builder/build/bin/nri-resource-policy-topology-aware /bin/nri-resource-policy-topology-aware [cached]
[builder 10/10] RUN cpgodir() { mkdir -p $2; find $1 -name '*.s' -o -name '*.go' | grep -v -E '/test/|/testdata/|_test.go' | xargs -I {} cp --parents {} $2; }; if [ "0" = 1 ]; then cpgodir /go/pkg /debug-extras; cpgodir /go/builder/pkg /debug-extras; cpgodir /go/builder/cmd /debug-extras; cpgodir /go/builder/vendor /debug-extras; cpgodir /usr/local/go /debug-extras; fi [cached]
[builder 9/10] RUN make IMAGE_VERSION=${IMAGE_VERSION} BUILD_VERSION=${BUILD_VERSION} BUILD_BUILDID=${BUILD_BUILDID} PLUGINS=nri-resource-policy-topology-aware DEBUG=0 NORACE=0 V=0 build-plugins-static [cached]
[builder 8/10] RUN make clean [cached]
[builder 7/10] COPY . . [cached]
[builder 6/10] RUN go mod download [cached]
[builder 5/10] COPY pkg/topology/ pkg/topology/ [cached]
[builder 4/10] COPY go.mod go.sum ./ [cached]
[builder 3/10] RUN mkdir -p /debug-extras; if [ "0" = 1 ]; then mkdir -p /debug-extras/bin; GOBIN=/debug-extras/bin go install -tags osusergo,netgo -ldflags "-extldflags=-static" github.com/go-delve/delve/cmd/dlv@latest; fi [cached]
[builder 2/10] WORKDIR /go/builder [cached]
exporting to image
STEP 2/3 — Pushing ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2
Pushing with Docker client
Authenticating to image repo: ttl.sh
Sending image data
1a73b54f556b: Layer already exists
218cd7e6d3ad: Layer already exists
5f70bf18a086: Layer already exists
f4aee9e53c42: Layer already exists
6f1cdceb6a31: Layer already exists
bbb6cacb8c82: Layer already exists
af5aa97ebe6c: Layer already exists
2a92d6ac9e4f: Layer already exists
b336e209998f: Layer already exists
4d049f83d9cf: Layer already exists
8fa10c0194df: Layer already exists
ddc6e550070c: Layer already exists
03af25190641: Layer already exists
STEP 3/3 — Deploying
Running cmd: python3 /home/fmuyassarov/.local/share/tilt-dev/tilt_modules/github.com/tilt-dev/tilt-extensions/helm_resource/helm-apply-helper.py --set=config.instrumentation.prometheusExport=true --set=ports[0].name=metrics --set=ports[0].container=8891 --set=image.name=ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware
Running cmd: ['helm', 'upgrade', '--install', '--set=config.instrumentation.prometheusExport=true', '--set=ports[0].name=metrics', '--set=ports[0].container=8891', '--set=image.name=ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware', '--set', 'image.registry=ttl.sh', '--set', 'image.repository=ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware', '--set', 'image.tag=tilt-83a930344beac5f2', '--namespace', 'kube-system', 'controller-logs', './deployment/helm/topology-aware']
Release "controller-logs" does not exist. Installing it now.
NAME: controller-logs
LAST DEPLOYED: Tue Nov 19 20:38:18 2024
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
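For reference, the helm invocation above flattens to a single shell command one could run directly; every value is taken from the argv list logged above:

helm upgrade --install controller-logs ./deployment/helm/topology-aware \
    --namespace kube-system \
    --set config.instrumentation.prometheusExport=true \
    --set 'ports[0].name=metrics' \
    --set 'ports[0].container=8891' \
    --set image.name=ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware \
    --set image.registry=ttl.sh \
    --set image.repository=ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware \
    --set image.tag=tilt-83a930344beac5f2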
Running cmd: ['helm', 'get', 'manifest', '--namespace', 'kube-system', 'controller-logs']
Running cmd: ['kubectl', 'get', '-oyaml', '-f', '-']
Objects applied to cluster:
→ nri-resource-policy-topology-aware:serviceaccount
→ nri-resource-policy-topology-aware:clusterrole
→ nri-resource-policy-topology-aware:clusterrolebinding
→ nri-resource-policy-topology-aware:role
→ nri-resource-policy-topology-aware:rolebinding
→ nri-resource-policy-topology-aware:daemonset
→ default:topologyawarepolicy
Step 1 - 1.04s (Building Dockerfile: [ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware])
Step 2 - 6.36s (Pushing ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2)
Step 3 - 2.34s (Deploying)
DONE IN: 9.74s
Tracking new pod rollout (nri-resource-policy-topology-aware-6vbqd):
┊ Scheduled - <1s
┊ Initialized - <1s
┊ Ready - 2s
[event: pod kube-system/nri-resource-policy-topology-aware-6vbqd] Pulling image "ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2"
[event: pod kube-system/nri-resource-policy-topology-aware-6vbqd] Successfully pulled image "ttl.sh/ghcr.io/containers/nri-plugins/nri-resource-policy-topology-aware:tilt-83a930344beac5f2" in 605ms (605ms including waiting). Image size: 30682892 bytes.
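To spot-check the rollout by hand, one could list the daemonset pods, assuming they carry the chart's standard app.kubernetes.io/name label (the label itself is visible in the config dump below):

kubectl -n kube-system get pods -l app.kubernetes.io/name=nri-resource-policy-topology-aware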
I1119 18:38:20.070421 1 config.go:125] logger configuration update &{Debug:[] LogSource:false Klog:{Add_dir_header:<nil> Alsologtostderr:<nil> Log_backtrace_at:<nil> Log_dir:<nil> Log_file:<nil> Log_file_max_size:<nil> Logtostderr:<nil> One_output:<nil> Skip_headers:<nil> Skip_log_headers:<nil> Stderrthreshold:<nil> V:<nil> Vmodule:<nil>}}
I1119 18:38:20.071871 1 metrics.go:21] registering collector cgroupstats...
I1119 18:38:20.076951 1 control.go:236] registering controller cpu...
I1119 18:38:20.076965 1 control.go:236] registering controller e2e-test...
W1119 18:38:20.077216 1 cache.go:983] existing cache directory "/var/lib/nri-resource-policy" has less strict permissions -rwxr-xr-x than expected -rwx--x---
I1119 18:38:20.081245 1 resource-manager.go:95] running as an NRI plugin...
I1119 18:38:20.081255 1 nri.go:50] creating plugin...
W1119 18:38:20.081263 1 cache.go:515] clearing all data for active policy ("topology-aware") from cache...
I1119 18:38:20.085039 1 policy.go:226] creating 'topology-aware' policy...
I1119 18:38:20.085056 1 resource-manager.go:290] topology-aware policy has no policy-specific metrics.
I1119 18:38:20.085194 1 log.go:470] starting 'topology-aware' policy version /build ...
I1119 18:38:20.085210 1 instrumentation.go:72] starting instrumentation services...
I1119 18:38:20.085254 1 http.go:143] HTTP server is disabled
I1119 18:38:20.085264 1 tracing.go:119] starting tracing exporter...
I1119 18:38:20.085277 1 tracing.go:133] tracing effectively disabled, no endpoint set
I1119 18:38:20.085283 1 metrics.go:101] metrics exporter disabled
I1119 18:38:20.085287 1 log.go:470] starting agent, waiting for initial configuration...
I1119 18:38:20.091294 1 log.go:470] node removed from config group ''
I1119 18:38:20.092635 1 agent.go:512] group-specific config updated
I1119 18:38:20.093075 1 log.go:470] acquired initial configuration default (generation 1):
I1119 18:38:20.093085 1 resource-manager.go:153] <initial config> metadata:
I1119 18:38:20.093088 1 resource-manager.go:153] <initial config>   annotations:
I1119 18:38:20.093089 1 resource-manager.go:153] <initial config>     meta.helm.sh/release-name: controller-logs
I1119 18:38:20.093091 1 resource-manager.go:153] <initial config>     meta.helm.sh/release-namespace: kube-system
I1119 18:38:20.093093 1 resource-manager.go:153] <initial config>   creationTimestamp: "2024-11-19T18:38:18Z"
I1119 18:38:20.093095 1 resource-manager.go:153] <initial config>   generation: 1
I1119 18:38:20.093096 1 resource-manager.go:153] <initial config>   labels:
I1119 18:38:20.093097 1 resource-manager.go:153] <initial config>     app.kubernetes.io/instance: controller-logs
I1119 18:38:20.093098 1 resource-manager.go:153] <initial config>     app.kubernetes.io/managed-by: Helm
I1119 18:38:20.093100 1 resource-manager.go:153] <initial config>     app.kubernetes.io/name: nri-resource-policy-topology-aware
I1119 18:38:20.093101 1 resource-manager.go:153] <initial config>     helm.sh/chart: nri-resource-policy-topology-aware-v0.0.0
I1119 18:38:20.093102 1 resource-manager.go:153] <initial config>   managedFields:
I1119 18:38:20.093105 1 resource-manager.go:153] <initial config>   - apiVersion: config.nri/v1alpha1
I1119 18:38:20.093107 1 resource-manager.go:153] <initial config>     fieldsType: FieldsV1
I1119 18:38:20.093108 1 resource-manager.go:153] <initial config>     fieldsV1:
I1119 18:38:20.093110 1 resource-manager.go:153] <initial config>       f:metadata:
I1119 18:38:20.093111 1 resource-manager.go:153] <initial config>         f:annotations:
I1119 18:38:20.093112 1 resource-manager.go:153] <initial config>           .: {}
I1119 18:38:20.093114 1 resource-manager.go:153] <initial config>           f:meta.helm.sh/release-name: {}
I1119 18:38:20.093115 1 resource-manager.go:153] <initial config>           f:meta.helm.sh/release-namespace: {}
I1119 18:38:20.093118 1 resource-manager.go:153] <initial config>         f:labels:
I1119 18:38:20.093119 1 resource-manager.go:153] <initial config>           .: {}
I1119 18:38:20.093120 1 resource-manager.go:153] <initial config>           f:app.kubernetes.io/instance: {}
I1119 18:38:20.093121 1 resource-manager.go:153] <initial config>           f:app.kubernetes.io/managed-by: {}
I1119 18:38:20.093123 1 resource-manager.go:153] <initial config>           f:app.kubernetes.io/name: {}
I1119 18:38:20.093125 1 resource-manager.go:153] <initial config>           f:helm.sh/chart: {}
I1119 18:38:20.093126 1 resource-manager.go:153] <initial config>       f:spec:
I1119 18:38:20.093127 1 resource-manager.go:153] <initial config>         .: {}
I1119 18:38:20.093129 1 resource-manager.go:153] <initial config>         f:defaultCPUPriority: {}
I1119 18:38:20.093130 1 resource-manager.go:153] <initial config>         f:instrumentation:
I1119 18:38:20.093141 1 resource-manager.go:153] <initial config>           .: {}
I1119 18:38:20.093143 1 resource-manager.go:153] <initial config>           f:httpEndpoint: {}
I1119 18:38:20.093145 1 resource-manager.go:153] <initial config>           f:prometheusExport: {}
I1119 18:38:20.093147 1 resource-manager.go:153] <initial config>           f:reportPeriod: {}
I1119 18:38:20.093148 1 resource-manager.go:153] <initial config>           f:samplingRatePerMillion: {}
I1119 18:38:20.093150 1 resource-manager.go:153] <initial config>         f:log:
I1119 18:38:20.093151 1 resource-manager.go:153] <initial config>           .: {}
I1119 18:38:20.093153 1 resource-manager.go:153] <initial config>           f:klog:
I1119 18:38:20.093154 1 resource-manager.go:153] <initial config>             .: {}
I1119 18:38:20.093156 1 resource-manager.go:153] <initial config>             f:skip_headers: {}
I1119 18:38:20.093157 1 resource-manager.go:153] <initial config>           f:source: {}
I1119 18:38:20.093158 1 resource-manager.go:153] <initial config>         f:pinCPU: {}
I1119 18:38:20.093160 1 resource-manager.go:153] <initial config>         f:pinMemory: {}
I1119 18:38:20.093161 1 resource-manager.go:153] <initial config>         f:preferIsolatedCPUs: {}
I1119 18:38:20.093162 1 resource-manager.go:153] <initial config>         f:reservedResources:
I1119 18:38:20.093163 1 resource-manager.go:153] <initial config>           .: {}
I1119 18:38:20.093164 1 resource-manager.go:153] <initial config>           f:cpu: {}
I1119 18:38:20.093166 1 resource-manager.go:153] <initial config>     manager: helm
I1119 18:38:20.093167 1 resource-manager.go:153] <initial config>     operation: Update
I1119 18:38:20.093168 1 resource-manager.go:153] <initial config>     time: "2024-11-19T18:38:18Z"
I1119 18:38:20.093170 1 resource-manager.go:153] <initial config>   name: default
I1119 18:38:20.093171 1 resource-manager.go:153] <initial config>   namespace: kube-system
I1119 18:38:20.093172 1 resource-manager.go:153] <initial config>   resourceVersion: "1132"
I1119 18:38:20.093173 1 resource-manager.go:153] <initial config>   uid: c5203b9c-1e97-4668-ac8d-f1ebf2c0f6dc
I1119 18:38:20.093175 1 resource-manager.go:153] <initial config> spec:
I1119 18:38:20.093176 1 resource-manager.go:153] <initial config>   control:
I1119 18:38:20.093178 1 resource-manager.go:153] <initial config>     cpu: null
I1119 18:38:20.093179 1 resource-manager.go:153] <initial config>   defaultCPUPriority: none
I1119 18:38:20.093181 1 resource-manager.go:153] <initial config>   instrumentation:
I1119 18:38:20.093182 1 resource-manager.go:153] <initial config>     httpEndpoint: :8891
I1119 18:38:20.093183 1 resource-manager.go:153] <initial config>     prometheusExport: true
I1119 18:38:20.093185 1 resource-manager.go:153] <initial config>     reportPeriod: 1m0s
I1119 18:38:20.093186 1 resource-manager.go:153] <initial config>   log:
I1119 18:38:20.093188 1 resource-manager.go:153] <initial config>     klog:
I1119 18:38:20.093189 1 resource-manager.go:153] <initial config>       skip_headers: true
I1119 18:38:20.093191 1 resource-manager.go:153] <initial config>     source: true
I1119 18:38:20.093192 1 resource-manager.go:153] <initial config>   pinCPU: true
I1119 18:38:20.093194 1 resource-manager.go:153] <initial config>   pinMemory: true
I1119 18:38:20.093195 1 resource-manager.go:153] <initial config>   preferIsolatedCPUs: true
I1119 18:38:20.093197 1 resource-manager.go:153] <initial config>   reservedResources:
I1119 18:38:20.093198 1 resource-manager.go:153] <initial config>     cpu: 750m
I1119 18:38:20.093200 1 resource-manager.go:153] <initial config> status:
I1119 18:38:20.093202 1 resource-manager.go:153] <initial config>   nodes: null
I1119 18:38:20.093203 1 resource-manager.go:153] <initial config>
I1119 18:38:20.093206 1 resource-manager.go:171] starting resource manager...
I1119 18:38:20.093215 1 config.go:125] logger configuration update &{Debug:[] LogSource:true Klog:{Add_dir_header:<nil> Alsologtostderr:<nil> Log_backtrace_at:<nil> Log_dir:<nil> Log_file:<nil> Log_file_max_size:<nil> Logtostderr:<nil> One_output:<nil> Skip_headers:0xc00017729c Skip_log_headers:<nil> Stderrthreshold:<nil> V:<nil> Vmodule:<nil>}}
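Stripped of the klog prefixes and server-side metadata, the spec above reads more easily as plain YAML. A sketch of applying the same configuration by hand (the TopologyAwarePolicy kind is inferred from the default:topologyawarepolicy object listed earlier; normally the helm chart manages this resource):

kubectl apply -n kube-system -f - <<EOF
apiVersion: config.nri/v1alpha1
kind: TopologyAwarePolicy
metadata:
  name: default
spec:
  defaultCPUPriority: none
  instrumentation:
    httpEndpoint: ":8891"
    prometheusExport: true
    reportPeriod: 1m0s
  log:
    klog:
      skip_headers: true
    source: true
  pinCPU: true
  pinMemory: true
  preferIsolatedCPUs: true
  reservedResources:
    cpu: 750m
EOF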
I: [ http ] stopping HTTP server...
I: [ http ] starting HTTP server...
I: [ tracing ] starting tracing exporter...
I: [ tracing ] tracing effectively disabled, no endpoint set
I: [ metrics ] starting metrics exporter...
I: [ sysfs ] NUMA nodes with CPUs: 0
I: [ sysfs ] NUMA nodes with (any) memory: 0
I: [ sysfs ] NUMA nodes with normal memory: 0
I: [ sysfs ] node 0 has DRAM memory
[ sst ] DEBUG: sst device "/host/dev/isst_interface" does not exist
I: [ sysfs ] Speed Select Technology (SST) support not detected
I: [ policy ] activating 'topology-aware' policy...
I: [ policy ] initial configuration: &{PinCPU:true PinMemory:true PreferIsolated:true PreferShared:false ColocatePods:false ColocateNamespaces:false ReservedPoolNamespaces:[] AvailableResources:map[] ReservedResources:map[cpu:750m] DefaultCPUPriority:none}
I: [ cpuallocator ] picked cache level 2 for CPU grouping
I: [ libmem ] memory allocator configuration
I: [ libmem ] DRAM node #0 with 33345736704 memory (31.056G)
I: [ libmem ] distance vector [10]
I: [ libmem ] close CPUs: 0-15
I: [ policy ] implicit affinity colocate-pods is disabled
I: [ policy ] implicit affinity colocate-namespaces is disabled
I: [ policy ] ***** default CPU priority is none
I: [ policy ] <post-start> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ nri-plugin ] starting plugin...
I: [ nri-plugin ] creating plugin stub...
time="2024-11-19T18:38:20Z" level=info msg="Created plugin 90-resource-manager (nri-resource-policy-topology-aware, handles RunPodSandbox,StopPodSandbox,RemovePodSandbox,CreateContainer,StartContainer,UpdateContainer,StopContainer,RemoveContainer)"
time="2024-11-19T18:38:20Z" level=info msg="Registering plugin 90-resource-manager..."
time="2024-11-19T18:38:20Z" level=info msg="Configuring plugin 90-resource-manager for runtime containerd/v1.7.17..."
I: [ nri-plugin ] => Configure, runtime containerd v1.7.17
time="2024-11-19T18:38:20Z" level=info msg="Subscribing plugin 90-resource-manager (nri-resource-policy-topology-aware) for events RunPodSandbox,StopPodSandbox,RemovePodSandbox,CreateContainer,StartContainer,UpdateContainer,StopContainer,RemoveContainer"
time="2024-11-19T18:38:20Z" level=info msg="Started plugin 90-resource-manager..."
I: [ resource-control ] syncing controllers with configuration...
I: [ resource-control ] starting controller cpu
I: [ cpu ] empty configuration, disabling controller
I: [ resource-control ] controller cpu is disabled
I: [ resource-control ] starting controller e2e-test
I: [ resource-control ] controller e2e-test is enabled and running
I: [ resource-manager ] up and running
I: [ nri-plugin ] => Synchronize
I: [ nri-plugin ] synchronizing cache state with NRI runtime...
I: [ nri-plugin ] discovered stale container <unknown-pod c2c03eb74669c686741e30fa83d91eb2b9127f50cdbc989a36c312071638abaf>/kube-apiserver (1ea34fbddfa6c6473d48344ddd33701d638d9be49049f5981a916bd5d1f30eaf)...
I: [ nri-plugin ] discovered stale container <unknown-pod 0c4e75e33f718dc99c2afeeadb78f9fc5bc4bf18e96114d642b9287573c38f67>/kube-proxy (54531e5b38b415c1c9b91fc25beebd654cc04af4fb67cb7ebb4677b47ffe51ab)...
I: [ nri-plugin ] discovered stale container <unknown-pod bc17f298c9d3669870b92b793cfe859681fe593531b8e74cc2bfa51b2256071e>/nri-resource-policy-topology-aware (86385cb2f5d2fd61ab44913522792211d665f1922aa3d9c082db71ce5d5c7229)...
I: [ nri-plugin ] discovered stale container <unknown-pod 08260970c0aafcc0b3e0540a5b61e8aa50b71455ed11a42176dd1cc9b690bdb6>/kube-scheduler (ec9b8f559f79906c92d670f01da9987f15da265973636d3684a1592d83f2cd83)...
I: [ nri-plugin ] discovered stale container <unknown-pod 790386c204e33a0d760f9b5777dfc7aca28b7577a903f9ffaa26c9e0d9bbf972>/coredns (d66a803d14019261e5c1921ca0809deff962a584efb2fe04ab1f2124d906bd18)...
I: [ nri-plugin ] discovered stale container <unknown-pod 80de7bd9ce4420886d1308b6d17cdd31649bd54bb452be90ed808cbb4a63e9f9>/coredns (2c7942f9cb5d44ebb47d1d443fa796d0961fed52e1a8975a71833589032e8335)...
I: [ nri-plugin ] discovered stale container <unknown-pod 71d67e2a8f6d6a489df17ec6b86e829d5a3e6a21db47a493bf84f70dfa3e7e87>/calico-typha (76cd465f15656175bb166040d3288c4bad5a3bb56a72c63d1c2e1d21cf6aa527)...
I: [ nri-plugin ] discovered stale container <unknown-pod 759917411982729ecdc32702fff5598bbed9287db4f1672f24049bd296bcd5fb>/nginx (a3c826ee6b2c5e2c625e8a248a62894f8a2c95401f6c2e979380f49381cebdce)...
I: [ nri-plugin ] discovered stale container <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/csi-node-driver-registrar (b9e384878336a28c3c708c9e1823aff5d63b0c718f3a8f3f2cae91ebc5518b8d)...
I: [ nri-plugin ] discovered stale container <unknown-pod f91efecd5e640f55fc088decc76ae09f78541252c6bfc163c410e25986dd0f34>/calico-node (4013733f74ad7ff00738e87daf502ed1ef6507269b58cb6ae7f95237fc940c9b)...
I: [ nri-plugin ] discovered stale container <unknown-pod 0a9ba47f2afd956632b9aaa5adb547f6336eb31d3d97be0f4e36880fb443db59>/etcd (686face2d6c56ea9db3834a284174f1a3eb96507c3d3393ec3d667ba24fb62e0)...
I: [ nri-plugin ] discovered stale container <unknown-pod fa7d711b28bc838e085fe0f57de12b4df4f0e4b9386a882257a52e97eb9ce364>/kube-controller-manager (9580fd656dc81f91fca178331c9d04174be2e152c7128c82fdb3f925ba033202)...
I: [ nri-plugin ] discovered stale container <unknown-pod 3265d72416981841b3363ade4214f985b5f695d2b8dc465516fce98d64d89589>/calico-kube-controllers (b59a49153bac6a539213c84b1ce4bcf3ed412ef7beadf7d083b56cf345000ed0)...
I: [ nri-plugin ] discovered stale container <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/calico-csi (34086679c21aa5e4ee46496e4b9c509d59fac916c6a2410231bba2f89cede771)...
I: [ nri-plugin ] discovered stale container <unknown-pod 0946fc8b5b14925b3f238e0f16fc56a044f0f45839948ee0437a451e5cd76d97>/calico-apiserver (4ebf2d5b967a57facdf899ee4316f07b52022426f5e99aba427ad1b1f82712bb)...
I: [ nri-plugin ] discovered stale container <unknown-pod 773c4a3800f7f775c2c737942cc5591ee08d533411f0d170c365f727c9523ec7>/tigera-operator (59d2750a0eb01f545d732244160d633e5b404941d1c9c07b79450c20c1e671c1)...
I: [ nri-plugin ] discovered stale container <unknown-pod dbcab0235b02d0f145d381728942a53a1895ad8fbfb90bce94866526391cbf42>/nginx (a847b45344b0a8c735f879962499b76b050b7bc391159ab8f9e54303649c3340)...
I: [ nri-plugin ] discovered created/running container tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator (c34c3a55c69ad4286b8acd7570f5e63e0614d6fe92e94216385d5917d842f1f5)...
I: [ nri-plugin ] discovered created/running container calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver (63ab036ce068d47dd4a1dedfe6925f4db8651e667be6a9d9a4bb412aa4304002)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-node-kdhhb/calico-node (8b2adaaa98d741b20d3fc9640bb678ff78cc754e7b633203d865db6e55282c72)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha (f0151247d9b56a31eae3af53e2937d09ad9763824b73755782b66b675c5722d5)...
I: [ nri-plugin ] discovered created/running container kube-system/etcd-fmuyassa-mobl3/etcd (49d7beae2a6c2d53d711211859fd335b641bfc409f50a51945db94a8a766d1f2)...
I: [ nri-plugin ] discovered created/running container kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware (06b8fc282724b49f713719b5ba32bbc0fad7c8b285b361d4781b674a14df78f7)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-proxy-tqlzg/kube-proxy (f8d818854b77daa40ff8c471d669bb5fd075243163c2e522d9ac2f83524787af)...
I: [ nri-plugin ] discovered created/running container calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver (b7a85ba2dc2e1c3a0c8b1ba89e4a7bad89f413ae3cdfb559521c9c5e6bae7d21)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager (4f74a640fc508fed4e40c15510653a417579b27c2bc6e5213034d0ccc399bd89)...
I: [ nri-plugin ] discovered created/running container calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers (39e632d6023eb47f73208b5f4a4caab2e0de99a41b6f6b805e6586bddab1bf9f)...
I: [ nri-plugin ] discovered created/running container kube-system/coredns-7c65d6cfc9-fpvnn/coredns (024e03cb5cc86c4a1580b60089ec95201190553b8c419a127a6fbcdff3db88a3)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver (3f0c6d1b2abc738abf8586184af5c85884cc2261d9523907728ea1e6f0d081b9)...
I: [ nri-plugin ] discovered created/running container kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler (9a5b9505bbccbf9083f4aad7af913cc5fdde68cff33e36a4c649e43a4218f9aa)...
I: [ nri-plugin ] discovered created/running container calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar (947f4ae06259735a2fd5bf899f3b0ab20a0a5d2746dfc4e066be33ad37cdf2ca)...
I: [ nri-plugin ] discovered created/running container calico-system/csi-node-driver-n8xjh/calico-csi (a2d8cbea258b703d2f7078c3d4646a51816cf935b40d77c25e5bfef98dfe037a)...
I: [ nri-plugin ] discovered created/running container kube-system/coredns-7c65d6cfc9-9bm84/coredns (2ee092fe6290337aaae9452f949d1c05664d45fdd22302d3da6bc8679dd326e0)...
I: [ nri-plugin ] tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator: mapped container to c34c3a55c69ad4286b8acd7570f5e63e0614d6fe92e94216385d5917d842f1f5
I: [ nri-plugin ] calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver: mapped container to 63ab036ce068d47dd4a1dedfe6925f4db8651e667be6a9d9a4bb412aa4304002
I: [ nri-plugin ] calico-system/calico-node-kdhhb/calico-node: mapped container to 8b2adaaa98d741b20d3fc9640bb678ff78cc754e7b633203d865db6e55282c72
I: [ nri-plugin ] calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha: mapped container to f0151247d9b56a31eae3af53e2937d09ad9763824b73755782b66b675c5722d5
I: [ nri-plugin ] kube-system/etcd-fmuyassa-mobl3/etcd: mapped container to 49d7beae2a6c2d53d711211859fd335b641bfc409f50a51945db94a8a766d1f2
I: [ nri-plugin ] kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware: mapped container to 06b8fc282724b49f713719b5ba32bbc0fad7c8b285b361d4781b674a14df78f7
I: [ nri-plugin ] kube-system/kube-proxy-tqlzg/kube-proxy: mapped container to f8d818854b77daa40ff8c471d669bb5fd075243163c2e522d9ac2f83524787af
I: [ nri-plugin ] calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver: mapped container to b7a85ba2dc2e1c3a0c8b1ba89e4a7bad89f413ae3cdfb559521c9c5e6bae7d21
I: [ nri-plugin ] kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager: mapped container to 4f74a640fc508fed4e40c15510653a417579b27c2bc6e5213034d0ccc399bd89
I: [ nri-plugin ] calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers: mapped container to 39e632d6023eb47f73208b5f4a4caab2e0de99a41b6f6b805e6586bddab1bf9f
I: [ nri-plugin ] kube-system/coredns-7c65d6cfc9-fpvnn/coredns: mapped container to 024e03cb5cc86c4a1580b60089ec95201190553b8c419a127a6fbcdff3db88a3
I: [ nri-plugin ] kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver: mapped container to 3f0c6d1b2abc738abf8586184af5c85884cc2261d9523907728ea1e6f0d081b9
I: [ nri-plugin ] kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler: mapped container to 9a5b9505bbccbf9083f4aad7af913cc5fdde68cff33e36a4c649e43a4218f9aa
I: [ nri-plugin ] calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar: mapped container to 947f4ae06259735a2fd5bf899f3b0ab20a0a5d2746dfc4e066be33ad37cdf2ca
I: [ nri-plugin ] calico-system/csi-node-driver-n8xjh/calico-csi: mapped container to a2d8cbea258b703d2f7078c3d4646a51816cf935b40d77c25e5bfef98dfe037a
I: [ nri-plugin ] kube-system/coredns-7c65d6cfc9-9bm84/coredns: mapped container to 2ee092fe6290337aaae9452f949d1c05664d45fdd22302d3da6bc8679dd326e0
I: [ policy ] * releasing resources allocated to <unknown-pod c2c03eb74669c686741e30fa83d91eb2b9127f50cdbc989a36c312071638abaf>/kube-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod c2c03eb74669c686741e30fa83d91eb2b9127f50cdbc989a36c312071638abaf>/kube-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 0c4e75e33f718dc99c2afeeadb78f9fc5bc4bf18e96114d642b9287573c38f67>/kube-proxy
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 0c4e75e33f718dc99c2afeeadb78f9fc5bc4bf18e96114d642b9287573c38f67>/kube-proxy> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod bc17f298c9d3669870b92b793cfe859681fe593531b8e74cc2bfa51b2256071e>/nri-resource-policy-topology-aware
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod bc17f298c9d3669870b92b793cfe859681fe593531b8e74cc2bfa51b2256071e>/nri-resource-policy-topology-aware> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 08260970c0aafcc0b3e0540a5b61e8aa50b71455ed11a42176dd1cc9b690bdb6>/kube-scheduler
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 08260970c0aafcc0b3e0540a5b61e8aa50b71455ed11a42176dd1cc9b690bdb6>/kube-scheduler> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 790386c204e33a0d760f9b5777dfc7aca28b7577a903f9ffaa26c9e0d9bbf972>/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 790386c204e33a0d760f9b5777dfc7aca28b7577a903f9ffaa26c9e0d9bbf972>/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 80de7bd9ce4420886d1308b6d17cdd31649bd54bb452be90ed808cbb4a63e9f9>/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 80de7bd9ce4420886d1308b6d17cdd31649bd54bb452be90ed808cbb4a63e9f9>/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 71d67e2a8f6d6a489df17ec6b86e829d5a3e6a21db47a493bf84f70dfa3e7e87>/calico-typha
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 71d67e2a8f6d6a489df17ec6b86e829d5a3e6a21db47a493bf84f70dfa3e7e87>/calico-typha> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 759917411982729ecdc32702fff5598bbed9287db4f1672f24049bd296bcd5fb>/nginx
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 759917411982729ecdc32702fff5598bbed9287db4f1672f24049bd296bcd5fb>/nginx> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/csi-node-driver-registrar
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/csi-node-driver-registrar> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod f91efecd5e640f55fc088decc76ae09f78541252c6bfc163c410e25986dd0f34>/calico-node
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod f91efecd5e640f55fc088decc76ae09f78541252c6bfc163c410e25986dd0f34>/calico-node> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 0a9ba47f2afd956632b9aaa5adb547f6336eb31d3d97be0f4e36880fb443db59>/etcd
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 0a9ba47f2afd956632b9aaa5adb547f6336eb31d3d97be0f4e36880fb443db59>/etcd> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod fa7d711b28bc838e085fe0f57de12b4df4f0e4b9386a882257a52e97eb9ce364>/kube-controller-manager
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod fa7d711b28bc838e085fe0f57de12b4df4f0e4b9386a882257a52e97eb9ce364>/kube-controller-manager> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 3265d72416981841b3363ade4214f985b5f695d2b8dc465516fce98d64d89589>/calico-kube-controllers
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 3265d72416981841b3363ade4214f985b5f695d2b8dc465516fce98d64d89589>/calico-kube-controllers> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/calico-csi
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 1f753191d160fb93f8fd5f3959c43ba3171ba49df5caba308386ca91e0f6e49d>/calico-csi> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 0946fc8b5b14925b3f238e0f16fc56a044f0f45839948ee0437a451e5cd76d97>/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 0946fc8b5b14925b3f238e0f16fc56a044f0f45839948ee0437a451e5cd76d97>/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod 773c4a3800f7f775c2c737942cc5591ee08d533411f0d170c365f727c9523ec7>/tigera-operator
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod 773c4a3800f7f775c2c737942cc5591ee08d533411f0d170c365f727c9523ec7>/tigera-operator> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to <unknown-pod dbcab0235b02d0f145d381728942a53a1895ad8fbfb90bce94866526391cbf42>/nginx
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release <unknown-pod dbcab0235b02d0f145d381728942a53a1895ad8fbfb90bce94866526391cbf42>/nginx> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-node-kdhhb/calico-node
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-node-kdhhb/calico-node> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/etcd-fmuyassa-mobl3/etcd
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/etcd-fmuyassa-mobl3/etcd> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-proxy-tqlzg/kube-proxy
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-proxy-tqlzg/kube-proxy> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/coredns-7c65d6cfc9-fpvnn/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/coredns-7c65d6cfc9-fpvnn/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to calico-system/csi-node-driver-n8xjh/calico-csi
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release calico-system/csi-node-driver-n8xjh/calico-csi> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * releasing resources allocated to kube-system/coredns-7c65d6cfc9-9bm84/coredns
I: [ policy ] => no grant found, nothing to do...
I: [ policy ] <post-release kube-system/coredns-7c65d6cfc9-9bm84/coredns> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] <post-alloc tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-node-kdhhb/calico-node to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/calico-node-kdhhb/calico-node> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha> total CPU granted: 0m (0 exclusive + 0m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/etcd-fmuyassa-mobl3/etcd to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/etcd-fmuyassa-mobl3/etcd> total CPU granted: 100m (0 exclusive + 100m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware> total CPU granted: 600m (0 exclusive + 600m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-proxy-tqlzg/kube-proxy to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-proxy-tqlzg/kube-proxy> total CPU granted: 600m (0 exclusive + 600m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] <post-alloc calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver> total CPU granted: 600m (0 exclusive + 600m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager> total CPU granted: 799m (0 exclusive + 799m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers> total CPU granted: 799m (0 exclusive + 799m shared), total memory granted: 0.00
I: [ policy ] * applying grant <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)>
I: [ policy ] => pinning kube-system/coredns-7c65d6cfc9-fpvnn/coredns to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/coredns-7c65d6cfc9-fpvnn/coredns> total CPU granted: 899m (0 exclusive + 899m shared), total memory granted: 170.00M
W: [ policy ] possible misconfiguration of reserved resources:
W: [ policy ] socket #0: allocatable <socket #0 allocatable: CPU: isolated:12-15, reserved:8 (allocatable: 101m), grantedReserved:899m, sharable:0-7,9-11 (allocatable:11000m), MemLimit: 30.89G>
W: [ policy ] kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver: needs 250 reserved, only 101 available
W: [ policy ] falling back to using normal unreserved CPUs instead...
I: [ policy ] * applying grant <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver> total CPU granted: 1149m (0 exclusive + 1149m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] => pinning kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler to (reserved) cpuset 8
I: [ policy ] * updating shared allocations affected by <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)>
I: [ policy ] this grant uses reserved CPUs, does not affect shared allocations
I: [ policy ] <post-alloc kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler> total CPU granted: 1249m (0 exclusive + 1249m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] <post-alloc calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar> total CPU granted: 1249m (0 exclusive + 1249m shared), total memory granted: 170.00M
I: [ policy ] * applying grant <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => pinning calico-system/csi-node-driver-n8xjh/calico-csi to (shared) cpuset 0-7,9-11
I: [ policy ] * updating shared allocations affected by <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)>
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11...
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc calico-system/csi-node-driver-n8xjh/calico-csi> total CPU granted: 1249m (0 exclusive + 1249m shared), total memory granted: 170.00M | |
W: [ policy ] possible misconfiguration of reserved resources: | |
W: [ policy ] socket #0: allocatable <socket #0 allocatable: CPU: isolated:12-15, reserved:8 (allocatable: 1m), grantedReserved:999m, sharable:0-7,9-11 (grantedShared:250m/250m local/subtree, allocatable:10750m), MemLimit: 30.89G> | |
W: [ policy ] kube-system/coredns-7c65d6cfc9-9bm84/coredns: needs 100 reserved, only 1 available | |
W: [ policy ] falling back to using normal unreserved CPUs instead... | |
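
The warning is simple arithmetic: the reserved pool is the single CPU 8 (1000m), and the reserved grants already visible in this log (500m for nri-resource-policy, 199m for kube-controller-manager, plus 100m each for etcd, kube-scheduler, and the first coredns replica) add up to 999m, leaving 1m allocatable. The second coredns replica requests 100m of reserved CPU, cannot fit, and is granted normal shared CPUs instead; raising the policy's reserved CPU allocation would avoid the fallback. A sketch of the fit check using the numbers from the log (simplified, not the actual implementation):

package main

import "fmt"

// reservedFits reports whether a reserved-CPU request still fits in the
// reserved pool. All values are CPU millicores, as printed in the log.
func reservedFits(capacityMilli, grantedMilli, requestMilli int64) bool {
	return capacityMilli-grantedMilli >= requestMilli
}

func main() {
	// Reserved pool: CPU 8 = 1000m; grantedReserved = 999m; request = 100m.
	if !reservedFits(1000, 999, 100) {
		fmt.Println("needs 100 reserved, only 1 available")
		fmt.Println("falling back to using normal unreserved CPUs instead...")
	}
}
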
I: [ policy ] * applying grant <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> | |
I: [ policy ] => pinning kube-system/coredns-7c65d6cfc9-9bm84/coredns to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc kube-system/coredns-7c65d6cfc9-9bm84/coredns> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ policy ] <post-sync> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= Synchronize | |
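
The post-sync totals are consistent with the grants above: 999m of reserved grants plus the two explicit shared grants (250m for kube-apiserver, 100m for the fallback coredns replica) give the reported 1349m, and the two coredns memory grants of 170.00M each give 340.00M. The summary apparently folds reserved grants into the "shared" figure, since the total rose from 1149m to 1249m when the reserved 100m kube-scheduler grant was applied. The cpusets are equally mechanical: the allocatable line in the warning (isolated:12-15, reserved:8, sharable:0-7,9-11) implies a 16-CPU node whose shared pool is everything minus the isolated and reserved CPUs. That set arithmetic can be reproduced with k8s.io/utils/cpuset (a sketch with literal values; the policy derives the sets from the discovered hardware topology):

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	all, _ := cpuset.Parse("0-15")       // 16 CPUs on this node
	isolated, _ := cpuset.Parse("12-15") // kernel-isolated CPUs
	reserved, _ := cpuset.Parse("8")     // reserved (kube-system) pool

	shared := all.Difference(isolated).Difference(reserved)
	fmt.Println(shared) // 0-7,9-11, the shared cpuset used throughout the log
}
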
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-q9x7z | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-q9x7z/nginx (f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-q9x7z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-q9x7z/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-q9x7z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-q9x7z/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-q9x7z/nginx: mapped container to f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7 | |
I: [ nri-plugin ] <= CreateContainer | |
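
The => / <= pairs mark NRI request and response boundaries: the runtime calls the plugin's Synchronize, RunPodSandbox, CreateContainer, and later lifecycle hooks over the NRI socket, and CreateContainer is where the cpuset pinning is returned as a container adjustment. A minimal standalone plugin of the same shape, built on github.com/containerd/nri/pkg/stub (a sketch with hard-coded values; handler signatures assume a recent NRI release, and the real policy computes the sets per grant):

package main

import (
	"context"
	"log"

	"github.com/containerd/nri/pkg/api"
	"github.com/containerd/nri/pkg/stub"
)

type plugin struct{}

// CreateContainer pins every new container to a fixed shared cpuset and
// memory node, mimicking the "pinning ... to (shared) cpuset 0-7,9-11"
// adjustments in the log.
func (p *plugin) CreateContainer(_ context.Context, pod *api.PodSandbox, ctr *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) {
	log.Printf("=> CreateContainer %s/%s/%s", pod.Namespace, pod.Name, ctr.Name)
	adjust := &api.ContainerAdjustment{}
	adjust.SetLinuxCPUSetCPUs("0-7,9-11") // shared pool
	adjust.SetLinuxCPUSetMems("0")        // memory: nodes{0}
	return adjust, nil, nil
}

func main() {
	s, err := stub.New(&plugin{}, stub.WithPluginName("cpuset-sketch"), stub.WithPluginIdx("95"))
	if err != nil {
		log.Fatal(err)
	}
	if err := s.Run(context.Background()); err != nil {
		log.Fatal(err)
	}
}
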
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-z6hkf | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-z6hkf/nginx (c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-z6hkf/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-z6hkf/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-z6hkf/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-q9x7z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-z6hkf/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-z6hkf/nginx: mapped container to c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-q9x7z/nginx (f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-q9x7z/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-q9x7z/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-z6hkf/nginx (c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-z6hkf/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-z6hkf/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
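
StartContainer is where the policy arms its cold-start timer: the topology-aware policy's cold-start feature keeps a container on its cold memory type for an annotated period before opening up the faster one, and neither nginx container carries such a grant, hence "no coldstart, nothing to do". A sketch of that no-op check (hypothetical code based on the documented cold-start behavior, not the actual implementation):

package main

import (
	"fmt"
	"time"
)

// triggerColdstart arms a timer when a container has a cold-start period;
// a zero period is the "no coldstart, nothing to do" case from the log.
func triggerColdstart(container string, period time.Duration, widen func()) {
	fmt.Printf("coldstart: triggering coldstart for %s...\n", container)
	if period <= 0 {
		fmt.Println("coldstart: no coldstart, nothing to do...")
		return
	}
	time.AfterFunc(period, widen) // later: widen memory to the full grant
}

func main() {
	triggerColdstart("default/nginx-deployment-d556bf558-q9x7z/nginx", 0, nil)
}
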
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-q9x7z/nginx (f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-q9x7z/nginx: unmapped container (f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-q9x7z/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-q9x7z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-q9x7z/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-z6hkf/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-q9x7z/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-z6hkf/nginx (c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-z6hkf/nginx: unmapped container (c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-z6hkf/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-z6hkf/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-z6hkf/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-z6hkf/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
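
Stopping the two nginx containers releases their grants and triggers the same shared-allocation fan-out, yet the totals stay at 1349m and 340.00M. Only grants with an explicit reserved or shared millicore amount count toward the totals, and the nginx grants show neither, which suggests the pods were created without CPU requests. A sketch of that bookkeeping (hypothetical types):

package main

import "fmt"

// allocation mirrors the amounts visible in the grant dumps; containers
// without a CPU request have both amounts at zero.
type allocation struct {
	name          string
	reservedMilli int64 // e.g. "reserved: 8 (500m)"
	sharedMilli   int64 // e.g. "shared: 0-7,9-11 (250m)"
}

func totalMilli(allocs []allocation) int64 {
	var total int64
	for _, a := range allocs {
		total += a.reservedMilli + a.sharedMilli
	}
	return total
}

func main() {
	allocs := []allocation{
		{"kube-apiserver", 0, 250},
		{"coredns (fallback replica)", 0, 100},
		{"all reserved-pool grants combined", 999, 0},
		{"nginx", 0, 0}, // releasing this changes nothing
	}
	fmt.Printf("total CPU granted: %dm\n", totalMilli(allocs)) // 1349m
}
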
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-z6hkf | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-q9x7z | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-q9x7z/nginx (f62b581035325d9448de6ee429167fddd54726e848a4efdeb84f42111f7363c7) | |
I: [ nri-plugin ] <= RemoveContainer | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-z6hkf/nginx (c6113cd04eccff22e6c8830286a76ab00eb7920296f59c93396e4eb62fe6b39a) | |
I: [ nri-plugin ] <= RemoveContainer | |
I: [ nri-plugin ] => RemovePodSandbox default/nginx-deployment-d556bf558-z6hkf | |
I: [ nri-plugin ] <= RemovePodSandbox | |
I: [ nri-plugin ] => RemovePodSandbox default/nginx-deployment-d556bf558-q9x7z | |
I: [ nri-plugin ] <= RemovePodSandbox | |
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-wqmbx | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-wqmbx/nginx (a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-wqmbx/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-wqmbx/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-wqmbx/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-wqmbx/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-wqmbx/nginx: mapped container to a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245 | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => RunPodSandbox default/nginx-deployment-d556bf558-bd2bw | |
I: [ nri-plugin ] <= RunPodSandbox | |
I: [ nri-plugin ] => CreateContainer default/nginx-deployment-d556bf558-bd2bw/nginx (246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3) | |
I: [ policy ] * applying grant <grant for default/nginx-deployment-d556bf558-bd2bw/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => pinning default/nginx-deployment-d556bf558-bd2bw/nginx to (shared) cpuset 0-7,9-11 | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-bd2bw/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-wqmbx/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-alloc default/nginx-deployment-d556bf558-bd2bw/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-bd2bw/nginx: mapped container to 246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3 | |
I: [ nri-plugin ] <= CreateContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-wqmbx/nginx (a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-wqmbx/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-wqmbx/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => StartContainer default/nginx-deployment-d556bf558-bd2bw/nginx (246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3) | |
I: [ policy ] triggering coldstart period (if necessary) for default/nginx-deployment-d556bf558-bd2bw/nginx | |
I: [ policy ] coldstart: triggering coldstart for default/nginx-deployment-d556bf558-bd2bw/nginx... | |
I: [ policy ] coldstart: no coldstart, nothing to do... | |
I: [ nri-plugin ] <= StartContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-wqmbx/nginx (a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-wqmbx/nginx: unmapped container (a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-wqmbx/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-wqmbx/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-wqmbx/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for default/nginx-deployment-d556bf558-bd2bw/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-wqmbx/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopContainer default/nginx-deployment-d556bf558-bd2bw/nginx (246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3) | |
I: [ nri-plugin ] default/nginx-deployment-d556bf558-bd2bw/nginx: unmapped container (246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3) | |
I: [ policy ] * releasing resources allocated to default/nginx-deployment-d556bf558-bd2bw/nginx | |
I: [ policy ] => releasing grant <grant for default/nginx-deployment-d556bf558-bd2bw/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)>... | |
I: [ policy ] * updating shared allocations affected by <grant for default/nginx-deployment-d556bf558-bd2bw/nginx from socket #0: cputype: normal, memory: nodes{0} (0.00)> | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/calico-csi from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-kube-controllers-789bc4f9fd-wvbh9/calico-kube-controllers from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/csi-node-driver-n8xjh/csi-node-driver-registrar from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/kube-scheduler-fmuyassa-mobl3/kube-scheduler from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-system/calico-typha-68998cdbcd-x9f4c/calico-typha from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/etcd-fmuyassa-mobl3/etcd from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-proxy-tqlzg/kube-proxy from socket #0: cputype: reserved, memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/kube-controller-manager-fmuyassa-mobl3/kube-controller-manager from socket #0: cputype: reserved, reserved: 8 (199m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => <grant for kube-system/coredns-7c65d6cfc9-fpvnn/coredns from socket #0: cputype: reserved, reserved: 8 (100m), memory: nodes{0} (170.00M)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for kube-system/coredns-7c65d6cfc9-9bm84/coredns from socket #0: cputype: normal, shared: 0-7,9-11 (100m), memory: nodes{0} (170.00M)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-system/calico-node-kdhhb/calico-node from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-qvb48/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => <grant for kube-system/nri-resource-policy-topology-aware-6vbqd/nri-resource-policy-topology-aware from socket #0: cputype: reserved, reserved: 8 (500m), memory: nodes{0} (0.00)> not affected (only reserved CPUs)... | |
I: [ policy ] => updating <grant for calico-apiserver/calico-apiserver-74cb7885-lfkld/calico-apiserver from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for kube-system/kube-apiserver-fmuyassa-mobl3/kube-apiserver from socket #0: cputype: normal, shared: 0-7,9-11 (250m), memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] => updating <grant for tigera-operator/tigera-operator-55748b469f-zswc9/tigera-operator from socket #0: cputype: normal, memory: nodes{0} (0.00)> with shared CPUs of socket #0: 0-7,9-11... | |
I: [ policy ] <post-release default/nginx-deployment-d556bf558-bd2bw/nginx> total CPU granted: 1349m (0 exclusive + 1349m shared), total memory granted: 340.00M | |
I: [ resource-manager ] updating topology zones... | |
I: [ agent ] updating node resource topology CR | |
I: [ nri-plugin ] <= StopContainer | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-wqmbx | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => StopPodSandbox default/nginx-deployment-d556bf558-bd2bw | |
I: [ nri-plugin ] <= StopPodSandbox | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-wqmbx/nginx (a647b28dcbd53574d2e742f5b5fe53dabc5da11daa7651863cb9aaf7e8ec3245) | |
I: [ nri-plugin ] <= RemoveContainer | |
I: [ nri-plugin ] => RemoveContainer default/nginx-deployment-d556bf558-bd2bw/nginx (246af2c6566ed6aa5f4e87f3236de43b16da196836f2b5baecf1106762efa1c3) | |
I: [ nri-plugin ] <= RemoveContainer |