kublr:
# Values in the fixed section cannot be changed dynamically; they may be hardcoded in other places, although every
# effort will be made to reduce duplication
fixed:
# This path is defined by the actual location of the config file used and cannot be overridden
# in the config file.
#
# Any alternative value of this property specified in a user config file or in extensions'
# default config files will be ignored and overridden by this value.
#
# If config file is passed to the process (agent or seeder) via flags, this property is forcibly
# overridden after the config file loading and set to the parent directory of the config file.
#
# If custom config file location is not provided via flags, but default config file is used
# (/etc/kublr/seeder.yaml or /etc/kublr/daemon.yaml), this value is forcibly overridden to /etc/kublr
#
# If custom config file location is not provided via flags, and default config file is not used
# (may be true for some commands), this value is not altered.
kublr_config_dir: /etc/kublr
# Directory in which kublr is installed
kublr_dir: /opt/kublr
cluster_config_dir: /etc/kubernetes
safe_config_dir: /srv/kubernetes
var_lib_dir: /var/lib
log_dir: /var/log
# @deprecated use `kublr.setup.docker.*.storage` instead
#
# possible values for docker_storage:
# - '' - means that Kublr will generate docker configuration from kublr.docker.config key as is
# - 'auto' - means that Kublr will select docker storage driver automatically using this algorithm:
# IF
# /proc/filesystems contains overlay (try loading overlay kernel module before checking) AND
# ( data-root fstype != xfs OR
# ( data-root fstype == xfs AND data-root xfs ftype == 1 ) )
# THEN use overlay2
# ELSE use devicemapper with loopback
# WHERE
# data-root = value of docker option "data-root" or /var/lib/docker if not set
# data-root fstype = $(stat -f -c %T $data-root)
# data-root xfs ftype = $(xfs_info $data-root | grep ftype=1)
# - any valid docker storage driver - Kublr will select the specified storage driver
docker_storage: auto
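# For reference, the checks behind the 'auto' heuristic can be reproduced
# manually; a sketch, assuming data-root is the default /var/lib/docker:
#   modprobe overlay || true
#   grep -qw overlay /proc/filesystems            # overlay support present?
#   stat -f -c %T /var/lib/docker                 # filesystem type of data-root
#   xfs_info /var/lib/docker | grep -o 'ftype=1'  # xfs ftype check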
# Values in the version and docker_image sections should be changed with care, as many other configuration options
# embedded in the templates and the agent code may have to be changed accordingly when software versions change.
# This ability is left in the configuration mainly for development purposes
version:
# kubectl image is only used in the label-master static pod, which is exactly the same in all versions as of Kublr 1.25
kubectl: '23.28.3'
addon_manager: '9.1.8-2'
registry: '2.8.1'
haproxy: '2.6.4-alpine'
nvidia_device_plugin: '1.10'
alpine: '3.18.3-1'
# vsphere csi driver components
vsphere_csi_driver: '2.4.3'
vsphere_csi_node_driver_registrar: '2.3.0'
vsphere_csi_livenessprobe: '2.4.0'
vsphere_csi_provisioner: '3.0.0'
vsphere_csi_syncer: '2.4.3'
vsphere_csi_resizer: '1.3.0'
vsphere_csi_attacher: '3.3.0'
vsphere_csi_snapshotter: '' # supported since 2.5.0
# Kublr certManager [end of support in v1.25.0]
cert_updater: '1.25.0-alpha.1'
kublr_selinux_policy: '0.0.8-3'
# Proxy information
proxy_server_properties:
http_proxy_url_with_creds: '{{coalesce .proxy_server.proxy.http.urlWithCreds .proxy_server.proxy.any.urlWithCreds ""}}'
https_proxy_url_with_creds: '{{coalesce .proxy_server.proxy.https.urlWithCreds .proxy_server.proxy.any.urlWithCreds ""}}'
http_proxy_url: '{{coalesce .proxy_server.proxy.http.url .proxy_server.proxy.any.url ""}}'
https_proxy_url: '{{coalesce .proxy_server.proxy.https.url .proxy_server.proxy.any.url ""}}'
http_proxy_username: '{{coalesce .proxy_server.proxy.http.username .proxy_server.proxy.any.username ""}}'
https_proxy_username: '{{coalesce .proxy_server.proxy.https.username .proxy_server.proxy.any.username ""}}'
http_proxy_password: '{{coalesce .proxy_server.proxy.http.password .proxy_server.proxy.any.password ""}}'
https_proxy_password: '{{coalesce .proxy_server.proxy.https.password .proxy_server.proxy.any.password ""}}'
no_proxy: '{{coalesce .proxy_server.no_proxy ""}}'
docker_registry:
# Docker Registry auth information.
# Useful when using private registries with authentication
# Example:
# - registry: docker.example.com:5000
# username: login
# password: passw0rd
auth: []
# Docker Registry certificates
# Useful when using private registries with self-signed or untrusted certificates
# Example:
# - registry: docker.example.com:5000
# certificate: |
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
certificates: []
# Docker registry overrides.
# Useful when public registries cannot be used.
# You can override one particular registry (e.g. Docker Hub, gcr.io, etc.), or you can use 'default' to override all registries at once.
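# Example (hypothetical mirror hostnames): route all images through a private
# mirror, with a separate mirror path for Docker Hub images:
# override:
#   default: 'nexus.example.com:5000'
#   docker_io: 'nexus.example.com:5000/docker-io'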
override:
# a non-empty value here will override all registries not overridden by other, more specific registry overrides below
default: ''
# specific registry overrides for docker.io (docker hub), gcr.io, k8s.gcr.io, registry.k8s.io, and quay.io
docker_io: ''
# used by helm tiller; all others are migrated from gcr.io/google_containers to k8s.gcr.io
gcr_io: ''
k8s_gcr_io: ''
# since 1.22.17, 1.23.15, 1.24.9 k8s images migrated to registry.k8s.io
registry_k8s_io: ''
quay_io: ''
cr_kublr_com: ''
public_ecr_aws: ''
mcr_microsoft_com: ''
docker_image:
# hyperkube image is not available starting with k8s 1.19.0; lower versions are present in registry.k8s.io in multiarch form
hyperkube: '{{- if (semverCompare "< 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/hyperkube:v{{.kublr.version.k8s}}{{- end}}'
# k8s images use registry.k8s.io since 2022, old versions starting at least with 1.17.0 are migrated there too
kube_proxy: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/kube-proxy:v{{.kublr.version.k8s}}'
kube_controller_manager: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/kube-controller-manager:v{{.kublr.version.k8s}}'
kube_apiserver: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/kube-apiserver:v{{.kublr.version.k8s}}'
kube_scheduler: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/kube-scheduler:v{{.kublr.version.k8s}}'
kubelet_addons: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/kublr/kubelet-addons:{{.kublr.version.kubelet_addons}}'
# 1.17: kubectl: '{{- if and (isSemver .kublr.version.k8s) (semverCompare ">= 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/bitnami/kubectl:{{.kublr.version.k8s}}{{- end}}'
# 1.18: kubectl: '{{- if and (isSemver .kublr.version.k8s) (semverCompare ">= 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/bitnami/kubectl:{{.kublr.version.k8s}}{{- end}}'
# 1.19: kubectl: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare ">= 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.docker_registry.override.cr_kublr_com .kublr.docker_registry.override.default "cr.kublr.com"}}/bitnami/kubectl:{{.kublr.version.kubectl}}{{- end}}'
# 1.20+:
kubectl: '{{coalesce .kublr.docker_registry.override.cr_kublr_com .kublr.docker_registry.override.default "cr.kublr.com"}}/kublr/{{if (semverCompare "< 23.28.3-0" .kublr.version.kubectl) }}base/{{ end }}kubectl:{{.kublr.version.kubectl}}'
etcd: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/etcd:{{.kublr.version.etcd}}'
# 1.8.10 old format, 1.8.11 old and new format, 1.8.12 new format
addon_resizer: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/{{if not (semverCompare "< 1.8.11-0" .kublr.version.addon_resizer) }}autoscaling/{{ end }}addon-resizer:{{.kublr.version.addon_resizer}}'
# 1.17-1.19 agents override this image and use 9.0.2 and 9.1.4; agents 1.20+ use this default one (multi-arch)
addon_manager: '{{coalesce .kublr.docker_registry.override.cr_kublr_com .kublr.docker_registry.override.default "cr.kublr.com" }}/kublr/base/addon-manager:{{.kublr.version.addon_manager}}'
# addon_manager: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/{{if or (not (isSemver .kublr.version.addon_manager)) (semverCompare ">= 9.1.2-0" .kublr.version.addon_manager) }}addon-manager/{{ end }}kube-addon-manager:v{{.kublr.version.addon_manager}}'
# 0.8.5 (amd64) is used in kublr agent 1.17-1.20
# 0.8.12 (multi-arch) is used in kublr agent 1.20+
node_problem_detector: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/node-problem-detector/node-problem-detector:v{{.kublr.version.node_problem_detector}}'
# 2.0.4, 2.2.0, 2.4.0, 2.7.0 (all multi-arch) are used in different kublr agent versions
kubernetes_dashboard: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/kubernetesui/dashboard:v{{.kublr.version.kubernetes_dashboard}}'
# 1.0.4, 1.0.6, 1.0.7, 1.0.8 (all multi-arch) are used in different kublr agent versions
kubernetes_dashboard_metrics_scraper: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/kubernetesui/metrics-scraper:v{{.kublr.version.kubernetes_dashboard_metrics_scraper}}'
# 1.7.1 (amd64, arm64), 1.8.1 (amd64, arm64), 1.8.6 (multi-arch) are used in different kublr agent versions
cluster_proportional_autoscaler: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}{{if (semverCompare ">= 1.8.2-0" .kublr.version.cluster_proportional_autoscaler) }}/cpa/cluster-proportional-autoscaler{{ else }}/cluster-proportional-autoscaler-{{.runtime.goarch}}{{ end }}:{{.kublr.version.cluster_proportional_autoscaler}}'
# 2.8.1 (multi-arch)
registry: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/registry:{{.kublr.version.registry}}'
# 1.15.10, 1.16.0, 1.17.0, 1.17.3, 1.21.1, 1.22.8 (all multi-arch)
dns_nodelocaldns: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}{{if (semverCompare "< 1.15.14-0" .kublr.version.dns_nodelocaldns) }}{{else}}/dns{{end}}/k8s-dns-node-cache:{{.kublr.version.dns_nodelocaldns}}'
# 0.3.7, 0.6.2 (all multi-arch)
metrics_server: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/metrics-server/metrics-server:v{{.kublr.version.metrics_server}}'
# 1.17.x-... (all multi-arch)
cluster_autoscaler: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/autoscaling/cluster-autoscaler:v{{.kublr.version.cluster_autoscaler}}'
# 1.15.10, 1.15.12 (all multi-arch)
cloud_controller_manager_common: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/cloud-controller-manager:v{{.kublr.version.cloud_controller_manager_common}}'
# 1.1.0-... (all amd64)
cloud_controller_manager_vsphere: '{{coalesce .kublr.docker_registry.override.gcr_io .kublr.docker_registry.override.default "gcr.io" }}/cloud-provider-vsphere/cpi/release/manager:v{{.kublr.version.cloud_controller_manager_vsphere}}'
# 3.1-3.9 (all multi-arch)
pause: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io"}}/pause:{{.kublr.version.pause}}'
# 1.19.0-14 - 1.24.0-alpha.1 (all amd64)
cert_updater: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/kublr/cert-updater:{{.kublr.version.cert_updater}}'
# 2.6.4-alpine (multi-arch)
haproxy: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/haproxy:{{.kublr.version.haproxy}}'
# 1.10 (amd64 and ppc64le)
nvidia_device_plugin: '{{coalesce .kublr.docker_registry.override.docker_io .kublr.docker_registry.override.default "docker.io" }}/nvidia/k8s-device-plugin:{{.kublr.version.nvidia_device_plugin}}'
# vsphere csi driver components
vsphere_csi_driver: '{{coalesce .kublr.docker_registry.override.gcr_io .kublr.docker_registry.override.default "gcr.io" }}/cloud-provider-vsphere/csi/release/driver:v{{.kublr.version.vsphere_csi_driver}}'
vsphere_csi_node_driver_registrar: '{{coalesce .kublr.docker_registry.override.registry_k8s_io .kublr.docker_registry.override.default "registry.k8s.io" }}/sig-storage/csi-node-driver-registrar:v{{.kublr.version.vsphere_csi_node_driver_registrar}}'
vsphere_csi_livenessprobe: '{{coalesce .kublr.docker_registry.override.k8s_gcr_io .kublr.docker_registry.override.default "k8s.gcr.io" }}/sig-storage/livenessprobe:v{{.kublr.version.vsphere_csi_livenessprobe}}'
vsphere_csi_provisioner: '{{coalesce .kublr.docker_registry.override.k8s_gcr_io .kublr.docker_registry.override.default "k8s.gcr.io" }}/sig-storage/csi-provisioner:v{{.kublr.version.vsphere_csi_provisioner}}'
vsphere_csi_syncer: '{{coalesce .kublr.docker_registry.override.gcr_io .kublr.docker_registry.override.default "gcr.io" }}/cloud-provider-vsphere/csi/release/syncer:v{{.kublr.version.vsphere_csi_syncer}}'
vsphere_csi_resizer: '{{coalesce .kublr.docker_registry.override.k8s_gcr_io .kublr.docker_registry.override.default "k8s.gcr.io" }}/sig-storage/csi-resizer:v{{.kublr.version.vsphere_csi_resizer}}'
vsphere_csi_attacher: '{{coalesce .kublr.docker_registry.override.k8s_gcr_io .kublr.docker_registry.override.default "k8s.gcr.io" }}/sig-storage/csi-attacher:v{{.kublr.version.vsphere_csi_attacher}}'
# vsphere snapshotter is supported since 2.5.0
vsphere_csi_snapshotter: '{{if (isSemver .kublr.version.vsphere_csi_snapshotter)}}{{coalesce .kublr.docker_registry.override.k8s_gcr_io .kublr.docker_registry.override.default "k8s.gcr.io" }}/sig-storage/csi-snapshotter:v{{.kublr.version.vsphere_csi_snapshotter}}{{end}}'
scripting: '{{coalesce .kublr.docker_registry.override.cr_kublr_com .kublr.docker_registry.override.default "cr.kublr.com" }}/kublr/base/alpine:{{.kublr.version.alpine}}'
kublr_selinux_policy: '{{coalesce .kublr.docker_registry.override.cr_kublr_com .kublr.docker_registry.override.default "cr.kublr.com" }}/kublr/kublr-policy:{{.kublr.version.kublr_selinux_policy}}'
binary_repo:
auth: []
# - url_prefix: 'https://nexus.local/kubernetes-release'
# username: download
# password: ********
certificates: []
# - hostname: 'nexus.local' # DNS/crt wildcards are allowed
# insecure: true
# or
# - certificate: |
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
override:
default: ''
storage_googleapis_com: ''
github_com: ''
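# Example (hypothetical mirror URLs): download kubernetes release and CNI
# plugin archives through an internal mirror:
# override:
#   storage_googleapis_com: 'https://nexus.local/storage-googleapis'
#   github_com: 'https://nexus.local/github'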
binary:
kubelet:
sha256: ''
source:
url: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare ">= 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.binary_repo.override.storage_googleapis_com .kublr.binary_repo.override.default "https://storage.googleapis.com"}}/kubernetes-release/release/v{{.kublr.version.k8s}}/kubernetes-node-linux-{{.runtime.goarch}}.tar.gz{{- end }}'
encoding: 'tar.gz' # optional
path:
- 'kubernetes/node/bin/kubelet'
- 'kubernetes/node/bin/kubectl'
cni:
sha256: ''
source:
url: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare ">= 1.19-0" .kublr.version.k8s) }}{{coalesce .kublr.binary_repo.override.github_com .kublr.binary_repo.override.default "https://github.com"}}/containernetworking/plugins/releases/download/v{{.kublr.version.cni}}/cni-plugins-linux-{{.runtime.goarch}}-v{{.kublr.version.cni}}.tgz{{- end }}'
encoding: 'tgz' # optional
# Note that folder names must have / appended
path:
- './'
kubelet_flag:
v: '--v=1'
node_ip: '{{if .node_address_ip}}--node-ip={{.node_address_ip}}{{end}}'
kube_proxy_flag:
master: '--master={{ .local_master_endpoint_url }}'
proxy_mode: '--proxy-mode=iptables'
conntrack_max_per_core: '--conntrack-max-per-core=131072'
oom_score_adj: '--oom-score-adj=-998'
iptables_sync_period: '--iptables-sync-period=1m'
iptables_min_sync_period: '--iptables-min-sync-period=10s'
ipvs_sync_period: '--ipvs-sync-period=1m'
ipvs_min_sync_period: '--ipvs-min-sync-period=10s'
bind_address: '--bind-address=$(NODE_IP)'
hostname_override: '--hostname-override=$(NODE_NAME)'
kubeconfig: '--kubeconfig=/var/lib/kube-proxy/kubeconfig'
cluster_cidr: '--cluster-cidr={{ .cluster.network.podcidr }}'
etcd_flag:
quota_backend_bytes: '--quota-backend-bytes=4294967296'
snapshot_count: '--snapshot-count=10000'
# changes in default.yaml kube_api_server_flag:
# - 1.17 == 1.18
# - 1.18 -> 1.19 - removed basic_auth_file; added service_account_signing_key_file, api_audiences, service_account_issuer, service_account_issuer1
# - 1.19 -> 1.20 - added service_account_issuer
# - 1.20 == 1.21
# - 1.21 -> 1.22 - changed service_account_issuer; added service_account_issuer1
# - 1.22 -> 1.23 - removed insecure_bind_address, insecure_port, enable_swagger_ui; added admission_control_config_file
# - 1.23 == 1.24
# - 1.24 -> 1.25 - enable_admission_plugins changed (removed PodSecurityPolicy)
# - 1.25 == 1.26 == 1.27
# Therefore all flags are specified in common default.yaml except for:
# - basic_auth_file
# - service_account_signing_key_file
# - api_audiences
# - service_account_issuer
# - service_account_issuer1
# - insecure_bind_address
# - insecure_port
# - enable_swagger_ui
# - admission_control_config_file
# - enable_admission_plugins
kube_api_server_flag:
v: '--v=1'
profiling: '--profiling=false'
advertise_address: '{{if .node_address_ip}}--advertise-address={{.node_address_ip}}{{end}}'
# '/srv/kubernetes' in these properties should not be changed even if <kublr.fixed.safe_config_dir> property
# is overridden, because kube-api-server runs in a container and <kublr.fixed.safe_config_dir> is
# always mounted to '/srv/kubernetes' inside the container
tls_cert_file: '--tls-cert-file=/srv/kubernetes/current/kube-apiserver.crt'
tls_private_key_file: '--tls-private-key-file=/srv/kubernetes/current/kube-apiserver.key'
secure_port: '--secure-port={{ .cluster.network.apiserversecureport }}'
kubelet_preferred_address_types:
flag: '--kubelet-preferred-address-types='
values:
internalip:
value: 'InternalIP'
order: '010'
hostname:
value: 'Hostname'
order: '020'
internaldns:
value: 'InternalDNS'
order: '030'
externaldns:
value: 'ExternalDNS'
order: '040'
externalip:
value: 'ExternalIP'
order: '050'
legacyhostip:
value: 'LegacyHostIP'
order: '060'
min_request_timeout: '--min-request-timeout=300'
allow_privileged: '--allow-privileged=true'
anonymous_auth: '--anonymous-auth=false'
authorization_mode:
flag: '--authorization-mode='
values:
rbac:
value: 'RBAC'
order: '010'
node:
value: 'Node'
order: '020'
service_cluster_ip_range: '--service-cluster-ip-range={{ .cluster.network.servicecidr }}'
etcd_cafile: '--etcd-cafile=/srv/kubernetes/current/ca.crt'
etcd_certfile: '--etcd-certfile=/srv/kubernetes/current/etcdclient.crt'
etcd_keyfile: '--etcd-keyfile=/srv/kubernetes/current/etcdclient.key'
kubelet_client_certificate: '--kubelet-client-certificate=/srv/kubernetes/current/kube-apiserver-kubelet-client.crt'
kubelet_client_key: '--kubelet-client-key=/srv/kubernetes/current/kube-apiserver-kubelet-client.key'
client_ca_file: '--client-ca-file=/srv/kubernetes/current/ca.crt'
token_auth_file: '--token-auth-file=/srv/kubernetes/current/known_tokens.csv'
proxy_client_cert_file: '--proxy-client-cert-file=/srv/kubernetes/current/front-proxy-client.crt'
proxy_client_key_file: '--proxy-client-key-file=/srv/kubernetes/current/front-proxy-client.key'
requestheader_allowed_names: '--requestheader-allowed-names=front-proxy-client'
requestheader_client_ca_file: '--requestheader-client-ca-file=/srv/kubernetes/current/front-proxy-ca.crt'
requestheader_extra_headers_prefix: '--requestheader-extra-headers-prefix=X-Remote-Extra-'
requestheader_group_headers: '--requestheader-group-headers=X-Remote-Group'
requestheader_username_headers: '--requestheader-username-headers=X-Remote-User'
service_account_key_file: '--service-account-key-file=/srv/kubernetes/current/kube-sa.crt'
endpoint_reconciler_type: '--endpoint-reconciler-type=lease'
# '/etc/kubernetes' and '/var/log' in these properties should not be changed even if
# <kublr.fixed.cluster_config_dir> and/or <kublr.fixed.log_dir> properties are overridden,
# because kube-api-server runs in a container and <kublr.fixed.cluster_config_dir> and
# <kublr.fixed.log_dir> are always mounted to '/etc/kubernetes' and '/var/log' inside of
# the container
audit_policy_file: '--audit-policy-file=/etc/kubernetes/audit-policy.yaml'
audit_log_path: '--audit-log-path=/var/log/audit/kube-api-server-audit.log'
audit_log_maxsize: '--audit-log-maxsize=200'
audit_log_maxage: '--audit-log-maxage=2'
audit_log_maxbackup: '--audit-log-maxbackup=5'
kube_scheduler_flag:
v: '--v=1'
profiling: '--profiling=false'
master: '--master={{ .local_master_endpoint_url }}'
kubeconfig: '--kubeconfig=/var/lib/kube-scheduler/kubeconfig'
leader_elect: '--leader-elect=true'
kube_controller_manager_flag:
v: '--v=1'
profiling: '--profiling=false'
attach_detach_reconcile_sync_period: '--attach-detach-reconcile-sync-period=1m0s'
min_resync_period: '--min-resync-period=3m'
service_cluster_ip_range: '--service-cluster-ip-range={{ .cluster.network.servicecidr }}'
cluster_cidr: '--cluster-cidr={{ .cluster.network.podcidr }}'
configure_cloud_routes: '--configure-cloud-routes=false'
master: '--master={{ .local_master_endpoint_url }}'
use_service_account_credentials: '--use-service-account-credentials=true'
kubeconfig: '--kubeconfig=/var/lib/kube-controller-manager/kubeconfig'
service_account_private_key_file: '--service-account-private-key-file=/srv/kubernetes/current/kube-sa.key'
root_ca_file: '--root-ca-file=/srv/kubernetes/instance/ca.crt'
cluster_signing_cert_file: '--cluster-signing-cert-file=/srv/kubernetes/current/ca.crt'
cluster_signing_key_file: '--cluster-signing-key-file=/srv/kubernetes/current/ca.key'
leader_elect: '--leader-elect=true'
cluster_name: '--cluster-name={{ .cluster.name }}'
# changes in default.yaml metrics_server_flag:
# - 1.17 == 1.18 == 1.19 == 1.20
# - 1.20 -> 1.21 - added kubelet_use_node_status_port, cert_dir, secure_port
# - 1.21 == 1.22 == 1.23 == 1.24 == 1.25 == 1.26 == 1.27
metrics_server_flag:
v: '--v=1'
metric_resolution: '--metric-resolution=30s'
kubelet_preferred_address_types: '--kubelet-preferred-address-types=InternalIP'
kubelet_insecure_tls: '--kubelet-insecure-tls'
cert_updater_flag:
update_wait_period: '-update-wait-period=5m'
kube_endpoint: '-kube-endpoint=https://127.0.0.1:{{ .cluster.network.apiserversecureport }}'
# '/srv/kubernetes' in this property should not be changed even if <kublr.fixed.safe_config_dir> property
# is overridden, because cert updater runs in a container and <kublr.fixed.safe_config_dir> is
# always mounted to '/srv/kubernetes' inside the container
kube_config_path: '-kube-config-path=/srv/kubernetes/current/config'
selinux:
label: 's0:c170,c441'
# Desired state of selinux runtime
# Effectively ignored if selinux is not supported on the platform (e.g. Ubuntu) or the setenforce command is not installed
# Possible values are 'osdefault' (default with docker), 'permissive', 'enforcing'
# 'osdefault' means do not touch OS setting
# 'permissive' means permissive state (closest to disabled that can be achieved without node reboot)
# 'enforcing' means enforcing (fully enabled) state
# in 1.21 selinux is supported only on docker, not on other CRI
runtime_state: '{{ if eq .kublr.setup.runtime_fallback_order "docker" }}osdefault{{else}}permissive{{end}}'
docker:
config:
# TODO do we still need it ???
ip-masq: false
# TODO do we still need it ???
iptables: false
# This has to be set at the moment for Kubernetes to work on RHEL
# TODO figure out how to safely run system pods with SELinux enabled and
# remove this flag from defaults so that default docker settings are used
# (which is usually edition and OS dependent - false for Docker CE, true
# for RHEL docker)
selinux-enabled: false
# By default we can use json-file format for docker log driver
log-driver: 'json-file'
# EDS-4390 turn on docker log rotation
log-opts:
max-file: '3'
max-size: '10M'
# Containerd config override
# These parameters are remarshalled to the file /etc/containerd/kublr.toml
# which in turn is included in /etc/containerd/config.toml
# keys with dots (e.g. io.containerd.grpc.v1.cri) are allowed
# in 1.21 selinux is not supported on containerd
containerd:
config:
version: 2
plugins:
# TODO: check containerd 1.3 to validate this name
"io.containerd.grpc.v1.cri":
sandbox_image: '{{ .kublr.docker_image.pause }}'
enable_selinux: false
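# For illustration, the mapping above would be remarshalled into
# /etc/containerd/kublr.toml roughly as follows (a sketch, not literal agent
# output; pause image shown resolved to a sample value):
#   version = 2
#   [plugins."io.containerd.grpc.v1.cri"]
#     sandbox_image = "registry.k8s.io/pause:3.9"
#     enable_selinux = false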
# CRI-O config override
# these parameters are remarshalled to the file /etc/crio/crio.conf.d/70-kublr.conf
crio:
config:
crio:
image:
pause_image: '{{ .kublr.docker_image.pause }}'
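# Similarly, a sketch of the resulting /etc/crio/crio.conf.d/70-kublr.conf
# (pause image shown resolved to a sample value):
#   [crio.image]
#     pause_image = "registry.k8s.io/pause:3.9"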
kubernetes_alpha: {}
features:
# full_hostname_override - hostname override for kubelet
#
# Possible values are 'disable' (default), 'node-address-ip', 'hostname', 'hostname-fqdn', 'enable', 'auto'
#
# Irrespective of this property value, it will only take effect if '--hostname-override' is not specified
# for kubelet via other configurations.
#
# 'disable' (default) - standard behavior, i.e. kublr will not try to set --hostname-override flag
# 'node-address-ip' - node ip address will be used as value for '--hostname-override' flag
# 'hostname-fqdn' - 'hostname -f' output will be used as a value for '--hostname-override' flag
# 'hostname' - 'hostname' output will be used as a value for '--hostname-override' flag
# 'enable', 'auto' - the following algorithm will be used to set a value for '--hostname-override' flag
# - if kublr_cloud_provider.type == aws => AWS EC2 instance metadata 'local-hostname'
# - otherwise => no value
# Any other value is treated as default value
#
####################################################################################################################
# Agents released before 2023-09-27:
####################################################################################################################
#
# Possible values are 'disable' (default), 'node-address-ip', 'enable', 'auto'
#
# Irrespective of this property value, it will only take effect if '--hostname-override' is not specified
# for kubelet via other configurations.
#
# 'disable' (default) - standard behavior, i.e. kublr will not try to set --hostname-override flag
# 'node-address-ip' - node ip address will be used as value for '--hostname-override' flag
# 'enable' - 'hostname -f' output will be used as a value for '--hostname-override' flag
# 'auto' - 'hostname -f' output will be used as hostname override in certain situations only;
# specifically:
# - AWS cloud provider is used for kubelet (kubelet has '--cloud-provider=aws' flag set) AND
# $(hostname -f) is defined AND
# $(hostname -f) is different from node name retrieved via AWS EC2 instance metadata
# in the future the list of these situations may grow, change, or become dependent on k8s version
# Any other value is treated as default value
#
full_hostname_override: 'disable'
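# Example: use the node IP address as the kubelet hostname override:
# full_hostname_override: 'node-address-ip'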
# cgroup_driver_override - use specific cgroup driver for docker and kubelet
#
# Possible values are '', 'auto' (default), 'cgroupfs', 'systemd'
#
# '' means that kublr will not override docker and kubelet cgroup driver flags specified in the
# configuration sections 'kublr.kubelet_flag' and 'kublr.docker.config.exec-opts'
# 'auto' means that kublr will try to identify whether SystemD is used on the target system and
# will use 'systemd' or 'cgroupfs' cgroup drivers for docker and kubelet correspondingly
# overriding corresponding kublr configuration values
# 'cgroupfs' kublr will use 'cgroupfs' cgroup driver for docker and kubelet overriding corresponding
# kublr configuration values
# 'systemd' kublr will use 'systemd' cgroup driver for docker and kubelet overriding corresponding
# kublr configuration values
#
# Any other value will be treated as default value
#
cgroup_driver_override: 'auto'
# dns resolution file processing policy
#
# Possible values are '', and 'auto' (default)
#
# '' means that kublr will not override kubelet --resolv-conf flag set or not set in the cluster definition
# 'auto' means that kublr will try to automatically set kubelet --resolv-conf flag. Currently the algorithm
# is as follows:
#
# 1. IF
# /etc/resolv.conf includes "nameserver 127.*" AND
# /run/systemd/resolve/resolv.conf exists and is not empty
# THEN
# use /run/systemd/resolve/resolv.conf as the source
# ELSE
# use /etc/resolv.conf as the source
# ENDIF
#
# 2. IF
# the source contains 127.* or <cluster.network.dnsip> nameserver entries
# THEN
# copy content of the source with 127.* and <cluster.network.dnsip> nameserver entries
# commented out into <var_lib_dir>/kubelet/resolv.fixed.conf
# create symlink <var_lib_dir>/kubelet/resolv.conf -> <var_lib_dir>/kubelet/resolv.fixed.conf
# ELSE
# create symlink <var_lib_dir>/kubelet/resolv.conf -> the source file
# ENDIF
#
# 3. override kubelet flag --resolv-conf=<var_lib_dir>/kubelet/resolv.conf
#
# Any other value is treated as default value
#
dns_resolution: 'auto'
# Master auto-discovery mechanism
#
# Possible values are 'false', 'true' (default)
#
# 'false' 1. master and node will ignore published master addresses even
# if available
# 2. master will not publish its address
# 'true' 1. master will publish its `node_address` to secret stores
# 2. master will use published addresses of other masters to
# initialize etcd cluster. `etcd_addresses_initial` elements
# that are set will override autodiscovered addresses for other
# masters, `node_address` if set will override anything else for
# this master.
# 3. node will use published addresses of masters to connect
#
# Any other value is treated as default value
#
master_autodiscovery: 'true'
# Enable CSI driver for a specific cloud Platform
# 'auto': use default for agent version/platform combination
# defaults for current agent version are set in auto_behavior section in version-specific template
# 'csi': install CSI driver and enable CSI migration
# Might break if CSI driver is not supported by the agent
# 'manual': do NOT install CSI driver but enable CSI migration
# (assume KCP or customer will install CSI later, or PVC will break)
# 'force-intree': do NOT install CSI driver and disable migration.
# This implicitly forces legacy intree driver.
# Might not work if intree driver is already removed from current k8s version
# TODO: currently we never explicitly disable the in-tree driver by feature gates;
# we hope the in-tree drivers will be removed from k8s and we will not need to orchestrate their shutdown
csi_drivers:
# Various parameters to override in CSI manifests.
# Some parameters that are common for most static/addon entities,
# like image, resources, security context, tolerations
# Are specified in their own sections of the agent config,
# But parameters unique for the specific driver are set here
parameters:
azure:
# In upstream, CSI drivers use 2 replicas of the controller pod.
# On other clouds we run CSI controller pods only on masters because of credentials or IAM permissions,
# so we have to limit the number of pods to the number of masters.
# But on Azure we do NOT have to run controllers on the masters
# (at least until we implement IAM support), so 2 replicas seems to be a good default.
# But on a single-node (not to be confused with single-master) cluster 2 controllers cannot start.
# Single-node clusters are not a feature intended for production use, so the user must set this to 1 manually
# or put up with a pending second controller pod and manual intervention during upgrades/updates
controller_replicas: 2
aws: 'auto'
azure: 'auto'
gce: 'auto'
vsphere: 'auto'
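# Example: install the AWS CSI driver explicitly while keeping the other
# providers on their version defaults:
# csi_drivers:
#   aws: 'csi'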
# feature gates to enable CSI migration in csi/manual modes
# Unfortunately, they are not uniform across providers.
# Here we can specify (possibly different) feature gates for kubelet and other k8s components.
# Fortunately, all of these gates reached alpha state (i.e. became available) long before our oldest supported versions,
# so we only need to check for the GA versions where they become NOT available.
# To reduce the noise, we set the gates to True only if they are not True by default
csi_feature_gates:
aws:
kubelet:
CSIMigrationAWS: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
kube_controller_manager:
CSIMigrationAWS: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
azure:
kubelet:
CSIMigrationAzureDisk: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
kube_controller_manager:
CSIMigrationAzureDisk: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
gce:
kubelet:
CSIMigrationGCE: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
kube_controller_manager:
CSIMigrationGCE: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.23-0" .kublr.version.k8s) }}true{{end}}'
vsphere:
kubelet:
CSIMigrationvSphere: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}true{{end}}'
kube_controller_manager:
CSIMigrationvSphere: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}true{{end}}'
# feature gates to disable CSI migration and force intree driver
intree_feature_gates:
aws:
kubelet:
CSIMigrationAWS: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}false{{end}}'
kube_controller_manager:
CSIMigrationAWS: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}false{{end}}'
azure:
kubelet:
CSIMigrationAzureDisk: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.24-0" .kublr.version.k8s) }}false{{end}}'
kube_controller_manager:
CSIMigrationAzureDisk: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.24-0" .kublr.version.k8s) }}false{{end}}'
gce:
kubelet:
CSIMigrationGCE: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}false{{end}}'
kube_controller_manager:
CSIMigrationGCE: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.25-0" .kublr.version.k8s) }}false{{end}}'
vsphere:
kubelet:
CSIMigrationvSphere: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.26-0" .kublr.version.k8s) }}false{{end}}'
kube_controller_manager:
CSIMigrationvSphere: '{{- if or (not (isSemver .kublr.version.k8s)) (semverCompare "< 1.26-0" .kublr.version.k8s) }}false{{end}}'
# nvidia devices (gpu, cuda) support
nvidia_devices:
# Whether to enable NVidia devices support
#
# Possible values are 'auto' (default), 'true', 'false'
#
# 'true' means that nvidia drivers and docker runtime are installed during setup and enabled in runtime
# 'false' means that nvidia drivers and docker runtime are NOT installed during setup and NOT enabled in runtime
# 'auto' means that Kublr will do its best to decide whether to install/enable nvidia drivers and docker runtime
# Currently the decision to install drivers is made based on availability of nvidia devices in 'lspci' output,
# and the decision to enable them is based on availability of nvidia devices in 'lspci' output and on installed
# drivers and docker runtime files.
enable: 'auto'
# OS package name(s) to install as a driver. "cuda-drivers" is an alias for what NVIDIA considers the latest version.
# To install a specific driver version, use nvidia-driver-{version}.
# Be careful: at least on Ubuntu you can upgrade the driver (install a newer version over an older one),
# but you need to manually remove the newer driver to downgrade (install an older version over the newer one).
# Multiple values mean 'install all these packages', NOT a fallback order.
# There are recommended hooks to install arbitrary packages; please do not abuse this.
# Set to [] to NOT install the driver (e.g. if you know the node already has a driver)
driver: [ "cuda-drivers" ]
setup:
# Possible values: "auto" (default), "continue", "fail"
#
# "continue" means that setup procedure would continue even if separate steps fail
# "fail" means that the whole setup procedure fails on any step failure
# "auto" is equivalent to "fail" if --prepare-image flag is used, and "continue" otherwise
on_failure: "auto"
cmd:
# if this command is defined, it will be run before the setup procedure
before: []
# if this command is defined, it will be run after the setup procedure
after: []
services:
# Agent AND seeder disable the following services during the setup:
# firewalld
# SuSEfirewall2
# kublr
# kublr-kubelet
# docker
# To opt out of disabling some of them, you must set this regexp both in KublrAgentConfig and KublrSeederConfig
disable_exclude_regexp: []
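# Example: opt out of disabling firewalld (pattern is illustrative):
# disable_exclude_regexp: [ '^firewalld$' ]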
packages:
# Possible values: "auto" (default), "skip", "upgrade"
#
# "skip" - skip upgrading packages
# "upgrade" - upgrade packages
# "auto" is equivalent to "upgrade" if --prepare-image flag is used, and "skip" otherwise
upgrade: "auto"
# Possible values: false (default), true
#
# true - skip installing packages (the same as the command flag --skip-package-install)
# false - install all needed packages
skip_install: false
# packages to remove before installing
remove: []
# packages to exclude from the list of packages removed by kublr
remove_exclude_regexp: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# if this command is defined, it will be run instead of the standard package setup procedure
cmd: []
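# Example (hypothetical package names): keep a package kublr would otherwise
# remove, and install an extra diagnostic tool:
# packages:
#   remove_exclude_regexp: [ '^podman.*' ]
#   install: [ 'tcpdump' ]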
# in 1.21, only single value is supported, i.e. "docker" or "containerd", not "containerd, docker"
runtime_fallback_order: '{{- if and (isSemver .kublr.version.k8s) (semverCompare "< 1.24-0" .kublr.version.k8s) }}docker{{else}}containerd{{- end}}'
# docker setup parameters
docker:
# `edition_fallback_order` defines fallback order for docker installation.
#
# The allowed value is a comma-separated list of "existing", "os", "ce", "ee" (not implemented), and "custom"
#
# Default value: "existing,os,ce"
#
# "existing" - use pre-installed docker if available
# Kublr will try to identify init system and check if docker
# service is already installed, and will use it if it is.
# "os" - use docker edition and version standard for the given OS
# Kublr will only try this if the given OS and OS version are supported
# "ce" - use Docker CE installed according to docker documentation
# "ee" - (not implemented) use Docker EE installed according to docker documentation
# "custom" - use custom user command provided in the configuration file to setup docker
edition_fallback_order: "existing,os,ce,custom"
# parameters for setup procedure of a default Docker package for this OS
os:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure of Docker CE
ce:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure of Docker EE
# Not implemented at the moment
ee:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# TODO ???
package_url: ""
# parameters for setup procedure of a custom docker version
custom:
cmd: []
containerd:
# `edition_fallback_order` defines fallback order for containerd installation.
#
# The allowed value is a comma-separated list of "existing", "os", "ce", "ee" (not implemented), and "custom"
#
# Default value: "existing,os,ce"
#
# "existing" - use pre-installed containerd if available
# Kublr will try to identify init system and check if containerd
# service is already installed, and will use it if it is.
# "os" - use containerd edition and version standard for the given OS
# Kublr will only try this if the given OS and OS version are supported
# "ce" - use containerd installed according to Docker CE documentation
# "ee" - (not implemented) use containerd installed according to Docker EE documentation
# "custom" - use custom user command provided in the configuration file to setup containerd
edition_fallback_order: "existing,os,ce,custom"
# parameters for setup procedure of a default containerd package for this OS
os:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure of Docker CE
ce:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure of Docker EE
# Not implemented at the moment
ee:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# TODO ???
package_url: ""
# parameters for setup procedure of a custom containerd version
custom:
cmd: []
crio:
edition_fallback_order: "existing,os,kubic,custom"
# parameters for setup procedure of a default CRI-O package for this OS
os:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure from the openSUSE Kubic repository
kubic:
# packages to remove before installing
remove: []
# packages to exclude from the list of packages installed by kublr (applied after remove)
exclude_regexp: []
# packages to install in addition to the packages kublr installs (applied after exclusion)
install: []
# parameters for setup procedure of a custom crio version
custom:
cmd: []
# docker images to pull during setup
# also applicable to other cri
docker_images:
# Possible values:
# - "pull"
# - "skip"
# - "auto"
#
# "auto" = "pull" for --prepare-image, "skip" otherwise
#
pull: "auto"
# standard images to exclude
exclude_regexp: []
# additional images to pull
additional: []
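# Example: pre-pull an extra image during image preparation (image reference
# is illustrative):
# additional: [ 'docker.io/library/busybox:1.36' ]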
# Cluster role to bind the default PodSecurityPolicy to. Should be 'psp:privileged' or 'psp:restricted'
psp:
default_clusterrole: 'psp:privileged'
#EDS-7318 Kubernetes hardening recommendation
kubelet_config:
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
resources:
# must depend on k8s version
kubelet: {}
# currently only memory limits via systemd unit are supported
kublr_agent:
limits:
memory: 500M
kublr_seeder:
limits:
memory: 500M
log:
verbose: false
http_server:
enable: true
read_only_listen_address: ''
read_only_port: 11250
read_only_listen_address_seeder: ''
read_only_port_seeder: 11252
listen_address: localhost
port: 11260
listen_address_seeder: localhost
port_seeder: 11262
profiling:
enabled: false
# cloud provider for Kublr agent to use
kublr_cloud_provider:
# Following types are supported
# - 'bare' - BareMetal
# - 'aws' - AWS
# - 'azure' - Microsoft Azure
# - 'gce' - Google Compute Engine
# - 'vcd' - VMWare vCloud Director (vCD)
# - 'vsphere' - VMWare VSphere
type: bare
aws:
# Kublr agent will assign this EIP to this EC2 instance.
# In most cases though this parameter is provided via a tag on the EC2 instance
eip: ''
azure:
# cloud_config and cloud_config_overrides properties provide ability to define "raw" azure cloud provider configuration
cloud_config:
tenantID: '{{.kublr_cloud_provider.azure.tenant_id}}'
subscriptionId: '{{.kublr_cloud_provider.azure.subscription_id}}'
aadClientID: '{{.kublr_cloud_provider.azure.client_id}}'
aadClientSecret: '{{.kublr_cloud_provider.azure.client_secret}}'
resourceGroup: '{{.kublr_cloud_provider.azure.resource_group}}'
location: '{{.kublr_cloud_provider.azure.region}}'
vmType: '{{.kublr_cloud_provider.azure.vm_type}}'
primaryScaleSetName: '{{.kublr_cloud_provider.azure.primary_scale_set_name}}'
subnetName: '{{.kublr_cloud_provider.azure.subnet_name}}'
securityGroupName: '{{.kublr_cloud_provider.azure.security_group_name}}'
vnetName: '{{.kublr_cloud_provider.azure.vnet_name}}'
routeTableName: '{{.kublr_cloud_provider.azure.route_table_name}}'
# useInstanceMetadata seems to solve the API throttling problem on newer k8s
# and is a good idea from the security standpoint
useInstanceMetadata: true
# AKS uses similar settings
cloudProviderBackoffMode: "v2"
cloudProviderBackoff: true
cloudProviderBackoffRetries: 6
cloudProviderBackoffDuration: 5
cloudProviderRateLimit: true
cloudProviderRateLimitQPS: 10
cloudProviderRateLimitBucket: 100
cloudProviderRateLimitQPSWrite: 10
cloudProviderRateLimitBucketWrite: 100
# The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
client_id: ''
# The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
client_secret: ''
# The AAD Tenant ID for the Subscription that the cluster is deployed in
tenant_id: ''
# The Azure Subscription ID that the cluster is deployed in
subscription_id: ''
# The name of the resource group that the cluster is deployed in
resource_group: ''
# The type of azure Virtual Machine.
# Possible values are 'vmss' and 'standard'. If left empty, Kublr agent will test and
# set based on the actual VM type (to 'vmss' for a VMSS VM, and to 'standard' otherwise).
# Usually 'vmss' is a safe choice because 'vmss' type works equally well for both VM and VMSS.
vm_type: ''
primary_scale_set_name: ''
region: ''
route_table_name: ''
security_group_name: ''
subnet_name: ''
vnet_name: ''
gce:
# EDS-7076: the GCE CSI driver needs to access the API with a service account directly from master nodes
# https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/docs/kubernetes/user-guides/driver-install.md#install-driver
sa_secret:
type: 'service_account'
project_id: '{{.kublr_cloud_provider.gce.project_id}}'
private_key: ''
client_email: ''
multizone: true
network_name: ''
network_project_id: ''
node_tags: ''
project_id: ''
subnetwork_name: ''
vcd:
# vCloud Director API endpoint
api_url: ''
# Username for vCloud Director API access
username: ''
# Password for vCloud Director API access
password: ''
# vCloud Director organization name
org: ''
# vCloud Director virtual datacenter name
vdc: ''
# vApp name
vapp: ''
vsphere:
# vSphere API url
api_url: ''
# Username for vSphere API access
username: ''
# Password for vSphere API access
password: ''
# Skip verification of server certificate
insecure: false
# name of vSphere Data Center
datacenter: ''
cluster_disk_folder: ''
network: ''
resource_pools: []
zone_support_enabled: false
region_tag_name: 'k8s-region'
zone_tag_name: 'k8s-zone'
# `secondary_ip` property is used when we want Kublr agent to assign this IP
# as the second IP on the network interface.
# This is one of the methods to ensure stable instance IP address even in
# environments where only dynamic IP assignment is possible.
# This method should be considered the last resort and, generally, dynamic
# master address discovery or load balancer use should be preferred.
secondary_ip: ''
# secret provider for Kublr agent to use
secret_provider:
# Following types are supported:
# - 'dir' - local storage
# - 's3' - AWS S3 storage
# - 'sa' - Microsoft Azure storage
# - 'gcs' - Google Cloud Storage
# - 'vcd' - VMWare vCloud Director (vCD) Catalog
# - 'vsphere' - VMWare VSphere storage
# - 'kublragent' - distributed Kublr storage provided by Kublr agents, normally hosted on cluster masters
type: dir
dir:
path: /etc/kublr/secret
s3:
bucket_name: ''
sa:
storage_account: ''
target: ''
client_id: ''
client_secret: ''
tenant_id: ''
subscription_id: ''
resource_group: ''
gcs:
bucket_name: ''
vcd:
# vCloud Director API endpoint
api_url: ''
# Username for vCloud Director API access
username: ''
# Password for vCloud Director API access
password: ''
# vCloud Director organization name
org: ''
# Skip verification of server certificate
insecure: false
# vCloud Director virtual datacenter name
vdc: ''
# Catalog name
catalog_name: ''
# Catalog sub-path
# optional, if not specified - secrets will be stored in the root of catalog
catalog_path: ''
vsphere:
# vSphere API url
api_url: ''
# Username for vSphere API access
username: ''
# Password for vSphere API access
password: ''
# Skip verification of server certificate
insecure: false
# name of vSphere Data Center
datacenter: ''
# the name of datastore
datastore: ''
# the datastore folder name
datastore_path: ''
kublragent:
## Common (server and client) properties
# `endpoints` - endpoints of the agents providing the distributed data storage.
#
# Secret storage must be able to synchronize before any other Kublr or Kubernetes facility is functional, so these
# endpoints must be discoverable from the very beginning.
#
# Currently only static address specification is supported, but in the future various additional methods of
# discovery may be supported, such as, for example, based on querying cloud (AWS, Azure, GCP etc) API.
#
# On the storage peers:
# - Endpoints must be specified for all the storage peers other than the current one
# - Endpoints with an invalid ordinal (empty, less than zero, or greater than or equal to the number of masters) are ignored
# - Endpoints with the same ordinal are considered to belong to the same peer, and are tried in the lexicographical order of the peer key
#
# On the clients:
# - Any non-zero number of endpoints may be specified
# - Ordinal is not required, although if specified, may be used for priority definition
#
# Endpoints configuration structure:
#
# endpoints:
# <peer-key>: # peer key may be any string allowed as a Viper yaml config property name;
# ordinal: 1 # ordinal property specifies the ordinal of the peer;
# # pure clients MAY ignore this property or MAY use it to prioritize files owned by certain peers;
# # peer servers MUST ignore endpoints with invalid ordinals (<0 or >= master_number)
# # if "ordinal" field is omitted, -1 value will be used
# static_address: '' # static IP or DNS name for this peer endpoint
# port: 0 # port for this peer endpoint; if defined, overrides port specified globally
# priority: 'default' # Priority group for the address
# # if "priority" field is omitted, "default" value will be used;
# # clients will test groups of endpoints in the lexicographical order of priority values:
# # endpoints with priority starting with "a" will be tested before endpoints with priority starting with "z";
# # usage order for endpoints with the same priority will be randomized for every call;
# # peers will use the same approach with groups of endpoints with the same ordinal.
#
# Example address configuration for peers in a 3-master cluster (key names might be different):
#
# endpoints:
# peer_0_private_ip:
# ordinal: 0
# static_address: '10.55.13.10'
# peer_0_eip:
# ordinal: 0
# static_address: '57.93.221.3'
# priority: 'secondary'
# peer_1_private_ip:
# ordinal: 1
# static_address: '10.55.13.11'
# peer_2_private_ip:
# ordinal: 2
# static_address: '10.55.13.12'
#
# Example address configuration for a client (e.g. Kublr agent or KCP cluster controller) for a 3-master cluster:
#
# endpoints:
# p3_public_elb:
# static_address: 'hdgjhg6372.publicelb.aws.amazon.com'
# priority: 's2'
# p1_private_elb:
# static_address: 'sdnfy63276.privateelb.aws.amazon.com'
# priority: 's1'
# p0_peer_0_private_ip:
# static_address: '10.55.13.10'
# p2_peer_0_eip:
# static_address: '57.93.221.3'
# priority: 's0'
# p0_peer_1_private_ip:
# static_address: '10.55.13.11'
# p0_peer_2_private_ip:
# static_address: '10.55.13.12'
#
endpoints: {}
# port to bind to
# 0 will be replaced with default value 11251
# a negative value will be replaced with 0, so that the OS will assign the port automatically
port: 11251
# TLS/HTTPS certificates for the storage HTTPS endpoints
tls_certs: ''
# Access and secret keys to work with store peers
access_key_id: ''
secret_access_key: ''
## Server only properties
# IP address to listen on.
# Defaults to empty string, which means all IPv4 and IPv6 addresses on all interfaces
bind_address: ''
# local directory where the data will be stored
local_storage_path: '{{.etcd_storage.path}}/secret-store'
# TLS/HTTPS key for the storage HTTPS endpoint
# If tls_key is not specified, the server will not be started, and the agent will
# work with the secret store as a client
tls_key: ''
# access and secret keys to configure on the peer
#
# Access keys configuration structure:
#
# access_keys:
# <key-name>:
# access_key_id: ''
# secret_access_key: ''
# role: 'master' # allowed values are 'master', 'node', and 'client'
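# Example (hypothetical values):
# access_keys:
#   master_key_1:
#     access_key_id: 'AKXXXXXXXXXXXXXXXXXX'
#     secret_access_key: '********'
#     role: 'master'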
access_keys: {}
etcd_storage:
path: /mnt/master-pd
# device name to use as etcd storage
# If it starts with "/", then Kublr will wait for the specified device to attach
# "auto" means that Kublr will use provider-specific heuristics to find device name
# "" (empty) means that Kublr will not try to mount device, and will put etcd data into the specified directory
device: auto
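# Example: wait for a specific pre-attached device instead of auto-discovery
# (device name is illustrative):
# device: /dev/sdb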
# no defaults for 'bare' provider (may be used in the future)
bare: {}
# defaults for 'aws' provider
aws:
# device to use when attaching the volume
device: /dev/xvdf
iops: 0
encrypted: false
type: gp2
size_gb: 15
# defaults for 'azure' provider
azure:
# optional, if not specified - '<vm-name>-dataDisk' will be used
disk_name: ''
lun: 1
# defaults for 'gce' provider
gce:
# optional, if not specified - '<instance-name>-data-disk' will be used
disk_name: ''
vsphere:
disk_path: ''
datastore: ''
# Note that 'true' here MUST BE a string rather than a boolean
register_kubelet: '{{if .is_master}}cordoned{{else}}true{{end}}'
# Cluster-wide configuration defaults
cluster:
network:
provider: cni-canal
clustercidr: '100.64.0.0/10'
servicecidr: '100.64.0.0/13'
podcidr: '100.96.0.0/11'
masterip: '100.64.0.1'
dnsip: '100.64.0.10'
dnsdomain: cluster.local
enablelocaldns: true
localdnsip: '169.254.20.10'
# dnsprovider must be coredns (others may be added in the future)
dnsprovider: coredns
apiserversecureport: 6443
# stubdomains helps to define dns servers for a local area network. For example:
# stubdomains:
# - dns: "zone1.local"
# servers:
# - 192.168.9.1
# - 192.168.9.2
# - dns: "zone2.local"
# servers:
# - 192.168.10.1
# - 192.168.10.2
stubdomains: []
# by default all addons are included except for npd and registry
addon_filter: '^cluster-autoscaler|^dashboard|^dns-horizontal-autoscaler|^metrics-server|^cloud-controller-manager|^csi|^cpi'
is_master: '{{/*bool*/}}{{eq .node_group "master"}}'
# location is the name of Kublr cluster segment in which this node is located
location: ''
# node_group and node_ordinal default values are intentionally incorrect, user
# configuration must override them
node_group: ''
node_ordinal: -1
node_identifier: ''
runtime:
goos: '{{runtime "GOOS"}}'
goarch: '{{runtime "GOARCH"}}'
# rootCmdFunc infinite loop: timeout until the next healthcheck;
# every node_monitor_period kublr tries to check kubelet and docker
node_monitor_period: 30s
# rootCmdFunc infinite loop: if an error occurs, time to wait until the next check runs.
# NB! reducing this period below `node_monitor_period` may cause a "death cycle"
# on Azure; described in more detail in
# https://jira.eastbanctech.com/browse/EDS-7454.
node_monitor_min_period: '{{.node_monitor_period}}'
ntp_servers: []
labels:
# test_label: test-label=test
node_kubernetes_io_exclude_from_external_load_balancers: '{{if .is_master}}node.kubernetes.io/exclude-from-external-load-balancers=true{{end}}'
node_kubernetes_io_exclude_disruption: '{{if .is_master}}node.kubernetes.io/exclude-disruption=true{{end}}'
taints:
# back compatible for only master node clusters
node_role_kubernetes_io_master: '{{if .is_master}}{{if and (isSemver .kublr.version.k8s) (semverCompare "< 1.23-0" .kublr.version.k8s)}}node-role.kubernetes.io/master=:NoSchedule{{else}}node-role.kubernetes.io/control-plane=:NoSchedule{{end}}{{end}}'
# test_taint: test-taint=test:NoSchedule
master_addresses:
# kublr_dynamic_master_discovery section is always overridden by the master
# autodiscovery mechanism;
# users MUST NOT specify it in the config file
kublr_dynamic_master_discovery: []
priority_group_100_Self: '{{/*yaml*/}}[{{if .is_master}}"127.0.0.1"{{end}}]'
# local_master_endpoint_url is set to an https URL for local connection to K8S API server.
# On worker nodes it is an HAProxy endpoint that forwards connections to the master nodes.
# On master nodes it is the localhost address of the API server.
local_master_endpoint_url: '{{if .is_master}}https://127.0.0.1:{{.cluster.network.apiserversecureport}}{{else}}https://127.0.0.1:{{.local_proxy.api_server_port}}{{end}}'
local_proxy:
api_server_bind_address: 127.0.0.1
api_server_port: 1443
api_server_health_bind_address: 0.0.0.0
api_server_health_port: 8888
# current_master_addresses is calculated and set by Kublr for use in the
# local proxy configuration.
#
# current_master_addresses is
# - array of arrays of string addresses
# - no empty arrays
# - no empty values
# - addresses are sorted lexicographically
# - addresses are unique
# - no groups with exactly the same list of addresses
#
# Example:
# current_master_addresses:
# - - 1.1.1.1
# - 2.2.2.2
# - - 3.3.3.3
current_master_addresses: []
etcd_addresses:
# this section is always overridden by the master autodiscovery mechanism;
# users MUST NOT specify it in the config file
kublr_dynamic_master_discovery: []
current_etcd_address_group: ''
# `node_address` is the address of the node (IP or DNS name) used for
# inter-node communications. In most cases it is a private IP of the node.
#
# Constraints and requirements:
#
# - Kublr agent on master instances MUST have this value assigned eventually.
# The word "eventually" here means that this value may be omitted in the config
# file, but only if it is discovered by other means, e.g. some cloud providers
# may get its value from instance tags or instance metadata, and it may also
# be set from the `secondary_ip` property.
#
# - `node_address` SHOULD either be an IP or a DNS name resolvable to an IP,
# which is directly assigned to one of the instance's network interfaces.
#
# - It is allowed to provide a `node_address` that is not a source address on one
# of the instance's interfaces, but in this case make sure that `node_address_ip`
# resolves to a single source IP address (usually by ensuring that there
# is only one valid IP address, or by providing correct constraints in the
# `node_address_ip_discovery` property)
#
# Note that `node_address` does not necessarily map directly onto `node_address_ip`.
# A source IP address is preferred as `node_address_ip` only if `node_address` is
# an IPv4 address that is a source IP address of one of the instance's network
# interfaces (or `node_address` is resolvable to such an IP address), and that
# source IP address passes the `node_address_ip_discovery` filters.
# See `node_address_ip_discovery` documentation for more details on how
# `node_address_ip` is set.
node_address: ''
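# Example (hypothetical address): pin the node address to a specific private IP:
#
# node_address: '10.0.1.10'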
# This property is automatically set during kubelet startup stage according to
# `node_address_ip_discovery` rules
node_address_ip: ''
# Node address IP resolution algorithm is used if `node_address` is empty or is
# not an IP address after configuration loading.
#
# 1. The algorithm looks for the node IP among IP addresses assigned to the node
# network interfaces that can be retrieved using command `ip -4 -o address`
# 2. The discovered (IP, network interface) pairs are run through the filters specified
# in `node_address_ip_discovery.filters.*`
# 3. Selecting a specific IP address after filtering is done as follows:
# 3.1. If no source IP addresses are left after filtering, `node_address_ip` will
# stay undefined.
# This may lead to undefined behavior for different cluster components in
# situations when the instance has multiple network interfaces and/or addresses
# 3.2. If only one address is left after filtering, it will be selected as the
# `node_address_ip` value
# 3.3. If `node_address` is an IPv4 address, and it is present among the filtered
# source IP addresses, it will be used
# 3.4. If `node_address` is not an IPv4 address, but it is resolvable to IPv4
# address(es), and one of these addresses is present among the filtered
# source IP addresses, it will be used
# 3.5. If previous criteria fail, Kublr agent will prioritize those filtered source
# addresses, through which packets are routed to masters (to any master on a
# node, or to other masters on a master) and will use the first such address.
# 3.6. Otherwise, the Kublr agent will select the first non-loopback source IP
# address with the narrowest network mask
# 3.7. If there are no non-loopback source IP addresses, the Kublr agent will
# select the first loopback source IP address with the narrowest network mask
node_address_ip_discovery:
filters:
# 1. only include network interfaces that match one of the following regexps
network_interface_regexp_include: []
# 2. exclude network interfaces that match any of the following regexps
network_interface_regexp_exclude:
- '^docker.*$'
- '^flannel.*$'
- '^cali.*$'
- '^virbr.*$'
- '^lxcbr.*$'
- '^nodelocaldns.*$'
# 3. only include ip addresses that match one of the following regexps or subnets
ip_regexp_include: []
ip_subnet_include: []
# 4. exclude ip addresses that match any of the following regexps or subnets
ip_regexp_exclude: []
ip_subnet_exclude: []
# 5. only include IP addresses on interfaces through which routing is possible
# to one of the specified addresses.
# Either IP or DNS names may be specified as `route_to_include` elements;
# if DNS name is specified, it will be resolved first.
# The resolution will be performed every time kubelet/Kubernetes is re-configured.
# If `route_to_include` is specified, the Kublr agent should be able to resolve
# and find a route to at least one of the included addresses; if none of the
# included addresses can be resolved to an IP and/or routed to, kubelet
# initialization will fail.
route_to_include: []
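# Example (hypothetical values): restrict discovery to a private subnet and
# require a route to an internal master address:
#
# node_address_ip_discovery:
#   filters:
#     ip_subnet_include:
#       - '10.0.0.0/16'
#     route_to_include:
#       - 'master.internal.example.com'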
# Other local node addresses that will be included in the local master
# certificates.
# These addresses may also be autodiscovered, e.g. by a cloud provider (on
# AWS, a public instance address may be included in this list).
# The property is a map of string arrays so that different components and
# different methods of discovery may add/update their discovered addresses
# without conflicts.
# Values in node_addresses may contain duplicates, including statically provided
# addresses.
node_addresses:
kublr_aws_cloud_provider_private_ip: []
kublr_aws_cloud_provider_public_ip: []
kublr_aws_cloud_provider_public_eip: []
# Total number of masters in the cluster.
# This value MUST be specified and greater than zero on master instances.
master_number: 0
# The list of master addresses to include in master server certificate in
# addition to dynamically discovered master address.
# These addresses are also all the addresses included in the default master
# certificate included in the secrets package.
# This list MUST be the same on all master instances.
master_addresses_all: []
# The list of master addresses to include in etcd cluster certificates in
# addition to dynamically discovered master addresses.
# These addresses are also all the addresses included in the default etcd
# certificates included in the secrets package.
# This list MUST be the same on all master instances.
etcd_addresses_all: []
# The list of initial peer addresses to use for etcd cluster initial discovery.
# 1. The number of addresses in this list MUST be equal to the cluster size
# 2. The order of servers in this list MUST correspond exactly to the ordinals
# assigned to master nodes
# 3. This list MUST be the same on all masters
# 4. Non-empty values in this list will override dynamically discovered master
# addresses if dynamic master address discovery is used
# 5. This list is only used on master instances.
etcd_addresses_initial: []
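# Example (hypothetical addresses) for a cluster with three masters; the order
# of etcd_addresses_initial matches the master ordinals 0, 1, 2:
#
# master_number: 3
# master_addresses_all:
#   - '10.0.1.10'
#   - '10.0.1.11'
#   - '10.0.1.12'
# etcd_addresses_initial:
#   - '10.0.1.10'
#   - '10.0.1.11'
#   - '10.0.1.12'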
# This is the currently used list of etcd peer addresses for the etcd cluster.
# This property MUST NOT be specified in the config file as it is always
# calculated and overridden from autodiscovery addresses, static
# `etcd_addresses_initial`, and `node_address` for the current node/peer
etcd_peer_addresses: []
cloud_controller_manager:
enabled: false
cloud_provider: ''
cluster_autoscaler:
enabled: false
cloud_provider: ''
aws_region: ''
node_group_auto_discovery: []
nodes: []
extra_args: {}
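# Example (hypothetical values): enable the autoscaler on AWS with ASG
# auto-discovery by tag:
#
# cluster_autoscaler:
#   enabled: true
#   cloud_provider: aws
#   aws_region: us-east-1
#   node_group_auto_discovery:
#     - 'asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/my-cluster'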
ca_bundle_path: ''
extras:
kubernetes:
api_server:
# additional cert and key to be included in ca.crt
cert: ''
key: ''
# additional SNI certs that will be added to K8S API server arguments as
# --tls-sni-cert-key=<cert-file>,<key-file>[:domain1,domain1,...]
sni_certs: {}
# <key>:
# cert: ''
# key: ''
# domain_patterns: []
# - ''
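# Example (hypothetical cert material and domain):
#
# sni_certs:
#   wildcard_example:
#     cert: '-----BEGIN CERTIFICATE-----...'
#     key: '-----BEGIN RSA PRIVATE KEY-----...'
#     domain_patterns:
#       - '*.example.com'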
# Inline extensions allow extending and overriding templates built into
# the Kublr agent directly via the config file.
#
# Inline extensions are applied last, after all extensions are loaded
# from <kublr.fixed.kublr_config_dir>/extensions directory.
#
# The <key> for each record SHOULD be a normalized (lowercased, with all
# non-alphanumeric characters replaced with '_') path value.
#
# The 'extensions' property is excluded from Go template interpretation during
# configuration loading.
#
# See documentation on agent extensions for more information on the
# extensions structure.
#
# Example:
#
# extensions:
# templates_network_custom_overlay_addons_my_overlay_yaml:
# path: templates/network/custom-overlay/addons/my-overlay.yaml
# content: '...<content of a custom overlay template>...'
# templates_network_custom_overlay_default_yaml:
# path: templates/network/custom-overlay/default.yaml
# content: '...'
#
extensions: {}
# <key>:
# path: ''
# content: ''