{
 "K8sVersionServiceOptions": {
  "v1.10": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.11": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.12": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.13": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.14": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.14.10-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.14.9-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.15": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.15.10-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.15.6-rancher1-2": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.15.7-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.15.9-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16": {
   "etcd": null,
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16.3-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16.4-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16.6-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16.6-rancher1-2": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.16.7-rancher1-1": {
   "etcd": {
    "client-cert-auth": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.17": {
   "etcd": {
    "client-cert-auth": "true",
    "enable-v2": "true",
    "peer-client-cert-auth": "true"
   },
   "kubeapi": {
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  },
  "v1.9": {
   "etcd": null,
   "kubeapi": {
    "admission-control": "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,NodeRestriction",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "bind-address": "0.0.0.0",
    "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction",
    "insecure-port": "0",
    "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
    "profiling": "false",
    "requestheader-extra-headers-prefix": "X-Remote-Extra-",
    "requestheader-group-headers": "X-Remote-Group",
    "requestheader-username-headers": "X-Remote-User",
    "runtime-config": "authorization.k8s.io/v1beta1=true",
    "secure-port": "6443",
    "service-account-lookup": "true",
    "storage-backend": "etcd3",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
   },
   "kubelet": {
    "address": "0.0.0.0",
    "allow-privileged": "true",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cgroups-per-qos": "True",
    "cni-bin-dir": "/opt/cni/bin",
    "cni-conf-dir": "/etc/cni/net.d",
    "enforce-node-allocatable": "",
    "event-qps": "0",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "/etc/resolv.conf",
    "streaming-connection-idle-timeout": "30m",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "healthz-bind-address": "127.0.0.1",
    "v": "2"
   },
   "kubeController": {
    "address": "0.0.0.0",
    "allocate-node-cidrs": "true",
    "allow-untagged-cloud": "true",
    "configure-cloud-routes": "false",
    "enable-hostpath-provisioner": "false",
    "leader-elect": "true",
    "node-monitor-grace-period": "40s",
    "pod-eviction-timeout": "5m0s",
    "profiling": "false",
    "terminated-pod-gc-threshold": "1000",
    "v": "2"
   },
   "scheduler": {
    "address": "0.0.0.0",
    "leader-elect": "true",
    "profiling": "false",
    "v": "2"
   }
  }
 },
 "K8sVersionRKESystemImages": {
  "v1.10.0-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.4",
   "nginxProxy": "rancher/rke-tools:v0.1.4",
   "certDownloader": "rancher/rke-tools:v0.1.4",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.4",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.0-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.10.1-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.8",
   "nginxProxy": "rancher/rke-tools:v0.1.8",
   "certDownloader": "rancher/rke-tools:v0.1.8",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.1-rancher2",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.10.11-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.11-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.10.12-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.12-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.10.3-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.10",
   "nginxProxy": "rancher/rke-tools:v0.1.10",
   "certDownloader": "rancher/rke-tools:v0.1.10",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.10",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.3-rancher2",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.10.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.10",
   "nginxProxy": "rancher/rke-tools:v0.1.10",
   "certDownloader": "rancher/rke-tools:v0.1.10",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.10",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.10.5-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.8",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.8",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.10.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.1-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.2-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.2-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.6-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.15",
   "nginxProxy": "rancher/rke-tools:v0.1.15",
   "certDownloader": "rancher/rke-tools:v0.1.15",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.8-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.15",
   "nginxProxy": "rancher/rke-tools:v0.1.15",
   "certDownloader": "rancher/rke-tools:v0.1.15",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.8-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.9-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.15",
   "nginxProxy": "rancher/rke-tools:v0.1.15",
   "certDownloader": "rancher/rke-tools:v0.1.15",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.15",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.9-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.28",
   "nginxProxy": "rancher/rke-tools:v0.1.28",
   "certDownloader": "rancher/rke-tools:v0.1.28",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.11.9-rancher1-3": {
   "etcd": "rancher/coreos-etcd:v3.2.18",
   "alpine": "rancher/rke-tools:v0.1.16-2",
   "nginxProxy": "rancher/rke-tools:v0.1.16-2",
   "certDownloader": "rancher/rke-tools:v0.1.16-2",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.10",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.10",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.11.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  },
  "v1.12.0-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.0-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.1-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.10-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.40",
   "nginxProxy": "rancher/rke-tools:v0.1.40",
   "certDownloader": "rancher/rke-tools:v0.1.40",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.12.10-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.42",
   "nginxProxy": "rancher/rke-tools:v0.1.42",
   "certDownloader": "rancher/rke-tools:v0.1.42",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.12.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.18",
   "nginxProxy": "rancher/rke-tools:v0.1.18",
   "certDownloader": "rancher/rke-tools:v0.1.18",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.18",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.4-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.5-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.6-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.6-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.12.7-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.7-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "coredns/coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.12.7-rancher1-3": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.28",
   "nginxProxy": "rancher/rke-tools:v0.1.28",
   "certDownloader": "rancher/rke-tools:v0.1.28",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.12.7-rancher1-4": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16-2",
   "nginxProxy": "rancher/rke-tools:v0.1.16-2",
   "certDownloader": "rancher/rke-tools:v0.1.16-2",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.12.9-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.34",
   "nginxProxy": "rancher/rke-tools:v0.1.34",
   "certDownloader": "rancher/rke-tools:v0.1.34",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34",
   "kubedns": "rancher/k8s-dns-kube-dns:1.14.13",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.14.13",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.14.13",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.12.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.1.3",
   "calicoCni": "rancher/calico-cni:v3.1.3",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.3",
   "canalCni": "rancher/calico-cni:v3.1.3",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.1-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.1-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "coredns": "coredns/coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.10-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16-2",
   "nginxProxy": "rancher/rke-tools:v0.1.16-2",
   "certDownloader": "rancher/rke-tools:v0.1.16-2",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16-2",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.10-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.42",
   "nginxProxy": "rancher/rke-tools:v0.1.42",
   "certDownloader": "rancher/rke-tools:v0.1.42",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.11-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.11-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.12-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.12-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.12-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.12-rancher2",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.4-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.4-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "coredns/coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.5-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.27",
   "nginxProxy": "rancher/rke-tools:v0.1.27",
   "certDownloader": "rancher/rke-tools:v0.1.27",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.27",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "coredns/coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.5-rancher1-3": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.28",
   "nginxProxy": "rancher/rke-tools:v0.1.28",
   "certDownloader": "rancher/rke-tools:v0.1.28",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.7-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.34",
   "nginxProxy": "rancher/rke-tools:v0.1.34",
   "certDownloader": "rancher/rke-tools:v0.1.34",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.13.9-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.2.24",
   "alpine": "rancher/rke-tools:v0.1.16",
   "nginxProxy": "rancher/rke-tools:v0.1.16",
   "certDownloader": "rancher/rke-tools:v0.1.16",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.16",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0",
   "flannelCni": "rancher/coreos-flannel-cni:v0.3.0",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause-amd64:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.3.1"
  },
  "v1.13.9-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.2.24-rancher1",
   "alpine": "rancher/rke-tools:v0.1.40",
   "nginxProxy": "rancher/rke-tools:v0.1.40",
   "certDownloader": "rancher/rke-tools:v0.1.40",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "coredns": "rancher/coredns-coredns:1.2.6",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.13.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.1-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.28",
   "nginxProxy": "rancher/rke-tools:v0.1.28",
   "certDownloader": "rancher/rke-tools:v0.1.28",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "coredns/coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.1-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.28",
   "nginxProxy": "rancher/rke-tools:v0.1.28",
   "certDownloader": "rancher/rke-tools:v0.1.28",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.28",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.10-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoControllers": "rancher/calico-kube-controllers:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.34",
   "nginxProxy": "rancher/rke-tools:v0.1.34",
   "certDownloader": "rancher/rke-tools:v0.1.34",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.40",
   "nginxProxy": "rancher/rke-tools:v0.1.40",
   "certDownloader": "rancher/rke-tools:v0.1.40",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.6-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.42",
   "nginxProxy": "rancher/rke-tools:v0.1.42",
   "certDownloader": "rancher/rke-tools:v0.1.42",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.7-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.8-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.8-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.8-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.8-rancher2",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.9-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.14.9-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.14.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.10.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.4.0",
   "calicoCni": "rancher/calico-cni:v3.4.0",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.4.0",
   "canalCni": "rancher/calico-cni:v3.4.0",
   "canalFlannel": "rancher/coreos-flannel:v0.10.0",
   "weaveNode": "weaveworks/weave-kube:2.5.0",
   "weaveCni": "weaveworks/weave-npc:2.5.0",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.1"
  },
  "v1.15.0-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.34",
   "nginxProxy": "rancher/rke-tools:v0.1.34",
   "certDownloader": "rancher/rke-tools:v0.1.34",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.34",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.0-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3"
  },
  "v1.15.10-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.10-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.2-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.40",
   "nginxProxy": "rancher/rke-tools:v0.1.40",
   "certDownloader": "rancher/rke-tools:v0.1.40",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.40",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3"
  },
  "v1.15.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.42",
   "nginxProxy": "rancher/rke-tools:v0.1.42",
   "certDownloader": "rancher/rke-tools:v0.1.42",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.42",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher1",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:0.21.0-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3"
  },
  "v1.15.4-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3"
  },
  "v1.15.4-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3"
  },
  "v1.15.5-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.5-rancher2-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.5-rancher2",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.6-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.7-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.15.9-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.10-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "coredns": "rancher/coredns-coredns:1.3.1",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.3.0",
   "kubernetes": "rancher/hyperkube:v1.15.9-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.7.4",
   "calicoCni": "rancher/calico-cni:v3.7.4",
   "calicoControllers": "rancher/calico-kube-controllers:v3.7.4",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.7.4",
   "canalCni": "rancher/calico-cni:v3.7.4",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.3",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.1-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.1-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.8.1",
   "calicoCni": "rancher/calico-cni:v3.8.1",
   "calicoControllers": "rancher/calico-kube-controllers:v3.8.1",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "canalNode": "rancher/calico-node:v3.8.1",
   "canalCni": "rancher/calico-cni:v3.8.1",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.2-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.50",
   "nginxProxy": "rancher/rke-tools:v0.1.50",
   "certDownloader": "rancher/rke-tools:v0.1.50",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.50",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.8.1",
   "calicoCni": "rancher/calico-cni:v3.8.1",
   "calicoControllers": "rancher/calico-kube-controllers:v3.8.1",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "canalNode": "rancher/calico-node:v3.8.1",
   "canalCni": "rancher/calico-cni:v3.8.1",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.2-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.2-rancher2",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.8.1",
   "calicoCni": "rancher/calico-cni:v3.8.1",
   "calicoControllers": "rancher/calico-kube-controllers:v3.8.1",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "canalNode": "rancher/calico-node:v3.8.1",
   "canalCni": "rancher/calico-cni:v3.8.1",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.8.1",
   "calicoCni": "rancher/calico-cni:v3.8.1",
   "calicoControllers": "rancher/calico-kube-controllers:v3.8.1",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "canalNode": "rancher/calico-node:v3.8.1",
   "canalCni": "rancher/calico-cni:v3.8.1",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.4-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.4-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.6-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.6-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.6-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.16.7-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.3.15-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.2",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.16.7-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.4",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.17.0-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.4.3-rancher1",
   "alpine": "rancher/rke-tools:v0.1.51",
   "nginxProxy": "rancher/rke-tools:v0.1.51",
   "certDownloader": "rancher/rke-tools:v0.1.51",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.51",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.5",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.17.0-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.8.1",
   "calicoCni": "rancher/calico-cni:v3.8.1",
   "calicoControllers": "rancher/calico-kube-controllers:v3.8.1",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "canalNode": "rancher/calico-node:v3.8.1",
   "canalCni": "rancher/calico-cni:v3.8.1",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.8.1",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.6",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.17.0-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.4.3-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.5",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.17.0-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.6",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.17.2-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.4.3-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.5",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.17.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.6",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.17.99-rancher1-2": {
   "etcd": "rancher/coreos-etcd:v3.4.3-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.5",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.17.2-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.6",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.17.3-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.4.3-rancher1",
   "alpine": "rancher/rke-tools:v0.1.52",
   "nginxProxy": "rancher/rke-tools:v0.1.52",
   "certDownloader": "rancher/rke-tools:v0.1.52",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.52",
   "kubedns": "rancher/k8s-dns-kube-dns:1.15.0",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny:1.15.0",
   "kubednsSidecar": "rancher/k8s-dns-sidecar:1.15.0",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "coredns": "rancher/coredns-coredns:1.6.5",
   "corednsAutoscaler": "rancher/cluster-proportional-autoscaler:1.7.1",
   "kubernetes": "rancher/hyperkube:v1.17.3-rancher1",
   "flannel": "rancher/coreos-flannel:v0.11.0-rancher1",
   "flannelCni": "rancher/flannel-cni:v0.3.0-rancher5",
   "calicoNode": "rancher/calico-node:v3.10.2",
   "calicoCni": "rancher/calico-cni:v3.10.2",
   "calicoControllers": "rancher/calico-kube-controllers:v3.10.2",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "calicoFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "canalNode": "rancher/calico-node:v3.10.2",
   "canalCni": "rancher/calico-cni:v3.10.2",
   "canalFlannel": "rancher/coreos-flannel:v0.11.0",
   "canalFlexVol": "rancher/calico-pod2daemon-flexvol:v3.10.2",
   "weaveNode": "weaveworks/weave-kube:2.5.2",
   "weaveCni": "weaveworks/weave-npc:2.5.2",
   "podInfraContainer": "rancher/pause:3.1",
   "ingress": "rancher/nginx-ingress-controller:nginx-0.25.1-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1",
   "metricsServer": "rancher/metrics-server:v0.3.6",
   "windowsPodInfraContainer": "rancher/kubelet-pause:v0.1.3"
  },
  "v1.8.11-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.0.17",
   "alpine": "rancher/rke-tools:v0.1.8",
   "nginxProxy": "rancher/rke-tools:v0.1.8",
   "certDownloader": "rancher/rke-tools:v0.1.8",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.5",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.5",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.5",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.8.11-rancher2",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.0",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.9.5-rancher1-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.4",
   "nginxProxy": "rancher/rke-tools:v0.1.4",
   "certDownloader": "rancher/rke-tools:v0.1.4",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.4",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.9.5-rancher1",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.0",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.9.7-rancher2-1": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.8",
   "nginxProxy": "rancher/rke-tools:v0.1.8",
   "certDownloader": "rancher/rke-tools:v0.1.8",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.8",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.9.7-rancher2",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.0",
   "ingress": "rancher/nginx-ingress-controller:0.10.2-rancher3",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4"
  },
  "v1.9.7-rancher2-2": {
   "etcd": "rancher/coreos-etcd:v3.1.12",
   "alpine": "rancher/rke-tools:v0.1.13",
   "nginxProxy": "rancher/rke-tools:v0.1.13",
   "certDownloader": "rancher/rke-tools:v0.1.13",
   "kubernetesServicesSidecar": "rancher/rke-tools:v0.1.13",
   "kubedns": "rancher/k8s-dns-kube-dns-amd64:1.14.7",
   "dnsmasq": "rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.7",
   "kubednsSidecar": "rancher/k8s-dns-sidecar-amd64:1.14.7",
   "kubednsAutoscaler": "rancher/cluster-proportional-autoscaler-amd64:1.0.0",
   "kubernetes": "rancher/hyperkube:v1.9.7-rancher2",
   "flannel": "rancher/coreos-flannel:v0.9.1",
   "flannelCni": "rancher/coreos-flannel-cni:v0.2.0",
   "calicoNode": "rancher/calico-node:v3.1.1",
   "calicoCni": "rancher/calico-cni:v3.1.1",
   "calicoCtl": "rancher/calico-ctl:v2.0.0",
   "canalNode": "rancher/calico-node:v3.1.1",
   "canalCni": "rancher/calico-cni:v3.1.1",
   "canalFlannel": "rancher/coreos-flannel:v0.9.1",
   "weaveNode": "weaveworks/weave-kube:2.1.2",
   "weaveCni": "weaveworks/weave-npc:2.1.2",
   "podInfraContainer": "rancher/pause-amd64:3.0",
   "ingress": "rancher/nginx-ingress-controller:0.16.2-rancher1",
   "ingressBackend": "rancher/nginx-ingress-controller-defaultbackend:1.4",
   "metricsServer": "rancher/metrics-server-amd64:v0.2.1"
  }
 },
 "K8sVersionedTemplates": {
  "calico": {
   "\u003e=1.13.0-rancher0 \u003c1.15.0-rancher0": "calico-v1.13",
   "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "calico-v1.15",
   "\u003e=1.16.0-alpha \u003c1.16.4-rancher1": "calico-v1.16",
   "\u003e=1.16.4-rancher1": "calico-v1.17",
   "\u003e=1.8.0-rancher0 \u003c1.13.0-rancher0": "calico-v1.8"
  },
  "canal": {
   "\u003e=1.13.0-rancher0 \u003c1.15.0-rancher0": "canal-v1.13",
   "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "canal-v1.15",
   "\u003e=1.16.0-alpha \u003c1.16.4-rancher1": "canal-v1.16",
   "\u003e=1.16.4-rancher1": "canal-v1.17",
   "\u003e=1.8.0-rancher0 \u003c1.13.0-rancher0": "canal-v1.8"
  },
  "coreDNS": {
   "\u003e=1.16.0-alpha \u003c1.17.0-alpha": "coredns-v1.16",
   "\u003e=1.17.0-alpha": "coredns-v1.17",
   "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "coredns-v1.8"
  },
  "flannel": {
   "\u003e=1.15.0-rancher0 \u003c1.16.0-alpha": "flannel-v1.15",
   "\u003e=1.16.0-alpha": "flannel-v1.16",
   "\u003e=1.8.0-rancher0 \u003c1.15.0-rancher0": "flannel-v1.8"
  },
  "kubeDNS": {
   "\u003e=1.16.0-alpha": "kubedns-v1.16",
   "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "kubedns-v1.8"
  },
  "metricsServer": {
   "\u003e=1.8.0-rancher0": "metricsserver-v1.8"
  },
  "nginxIngress": {
   "\u003e=1.13.10-rancher1-3 \u003c1.14.0-rancher0": "nginxingress-v1.15",
   "\u003e=1.14.0-rancher0 \u003c=1.14.6-rancher1-1": "nginxingress-v1.8",
   "\u003e=1.14.6-rancher2 \u003c1.15.0-rancher0": "nginxingress-v1.15",
   "\u003e=1.15.0-rancher0 \u003c=1.15.3-rancher1-1": "nginxingress-v1.8",
   "\u003e=1.15.3-rancher2": "nginxingress-v1.15",
   "\u003e=1.8.0-rancher0 \u003c1.13.10-rancher1-3": "nginxingress-v1.8"
  },
  "templateKeys": {
   "calico-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n## start rbac here\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n## end rbac here\n\n---\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: calico-config\n  namespace: kube-system\ndata:\n  # To enable Typha, set this to \"calico-typha\" *and* set a non-zero value for Typha replicas\n  # below.  We recommend using Typha if you have more than 50 nodes. 
Above 100 nodes it is\n  # essential.\n  typha_service_name: \"none\"\n  # Configure the Calico backend to use.\n  calico_backend: \"bird\"\n\n  # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n  veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n  veth_mtu: \"1440\"\n{{- end}}\n\n  # The CNI network configuration to install on each node.  The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.0\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"mtu\": __CNI_MTU__,\n          \"ipam\": {\n            \"type\": \"host-local\",\n            \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: calico-node\n  namespace: kube-system\n  labels:\n    k8s-app: calico-node\nspec:\n  selector:\n    matchLabels:\n      k8s-app: calico-node\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: calico-node\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n      serviceAccountName: calico-node\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      initContainers:\n        # This container installs the Calico CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n       
     - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # CNI MTU Config variable\n            - name: CNI_MTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n      containers:\n        # Runs calico/node container on each Kubernetes node.  This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Typha support: controlled by the ConfigMap.\n            - name: FELIX_TYPHAK8SSERVICENAME\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: typha_service_name\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Choose the backend to use.\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,bgp\"\n            # Auto-detect the BGP IP address.\n            - name: IP\n              value: \"autodetect\"\n            # Enable IPIP\n            - name: CALICO_IPV4POOL_IPIP\n              value: \"Always\"\n            # Set MTU for tunnel device used if ipip is enabled\n            - name: FELIX_IPINIPMTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. 
This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"{{.ClusterCIDR}}\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            exec:\n              command:\n              - /bin/calico-node\n              - -bird-ready\n              - -felix-ready\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n      volumes:\n        # Used by calico/node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgppeers.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPPeer\n    plural: bgppeers\n    singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    
plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-node\n  namespace: kube-system\n\n\n{{if ne .CloudProvider \"none\"}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: {{.CloudProvider}}-ippool\n  namespace: kube-system\ndata:\n  {{.CloudProvider}}-ippool: |-\n    apiVersion: projectcalico.org/v3\n    kind: IPPool\n    metadata:\n      name: ippool-ipip-1\n    spec:\n      cidr: {{.ClusterCIDR}}\n      ipipMode: Always\n      natOutgoing: true\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: calicoctl\n  namespace: kube-system\nspec:\n  hostNetwork: true\n  restartPolicy: OnFailure\n  tolerations:\n  - effect: NoExecute\n    operator: Exists\n  - effect: NoSchedule\n    operator: Exists\n  containers:\n  - name: calicoctl\n    image: {{.Calicoctl}}\n    command: [\"/bin/sh\", \"-c\", \"calicoctl apply -f {{.CloudProvider}}-ippool.yaml\"]\n    env:\n    - name: DATASTORE_TYPE\n      value: kubernetes\n    volumeMounts:\n    - name: ippool-config\n      mountPath: /root/\n  volumes:\n  - name: ippool-config\n    configMap:\n      name: {{.CloudProvider}}-ippool\n      items:\n        - key: {{.CloudProvider}}-ippool\n          path: {{.CloudProvider}}-ippool.yaml\n  # Mount in the etcd TLS secrets.\n{{end}}\n",
   "calico-v1.15": "\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico-kube-controllers\nrules:\n  # Nodes are watched to monitor for deletions.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - watch\n      - list\n      - get\n  # Pods are queried to check for existence.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n  # IPAM resources are manipulated when nodes are deleted.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n    verbs:\n      - list\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  # Needs access to update clusterinformations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - clusterinformations\n    verbs:\n      - get\n      - create\n      - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico-kube-controllers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n  name: calico-kube-controllers\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - 
felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n  # These permissions are required for Calico CNI to perform IPAM allocations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ipamconfigs\n    verbs:\n      - get\n  # Block affinities must also be watchable by confd for route aggregation.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n    verbs:\n      - watch\n  # The Calico IPAM migration needs to get daemonsets. These permissions can be\n  # removed if not upgrading from an installation using host-local IPAM.\n  - apiGroups: [\"apps\"]\n    resources:\n      - daemonsets\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: calico-config\n  namespace: kube-system\ndata:\n  # Typha is disabled.\n  typha_service_name: \"none\"\n  # Configure the backend to use.\n  calico_backend: \"bird\"\n\n  # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n  veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n  veth_mtu: \"1440\"\n{{- end}}\n\n  # The CNI network configuration to install on each node.  
The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.0\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n          \"log_level\": \"info\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"mtu\": __CNI_MTU__,\n          \"ipam\": {\n              \"type\": \"calico-ipam\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamblocks.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMBlock\n    plural: ipamblocks\n    singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: blockaffinities.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BlockAffinity\n    plural: blockaffinities\n    singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamhandles.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMHandle\n    plural: ipamhandles\n    singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamconfigs.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMConfig\n    plural: ipamconfigs\n    singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgppeers.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPPeer\n    plural: bgppeers\n    singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: calico-node\n  namespace: kube-system\n  labels:\n    k8s-app: calico-node\nspec:\n  selector:\n    matchLabels:\n      k8s-app: calico-node\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: calico-node\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      hostNetwork: true\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-node\n{{end}}\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      initContainers:\n        # This container performs upgrade from host-local IPAM to calico-ipam.\n        # It can be deleted if this is a fresh installation, or if you have already\n        # upgraded to use calico-ipam.\n        - name: 
upgrade-ipam\n          image: {{.CNIImage}}\n          command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n          env:\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n          volumeMounts:\n            - mountPath: /var/lib/cni/networks\n              name: host-local-net-dir\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n        # This container installs the CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # CNI MTU Config variable\n            - name: CNI_MTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n      containers:\n        # Runs calico-node container on each Kubernetes node.  This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Choose the backend to use.\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,bgp\"\n            # Auto-detect the BGP IP address.\n            - name: IP\n              value: \"autodetect\"\n            # Enable IPIP\n            - name: CALICO_IPV4POOL_IPIP\n              value: \"Always\"\n            # Set MTU for tunnel device used if ipip is enabled\n            - name: FELIX_IPINIPMTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. 
Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"{{.ClusterCIDR}}\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Set Felix logging to \"info\"\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"info\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            exec:\n              command:\n              - /bin/calico-node\n              - -bird-ready\n              - -felix-ready\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n      volumes:\n        # Used by calico-node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Mount in the directory for host-local IPAM allocations. 
This is\n        # used when upgrading from host-local to calico-ipam, and can be removed\n        # if not using the upgrade-ipam init container.\n        - name: host-local-net-dir\n          hostPath:\n            path: /var/lib/cni/networks\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-node\n  namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n# See https://github.com/projectcalico/kube-controllers\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n  labels:\n    k8s-app: calico-kube-controllers\n  annotations:\n    scheduler.alpha.kubernetes.io/critical-pod: ''\nspec:\n  # The controller can only have a single active instance.\n  replicas: 1\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      name: calico-kube-controllers\n      namespace: kube-system\n      labels:\n        k8s-app: calico-kube-controllers\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-kube-controllers\n{{end}}\n      containers:\n        - name: calico-kube-controllers\n          image: {{.ControllersImage}}\n          env:\n            # Choose which controllers to run.\n            - name: ENABLED_CONTROLLERS\n              value: node\n            - name: DATASTORE_TYPE\n              value: kubernetes\n          readinessProbe:\n            exec:\n              command:\n              - /usr/bin/check-status\n              - -r\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n",
   "calico-v1.16": "\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-kube-controllers\nrules:\n  # Nodes are watched to monitor for deletions.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - watch\n      - list\n      - get\n  # Pods are queried to check for existence.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n  # IPAM resources are manipulated when nodes are deleted.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n    verbs:\n      - list\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  # Needs access to update clusterinformations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - clusterinformations\n    verbs:\n      - get\n      - create\n      - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-kube-controllers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n  name: calico-kube-controllers\n  namespace: kube-system\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # 
Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n  # These permissions are required for Calico CNI to perform IPAM allocations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ipamconfigs\n    verbs:\n      - get\n  # Block affinities must also be watchable by confd for route aggregation.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n    verbs:\n      - watch\n  # The Calico IPAM migration needs to get daemonsets. These permissions can be\n  # removed if not upgrading from an installation using host-local IPAM.\n  - apiGroups: [\"apps\"]\n    resources:\n      - daemonsets\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: calico-config\n  namespace: kube-system\ndata:\n  # Typha is disabled.\n  typha_service_name: \"none\"\n  # Configure the backend to use.\n  calico_backend: \"bird\"\n\n  # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n  veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n  veth_mtu: \"1440\"\n{{- end}}\n\n  # The CNI network configuration to install on each node.  
The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n          \"log_level\": \"info\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"mtu\": __CNI_MTU__,\n          \"ipam\": {\n              \"type\": \"calico-ipam\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n---\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamblocks.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMBlock\n    plural: ipamblocks\n    singular: ipamblock\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: blockaffinities.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BlockAffinity\n    plural: blockaffinities\n    singular: blockaffinity\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamhandles.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMHandle\n    plural: ipamhandles\n    singular: ipamhandle\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamconfigs.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMConfig\n    plural: ipamconfigs\n    singular: ipamconfig\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgppeers.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPPeer\n    plural: bgppeers\n    singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n---\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: calico-node\n  namespace: kube-system\n  labels:\n    k8s-app: calico-node\nspec:\n  selector:\n    matchLabels:\n      k8s-app: calico-node\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: calico-node\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      hostNetwork: true\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n          {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-node\n          {{end}}\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      priorityClassName: system-node-critical\n      initContainers:\n        # This container performs upgrade from host-local IPAM to 
calico-ipam.\n        # It can be deleted if this is a fresh installation, or if you have already\n        # upgraded to use calico-ipam.\n        - name: upgrade-ipam\n          image: {{.CNIImage}}\n          command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n          env:\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n          volumeMounts:\n            - mountPath: /var/lib/cni/networks\n              name: host-local-net-dir\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n        # This container installs the CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # CNI MTU Config variable\n            - name: CNI_MTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n        # to communicate with Felix over the Policy Sync API.\n        - name: flexvol-driver\n          image: {{.FlexVolImg}}\n          volumeMounts:\n          - name: flexvol-driver-host\n            mountPath: /host/driver\n      containers:\n        # Runs calico-node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Choose the backend to use.\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,bgp\"\n            # Auto-detect the BGP IP address.\n            - name: IP\n              value: \"autodetect\"\n            # Enable IPIP\n            - name: CALICO_IPV4POOL_IPIP\n              value: \"Always\"\n            # Set MTU for tunnel device used if ipip is enabled\n            - name: FELIX_IPINIPMTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"{{.ClusterCIDR}}\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Set Felix logging to \"info\"\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"info\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            exec:\n              command:\n              - /bin/calico-node\n              - -bird-ready\n              - -felix-ready\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n            - name: policysync\n              mountPath: /var/run/nodeagent\n      volumes:\n        # Used by calico-node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: 
var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Mount in the directory for host-local IPAM allocations. This is\n        # used when upgrading from host-local to calico-ipam, and can be removed\n        # if not using the upgrade-ipam init container.\n        - name: host-local-net-dir\n          hostPath:\n            path: /var/lib/cni/networks\n        # Used to create per-pod Unix Domain Sockets\n        - name: policysync\n          hostPath:\n            type: DirectoryOrCreate\n            path: /var/run/nodeagent\n        # Used to install Flex Volume Driver\n        - name: flexvol-driver-host\n          hostPath:\n            type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n            path: {{.FlexVolPluginDir}}\n{{- else }}\n            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-node\n  namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n  labels:\n    k8s-app: calico-kube-controllers\nspec:\n  # The controllers can only have a single active instance.\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: calico-kube-controllers\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      name: calico-kube-controllers\n      namespace: kube-system\n      labels:\n        k8s-app: calico-kube-controllers\n      annotations:\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-kube-controllers\n{{end}}\n      priorityClassName: system-cluster-critical\n      containers:\n        - name: calico-kube-controllers\n          image: {{.ControllersImage}}\n          env:\n            # Choose which controllers to run.\n            - name: ENABLED_CONTROLLERS\n              value: node\n            - name: DATASTORE_TYPE\n              value: kubernetes\n          readinessProbe:\n            exec:\n              command:\n              - /usr/bin/check-status\n              - -r\n",
   "calico-v1.17": "\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-kube-controllers\nrules:\n  # Nodes are watched to monitor for deletions.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - watch\n      - list\n      - get\n  # Pods are queried to check for existence.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n  # IPAM resources are manipulated when nodes are deleted.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n    verbs:\n      - list\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  # Needs access to update clusterinformations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - clusterinformations\n    verbs:\n      - get\n      - create\n      - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-kube-controllers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n  name: calico-kube-controllers\n  namespace: kube-system\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n      - blockaffinities\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n 
     - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only required for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n  # These permissions are required for Calico CNI to perform IPAM allocations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n      - ipamblocks\n      - ipamhandles\n    verbs:\n      - get\n      - list\n      - create\n      - update\n      - delete\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ipamconfigs\n    verbs:\n      - get\n  # Block affinities must also be watchable by confd for route aggregation.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - blockaffinities\n    verbs:\n      - watch\n  # The Calico IPAM migration needs to get daemonsets. These permissions can be\n  # removed if not upgrading from an installation using host-local IPAM.\n  - apiGroups: [\"apps\"]\n    resources:\n      - daemonsets\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: calico-config\n  namespace: kube-system\ndata:\n  # Typha is disabled.\n  typha_service_name: \"none\"\n  # Configure the backend to use.\n  calico_backend: \"bird\"\n\n  # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n  veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n  veth_mtu: \"1440\"\n{{- end}}\n\n  # The CNI network configuration to install on each node.  
The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n          \"log_level\": \"info\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"mtu\": __CNI_MTU__,\n          \"ipam\": {\n              \"type\": \"calico-ipam\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n---\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamblocks.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMBlock\n    plural: ipamblocks\n    singular: ipamblock\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: blockaffinities.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BlockAffinity\n    plural: blockaffinities\n    singular: blockaffinity\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamhandles.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMHandle\n    plural: ipamhandles\n    singular: ipamhandle\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ipamconfigs.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPAMConfig\n    plural: ipamconfigs\n    singular: ipamconfig\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgppeers.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPPeer\n    plural: bgppeers\n    singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n---\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: calico-node\n  namespace: kube-system\n  labels:\n    k8s-app: calico-node\nspec:\n  selector:\n    matchLabels:\n      k8s-app: calico-node\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: calico-node\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      hostNetwork: true\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n          {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-node\n          {{end}}\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      priorityClassName: system-node-critical\n      initContainers:\n        # This container performs upgrade from host-local IPAM to 
calico-ipam.\n        # It can be deleted if this is a fresh installation, or if you have already\n        # upgraded to use calico-ipam.\n        - name: upgrade-ipam\n          image: {{.CNIImage}}\n          command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n          env:\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n          volumeMounts:\n            - mountPath: /var/lib/cni/networks\n              name: host-local-net-dir\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n        # This container installs the CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # CNI MTU Config variable\n            - name: CNI_MTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n        # to communicate with Felix over the Policy Sync API.\n        - name: flexvol-driver\n          image: {{.FlexVolImg}}\n          volumeMounts:\n          - name: flexvol-driver-host\n            mountPath: /host/driver\n      containers:\n        # Runs calico-node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Choose the backend to use.\n            - name: CALICO_NETWORKING_BACKEND\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: calico_backend\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,bgp\"\n            # Auto-detect the BGP IP address.\n            - name: IP\n              value: \"autodetect\"\n            # Enable IPIP\n            - name: CALICO_IPV4POOL_IPIP\n              value: \"Always\"\n            # Set MTU for tunnel device used if ipip is enabled\n            - name: FELIX_IPINIPMTU\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: veth_mtu\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"{{.ClusterCIDR}}\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Set Felix logging to \"info\"\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"info\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            exec:\n              command:\n              - /bin/calico-node\n              - -bird-ready\n              - -felix-ready\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n            - name: policysync\n              mountPath: /var/run/nodeagent\n      volumes:\n        # Used by calico-node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: 
var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Mount in the directory for host-local IPAM allocations. This is\n        # used when upgrading from host-local to calico-ipam, and can be removed\n        # if not using the upgrade-ipam init container.\n        - name: host-local-net-dir\n          hostPath:\n            path: /var/lib/cni/networks\n        # Used to create per-pod Unix Domain Sockets\n        - name: policysync\n          hostPath:\n            type: DirectoryOrCreate\n            path: /var/run/nodeagent\n        # Used to install Flex Volume Driver\n        - name: flexvol-driver-host\n          hostPath:\n            type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n            path: {{.FlexVolPluginDir}}\n{{- else }}\n            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-node\n  namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: calico-kube-controllers\n  namespace: kube-system\n  labels:\n    k8s-app: calico-kube-controllers\nspec:\n  # The controllers can only have a single active instance.\n  replicas: 1\n  selector:\n    matchLabels:\n      k8s-app: calico-kube-controllers\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      name: calico-kube-controllers\n      namespace: kube-system\n      labels:\n        k8s-app: calico-kube-controllers\n      annotations:\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      tolerations:\n        # Make sure calico-node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: calico-kube-controllers\n{{end}}\n      priorityClassName: system-cluster-critical\n      containers:\n        - name: calico-kube-controllers\n          image: {{.ControllersImage}}\n          env:\n            # Choose which controllers to run.\n            - name: ENABLED_CONTROLLERS\n              value: node\n            - name: DATASTORE_TYPE\n              value: kubernetes\n          readinessProbe:\n            exec:\n              command:\n              - /usr/bin/check-status\n              - -r\n",
   "calico-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n## start rbac here\n\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico-node\nrules:\n  - apiGroups: [\"\"]\n    resources:\n      - namespaces\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - update\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n      - list\n      - watch\n      - patch\n  - apiGroups: [\"\"]\n    resources:\n      - services\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - update\n      - watch\n  - apiGroups: [\"extensions\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - create\n      - get\n      - list\n      - update\n      - watch\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n## end rbac here\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # To enable Typha, set this to \"calico-typha\" *and* set a non-zero value for Typha replicas\n # below.  We recommend using Typha if you have more than 50 nodes. 
Above 100 nodes it is\n # essential.\n typha_service_name: \"none\"\n # The CNI network configuration to install on each node.\n cni_network_config: |-\n    {\n     \"name\": \"k8s-pod-network\",\n     \"cniVersion\": \"0.3.0\",\n     \"plugins\": [\n       {\n         \"type\": \"calico\",\n         \"log_level\": \"WARNING\",\n         \"datastore_type\": \"kubernetes\",\n         \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n         \"mtu\": 1500,\n         \"ipam\": {\n           \"type\": \"host-local\",\n           \"subnet\": \"usePodCidr\"\n         },\n         \"policy\": {\n           \"type\": \"k8s\",\n           \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n         },\n         \"kubernetes\": {\n           \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n           \"kubeconfig\": \"{{.KubeCfg}}\"\n         }\n       },\n       {\n         \"type\": \"portmap\",\n         \"snat\": true,\n         \"capabilities\": {\"portMappings\": true}\n       }\n     ]\n    }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: calico-node\n  namespace: kube-system\n  labels:\n    k8s-app: calico-node\nspec:\n  selector:\n    matchLabels:\n      k8s-app: calico-node\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: calico-node\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Make sure calico/node gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n        - key: \"node-role.kubernetes.io/controlplane\"\n          operator: \"Exists\"\n          effect: \"NoSchedule\"\n        - key: \"node-role.kubernetes.io/etcd\"\n          operator: \"Exists\"\n          effect: \"NoExecute\"\n      serviceAccountName: calico-node\n      terminationGracePeriodSeconds: 0\n      containers:\n        # Runs calico/node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,bgp\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPV6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Set MTU for tunnel device used if ipip is enabled\n            - name: FELIX_IPINIPMTU\n              value: \"1440\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"{{.ClusterCIDR}}\"\n            # Enable IPIP\n            - name: CALICO_IPV4POOL_IPIP\n              value: \"Always\"\n            # Enable IP-in-IP within Felix.\n            - name: FELIX_IPINIPENABLED\n              value: \"true\"\n            # Typha support: controlled by the ConfigMap.\n            - name: FELIX_TYPHAK8SSERVICENAME\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: typha_service_name\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Auto-detect the BGP IP address.\n            - name: IP\n              value: \"autodetect\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n        # This container installs the Calico CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          
image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: calico-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n      volumes:\n        # Used by calico/node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgppeers.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPPeer\n    plural: bgppeers\n    singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    
singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: calico-node\n  namespace: kube-system\n\n\n{{if ne .CloudProvider \"none\"}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: {{.CloudProvider}}-ippool\n  namespace: kube-system\ndata:\n  {{.CloudProvider}}-ippool: |-\n    apiVersion: projectcalico.org/v3\n    kind: IPPool\n    metadata:\n      name: ippool-ipip-1\n    spec:\n      cidr: {{.ClusterCIDR}}\n      ipipMode: Always\n      natOutgoing: true\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: calicoctl\n  namespace: kube-system\nspec:\n  hostNetwork: true\n  restartPolicy: OnFailure\n  tolerations:\n  - effect: NoExecute\n    operator: Exists\n  - effect: NoSchedule\n    operator: Exists\n  containers:\n  - name: calicoctl\n    image: {{.Calicoctl}}\n    command: [\"/bin/sh\", \"-c\", \"calicoctl apply -f {{.CloudProvider}}-ippool.yaml\"]\n    env:\n    - name: DATASTORE_TYPE\n      value: kubernetes\n    volumeMounts:\n    - name: ippool-config\n      mountPath: /root/\n  volumes:\n  - name: ippool-config\n    configMap:\n      name: {{.CloudProvider}}-ippool\n      items:\n        - key: {{.CloudProvider}}-ippool\n          path: {{.CloudProvider}}-ippool.yaml\n  # Mount in the etcd TLS secrets.\n{{end}}\n",
   "canal-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: 
canal-flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: canal-calico\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n#   calico/node:v3.1.1\n#   calico/cni:v3.1.1\n#   coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: canal-config\n  namespace: kube-system\ndata:\n  # The interface used by canal for host \u003c-\u003e host communication.\n  # If left blank, then the interface is chosen using the node's\n  # default route.\n  canal_iface: \"{{.CanalInterface}}\"\n\n  # Whether or not to masquerade traffic to destinations not within\n  # the pod network.\n  masquerade: \"true\"\n\n  # The CNI network configuration to install on each node.  The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.0\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n          \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"ipam\": {\n            \"type\": \"host-local\",\n            \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n\n  # Flannel network configuration. 
Mounted into the flannel container.\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\"\n      }\n    }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: canal\n  namespace: kube-system\n  labels:\n    k8s-app: canal\nspec:\n  selector:\n    matchLabels:\n      k8s-app: canal\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: canal\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Make sure canal gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n      serviceAccountName: canal\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      initContainers:\n        # This container installs the Calico CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-canal.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n      containers:\n        # Runs calico/node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Don't enable BGP.\n            - name: CALICO_NETWORKING_BACKEND\n              value: \"none\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,canal\"\n            # Period, in seconds, at which felix re-applies all iptables state\n            - name: FELIX_IPTABLESREFRESHINTERVAL\n              value: \"60\"\n            # No IP address needed.\n            - name: IP\n              value: \"\"\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"192.168.0.0/16\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n        # This container runs flannel using the kube-subnet-mgr backend\n        # for allocating subnets.\n        - name: kube-flannel\n          image: {{.CanalFlannelImg}}\n          command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n          securityContext:\n            privileged: true\n          
env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: FLANNELD_IFACE\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: canal_iface\n            - name: FLANNELD_IP_MASQ\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: masquerade\n          volumeMounts:\n          - mountPath: /run/xtables.lock\n            name: xtables-lock\n            readOnly: false\n          - name: flannel-cfg\n            mountPath: /etc/kube-flannel/\n      volumes:\n        # Used by calico/node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used by flannel.\n        - name: flannel-cfg\n          configMap:\n            name: canal-config\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: canal\n  namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: 
globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n",
   "canal-v1.15": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: calico-node\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: 
rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: canal-flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: canal-calico\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n#   calico/node:v3.1.1\n#   calico/cni:v3.1.1\n#   coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: canal-config\n  namespace: kube-system\ndata:\n  # The interface used by canal for host \u003c-\u003e host communication.\n  # If left blank, then the interface is chosen using the node's\n  # default route.\n  canal_iface: \"{{.CanalInterface}}\"\n\n  # Whether or not to masquerade traffic to destinations not within\n  # the pod network.\n  masquerade: \"true\"\n\n  # The CNI network configuration to install on each node.  The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.0\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n          \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"ipam\": {\n            \"type\": \"host-local\",\n            \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n          },\n          \"kubernetes\": {\n              \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n\n  # Flannel network configuration. 
Mounted into the flannel container.\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\"\n      }\n    }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: canal\n  namespace: kube-system\n  labels:\n    k8s-app: canal\nspec:\n  selector:\n    matchLabels:\n      k8s-app: canal\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: canal\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Make sure canal gets scheduled on all nodes.\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n      {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: canal\n      {{end}}\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      initContainers:\n        # This container installs the Calico CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-canal.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n      containers:\n        # Runs calico/node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Configure route aggregation based on pod CIDR.\n            - name: USE_POD_CIDR\n              value: \"true\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Don't enable BGP.\n            - name: CALICO_NETWORKING_BACKEND\n              value: \"none\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,canal\"\n            # Period, in seconds, at which felix re-applies all iptables state\n            - name: FELIX_IPTABLESREFRESHINTERVAL\n              value: \"60\"\n            # No IP address needed.\n            - name: IP\n              value: \"\"\n            # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n            # chosen from this range. Changing this value after installation will have\n            # no effect. This should fall within --cluster-cidr.\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"192.168.0.0/16\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n        # This container runs flannel using the kube-subnet-mgr backend\n        # for allocating subnets.\n        - name: kube-flannel\n          image: {{.CanalFlannelImg}}\n          command: [ 
\"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n          securityContext:\n            privileged: true\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: FLANNELD_IFACE\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: canal_iface\n            - name: FLANNELD_IP_MASQ\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: masquerade\n          volumeMounts:\n          - mountPath: /run/xtables.lock\n            name: xtables-lock\n            readOnly: false\n          - name: flannel-cfg\n            mountPath: /etc/kube-flannel/\n      volumes:\n        # Used by calico/node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used by flannel.\n        - name: flannel-cfg\n          configMap:\n            name: canal-config\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: canal\n  namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  
name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n",
   "canal-v1.16": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: canal-config\n  namespace: kube-system\ndata:\n  # Typha is disabled.\n  typha_service_name: \"none\"\n  # The interface used by canal for host \u003c-\u003e host communication.\n  # If left blank, then the interface is chosen using the node's\n  # default route.\n  canal_iface: \"{{.CanalInterface}}\"\n  # Whether or not to masquerade traffic to destinations not within\n  # the pod network.\n  masquerade: \"true\"\n\n  # The CNI network configuration to install on each node.  The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n          \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"ipam\": {\n              \"type\": \"host-local\",\n              \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\",\n              \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n          },\n          \"kubernetes\": {\n            \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n\n  # Flannel network configuration. Mounted into the flannel container.\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\"\n      }\n    }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for upgrade from v2.6, and 
can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: canal\n  namespace: kube-system\n  labels:\n    k8s-app: canal\nspec:\n  selector:\n    matchLabels:\n      k8s-app: canal\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: canal\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Tolerate this effect so the pods will be schedulable at all times\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n        - key: \"node-role.kubernetes.io/controlplane\"\n          operator: \"Exists\"\n          effect: \"NoSchedule\"\n        - key: \"node-role.kubernetes.io/etcd\"\n          operator: \"Exists\"\n          effect: \"NoExecute\"\n      {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: canal\n      {{end}}\n      # Minimize 
downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      priorityClassName: system-node-critical\n      initContainers:\n        # This container installs the CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-canal.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n        # to communicate with Felix over the Policy Sync API.\n        - name: flexvol-driver\n          image: {{.FlexVolImg}}\n          volumeMounts:\n          - name: flexvol-driver-host\n            mountPath: /host/driver\n      containers:\n        # Runs canal container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Configure route aggregation based on pod CIDR.\n            - name: USE_POD_CIDR\n              value: \"true\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Don't enable BGP.\n            - name: CALICO_NETWORKING_BACKEND\n              value: \"none\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,canal\"\n            # Period, in seconds, at which felix re-applies all iptables state\n            - name: FELIX_IPTABLESREFRESHINTERVAL\n              value: \"60\"\n            # No IP address needed.\n            - name: IP\n              value: \"\"\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"192.168.0.0/16\"\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n            - name: policysync\n              mountPath: /var/run/nodeagent\n        # This container runs flannel using the kube-subnet-mgr backend\n        # for allocating subnets.\n        - name: kube-flannel\n          image: {{.CanalFlannelImg}}\n          command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n          securityContext:\n            privileged: true\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n            
      fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: FLANNELD_IFACE\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: canal_iface\n            - name: FLANNELD_IP_MASQ\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: masquerade\n          volumeMounts:\n          - mountPath: /run/xtables.lock\n            name: xtables-lock\n            readOnly: false\n          - name: flannel-cfg\n            mountPath: /etc/kube-flannel/\n      volumes:\n        # Used by canal.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used by flannel.\n        - name: flannel-cfg\n          configMap:\n            name: canal-config\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Used to create per-pod Unix Domain Sockets\n        - name: policysync\n          hostPath:\n            type: DirectoryOrCreate\n            path: /var/run/nodeagent\n        # Used to install Flex Volume Driver\n        - name: flexvol-driver-host\n          hostPath:\n            type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n            path: {{.FlexVolPluginDir}}\n{{- else }}\n            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: canal\n  namespace: kube-system\n",
   "canal-v1.17": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: canal-config\n  namespace: kube-system\ndata:\n  # Typha is disabled.\n  typha_service_name: \"none\"\n  # The interface used by canal for host \u003c-\u003e host communication.\n  # If left blank, then the interface is chosen using the node's\n  # default route.\n  canal_iface: \"{{.CanalInterface}}\"\n  # Whether or not to masquerade traffic to destinations not within\n  # the pod network.\n  masquerade: \"true\"\n\n  # The CNI network configuration to install on each node.  The special\n  # values in this config will be automatically populated.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n          \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"ipam\": {\n              \"type\": \"host-local\",\n              \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\",\n              \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n          },\n          \"kubernetes\": {\n            \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n\n  # Flannel network configuration. Mounted into the flannel container.\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\"\n      }\n    }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networksets.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkSet\n    plural: networksets\n    singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: calico-node\nrules:\n  # The CNI plugin needs to get pods, nodes, and namespaces.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - nodes\n      - namespaces\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n      - services\n    verbs:\n      # Used to discover service IPs for advertisement.\n      - watch\n      - list\n      # Used to discover Typhas.\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      # Needed for clearing NodeNetworkUnavailable flag.\n      - patch\n      # Calico stores some configuration information in node annotations.\n      - update\n  # Watch for changes to Kubernetes NetworkPolicies.\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - watch\n      - list\n  # Used by Calico for policy information.\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n      - namespaces\n      - serviceaccounts\n    verbs:\n      - list\n      - watch\n  # The CNI plugin patches pods/status.\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - patch\n  # Calico monitors various CRDs for config.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - ipamblocks\n      - globalnetworkpolicies\n      - globalnetworksets\n      - networkpolicies\n      - networksets\n      - clusterinformations\n      - hostendpoints\n      - blockaffinities\n    verbs:\n      - get\n      - list\n      - watch\n  # Calico must create and update some CRDs on startup.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - ippools\n      - felixconfigurations\n      - clusterinformations\n    verbs:\n      - create\n      - update\n  # Calico stores some configuration information on the node.\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  # These permissions are only requried for 
upgrade from v2.6, and can\n  # be removed after upgrade or on fresh installations.\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - bgpconfigurations\n      - bgppeers\n    verbs:\n      - create\n      - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups: [\"\"]\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: calico-node\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico-node\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: canal\n  namespace: kube-system\n  labels:\n    k8s-app: canal\nspec:\n  selector:\n    matchLabels:\n      k8s-app: canal\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: canal\n      annotations:\n        # This, along with the CriticalAddonsOnly toleration below,\n        # marks the pod as a critical add-on, ensuring it gets\n        # priority scheduling and that its resources are reserved\n        # if it ever gets evicted.\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n        # Tolerate this effect so the pods will be schedulable at all times\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n        - key: \"node-role.kubernetes.io/controlplane\"\n          operator: \"Exists\"\n          effect: \"NoSchedule\"\n        - key: \"node-role.kubernetes.io/etcd\"\n          operator: \"Exists\"\n          effect: \"NoExecute\"\n      {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: canal\n      
{{end}}\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      priorityClassName: system-node-critical\n      initContainers:\n        # This container installs the CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            # Name of the CNI config file to create.\n            - name: CNI_CONF_NAME\n              value: \"10-canal.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: cni_network_config\n            # Set the hostname based on the k8s node name.\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Prevents the container from sleeping forever.\n            - name: SLEEP\n              value: \"false\"\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: cni-net-dir\n        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n        # to communicate with Felix over the Policy Sync API.\n        - name: flexvol-driver\n          image: {{.FlexVolImg}}\n          volumeMounts:\n          - name: flexvol-driver-host\n            mountPath: /host/driver\n      containers:\n        # Runs canal container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Configure route aggregation based on pod CIDR.\n            - name: USE_POD_CIDR\n              value: \"true\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # Set based on the k8s node name.\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Don't enable BGP.\n            - name: CALICO_NETWORKING_BACKEND\n              value: \"none\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,canal\"\n            # Period, in seconds, at which felix re-applies all iptables state\n            - name: FELIX_IPTABLESREFRESHINTERVAL\n              value: \"60\"\n            # No IP address needed.\n            - name: IP\n              value: \"\"\n            - name: CALICO_IPV4POOL_CIDR\n              value: \"192.168.0.0/16\"\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            # Disable IPv6 on Kubernetes.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n              host: localhost\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /run/xtables.lock\n              name: xtables-lock\n              readOnly: false\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n            - name: policysync\n              mountPath: /var/run/nodeagent\n        # This container runs flannel using the kube-subnet-mgr backend\n        # for allocating subnets.\n        - name: kube-flannel\n          image: {{.CanalFlannelImg}}\n          command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n          securityContext:\n            privileged: true\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n            
      fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: FLANNELD_IFACE\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: canal_iface\n            - name: FLANNELD_IP_MASQ\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: masquerade\n          volumeMounts:\n          - mountPath: /run/xtables.lock\n            name: xtables-lock\n            readOnly: false\n          - name: flannel-cfg\n            mountPath: /etc/kube-flannel/\n      volumes:\n        # Used by canal.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n        # Used by flannel.\n        - name: flannel-cfg\n          configMap:\n            name: canal-config\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Used to create per-pod Unix Domain Sockets\n        - name: policysync\n          hostPath:\n            type: DirectoryOrCreate\n            path: /var/run/nodeagent\n        # Used to install Flex Volume Driver\n        - name: flexvol-driver-host\n          hostPath:\n            type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n            path: {{.FlexVolPluginDir}}\n{{- else }}\n            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: canal\n  namespace: kube-system\n",
   "canal-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n# Calico Roles\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: calico\nrules:\n  - apiGroups: [\"\"]\n    resources:\n      - namespaces\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups: [\"\"]\n    resources:\n      - pods/status\n    verbs:\n      - update\n  - apiGroups: [\"\"]\n    resources:\n      - pods\n    verbs:\n      - get\n      - list\n      - watch\n      - patch\n  - apiGroups: [\"\"]\n    resources:\n      - services\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - endpoints\n    verbs:\n      - get\n  - apiGroups: [\"\"]\n    resources:\n      - nodes\n    verbs:\n      - get\n      - list\n      - update\n      - watch\n  - apiGroups: [\"networking.k8s.io\"]\n    resources:\n      - networkpolicies\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups: [\"crd.projectcalico.org\"]\n    resources:\n      - globalfelixconfigs\n      - felixconfigurations\n      - bgppeers\n      - globalbgpconfigs\n      - bgpconfigurations\n      - ippools\n      - globalnetworkpolicies\n      - networkpolicies\n      - clusterinformations\n      - hostendpoints\n      - globalnetworksets\n    verbs:\n      - create\n      - get\n      - list\n      - update\n      - watch\n\n---\n\n# Flannel roles\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\n\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: canal-flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n\n---\n\n# Bind the calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: canal-calico\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: calico\nsubjects:\n- kind: ServiceAccount\n  name: canal\n  namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n  kind: Group\n  name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n#   calico/node:v3.1.1\n#   calico/cni:v3.1.1\n#   coreos/flannel:v0.9.1\n\n---\n# This ConfigMap can be used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: canal-config\n  namespace: kube-system\ndata:\n  # The interface used by canal for host \u003c-\u003e host communication.\n  # If left blank, then the interface is chosen using the node's\n  # default route.\n  canal_iface: \"{{.CanalInterface}}\"\n\n  # Whether or not to masquerade traffic to destinations not within\n  # the pod network.\n  masquerade: \"true\"\n\n  # The CNI network configuration to install on each node.\n  cni_network_config: |-\n    {\n      \"name\": \"k8s-pod-network\",\n      \"cniVersion\": \"0.3.0\",\n      \"plugins\": [\n        {\n          \"type\": 
\"calico\",\n          \"log_level\": \"WARNING\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n          \"ipam\": {\n            \"type\": \"host-local\",\n            \"subnet\": \"usePodCidr\"\n          },\n          \"policy\": {\n            \"type\": \"k8s\",\n            \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n          },\n          \"kubernetes\": {\n            \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n            \"kubeconfig\": \"{{.KubeCfg}}\"\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n        }\n      ]\n    }\n\n  # Flannel network configuration. Mounted into the flannel container.\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\",\n        \"VNI\": {{.FlannelBackend.VNI}},\n        \"Port\": {{.FlannelBackend.Port}}\n      }\n    }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n  name: canal\n  namespace: kube-system\n  labels:\n    k8s-app: canal\nspec:\n  selector:\n    matchLabels:\n      k8s-app: canal\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  template:\n    metadata:\n      labels:\n        k8s-app: canal\n      annotations:\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      serviceAccountName: canal\n      tolerations:\n        # Tolerate this effect so the pods will be schedulable at all times\n        - effect: NoSchedule\n          operator: Exists\n        # Mark the pod as a critical add-on for rescheduling.\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - effect: NoExecute\n          operator: Exists\n        - key: \"node-role.kubernetes.io/controlplane\"\n          operator: \"Exists\"\n          effect: \"NoSchedule\"\n        - key: \"node-role.kubernetes.io/etcd\"\n          operator: \"Exists\"\n          effect: \"NoExecute\"\n      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n      # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n      terminationGracePeriodSeconds: 0\n      containers:\n        # Runs calico/node container on each Kubernetes node.  
This\n        # container programs network policy and routes on each\n        # host.\n        - name: calico-node\n          image: {{.NodeImage}}\n          env:\n            # Use Kubernetes API as the backing datastore.\n            - name: DATASTORE_TYPE\n              value: \"kubernetes\"\n            # Disable felix logging to file\n            - name: FELIX_LOGFILEPATH\n              value: \"none\"\n            # Disable felix logging for syslog\n            - name: FELIX_LOGSEVERITYSYS\n              value: \"\"\n            # Enable felix logging to stdout\n            - name: FELIX_LOGSEVERITYSCREEN\n              value: \"Warning\"\n            # Don't enable BGP.\n            - name: CALICO_NETWORKING_BACKEND\n              value: \"none\"\n            # Cluster type to identify the deployment type\n            - name: CLUSTER_TYPE\n              value: \"k8s,canal\"\n            # Disable file logging so kubectl logs works.\n            - name: CALICO_DISABLE_FILE_LOGGING\n              value: \"true\"\n            # Period, in seconds, at which felix re-applies all iptables state\n            - name: FELIX_IPTABLESREFRESHINTERVAL\n              value: \"60\"\n            # Disable IPV6 support in Felix.\n            - name: FELIX_IPV6SUPPORT\n              value: \"false\"\n            # Wait for the datastore.\n            - name: WAIT_FOR_DATASTORE\n              value: \"true\"\n            # No IP address needed.\n            - name: IP\n              value: \"\"\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n            # Set Felix endpoint to host default action to ACCEPT.\n            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n              value: \"ACCEPT\"\n            - name: FELIX_HEALTHENABLED\n              value: \"true\"\n          securityContext:\n            privileged: true\n          resources:\n            requests:\n              cpu: 250m\n          livenessProbe:\n            httpGet:\n              path: /liveness\n              port: 9099\n            periodSeconds: 10\n            initialDelaySeconds: 10\n            failureThreshold: 6\n          readinessProbe:\n            httpGet:\n              path: /readiness\n              port: 9099\n            periodSeconds: 10\n          volumeMounts:\n            - mountPath: /lib/modules\n              name: lib-modules\n              readOnly: true\n            - mountPath: /var/run/calico\n              name: var-run-calico\n              readOnly: false\n            - mountPath: /var/lib/calico\n              name: var-lib-calico\n              readOnly: false\n        # This container installs the Calico CNI binaries\n        # and CNI network config file on each node.\n        - name: install-cni\n          image: {{.CNIImage}}\n          command: [\"/install-cni.sh\"]\n          env:\n            - name: CNI_CONF_NAME\n              value: \"10-calico.conflist\"\n            # The CNI network config to install on each node.\n            - name: CNI_NETWORK_CONFIG\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: cni_network_config\n            - name: KUBERNETES_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: spec.nodeName\n          volumeMounts:\n            - mountPath: /host/opt/cni/bin\n              name: cni-bin-dir\n            - mountPath: /host/etc/cni/net.d\n              name: 
cni-net-dir\n        # This container runs flannel using the kube-subnet-mgr backend\n        # for allocating subnets.\n        - name: kube-flannel\n          image: {{.CanalFlannelImg}}\n          command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n          securityContext:\n            privileged: true\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n            - name: FLANNELD_IFACE\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: canal_iface\n            - name: FLANNELD_IP_MASQ\n              valueFrom:\n                configMapKeyRef:\n                  name: canal-config\n                  key: masquerade\n          volumeMounts:\n          - name: run\n            mountPath: /run\n          - name: flannel-cfg\n            mountPath: /etc/kube-flannel/\n          - name: xtables-lock\n            mountPath: /run/xtables.lock\n            readOnly: false\n      volumes:\n        # Used by calico/node.\n        - name: lib-modules\n          hostPath:\n            path: /lib/modules\n        - name: var-run-calico\n          hostPath:\n            path: /var/run/calico\n        - name: var-lib-calico\n          hostPath:\n            path: /var/lib/calico\n        # Used to install CNI.\n        - name: cni-bin-dir\n          hostPath:\n            path: /opt/cni/bin\n        - name: cni-net-dir\n          hostPath:\n            path: /etc/cni/net.d\n        # Used by flannel.\n        - name: run\n          hostPath:\n            path: /run\n        - name: flannel-cfg\n          configMap:\n            name: canal-config\n        - name: xtables-lock\n          hostPath:\n            path: /run/xtables.lock\n            type: FileOrCreate\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy-only mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n   name: felixconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: FelixConfiguration\n    plural: felixconfigurations\n    singular: felixconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: bgpconfigurations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: BGPConfiguration\n    plural: bgpconfigurations\n    singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: ippools.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: IPPool\n    plural: ippools\n    singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: clusterinformations.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: ClusterInformation\n    plural: clusterinformations\n    singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  
names:\n    kind: GlobalNetworkPolicy\n    plural: globalnetworkpolicies\n    singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: networkpolicies.crd.projectcalico.org\nspec:\n  scope: Namespaced\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: NetworkPolicy\n    plural: networkpolicies\n    singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: globalnetworksets.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: GlobalNetworkSet\n    plural: globalnetworksets\n    singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: hostendpoints.crd.projectcalico.org\nspec:\n  scope: Cluster\n  group: crd.projectcalico.org\n  version: v1\n  names:\n    kind: HostEndpoint\n    plural: hostendpoints\n    singular: hostendpoint\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: canal\n  namespace: kube-system\n",
   "coredns-v1.16": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n  name: system:coredns\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - endpoints\n  - services\n  - pods\n  - namespaces\n  verbs:\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  annotations:\n    rbac.authorization.kubernetes.io/autoupdate: \"true\"\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n  name: system:coredns\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:coredns\nsubjects:\n- kind: ServiceAccount\n  name: coredns\n  namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: coredns\n  namespace: kube-system\ndata:\n  Corefile: |\n    .:53 {\n        errors\n        health\n        ready\n        kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n          pods insecure\n          fallthrough in-addr.arpa ip6.arpa\n        }\n        prometheus :9153\n\t{{- if .UpstreamNameservers }}\n        forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n        forward . \"/etc/resolv.conf\"\n\t{{- end }}\n        cache 30\n        loop\n        reload\n        loadbalance\n    }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n      annotations:\n        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n    spec:\n      priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns\n{{- end }}\n      tolerations:\n        - key: \"CriticalAddonsOnly\"\n          operator: \"Exists\"\n        - effect: NoExecute\n          operator: Exists\n        - effect: NoSchedule\n          operator: Exists\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      containers:\n      - name: coredns\n        image: {{.CoreDNSImage}}\n        imagePullPolicy: IfNotPresent\n        resources:\n          limits:\n            memory: 170Mi\n          requests:\n            cpu: 100m\n            memory: 70Mi\n        args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/coredns\n          readOnly: true\n        ports:\n        - containerPort: 53\n          name: dns\n          protocol: UDP\n        - containerPort: 53\n          name: dns-tcp\n          protocol: TCP\n        - containerPort: 
9153\n          name: metrics\n          protocol: TCP\n        livenessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        readinessProbe:\n          httpGet:\n            path: /ready\n            port: 8181\n            scheme: HTTP\n        securityContext:\n          allowPrivilegeEscalation: false\n          capabilities:\n            add:\n            - NET_BIND_SERVICE\n            drop:\n            - all\n          readOnlyRootFilesystem: true\n      dnsPolicy: Default\n      volumes:\n        - name: config-volume\n          configMap:\n            name: coredns\n            items:\n            - key: Corefile\n              path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  annotations:\n    prometheus.io/port: \"9153\"\n    prometheus.io/scrape: \"true\"\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  selector:\n    k8s-app: kube-dns\n  clusterIP: {{.ClusterDNSServer}}\n  ports:\n  - name: dns\n    port: 53\n    protocol: UDP\n  - name: dns-tcp\n    port: 53\n    protocol: TCP\n  - name: metrics\n    port: 9153\n    protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n  labels:\n    k8s-app: coredns-autoscaler\nspec:\n  selector:\n    matchLabels:\n      k8s-app: coredns-autoscaler\n  template:\n    metadata:\n      labels:\n        k8s-app: coredns-autoscaler\n    spec:\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns-autoscaler\n{{- end }}\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: autoscaler\n        image: {{.CoreDNSAutoScalerImage}}\n        resources:\n            requests:\n                cpu: \"20m\"\n                memory: \"10Mi\"\n        command:\n          - /cluster-proportional-autoscaler\n          - --namespace=kube-system\n          - --configmap=coredns-autoscaler\n          - --target=Deployment/coredns\n          # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n          # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n          - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n          - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n          - --logtostderr=true\n          - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"replicationcontrollers/scale\"]\n    verbs: [\"get\", 
\"update\"]\n  - apiGroups: [\"extensions\",\"apps\"]\n    resources: [\"deployments/scale\", \"replicasets/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nsubjects:\n  - kind: ServiceAccount\n    name: coredns-autoscaler\n    namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: system:coredns-autoscaler\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}",
   "coredns-v1.17": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n  name: system:coredns\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - endpoints\n  - services\n  - pods\n  - namespaces\n  verbs:\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  annotations:\n    rbac.authorization.kubernetes.io/autoupdate: \"true\"\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n  name: system:coredns\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:coredns\nsubjects:\n- kind: ServiceAccount\n  name: coredns\n  namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: coredns\n  namespace: kube-system\ndata:\n  Corefile: |\n    .:53 {\n        errors\n        health {\n          lameduck 5s\n        }\n        ready\n        kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n          pods insecure\n          fallthrough in-addr.arpa ip6.arpa\n        }\n        prometheus :9153\n\t{{- if .UpstreamNameservers }}\n        forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n        forward . \"/etc/resolv.conf\"\n\t{{- end }}\n        cache 30\n        loop\n        reload\n        loadbalance\n    }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  replicas: 1\n  strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n      annotations:\n        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n    spec:\n      priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns\n{{- end }}\n      tolerations:\n        - key: \"CriticalAddonsOnly\"\n          operator: \"Exists\"\n        - effect: NoExecute\n          operator: Exists\n        - effect: NoSchedule\n          operator: Exists\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n        podAntiAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            - labelSelector:\n                matchExpressions:\n                - key: k8s-app\n                  operator: In\n                  values: [\"kube-dns\"]\n              topologyKey: kubernetes.io/hostname\n      containers:\n      - name: coredns\n        image: {{.CoreDNSImage}}\n        imagePullPolicy: IfNotPresent\n        resources:\n          limits:\n            memory: 170Mi\n          requests:\n            cpu: 100m\n            memory: 70Mi\n        
args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/coredns\n          readOnly: true\n        ports:\n        - containerPort: 53\n          name: dns\n          protocol: UDP\n        - containerPort: 53\n          name: dns-tcp\n          protocol: TCP\n        - containerPort: 9153\n          name: metrics\n          protocol: TCP\n        livenessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        readinessProbe:\n          httpGet:\n            path: /ready\n            port: 8181\n            scheme: HTTP\n        securityContext:\n          allowPrivilegeEscalation: false\n          capabilities:\n            add:\n            - NET_BIND_SERVICE\n            drop:\n            - all\n          readOnlyRootFilesystem: true\n      dnsPolicy: Default\n      volumes:\n        - name: config-volume\n          configMap:\n            name: coredns\n            items:\n            - key: Corefile\n              path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  annotations:\n    prometheus.io/port: \"9153\"\n    prometheus.io/scrape: \"true\"\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  selector:\n    k8s-app: kube-dns\n  clusterIP: {{.ClusterDNSServer}}\n  ports:\n  - name: dns\n    port: 53\n    protocol: UDP\n  - name: dns-tcp\n    port: 53\n    protocol: TCP\n  - name: metrics\n    port: 9153\n    protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n  labels:\n    k8s-app: coredns-autoscaler\nspec:\n  selector:\n    matchLabels:\n      k8s-app: coredns-autoscaler\n  template:\n    metadata:\n      labels:\n        k8s-app: coredns-autoscaler\n    spec:\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns-autoscaler\n{{- end }}\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: autoscaler\n        image: {{.CoreDNSAutoScalerImage}}\n        resources:\n            requests:\n                cpu: \"20m\"\n                memory: \"10Mi\"\n        command:\n          - /cluster-proportional-autoscaler\n          - --namespace=kube-system\n          - --configmap=coredns-autoscaler\n          - --target=Deployment/coredns\n          # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n          # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n          - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n          - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n          - --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux\n          - --logtostderr=true\n          - --v=2\n{{- if 
eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"replicationcontrollers/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"extensions\",\"apps\"]\n    resources: [\"deployments/scale\", \"replicasets/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nsubjects:\n  - kind: ServiceAccount\n    name: coredns-autoscaler\n    namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: system:coredns-autoscaler\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}",
   "coredns-v1.8": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns\n  namespace: kube-system\n  labels:\n      kubernetes.io/cluster-service: \"true\"\n      addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n    addonmanager.kubernetes.io/mode: Reconcile\n  name: system:coredns\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - endpoints\n  - services\n  - pods\n  - namespaces\n  verbs:\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  annotations:\n    rbac.authorization.kubernetes.io/autoupdate: \"true\"\n  labels:\n    kubernetes.io/bootstrapping: rbac-defaults\n    addonmanager.kubernetes.io/mode: EnsureExists\n  name: system:coredns\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:coredns\nsubjects:\n- kind: ServiceAccount\n  name: coredns\n  namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: coredns\n  namespace: kube-system\n  labels:\n      addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n  Corefile: |\n    .:53 {\n        errors\n        health\n        kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n          pods insecure\n          upstream\n          fallthrough in-addr.arpa ip6.arpa\n          ttl 30\n        }\n        prometheus :9153\n\t{{- if .UpstreamNameservers }}\n        forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n        forward . 
\"/etc/resolv.conf\"\n\t{{- end }}\n        cache 30\n        loop\n        reload\n        loadbalance\n    }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n      annotations:\n        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n    spec:\n      priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns\n{{- end }}\n      tolerations:\n        - key: \"CriticalAddonsOnly\"\n          operator: \"Exists\"\n        - effect: NoExecute\n          operator: Exists\n        - effect: NoSchedule\n          operator: Exists\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      containers:\n      - name: coredns\n        image: {{.CoreDNSImage}}\n        imagePullPolicy: IfNotPresent\n        resources:\n          limits:\n            memory: 170Mi\n          requests:\n            cpu: 100m\n            memory: 70Mi\n        args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/coredns\n          readOnly: true\n        ports:\n        - containerPort: 53\n          name: dns\n          protocol: UDP\n        - containerPort: 53\n          name: dns-tcp\n          protocol: TCP\n        - containerPort: 9153\n          name: metrics\n          protocol: TCP\n        livenessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        readinessProbe:\n          httpGet:\n            path: /health\n            port: 8080\n            scheme: HTTP\n        securityContext:\n          allowPrivilegeEscalation: false\n          capabilities:\n            add:\n            - NET_BIND_SERVICE\n            drop:\n            - all\n          readOnlyRootFilesystem: true\n      dnsPolicy: Default\n      volumes:\n        - name: config-volume\n          configMap:\n            name: coredns\n            items:\n            - key: Corefile\n              path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  annotations:\n    prometheus.io/port: \"9153\"\n    prometheus.io/scrape: \"true\"\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n    kubernetes.io/name: \"CoreDNS\"\nspec:\n  selector:\n    k8s-app: kube-dns\n  clusterIP: {{.ClusterDNSServer}}\n  ports:\n  - name: dns\n    port: 53\n    protocol: UDP\n  - name: dns-tcp\n    port: 53\n    protocol: TCP\n  - name: metrics\n    port: 9153\n    
protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n  labels:\n    k8s-app: coredns-autoscaler\nspec:\n  selector:\n    matchLabels:\n      k8s-app: coredns-autoscaler\n  template:\n    metadata:\n      labels:\n        k8s-app: coredns-autoscaler\n    spec:\n{{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: coredns-autoscaler\n{{- end }}\n      nodeSelector:\n        beta.kubernetes.io/os: linux\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: autoscaler\n        image: {{.CoreDNSAutoScalerImage}}\n        resources:\n            requests:\n                cpu: \"20m\"\n                memory: \"10Mi\"\n        command:\n          - /cluster-proportional-autoscaler\n          - --namespace=kube-system\n          - --configmap=coredns-autoscaler\n          - --target=Deployment/coredns\n          # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n          # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n          - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n          - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1}}\n{{end}}\n          - --logtostderr=true\n          - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: coredns-autoscaler\n  namespace: kube-system\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"replicationcontrollers/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"extensions\"]\n    resources: [\"deployments/scale\", \"replicasets/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:coredns-autoscaler\nsubjects:\n  - kind: ServiceAccount\n    name: coredns-autoscaler\n    namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: system:coredns-autoscaler\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}",
   "flannel-v1.15": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups: ['extensions']\n    resources: ['podsecuritypolicies']\n    verbs: ['use']\n    resourceNames: ['psp.flannel.unprivileged']\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: flannel\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: flannel\n  namespace: kube-system\n{{end}}\n---\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: psp.flannel.unprivileged\n  annotations:\n    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n  privileged: false\n  volumes:\n    - configMap\n    - secret\n    - emptyDir\n    - hostPath\n  allowedHostPaths:\n    - pathPrefix: \"/etc/cni/net.d\"\n    - pathPrefix: \"/etc/kube-flannel\"\n    - pathPrefix: \"/run/flannel\"\n  readOnlyRootFilesystem: false\n  # Users and groups\n  runAsUser:\n    rule: RunAsAny\n  supplementalGroups:\n    rule: RunAsAny\n  fsGroup:\n    rule: RunAsAny\n  # Privilege Escalation\n  allowPrivilegeEscalation: false\n  defaultAllowPrivilegeEscalation: false\n  # Capabilities\n  allowedCapabilities: ['NET_ADMIN']\n  defaultAddCapabilities: []\n  requiredDropCapabilities: []\n  # Host namespaces\n  hostPID: false\n  hostIPC: false\n  hostNetwork: true\n  hostPorts:\n  - min: 0\n    max: 65535\n  # SELinux\n  seLinux:\n    # SELinux is unsed in CaaSP\n    rule: 'RunAsAny'\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-flannel-cfg\n  namespace: kube-system\n  labels:\n    tier: node\n    app: flannel\ndata:\n  cni-conf.json: |\n    {\n      \"name\": \"cbr0\",\n      \"cniVersion\":\"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"flannel\",\n          \"delegate\": {\n            \"hairpinMode\": true,\n            \"isDefaultGateway\": true\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"capabilities\": {\n            \"portMappings\": true\n          }\n        }\n      ]\n    }\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\",\n        \"VNI\": {{.FlannelBackend.VNI}},\n        \"Port\": {{.FlannelBackend.Port}}\n      }\n    }\n---\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n  name: kube-flannel\n  namespace: kube-system\n  labels:\n    tier: node\n    k8s-app: flannel\nspec:\n  template:\n    metadata:\n      labels:\n        tier: node\n        k8s-app: flannel\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  
operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n      - operator: Exists\n      {{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: flannel\n      {{end}}\n      containers:\n      - name: kube-flannel\n        image: {{.Image}}\n        command:\n        - /opt/bin/flanneld\n        args:\n        - --ip-masq\n        - --kube-subnet-mgr\n        {{- if .FlannelInterface}}\n        - --iface={{.FlannelInterface}}\n        {{end}}\n        resources:\n          requests:\n            cpu: \"100m\"\n            memory: \"50Mi\"\n          limits:\n            cpu: \"100m\"\n            memory: \"50Mi\"\n        securityContext:\n          privileged: false\n          capabilities:\n             add: [\"NET_ADMIN\"]\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n        volumeMounts:\n        - name: run\n          mountPath: /run\n        - name: cni\n          mountPath: /etc/cni/net.d\n        - name: flannel-cfg\n          mountPath: /etc/kube-flannel/\n      - name: install-cni\n        image: {{.CNIImage}}\n        command: [\"/install-cni.sh\"]\n        env:\n        # The CNI network config to install on each node.\n        - name: CNI_NETWORK_CONFIG\n          valueFrom:\n            configMapKeyRef:\n              name: kube-flannel-cfg\n              key: cni-conf.json\n        - name: CNI_CONF_NAME\n          value: \"10-flannel.conflist\"\n        volumeMounts:\n        - name: cni\n          mountPath: /host/etc/cni/net.d\n        - name: host-cni-bin\n          mountPath: /host/opt/cni/bin/\n      volumes:\n        - name: run\n          hostPath:\n            path: /run\n        - name: cni\n          hostPath:\n            path: /etc/cni/net.d\n        - name: flannel-cfg\n          configMap:\n            name: kube-flannel-cfg\n        - name: host-cni-bin\n          hostPath:\n            path: /opt/cni/bin\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 20%\n{{end}}\n",
   "flannel-v1.16": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups: ['extensions']\n    resources: ['podsecuritypolicies']\n    verbs: ['use']\n    resourceNames: ['psp.flannel.unprivileged']\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: flannel\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: flannel\n  namespace: kube-system\n{{end}}\n---\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: psp.flannel.unprivileged\n  annotations:\n    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default\n    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default\n    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default\n    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default\nspec:\n  privileged: false\n  volumes:\n    - configMap\n    - secret\n    - emptyDir\n    - hostPath\n  allowedHostPaths:\n    - pathPrefix: \"/etc/cni/net.d\"\n    - pathPrefix: \"/etc/kube-flannel\"\n    - pathPrefix: \"/run/flannel\"\n  readOnlyRootFilesystem: false\n  # Users and groups\n  runAsUser:\n    rule: RunAsAny\n  supplementalGroups:\n    rule: RunAsAny\n  fsGroup:\n    rule: RunAsAny\n  # Privilege Escalation\n  allowPrivilegeEscalation: false\n  defaultAllowPrivilegeEscalation: false\n  # Capabilities\n  allowedCapabilities: ['NET_ADMIN']\n  defaultAddCapabilities: []\n  requiredDropCapabilities: []\n  # Host namespaces\n  hostPID: false\n  hostIPC: false\n  hostNetwork: true\n  hostPorts:\n  - min: 0\n    max: 65535\n  # SELinux\n  seLinux:\n    # SELinux is unsed in CaaSP\n    rule: 'RunAsAny'\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-flannel-cfg\n  namespace: kube-system\n  labels:\n    tier: node\n    app: flannel\ndata:\n  cni-conf.json: |\n    {\n      \"name\": \"cbr0\",\n      \"cniVersion\":\"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"flannel\",\n          \"delegate\": {\n            \"hairpinMode\": true,\n            \"isDefaultGateway\": true\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"capabilities\": {\n            \"portMappings\": true\n          }\n        }\n      ]\n    }\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\",\n        \"VNI\": {{.FlannelBackend.VNI}},\n        \"Port\": {{.FlannelBackend.Port}}\n      }\n    }\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: kube-flannel\n  namespace: kube-system\n  labels:\n    tier: node\n    k8s-app: flannel\nspec:\n  selector:\n    matchLabels:\n      k8s-app: flannel\n  template:\n    metadata:\n      labels:\n        tier: node\n        k8s-app: flannel\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: 
beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n      hostNetwork: true\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      tolerations:\n      - operator: Exists\n      {{- if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: flannel\n      {{end}}\n      containers:\n      - name: kube-flannel\n        image: {{.Image}}\n        command:\n        - /opt/bin/flanneld\n        args:\n        - --ip-masq\n        - --kube-subnet-mgr\n        {{- if .FlannelInterface}}\n        - --iface={{.FlannelInterface}}\n        {{end}}\n        resources:\n          requests:\n            cpu: \"100m\"\n            memory: \"50Mi\"\n          limits:\n            cpu: \"100m\"\n            memory: \"50Mi\"\n        securityContext:\n          privileged: false\n          capabilities:\n             add: [\"NET_ADMIN\"]\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n        volumeMounts:\n        - name: run\n          mountPath: /run\n        - name: cni\n          mountPath: /etc/cni/net.d\n        - name: flannel-cfg\n          mountPath: /etc/kube-flannel/\n      - name: install-cni\n        image: {{.CNIImage}}\n        command: [\"/install-cni.sh\"]\n        env:\n        # The CNI network config to install on each node.\n        - name: CNI_NETWORK_CONFIG\n          valueFrom:\n            configMapKeyRef:\n              name: kube-flannel-cfg\n              key: cni-conf.json\n        - name: CNI_CONF_NAME\n          value: \"10-flannel.conflist\"\n        volumeMounts:\n        - name: cni\n          mountPath: /host/etc/cni/net.d\n        - name: host-cni-bin\n          mountPath: /host/opt/cni/bin/\n      volumes:\n        - name: run\n          hostPath:\n            path: /run\n        - name: cni\n          hostPath:\n            path: /etc/cni/net.d\n        - name: flannel-cfg\n          configMap:\n            name: kube-flannel-cfg\n        - name: host-cni-bin\n          hostPath:\n            path: /opt/cni/bin\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 20%\n{{end}}\n",
   "flannel-v1.8": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: flannel\n  namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: flannel\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - pods\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n{{- end}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-flannel-cfg\n  namespace: \"kube-system\"\n  labels:\n    tier: node\n    app: flannel\ndata:\n  cni-conf.json: |\n    {\n      \"name\":\"cbr0\",\n      \"cniVersion\":\"0.3.1\",\n      \"plugins\":[\n        {\n          \"type\":\"flannel\",\n          \"delegate\":{\n            \"forceAddress\":true,\n            \"isDefaultGateway\":true\n          }\n        },\n        {\n          \"type\":\"portmap\",\n          \"capabilities\":{\n            \"portMappings\":true\n          }\n        }\n      ]\n    }\n  net-conf.json: |\n    {\n      \"Network\": \"{{.ClusterCIDR}}\",\n      \"Backend\": {\n        \"Type\": \"{{.FlannelBackend.Type}}\",\n        \"VNI\": {{.FlannelBackend.VNI}},\n        \"Port\": {{.FlannelBackend.Port}}\n      }\n    }\n---\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n  name: kube-flannel\n  namespace: \"kube-system\"\n  labels:\n    tier: node\n    k8s-app: flannel\nspec:\n  template:\n    metadata:\n      labels:\n        tier: node\n        k8s-app: flannel\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      serviceAccountName: flannel\n      containers:\n      - name: kube-flannel\n        image: {{.Image}}\n        imagePullPolicy: IfNotPresent\n        resources:\n          limits:\n            cpu: 300m\n            memory: 500M\n          requests:\n            cpu: 150m\n            memory: 64M\n        {{- if .FlannelInterface}}\n        command: [\"/opt/bin/flanneld\",\"--ip-masq\",\"--kube-subnet-mgr\",\"--iface={{.FlannelInterface}}\"]\n        {{- else}}\n        command: [\"/opt/bin/flanneld\",\"--ip-masq\",\"--kube-subnet-mgr\"]\n        {{- end}}\n        securityContext:\n          privileged: true\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n        volumeMounts:\n        - name: run\n          mountPath: /run\n        - name: cni\n          mountPath: /etc/cni/net.d\n        - name: flannel-cfg\n          mountPath: /etc/kube-flannel/\n      - name: install-cni\n        image: {{.CNIImage}}\n        command: [\"/install-cni.sh\"]\n        env:\n        # The CNI network config to install on each node.\n        - name: CNI_NETWORK_CONFIG\n          
valueFrom:\n            configMapKeyRef:\n              name: kube-flannel-cfg\n              key: cni-conf.json\n        - name: CNI_CONF_NAME\n          value: \"10-flannel.conflist\"\n        volumeMounts:\n        - name: cni\n          mountPath: /host/etc/cni/net.d\n        - name: host-cni-bin\n          mountPath: /host/opt/cni/bin/\n      hostNetwork: true\n      tolerations:\n      {{- if ge .ClusterVersion \"v1.12\" }}\n      - operator: Exists\n        effect: NoSchedule\n      - operator: Exists\n        effect: NoExecute\n      {{- else }}\n      - key: node-role.kubernetes.io/controlplane\n        operator: Exists\n        effect: NoSchedule\n      - key: node-role.kubernetes.io/etcd\n        operator: Exists\n        effect: NoExecute\n      {{- end }}\n      - key: node.kubernetes.io/not-ready\n        effect: NoSchedule\n        operator: Exists\n      volumes:\n        - name: run\n          hostPath:\n            path: /run\n        - name: cni\n          hostPath:\n            path: /etc/cni/net.d\n        - name: flannel-cfg\n          configMap:\n            name: kube-flannel-cfg\n        - name: host-cni-bin\n          hostPath:\n            path: /opt/cni/bin\n  updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 20%\n{{end}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: flannel\n  namespace: kube-system\n",
   "kubedns-v1.16": "\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: kube-dns-autoscaler\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns-autoscaler\nspec:\n  selector:\n    matchLabels:\n      k8s-app: kube-dns-autoscaler\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns-autoscaler\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      serviceAccountName: kube-dns-autoscaler\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: autoscaler\n        image: {{.KubeDNSAutoScalerImage}}\n        resources:\n            requests:\n                cpu: \"20m\"\n                memory: \"10Mi\"\n        command:\n          - /cluster-proportional-autoscaler\n          - --namespace=kube-system\n          - --configmap=kube-dns-autoscaler\n          - --target=Deployment/kube-dns\n          # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n          # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n          - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n          - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n          - --logtostderr=true\n          - --v=2\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kube-dns-autoscaler\n  namespace: kube-system\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:kube-dns-autoscaler\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"replicationcontrollers/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"extensions\",\"apps\"]\n    resources: [\"deployments/scale\", \"replicasets/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:kube-dns-autoscaler\nsubjects:\n  - kind: ServiceAccount\n    name: kube-dns-autoscaler\n    namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: system:kube-dns-autoscaler\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\nspec:\n  # replicas: not specified here:\n  # 1. In order to make Addon Manager do not reconcile this replicas parameter.\n  # 2. Default is 1.\n  # 3. 
Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n  strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    rollingUpdate:\n      maxSurge: 10%\n      maxUnavailable: 0\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n      annotations:\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n          - weight: 100\n            podAffinityTerm:\n              labelSelector:\n                matchExpressions:\n                  - key: k8s-app\n                    operator: In\n                    values: [\"kube-dns\"]\n              topologyKey: kubernetes.io/hostname\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      tolerations:\n      - key: \"CriticalAddonsOnly\"\n        operator: \"Exists\"\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      volumes:\n      - name: kube-dns-config\n        configMap:\n          name: kube-dns\n          optional: true\n      containers:\n      - name: kubedns\n        image: {{.KubeDNSImage}}\n        resources:\n          # TODO: Set memory limits when we've profiled the container for large\n          # clusters, then set request = limit to keep this container in\n          # guaranteed class. 
Currently, this container falls into the\n          # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n          limits:\n            memory: 170Mi\n          requests:\n            cpu: 100m\n            memory: 70Mi\n        livenessProbe:\n          httpGet:\n            path: /healthcheck/kubedns\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        readinessProbe:\n          httpGet:\n            path: /readiness\n            port: 8081\n            scheme: HTTP\n          # we poll on pod startup for the Kubernetes master service and\n          # only setup the /readiness HTTP server once that's available.\n          initialDelaySeconds: 3\n          timeoutSeconds: 5\n        args:\n        - --domain={{.ClusterDomain}}.\n        - --dns-port=10053\n        - --config-dir=/kube-dns-config\n        - --v=2\n        env:\n        - name: PROMETHEUS_PORT\n          value: \"10055\"\n        ports:\n        - containerPort: 10053\n          name: dns-local\n          protocol: UDP\n        - containerPort: 10053\n          name: dns-tcp-local\n          protocol: TCP\n        - containerPort: 10055\n          name: metrics\n          protocol: TCP\n        volumeMounts:\n        - name: kube-dns-config\n          mountPath: /kube-dns-config\n      - name: dnsmasq\n        image: {{.DNSMasqImage}}\n        livenessProbe:\n          httpGet:\n            path: /healthcheck/dnsmasq\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        args:\n        - -v=2\n        - -logtostderr\n        - -configDir=/etc/k8s/dns/dnsmasq-nanny\n        - -restartDnsmasq=true\n        - --\n        - -k\n        - --cache-size=1000\n        - --log-facility=-\n        - --server=/{{.ClusterDomain}}/127.0.0.1#10053\n\t{{- if .ReverseCIDRs }}\n\t{{- range .ReverseCIDRs }}\n        - --server=/{{.}}/127.0.0.1#10053\n\t{{- end }}\n\t{{- else }}\n        - --server=/in-addr.arpa/127.0.0.1#10053\n        - --server=/ip6.arpa/127.0.0.1#10053\n\t{{- end }}\n        ports:\n        - containerPort: 53\n          name: dns\n          protocol: UDP\n        - containerPort: 53\n          name: dns-tcp\n          protocol: TCP\n        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n        resources:\n          requests:\n            cpu: 150m\n            memory: 20Mi\n        volumeMounts:\n        - name: kube-dns-config\n          mountPath: /etc/k8s/dns/dnsmasq-nanny\n      - name: sidecar\n        image: {{.KubeDNSSidecarImage}}\n        livenessProbe:\n          httpGet:\n            path: /metrics\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        args:\n        - --v=2\n        - --logtostderr\n        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A\n        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A\n        ports:\n        - containerPort: 10054\n          name: metrics\n          protocol: TCP\n        resources:\n          requests:\n            memory: 20Mi\n            cpu: 10m\n      dnsPolicy: Default  # Don't use cluster DNS.\n      serviceAccountName: kube-dns\n---\napiVersion: v1\nkind: 
Service\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n    kubernetes.io/name: \"KubeDNS\"\nspec:\n  selector:\n    k8s-app: kube-dns\n  clusterIP: {{.ClusterDNSServer}}\n  ports:\n  - name: dns\n    port: 53\n    protocol: UDP\n  - name: dns-tcp\n    port: 53\n    protocol: TCP\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kube-dns\n  namespace: kube-system\ndata:\n{{- if .UpstreamNameservers }}\n  upstreamNameservers: |\n    [{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf \"%q\" .}}{{end}}]\n{{- end }}\n{{- if .StubDomains }}\n  stubDomains: |\n    {{ GetKubednsStubDomains .StubDomains }}\n{{- end }}",
   "kubedns-v1.8": "\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  name: kube-dns-autoscaler\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns-autoscaler\nspec:\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns-autoscaler\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      serviceAccountName: kube-dns-autoscaler\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: autoscaler\n        image: {{.KubeDNSAutoScalerImage}}\n        resources:\n            requests:\n                cpu: \"20m\"\n                memory: \"10Mi\"\n        command:\n          - /cluster-proportional-autoscaler\n          - --namespace=kube-system\n          - --configmap=kube-dns-autoscaler\n          - --target=Deployment/kube-dns\n          # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n          # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n          - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n          - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1}}\n{{end}}\n          - --logtostderr=true\n          - --v=2\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kube-dns-autoscaler\n  namespace: kube-system\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n{{- if eq .RBACConfig \"rbac\"}}\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:kube-dns-autoscaler\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"nodes\"]\n    verbs: [\"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"replicationcontrollers/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"extensions\"]\n    resources: [\"deployments/scale\", \"replicasets/scale\"]\n    verbs: [\"get\", \"update\"]\n  - apiGroups: [\"\"]\n    resources: [\"configmaps\"]\n    verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: system:kube-dns-autoscaler\nsubjects:\n  - kind: ServiceAccount\n    name: kube-dns-autoscaler\n    namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: system:kube-dns-autoscaler\n  apiGroup: rbac.authorization.k8s.io\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n---\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\nspec:\n  # replicas: not specified here:\n  # 1. In order to make Addon Manager do not reconcile this replicas parameter.\n  # 2. Default is 1.\n  # 3. 
Will be tuned in real time if DNS horizontal auto-scaling is turned on.\n  strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n    rollingUpdate:\n      maxSurge: 10%\n      maxUnavailable: 0\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: kube-dns\n  template:\n    metadata:\n      labels:\n        k8s-app: kube-dns\n      annotations:\n        scheduler.alpha.kubernetes.io/critical-pod: ''\n    spec:\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n          - weight: 100\n            podAffinityTerm:\n              labelSelector:\n                matchExpressions:\n                  - key: k8s-app\n                    operator: In\n                    values: [\"kube-dns\"]\n              topologyKey: kubernetes.io/hostname\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      tolerations:\n      - key: \"CriticalAddonsOnly\"\n        operator: \"Exists\"\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      volumes:\n      - name: kube-dns-config\n        configMap:\n          name: kube-dns\n          optional: true\n      containers:\n      - name: kubedns\n        image: {{.KubeDNSImage}}\n        resources:\n          # TODO: Set memory limits when we've profiled the container for large\n          # clusters, then set request = limit to keep this container in\n          # guaranteed class. 
Currently, this container falls into the\n          # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n          limits:\n            memory: 170Mi\n          requests:\n            cpu: 100m\n            memory: 70Mi\n        livenessProbe:\n          httpGet:\n            path: /healthcheck/kubedns\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        readinessProbe:\n          httpGet:\n            path: /readiness\n            port: 8081\n            scheme: HTTP\n          # we poll on pod startup for the Kubernetes master service and\n          # only setup the /readiness HTTP server once that's available.\n          initialDelaySeconds: 3\n          timeoutSeconds: 5\n        args:\n        - --domain={{.ClusterDomain}}.\n        - --dns-port=10053\n        - --config-dir=/kube-dns-config\n        - --v=2\n        env:\n        - name: PROMETHEUS_PORT\n          value: \"10055\"\n        ports:\n        - containerPort: 10053\n          name: dns-local\n          protocol: UDP\n        - containerPort: 10053\n          name: dns-tcp-local\n          protocol: TCP\n        - containerPort: 10055\n          name: metrics\n          protocol: TCP\n        volumeMounts:\n        - name: kube-dns-config\n          mountPath: /kube-dns-config\n      - name: dnsmasq\n        image: {{.DNSMasqImage}}\n        livenessProbe:\n          httpGet:\n            path: /healthcheck/dnsmasq\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        args:\n        - -v=2\n        - -logtostderr\n        - -configDir=/etc/k8s/dns/dnsmasq-nanny\n        - -restartDnsmasq=true\n        - --\n        - -k\n        - --cache-size=1000\n        - --log-facility=-\n        - --server=/{{.ClusterDomain}}/127.0.0.1#10053\n\t{{- if .ReverseCIDRs }}\n\t{{- range .ReverseCIDRs }}\n        - --server=/{{.}}/127.0.0.1#10053\n\t{{- end }}\n\t{{- else }}\n        - --server=/in-addr.arpa/127.0.0.1#10053\n        - --server=/ip6.arpa/127.0.0.1#10053\n\t{{- end }}\n        ports:\n        - containerPort: 53\n          name: dns\n          protocol: UDP\n        - containerPort: 53\n          name: dns-tcp\n          protocol: TCP\n        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n        resources:\n          requests:\n            cpu: 150m\n            memory: 20Mi\n        volumeMounts:\n        - name: kube-dns-config\n          mountPath: /etc/k8s/dns/dnsmasq-nanny\n      - name: sidecar\n        image: {{.KubeDNSSidecarImage}}\n        livenessProbe:\n          httpGet:\n            path: /metrics\n            port: 10054\n            scheme: HTTP\n          initialDelaySeconds: 60\n          timeoutSeconds: 5\n          successThreshold: 1\n          failureThreshold: 5\n        args:\n        - --v=2\n        - --logtostderr\n        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A\n        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A\n        ports:\n        - containerPort: 10054\n          name: metrics\n          protocol: TCP\n        resources:\n          requests:\n            memory: 20Mi\n            cpu: 10m\n      dnsPolicy: Default  # Don't use cluster DNS.\n      serviceAccountName: kube-dns\n---\napiVersion: v1\nkind: 
Service\nmetadata:\n  name: kube-dns\n  namespace: kube-system\n  labels:\n    k8s-app: kube-dns\n    kubernetes.io/cluster-service: \"true\"\n    addonmanager.kubernetes.io/mode: Reconcile\n    kubernetes.io/name: \"KubeDNS\"\nspec:\n  selector:\n    k8s-app: kube-dns\n  clusterIP: {{.ClusterDNSServer}}\n  ports:\n  - name: dns\n    port: 53\n    protocol: UDP\n  - name: dns-tcp\n    port: 53\n    protocol: TCP\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kube-dns\n  namespace: kube-system\ndata:\n{{- if .UpstreamNameservers }}\n  upstreamNameservers: |\n    [{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf \"%q\" .}}{{end}}]\n{{- end }}\n{{- if .StubDomains }}\n  stubDomains: |\n    {{ GetKubednsStubDomains .StubDomains }}\n{{- end }}",
   "metricsserver-v1.8": "\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: metrics-server:system:auth-delegator\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n  name: metrics-server\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: metrics-server-auth-reader\n  namespace: kube-system\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n  name: metrics-server\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: system:metrics-server\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  - nodes\n  - nodes/stats\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"extensions\"\n  resources:\n  - deployments\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: system:metrics-server\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:metrics-server\nsubjects:\n- kind: ServiceAccount\n  name: metrics-server\n  namespace: kube-system\n{{- end }}\n---\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n  name: v1beta1.metrics.k8s.io\nspec:\n  service:\n    name: metrics-server\n    namespace: kube-system\n  group: metrics.k8s.io\n  version: v1beta1\n  insecureSkipTLSVerify: true\n  groupPriorityMinimum: 100\n  versionPriority: 100\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: metrics-server\n  namespace: kube-system\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: metrics-server\n  namespace: kube-system\n  labels:\n    k8s-app: metrics-server\nspec:\n{{if .Replicas}}\n  replicas: {{.Replicas}}\n{{end}}\n  selector:\n    matchLabels:\n      k8s-app: metrics-server\n{{if .UpdateStrategy}}\n  strategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n  template:\n    metadata:\n      name: metrics-server\n      labels:\n        k8s-app: metrics-server\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      serviceAccountName: metrics-server\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: metrics-server\n        image: {{ .MetricsServerImage }}\n        imagePullPolicy: Always\n        command:\n        - /metrics-server\n        {{- if eq .Version \"v0.3\" }}\n        - --kubelet-insecure-tls\n        - --kubelet-preferred-address-types=InternalIP\n        - --logtostderr\n        {{- else }}\n        - --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true\u0026kubeletPort=10250\u0026useServiceAccount=true\u0026insecure=true\n        {{- end }}\n        {{ range $k,$v := .Options }}\n   
     -  --{{ $k }}={{ $v }}\n        {{ end }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: metrics-server\n  namespace: kube-system\n  labels:\n    kubernetes.io/name: \"Metrics-server\"\nspec:\n  selector:\n    k8s-app: metrics-server\n  ports:\n  - port: 443\n    protocol: TCP\n    targetPort: 443\n",
   "nginxingress-v1.15": "\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: nginx-configuration\n  namespace: ingress-nginx\n  labels:\n    app: ingress-nginx\ndata:\n{{ range $k,$v := .Options }}\n  {{ $k }}: \"{{ $v }}\"\n{{ end }}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: tcp-services\n  namespace: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: udp-services\n  namespace: ingress-nginx\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: nginx-ingress-serviceaccount\n  namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: nginx-ingress-clusterrole\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n      - endpoints\n      - nodes\n      - pods\n      - secrets\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - services\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"extensions\"\n      - \"networking.k8s.io\"\n    resources:\n      - ingresses\n      - daemonsets\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n        - events\n    verbs:\n        - create\n        - patch\n  - apiGroups:\n      - \"extensions\"\n      - \"networking.k8s.io\"\n    resources:\n      - ingresses/status\n    verbs:\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n  name: nginx-ingress-role\n  namespace: ingress-nginx\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n      - pods\n      - secrets\n      - namespaces\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    resourceNames:\n      # Defaults to \"\u003celection-id\u003e-\u003cingress-class\u003e\"\n      # Here: \"\u003cingress-controller-leader\u003e-\u003cnginx\u003e\"\n      # This has to be adapted if you change either parameter\n      # when launching the nginx-ingress-controller.\n      - \"ingress-controller-leader-nginx\"\n    verbs:\n      - get\n      - update\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - create\n  - apiGroups:\n      - \"\"\n    resources:\n      - endpoints\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: nginx-ingress-role-nisa-binding\n  namespace: ingress-nginx\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: nginx-ingress-role\nsubjects:\n  - kind: ServiceAccount\n    name: nginx-ingress-serviceaccount\n    namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: nginx-ingress-clusterrole-nisa-binding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: nginx-ingress-clusterrole\nsubjects:\n  - kind: ServiceAccount\n    name: nginx-ingress-serviceaccount\n    namespace: ingress-nginx\n{{ end }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: nginx-ingress-controller\n  namespace: ingress-nginx\nspec:\n  selector:\n    matchLabels:\n      app: ingress-nginx\n{{if .UpdateStrategy}}\n  updateStrategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n  template:\n    metadata:\n      labels:\n        app: ingress-nginx\n     
 annotations:\n        prometheus.io/port: '10254'\n        prometheus.io/scrape: 'true'\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      hostNetwork: true\n      {{if .DNSPolicy}}\n      dnsPolicy: {{.DNSPolicy}}\n      {{end}}\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: nginx-ingress-serviceaccount\n      {{ end }}\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      {{- if ne .AlpineImage \"\"}}\n      initContainers:\n      - command:\n        - sh\n        - -c\n        - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range=\"1024 65535\"\n        image: {{.AlpineImage}}\n        imagePullPolicy: IfNotPresent\n        name: sysctl\n        securityContext:\n          privileged: true\n      {{- end }}\n      containers:\n        - name: nginx-ingress-controller\n          image: {{.IngressImage}}\n          args:\n            - /nginx-ingress-controller\n            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend\n            - --configmap=$(POD_NAMESPACE)/nginx-configuration\n            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services\n            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services\n            - --annotations-prefix=nginx.ingress.kubernetes.io\n          {{ range $k, $v := .ExtraArgs }}\n            - --{{ $k }}{{if ne $v \"\" }}={{ $v }}{{end}}\n          {{ end }}\n          {{- if eq .AlpineImage \"\"}}\n          securityContext:\n            capabilities:\n                drop:\n                - ALL\n                add:\n                - NET_BIND_SERVICE\n            runAsUser: 33\n          {{- end }}\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n{{if .ExtraEnvs}}\n{{ toYaml .ExtraEnvs | indent 12}}\n{{end}}\n          ports:\n          - name: http\n            containerPort: 80\n          - name: https\n            containerPort: 443\n          livenessProbe:\n            failureThreshold: 3\n            httpGet:\n              path: /healthz\n              port: 10254\n              scheme: HTTP\n            initialDelaySeconds: 10\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 1\n          readinessProbe:\n            failureThreshold: 3\n            httpGet:\n              path: /healthz\n              port: 10254\n              scheme: HTTP\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 1\n{{if .ExtraVolumeMounts}}\n          volumeMounts:\n{{ toYaml .ExtraVolumeMounts | indent 12}}\n{{end}}\n{{if .ExtraVolumes}}\n      volumes:\n{{ toYaml .ExtraVolumes | indent 8}}\n{{end}}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: default-http-backend\n  labels:\n    
app: default-http-backend\n  namespace: ingress-nginx\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: default-http-backend\n  template:\n    metadata:\n      labels:\n        app: default-http-backend\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      terminationGracePeriodSeconds: 60\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: default-http-backend\n        # Any image is permissable as long as:\n        # 1. It serves a 404 page at /\n        # 2. It serves 200 on a /healthz endpoint\n        image: {{.IngressBackend}}\n        livenessProbe:\n          httpGet:\n            path: /healthz\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 30\n          timeoutSeconds: 5\n        ports:\n        - containerPort: 8080\n        resources:\n          limits:\n            cpu: 10m\n            memory: 20Mi\n          requests:\n            cpu: 10m\n            memory: 20Mi\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: default-http-backend\n  namespace: ingress-nginx\n  labels:\n    app: default-http-backend\nspec:\n  ports:\n  - port: 80\n    targetPort: 8080\n  selector:\n    app: default-http-backend\n",
   "nginxingress-v1.8": "\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: nginx-configuration\n  namespace: ingress-nginx\n  labels:\n    app: ingress-nginx\ndata:\n{{ range $k,$v := .Options }}\n  {{ $k }}: \"{{ $v }}\"\n{{ end }}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: tcp-services\n  namespace: ingress-nginx\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: udp-services\n  namespace: ingress-nginx\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: nginx-ingress-serviceaccount\n  namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: nginx-ingress-clusterrole\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n      - endpoints\n      - nodes\n      - pods\n      - secrets\n    verbs:\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n      - nodes\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - services\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"extensions\"\n    resources:\n      - ingresses\n      - daemonsets\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - \"\"\n    resources:\n        - events\n    verbs:\n        - create\n        - patch\n  - apiGroups:\n      - \"extensions\"\n    resources:\n      - ingresses/status\n    verbs:\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n  name: nginx-ingress-role\n  namespace: ingress-nginx\nrules:\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n      - pods\n      - secrets\n      - namespaces\n    verbs:\n      - get\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    resourceNames:\n      # Defaults to \"\u003celection-id\u003e-\u003cingress-class\u003e\"\n      # Here: \"\u003cingress-controller-leader\u003e-\u003cnginx\u003e\"\n      # This has to be adapted if you change either parameter\n      # when launching the nginx-ingress-controller.\n      - \"ingress-controller-leader-nginx\"\n    verbs:\n      - get\n      - update\n  - apiGroups:\n      - \"\"\n    resources:\n      - configmaps\n    verbs:\n      - create\n  - apiGroups:\n      - \"\"\n    resources:\n      - endpoints\n    verbs:\n      - get\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: nginx-ingress-role-nisa-binding\n  namespace: ingress-nginx\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: nginx-ingress-role\nsubjects:\n  - kind: ServiceAccount\n    name: nginx-ingress-serviceaccount\n    namespace: ingress-nginx\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: nginx-ingress-clusterrole-nisa-binding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: nginx-ingress-clusterrole\nsubjects:\n  - kind: ServiceAccount\n    name: nginx-ingress-serviceaccount\n    namespace: ingress-nginx\n{{ end }}\n---\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: nginx-ingress-controller\n  namespace: ingress-nginx\nspec:\n  selector:\n    matchLabels:\n      app: ingress-nginx\n{{if .UpdateStrategy}}\n  updateStrategy:\n{{ toYaml .UpdateStrategy | indent 4}}\n{{end}}\n  template:\n    metadata:\n      labels:\n        app: ingress-nginx\n      annotations:\n        prometheus.io/port: '10254'\n        
prometheus.io/scrape: 'true'\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      hostNetwork: true\n      {{if .DNSPolicy}}\n      dnsPolicy: {{.DNSPolicy}}\n      {{end}}\n{{if .NodeSelector}}\n      nodeSelector:\n      {{ range $k, $v := .NodeSelector }}\n        {{ $k }}: \"{{ $v }}\"\n      {{ end }}\n{{end}}\n      {{if eq .RBACConfig \"rbac\"}}\n      serviceAccountName: nginx-ingress-serviceaccount\n      {{ end }}\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      {{- if ne .AlpineImage \"\"}}\n      initContainers:\n      - command:\n        - sh\n        - -c\n        - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range=\"1024 65535\"\n        image: {{.AlpineImage}}\n        imagePullPolicy: IfNotPresent\n        name: sysctl\n        securityContext:\n          privileged: true\n      {{- end }}\n      containers:\n        - name: nginx-ingress-controller\n          image: {{.IngressImage}}\n          args:\n            - /nginx-ingress-controller\n            - --default-backend-service=$(POD_NAMESPACE)/default-http-backend\n            - --configmap=$(POD_NAMESPACE)/nginx-configuration\n            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services\n            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services\n            - --annotations-prefix=nginx.ingress.kubernetes.io\n          {{ range $k, $v := .ExtraArgs }}\n            - --{{ $k }}{{if ne $v \"\" }}={{ $v }}{{end}}\n          {{ end }}\n          {{- if eq .AlpineImage \"\"}}\n          securityContext:\n            capabilities:\n                drop:\n                - ALL\n                add:\n                - NET_BIND_SERVICE\n            runAsUser: 33\n          {{- end }}\n          env:\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n{{if .ExtraEnvs}}\n{{ toYaml .ExtraEnvs | indent 12}}\n{{end}}\n          ports:\n          - name: http\n            containerPort: 80\n          - name: https\n            containerPort: 443\n          livenessProbe:\n            failureThreshold: 3\n            httpGet:\n              path: /healthz\n              port: 10254\n              scheme: HTTP\n            initialDelaySeconds: 10\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 1\n          readinessProbe:\n            failureThreshold: 3\n            httpGet:\n              path: /healthz\n              port: 10254\n              scheme: HTTP\n            periodSeconds: 10\n            successThreshold: 1\n            timeoutSeconds: 1\n{{if .ExtraVolumeMounts}}\n          volumeMounts:\n{{ toYaml .ExtraVolumeMounts | indent 12}}\n{{end}}\n{{if .ExtraVolumes}}\n      volumes:\n{{ toYaml .ExtraVolumes | indent 8}}\n{{end}}\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: default-http-backend\n  labels:\n    app: default-http-backend\n  namespace: ingress-nginx\nspec:\n 
 replicas: 1\n  selector:\n    matchLabels:\n      app: default-http-backend\n  template:\n    metadata:\n      labels:\n        app: default-http-backend\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n              - matchExpressions:\n                - key: beta.kubernetes.io/os\n                  operator: NotIn\n                  values:\n                    - windows\n                - key: node-role.kubernetes.io/worker\n                  operator: Exists\n      terminationGracePeriodSeconds: 60\n      tolerations:\n      - effect: NoExecute\n        operator: Exists\n      - effect: NoSchedule\n        operator: Exists\n      containers:\n      - name: default-http-backend\n        # Any image is permissable as long as:\n        # 1. It serves a 404 page at /\n        # 2. It serves 200 on a /healthz endpoint\n        image: {{.IngressBackend}}\n        livenessProbe:\n          httpGet:\n            path: /healthz\n            port: 8080\n            scheme: HTTP\n          initialDelaySeconds: 30\n          timeoutSeconds: 5\n        ports:\n        - containerPort: 8080\n        resources:\n          limits:\n            cpu: 10m\n            memory: 20Mi\n          requests:\n            cpu: 10m\n            memory: 20Mi\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: default-http-backend\n  namespace: ingress-nginx\n  labels:\n    app: default-http-backend\nspec:\n  ports:\n  - port: 80\n    targetPort: 8080\n  selector:\n    app: default-http-backend\n",
   "weave-v1.16": "\n---\n# This ConfigMap can be used to configure a self-hosted Weave Net installation.\napiVersion: v1\nkind: List\nitems:\n  - apiVersion: v1\n    kind: ServiceAccount\n    metadata:\n      name: weave-net\n      namespace: kube-system\n  - apiVersion: apps/v1\n    kind: DaemonSet\n    metadata:\n      name: weave-net\n      labels:\n        name: weave-net\n      namespace: kube-system\n    spec:\n      selector:\n        matchLabels:\n          name: weave-net\n      template:\n        metadata:\n          annotations:\n            scheduler.alpha.kubernetes.io/critical-pod: ''\n            scheduler.alpha.kubernetes.io/tolerations: \u003e-\n              [{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"master\",\"effect\":\"NoSchedule\"}]\n          labels:\n            name: weave-net\n        spec:\n          affinity:\n            nodeAffinity:\n              requiredDuringSchedulingIgnoredDuringExecution:\n                nodeSelectorTerms:\n                  - matchExpressions:\n                    - key: beta.kubernetes.io/os\n                      operator: NotIn\n                      values:\n                        - windows\n{{if .NodeSelector}}\n          nodeSelector:\n            {{ range $k, $v := .NodeSelector }}\n              {{ $k }}: \"{{ $v }}\"\n            {{ end }}\n{{end}}\n          containers:\n            - name: weave\n              command:\n                - /home/weave/launch.sh\n              env:\n                - name: HOSTNAME\n                  valueFrom:\n                    fieldRef:\n                      apiVersion: v1\n                      fieldPath: spec.nodeName\n                - name: IPALLOC_RANGE\n                  value: \"{{.ClusterCIDR}}\"\n                {{- if .WeavePassword}}\n                - name: WEAVE_PASSWORD\n                  value: \"{{.WeavePassword}}\"\n                {{- end}}\n                {{- if .MTU }}\n                {{- if ne .MTU 0 }}\n                - name: WEAVE_MTU\n                  value: \"{{.MTU}}\"\n                {{- end }}\n                {{- end }}\n              image: {{.Image}}\n              readinessProbe:\n                httpGet:\n                  host: 127.0.0.1\n                  path: /status\n                  port: 6784\n                initialDelaySeconds: 30\n              resources:\n                requests:\n                  cpu: 10m\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: weavedb\n                  mountPath: /weavedb\n                - name: cni-bin\n                  mountPath: /host/opt\n                - name: cni-bin2\n                  mountPath: /host/home\n                - name: cni-conf\n                  mountPath: /host/etc\n                - name: dbus\n                  mountPath: /host/var/lib/dbus\n                - name: lib-modules\n                  mountPath: /lib/modules\n                - name: xtables-lock\n                  mountPath: /run/xtables.lock\n            - name: weave-npc\n              env:\n                - name: HOSTNAME\n                  valueFrom:\n                    fieldRef:\n                      apiVersion: v1\n                      fieldPath: spec.nodeName\n              image: {{.CNIImage}}\n              resources:\n                requests:\n                  cpu: 10m\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: xtables-lock\n                  
mountPath: /run/xtables.lock\n            - name: weave-plugins\n              command:\n                - /opt/rke-tools/weave-plugins-cni.sh\n              image: {{.WeaveLoopbackImage}}\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: cni-bin\n                  mountPath: /opt\n          hostNetwork: true\n          hostPID: true\n          restartPolicy: Always\n          securityContext:\n            seLinuxOptions: {}\n          serviceAccountName: weave-net\n          tolerations:\n          - operator: Exists\n            effect: NoSchedule\n          - operator: Exists\n            effect: NoExecute\n          volumes:\n            - name: weavedb\n              hostPath:\n                path: /var/lib/weave\n            - name: cni-bin\n              hostPath:\n                path: /opt\n            - name: cni-bin2\n              hostPath:\n                path: /home\n            - name: cni-conf\n              hostPath:\n                path: /etc\n            - name: dbus\n              hostPath:\n                path: /var/lib/dbus\n            - name: lib-modules\n              hostPath:\n                path: /lib/modules\n            - name: xtables-lock\n              hostPath:\n                path: /run/xtables.lock\n      updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 8}}\n{{end}}\n        type: RollingUpdate\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - pods\n      - namespaces\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - networking.k8s.io\n    resources:\n      - networkpolicies\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - ''\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\nroleRef:\n  kind: ClusterRole\n  name: weave-net\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: weave-net\n    namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\nrules:\n  - apiGroups:\n      - ''\n    resourceNames:\n      - weave-net\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - update\n  - apiGroups:\n      - ''\n    resources:\n      - configmaps\n    verbs:\n      - create\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\nroleRef:\n  kind: Role\n  name: weave-net\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: weave-net\n    namespace: kube-system\n{{- end}}\n",
   "weave-v1.8": "\n---\n# This ConfigMap can be used to configure a self-hosted Weave Net installation.\napiVersion: v1\nkind: List\nitems:\n  - apiVersion: v1\n    kind: ServiceAccount\n    metadata:\n      name: weave-net\n      namespace: kube-system\n  - apiVersion: extensions/v1beta1\n    kind: DaemonSet\n    metadata:\n      name: weave-net\n      labels:\n        name: weave-net\n      namespace: kube-system\n    spec:\n      template:\n        metadata:\n          annotations:\n            scheduler.alpha.kubernetes.io/critical-pod: ''\n            scheduler.alpha.kubernetes.io/tolerations: \u003e-\n              [{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"master\",\"effect\":\"NoSchedule\"}]\n          labels:\n            name: weave-net\n        spec:\n          affinity:\n            nodeAffinity:\n              requiredDuringSchedulingIgnoredDuringExecution:\n                nodeSelectorTerms:\n                  - matchExpressions:\n                    - key: beta.kubernetes.io/os\n                      operator: NotIn\n                      values:\n                        - windows\n{{if .NodeSelector}}\n          nodeSelector:\n            {{ range $k, $v := .NodeSelector }}\n              {{ $k }}: \"{{ $v }}\"\n            {{ end }}\n{{end}}\n          containers:\n            - name: weave\n              command:\n                - /home/weave/launch.sh\n              env:\n                - name: HOSTNAME\n                  valueFrom:\n                    fieldRef:\n                      apiVersion: v1\n                      fieldPath: spec.nodeName\n                - name: IPALLOC_RANGE\n                  value: \"{{.ClusterCIDR}}\"\n                {{- if .WeavePassword}}\n                - name: WEAVE_PASSWORD\n                  value: \"{{.WeavePassword}}\"\n                {{- end}}\n                {{- if .MTU }}\n                {{- if ne .MTU 0 }}\n                - name: WEAVE_MTU\n                  value: \"{{.MTU}}\"\n                {{- end }}\n                {{- end }}\n              image: {{.Image}}\n              readinessProbe:\n                httpGet:\n                  host: 127.0.0.1\n                  path: /status\n                  port: 6784\n                initialDelaySeconds: 30\n              resources:\n                requests:\n                  cpu: 10m\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: weavedb\n                  mountPath: /weavedb\n                - name: cni-bin\n                  mountPath: /host/opt\n                - name: cni-bin2\n                  mountPath: /host/home\n                - name: cni-conf\n                  mountPath: /host/etc\n                - name: dbus\n                  mountPath: /host/var/lib/dbus\n                - name: lib-modules\n                  mountPath: /lib/modules\n                - name: xtables-lock\n                  mountPath: /run/xtables.lock\n            - name: weave-npc\n              env:\n                - name: HOSTNAME\n                  valueFrom:\n                    fieldRef:\n                      apiVersion: v1\n                      fieldPath: spec.nodeName\n              image: {{.CNIImage}}\n              resources:\n                requests:\n                  cpu: 10m\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: xtables-lock\n                  mountPath: /run/xtables.lock\n            - name: 
weave-plugins\n              command:\n                - /opt/rke-tools/weave-plugins-cni.sh\n              image: {{.WeaveLoopbackImage}}\n              securityContext:\n                privileged: true\n              volumeMounts:\n                - name: cni-bin\n                  mountPath: /opt\n          hostNetwork: true\n          hostPID: true\n          restartPolicy: Always\n          securityContext:\n            seLinuxOptions: {}\n          serviceAccountName: weave-net\n          tolerations:\n          - operator: Exists\n            effect: NoSchedule\n          - operator: Exists\n            effect: NoExecute\n          volumes:\n            - name: weavedb\n              hostPath:\n                path: /var/lib/weave\n            - name: cni-bin\n              hostPath:\n                path: /opt\n            - name: cni-bin2\n              hostPath:\n                path: /home\n            - name: cni-conf\n              hostPath:\n                path: /etc\n            - name: dbus\n              hostPath:\n                path: /var/lib/dbus\n            - name: lib-modules\n              hostPath:\n                path: /lib/modules\n            - name: xtables-lock\n              hostPath:\n                path: /run/xtables.lock\n      updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 8}}\n{{end}}\n        type: RollingUpdate\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\nrules:\n  - apiGroups:\n      - ''\n    resources:\n      - pods\n      - namespaces\n      - nodes\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - networking.k8s.io\n    resources:\n      - networkpolicies\n    verbs:\n      - get\n      - list\n      - watch\n  - apiGroups:\n      - ''\n    resources:\n      - nodes/status\n    verbs:\n      - patch\n      - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\nroleRef:\n  kind: ClusterRole\n  name: weave-net\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: weave-net\n    namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\nrules:\n  - apiGroups:\n      - ''\n    resourceNames:\n      - weave-net\n    resources:\n      - configmaps\n    verbs:\n      - get\n      - update\n  - apiGroups:\n      - ''\n    resources:\n      - configmaps\n    verbs:\n      - create\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: weave-net\n  labels:\n    name: weave-net\n  namespace: kube-system\nroleRef:\n  kind: Role\n  name: weave-net\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - kind: ServiceAccount\n    name: weave-net\n    namespace: kube-system\n{{- end}}\n"
  },
  "weave": {
   "\u003e=1.16.0-alpha": "weave-v1.16",
   "\u003e=1.8.0-rancher0 \u003c1.16.0-alpha": "weave-v1.8"
  }
 },
 "K8sVersionInfo": {
  "v1.10": {
   "maxRKEVersion": "0.2.2",
   "maxRancherVersion": "2.2"
  },
  "v1.10.1-rancher1": {
   "deprecateRKEVersion": "0.2.2",
   "deprecateRancherVersion": "2.2"
  },
  "v1.11": {
   "maxRKEVersion": "0.2.2",
   "maxRancherVersion": "2.2"
  },
  "v1.12": {
   "maxRKEVersion": "0.2.2",
   "maxRancherVersion": "2.2"
  },
  "v1.13": {
   "maxRKEVersion": "0.3.1",
   "maxRancherVersion": "2.3.1"
  },
  "v1.14": {
   "maxRKEVersion": "1.0.0",
   "maxRancherVersion": "2.3.3"
  },
  "v1.15.5-rancher1-1": {
   "maxRKEVersion": "0.2.8",
   "maxRancherVersion": "2.2.9"
  },
  "v1.8": {
   "maxRKEVersion": "0.2.2",
   "maxRancherVersion": "2.2"
  },
  "v1.8.10-rancher1-1": {
   "deprecateRKEVersion": "0.2.2",
   "deprecateRancherVersion": "2.2"
  },
  "v1.8.11-rancher1": {
   "deprecateRKEVersion": "0.2.2",
   "deprecateRancherVersion": "2.2"
  },
  "v1.9": {
   "maxRKEVersion": "0.2.2",
   "maxRancherVersion": "2.2"
  },
  "v1.9.7-rancher1": {
   "deprecateRKEVersion": "0.2.2",
   "deprecateRancherVersion": "2.2"
  }
 },
 "RancherDefaultK8sVersions": {
  "2.3": "v1.17.x",
  "2.3.0": "v1.15.x",
  "2.3.1": "v1.15.x",
  "2.3.2": "v1.15.x",
  "2.3.3": "v1.16.x",
  "default": "v1.17.x"
 },
 "RKEDefaultK8sVersions": {
  "0.3": "v1.16.3-rancher1-1",
  "default": "v1.17.3-rancher1-1"
 },
 "K8sVersionDockerInfo": {
  "1.10": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.11": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.12": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.13": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.14": [
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.15": [
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.16": [
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.17": [
   "1.13.x",
   "17.03.x",
   "17.06.x",
   "17.09.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ],
  "1.8": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x"
  ],
  "1.9": [
   "1.11.x",
   "1.12.x",
   "1.13.x",
   "17.03.x",
   "18.06.x",
   "18.09.x",
   "19.03.x"
  ]
 },
 "K8sVersionWindowsServiceOptions": {
  "v1.15": {
   "etcd": null,
   "kubeapi": null,
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki",
    "cgroups-per-qos": "false",
    "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin",
    "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d",
    "enforce-node-allocatable": "''",
    "event-qps": "0",
    "feature-gates": "HyperVContainer=true,WindowsGMSA=true",
    "image-pull-progress-deadline": "30m",
    "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "''",
    "streaming-connection-idle-timeout": "30m",
    "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "enable-dsr": "false",
    "feature-gates": "WinOverlay=true",
    "healthz-bind-address": "127.0.0.1",
    "proxy-mode": "kernelspace",
    "v": "2"
   },
   "kubeController": null,
   "scheduler": null
  },
  "v1.16": {
   "etcd": null,
   "kubeapi": null,
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki",
    "cgroups-per-qos": "false",
    "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin",
    "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d",
    "enforce-node-allocatable": "''",
    "event-qps": "0",
    "feature-gates": "HyperVContainer=true,WindowsGMSA=true",
    "image-pull-progress-deadline": "30m",
    "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "''",
    "streaming-connection-idle-timeout": "30m",
    "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "enable-dsr": "false",
    "feature-gates": "WinOverlay=true",
    "healthz-bind-address": "127.0.0.1",
    "proxy-mode": "kernelspace",
    "v": "2"
   },
   "kubeController": null,
   "scheduler": null
  },
  "v1.17": {
   "etcd": null,
   "kubeapi": null,
   "kubelet": {
    "address": "0.0.0.0",
    "anonymous-auth": "false",
    "authentication-token-webhook": "true",
    "authorization-mode": "Webhook",
    "cert-dir": "[PREFIX_PATH]/var/lib/kubelet/pki",
    "cgroups-per-qos": "false",
    "cni-bin-dir": "[PREFIX_PATH]/opt/cni/bin",
    "cni-conf-dir": "[PREFIX_PATH]/etc/cni/net.d",
    "enforce-node-allocatable": "''",
    "event-qps": "0",
    "feature-gates": "HyperVContainer=true,WindowsGMSA=true",
    "image-pull-progress-deadline": "30m",
    "kube-reserved": "cpu=500m,memory=500Mi,ephemeral-storage=1Gi",
    "make-iptables-util-chains": "true",
    "network-plugin": "cni",
    "read-only-port": "0",
    "resolv-conf": "''",
    "streaming-connection-idle-timeout": "30m",
    "system-reserved": "cpu=1000m,memory=2Gi,ephemeral-storage=2Gi",
    "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
    "v": "2",
    "volume-plugin-dir": "[PREFIX_PATH]/var/lib/kubelet/volumeplugins"
   },
   "kubeproxy": {
    "enable-dsr": "false",
    "feature-gates": "WinOverlay=true",
    "healthz-bind-address": "127.0.0.1",
    "proxy-mode": "kernelspace",
    "v": "2"
   },
   "kubeController": null,
   "scheduler": null
  }
 },
 "CisConfigParams": {
  "default": {
   "benchmarkVersion": "rke-cis-1.5"
  },
  "v1.15": {
   "benchmarkVersion": "rke-cis-1.5"
  },
  "v1.16": {
   "benchmarkVersion": "rke-cis-1.5"
  },
  "v1.17": {
   "benchmarkVersion": "rke-cis-1.5"
  },
  "v1.18": {
   "benchmarkVersion": "rke-cis-1.5"
  }
 },
 "CisBenchmarkVersionInfo": {
  "cis-1.4": {
   "managed": false,
   "minKubernetesVersion": "1.13",
   "skippedChecks": null,
   "notApplicableChecks": null
  },
  "cis-1.5": {
   "managed": false,
   "minKubernetesVersion": "1.15",
   "skippedChecks": null,
   "notApplicableChecks": null
  },
  "rke-cis-1.4": {
   "managed": true,
   "minKubernetesVersion": "1.13",
   "skippedChecks": {},
   "notApplicableChecks": {}
  },
  "rke-cis-1.5": {
   "managed": true,
   "minKubernetesVersion": "1.15",
   "skippedChecks": {
    "5.2.2": "Enabling Pod Security Policy can cause issues with many helm chart installations",
    "5.2.3": "Enabling Pod Security Policy can cause issues with many helm chart installations",
    "5.2.4": "Enabling Pod Security Policy can cause issues with many helm chart installations",
    "5.2.5": "Enabling Pod Security Policy can cause issues with many helm chart installations",
    "5.3.2": "Enabling Network Policies can cause lot of unintended network traffic disruptions",
    "5.6.4": "A default namespace provides a flexible workspace to try out various deployments"
   },
   "notApplicableChecks": {
    "1.1.1": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.2": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.3": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.4": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.5": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.6": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.7": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time.",
    "1.1.8": "Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.\nAll configuration is passed in as arguments at container run time."
   }
  }
 }
}