Created
April 15, 2019 19:26
-
-
Save b23prodtm/0f7ce71ca73f6525b9c732a17288f614 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
; Kubespray Ansible inventory for a two-node Raspberry Pi cluster.
; ansible_host supersedes the deprecated ansible_ssh_host (same address was
; duplicated across both vars and ip); a single copy of each var is kept.
[all]
raspberrypib ansible_host=192.168.0.36 ansible_user=ubuntu ansible_become=yes ip=192.168.0.36 etcd_member_name=etcd1
raspberrypia ansible_host=192.168.0.37 ansible_user=ubuntu ansible_become=yes ip=192.168.0.37

; Optional jump host, currently unused
; [bastion]
; macrouter

[kube-master]
raspberrypib

[etcd]
raspberrypib

[kube-node]
raspberrypia

; Calico route reflectors (none configured)
[calico-rr]

[k8s-cluster:children]
kube-master
kube-node

[rack0]
raspberrypia
raspberrypib

; Calico BGP cluster id shared by all hosts in rack0
[rack0:vars]
cluster_id="1.0.0.1"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
---
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.13.5
# kubernetes image repo define
kube_image_repo: "gcr.io/google-containers"
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Directory where credentials will be stored
credentials_dir: "{{ inventory_dir }}/credentials"
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
# (password lookup term embeds length/chars options in its string argument)
kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{ kube_api_pwd }}"
    role: admin
    groups:
      - system:masters
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
# kube_oidc_auth: false
# kube_basic_auth: false
# kube_token_auth: false
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
# kube_oidc_username_claim: sub
# kube_oidc_username_prefix: oidc:
# kube_oidc_groups_claim: groups
# kube_oidc_groups_prefix: oidc:
# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
kube_network_plugin_multus: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
# kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0 # (disabled)
# Kube-proxy proxyMode configuration.
# Can be ipvs (ingress controller), iptables (behind firewall)
kube_proxy_mode: ipvs
# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
kube_proxy_nodeport_addresses: >-
  {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
  [{{ kube_proxy_nodeport_addresses_cidr }}]
  {%- else -%}
  []
  {%- endif -%}
# If non-empty, will use this string as identification instead of the actual hostname
# kube_override_hostname: >-
#   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
#   {%- else -%}
#   {{ inventory_hostname }}
#   {%- endif -%}
## Encrypting Secret Data at Rest (experimental)
kube_encrypt_secret_data: false
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be coredns, coredns_dual, manual or none
dns_mode: coredns
# Set manual server if using a custom cluster DNS server
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: false
nodelocaldns_ip: 169.254.25.10
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
## Container runtime
## docker for docker and crio for cri-o.
container_manager: docker
## Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# audit log for kubernetes
kubernetes_audit: false
# dynamic kubelet configuration
dynamic_kubelet_configuration: false
# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: true
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
kubectl_localhost: true
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
persistent_volumes_enabled: false
## Container Engine Acceleration
## Enable container acceleration feature, for example use gpu acceleration in containers
# nvidia_accelerator_enabled: true
## Nvidia GPU driver install. Install will be done by a (init) pod running as a daemonset.
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
## Labels and taints won't be set to nodes if they are not in the array.
# nvidia_gpu_nodes:
# - kube-gpu-001
# nvidia_driver_version: "384.111"
## flavor can be tesla or gtx
# nvidia_gpu_flavor: gtx
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment