Skip to content

Instantly share code, notes, and snippets.

@piersharding
Created June 26, 2019 21:40
Show Gist options
  • Save piersharding/a05e60b1cb02e35f152ec2ba67a605f9 to your computer and use it in GitHub Desktop.
---
# Classify each host and compute the address the cluster API will listen on.
- name: Set the host grouping
  set_fact:
    # The first host in the k8s_master group is the "master"; everyone else
    # is a "worker".  (The original condition was inverted: it labelled the
    # first master "worker" and all other hosts "master".)
    host_grouping: "{% if inventory_hostname == groups['k8s_master'][0] %}master{% else %}worker{% endif %}"

- name: Set cluster API address
  set_fact:
    # Prefer an explicitly provided secondary_ip on the first master,
    # falling back to its default IPv4 address.
    cluster_api_address: "{{ hostvars[groups['k8s_master'][0]]['secondary_ip'] if ('secondary_ip' in hostvars[groups['k8s_master'][0]]) else hostvars[groups['k8s_master'][0]]['ansible_default_ipv4']['address'] }}"
# Point the node's resolver at the in-cluster DNS service so pod/service
# names resolve from the host as well.
- name: Add kubernetes DNS
  blockinfile:
    dest: /etc/resolvconf/resolv.conf.d/head
    insertbefore: EOF
    create: true
    owner: root
    group: root
    mode: "0644"  # quoted: a bare 0644 is parsed as octal integer 420
    block: |
      nameserver 10.96.0.10
      search default.svc.cluster.local svc.cluster.local cluster.local
      options ndots:5
    marker: "# {mark} ANSIBLE MANAGED BLOCK for {{ inventory_hostname }} k8s node"
  register: flushresolveconf

# Only regenerate resolv.conf when the head file actually changed.
- name: Flush resolvconf
  command: /sbin/resolvconf -u
  when: flushresolveconf.changed
# Bridge traffic must traverse iptables/ip6tables/arptables for kube-proxy
# and CNI network policy to work; one looped task replaces three copies.
- name: Set bridge netfilter sysctls to 1
  sysctl:
    name: "{{ item }}"
    value: "1"  # quoted so the value survives YAML type inference as a string
    state: present
    ignoreerrors: true
  loop:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-ip6tables
    - net.bridge.bridge-nf-call-arptables

- name: Enable IPv4 forwarding
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
# br_netfilter plus the IPVS modules required when kube-proxy runs in
# ipvs mode.
- name: Load ipvs kernel drivers
  modprobe:
    name: "{{ item }}"
    state: present
  loop:
    - br_netfilter
    - ip_vs_sh
    - nf_conntrack_ipv4
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_tables
- name: Install packages required for docker
  apt:
    # apt accepts a list directly: a single transaction instead of
    # one apt run per with_items element.
    name:
      - apt-transport-https
      - ca-certificates
      - "linux-image-extra-{{ ansible_kernel }}"
      - linux-image-extra-virtual
      - aufs-tools
      - ppp
    state: latest
- name: Add dockerproject apt key
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present  # explicit, matching the k8s apt key task below

- name: Add dockerproject apt source
  lineinfile:
    line: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable'
    # "path" (not the deprecated "dest") — consistent with the k8s.list task.
    path: /etc/apt/sources.list.d/docker.list
    create: true
    owner: root
    group: root
    mode: "0644"
  register: installeddocker
- name: Update apt cache
  apt:
    update_cache: true

- name: Install docker ce
  apt:
    name: docker-ce
    force: true
    state: latest
  notify:
    - restart docker
- name: Add ubuntu user to docker group
  user:
    name: ubuntu
    append: true  # keep the user's existing supplementary groups
    groups: docker

- name: Configure docker server
  template:
    src: docker.conf.j2
    dest: /etc/systemd/system/docker.service.d/docker.conf
  notify:
    - reload systemd
    - restart docker

# Run the systemd reload / docker restart now, before kubeadm setup begins.
- meta: flush_handlers
- name: Add k8s apt key
  apt_key:
    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
    state: present

- name: Add k8s apt source
  lineinfile:
    line: 'deb http://apt.kubernetes.io/ kubernetes-xenial main'
    path: /etc/apt/sources.list.d/k8s.list
    create: true
    owner: root
    group: root
    mode: "0644"

- name: Update apt cache - again
  apt:
    update_cache: true
# - name: Install kubeadm
# apt:
# name: "{{ item }}"
# force: yes
# state: present
# with_items:
# - "kubernetes-cni={{ kubernetes_cni_version }}"
# - "kubelet={{ kubelet_version }}"
# - "kubectl={{ kubectl_version }}"
# - "kubeadm={{ kubeadm_version }}"
# notify:
# - restart kubelet
# conntrack and ipvsadm are required at runtime by kube-proxy (ipvs mode).
- name: Install k8s dependent packages
  apt:
    name:
      - conntrack
      - ipvsadm
    force: true
    state: present
  notify:
    - restart kubelet

# Unpinned install of the kubeadm toolchain (pinned variant is commented
# out above this block in the file).
- name: Install kubeadm
  apt:
    name:
      - kubelet
      - kubectl
      - kubeadm
    force: true
    state: present
  notify:
    - restart kubelet
- name: Configure calico
  template:
    src: calico-3.1.yaml.j2
    dest: /home/ubuntu/calico-3.1.yaml
  when: inventory_hostname in groups['k8s_master']

- name: Set cluster API address and SAN list
  set_fact:
    # Prefer the first master's secondary_ip if defined, else its default IPv4.
    cluster_api_address: "{{ hostvars[groups['k8s_master'][0]]['secondary_ip'] if ('secondary_ip' in hostvars[groups['k8s_master'][0]]) else hostvars[groups['k8s_master'][0]]['ansible_default_ipv4']['address'] }}"
    # Extra address placed in the API server certificate SANs.
    cluster_address_list: "{{ hostvars[groups['k8s_master'][0]]['ansible_default_ipv4']['address'] }}"

# The original task had an empty name.
- name: Check for existing admin.conf on the first master
  stat:
    path: /etc/kubernetes/admin.conf
  register: admin_conf
  delegate_to: "{{ groups['k8s_master'][0] }}"
  delegate_facts: true

- name: Show cluster_api_address
  debug:
    var: cluster_api_address
  when: debug
# A successful "kubectl version" that reports a Server Version means the
# control plane is already up, so kubeadm init is skipped.
- name: Check if cluster is active
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf version
  changed_when: false
  register: kubectl_version
  ignore_errors: true
  delegate_to: "{{ groups['k8s_master'][0] }}"
  delegate_facts: true
  tags:
    - skip_ansible_lint  # Suppressing the linter

- name: Show kubectl_version
  debug:
    var: kubectl_version
  when: debug

- name: Init Cluster on the first master
  command: >
    /usr/bin/kubeadm init
    --pod-network-cidr=192.168.0.0/16
    --apiserver-advertise-address {{ cluster_api_address }}
    --apiserver-cert-extra-sans {{ cluster_address_list }}
  when:
    - kubectl_version.stdout.find('Server Version:') == -1
    - inventory_hostname in groups['k8s_master']
  tags:
    - skip_ansible_lint  # Suppressing the linter
- name: Wait for API Server to come up
  wait_for:
    host: "{{ cluster_api_address }}"
    port: 6443
    delay: 10

# Pull admin.conf down to the controller so it can be distributed to nodes.
- name: Retrieve kubectl config
  fetch:
    src: /etc/kubernetes/admin.conf
    dest: /tmp/admin.conf
    flat: true
  when: inventory_hostname in groups['k8s_master']

- name: Ensure the /home/ubuntu/.kube directory exists
  file:
    path: /home/ubuntu/.kube
    state: directory
    mode: "0700"
    owner: ubuntu
    group: ubuntu

- name: Setup kubectl config
  copy:
    src: /tmp/admin.conf
    dest: /home/ubuntu/.kube/config
    mode: "0644"
    owner: ubuntu
    group: ubuntu
# If the calico-kube-controllers deployment exists, networking is already
# installed and the apply step is skipped.
- name: Check if networking is active
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf get deployment calico-kube-controllers --namespace kube-system
  changed_when: false
  register: kubectl_calico
  ignore_errors: true
  delegate_to: "{{ groups['k8s_master'][0] }}"
  delegate_facts: true
  tags:
    - skip_ansible_lint  # Suppressing the linter

- name: Show kubectl_calico
  debug:
    var: kubectl_calico
  when: debug

- name: Init Cluster networking on the first master
  command: kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /home/ubuntu/calico-3.1.yaml
  when:
    - kubectl_calico.rc != 0
    - inventory_hostname in groups['k8s_master']
  tags:
    - skip_ansible_lint  # Suppressing the linter
# - name: RABAC - Init Cluster networking on the first master.
# shell: kubectl --kubeconfig=/etc/kubernetes/admin.conf
# apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
# when: "not kubectl_calico.rc == 0
# and inventory_hostname in groups['k8s_master']"
# tags:
# - skip_ansible_lint # Suppressing the linter
# - name: Init Cluster networking on the first master.
# shell: kubectl --kubeconfig=/etc/kubernetes/admin.conf
# apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
# when: "not kubectl_calico.rc == 0
# and inventory_hostname in groups['k8s_master']"
# tags:
# - skip_ansible_lint # Suppressing the linter
# - name: Init Cluster networking on the first master.
# shell: kubectl --kubeconfig=/etc/kubernetes/admin.conf
# apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
# when: "not kubectl_calico.rc == 0
# and inventory_hostname in groups['k8s_master']"
# tags:
# - skip_ansible_lint # Suppressing the linter
# Retry until every kube-system pod is Running ("No resources found" on
# stderr/stdout is ignored by the reject filter).
- name: Wait for Networking to come up
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods --field-selector 'status.phase!=Running' --namespace kube-system"
  register: calico_check
  until: calico_check.stdout_lines | reject('search', '^No resources found') | list | count == 0
  retries: 20
  delay: 30
  when: inventory_hostname in groups['k8s_master']

# Workers fetch the first bootstrap token from the master to join with.
- name: Get Join token
  shell: /usr/bin/kubeadm token list | grep -v TOKEN | head -1 | awk '{print $1}'
  changed_when: false
  register: kubectl_token
  ignore_errors: true
  delegate_to: "{{ groups['k8s_master'][0] }}"
  delegate_facts: true
  when: inventory_hostname not in groups['k8s_master']
  tags:
    - skip_ansible_lint  # Suppressing the linter
- name: Show kubectl_token
  debug:
    var: kubectl_token
  when: debug

# grep exits non-zero when the node is absent; ignore_errors lets the
# registered result drive the join decision below.
- name: Check already joined
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes | grep {{ inventory_hostname }}"
  changed_when: false
  register: kubectl_joined
  ignore_errors: true
  delegate_to: "{{ groups['k8s_master'][0] }}"
  delegate_facts: true
  tags:
    - skip_ansible_lint  # Suppressing the linter

- name: Show kubectl_joined
  debug:
    var: kubectl_joined
  when: debug

- name: Init minions
  command: >
    /usr/bin/kubeadm join
    --token {{ kubectl_token.stdout }} {{ cluster_api_address }}:6443
    --discovery-token-unsafe-skip-ca-verification
  when:
    - inventory_hostname not in groups['k8s_master']
    - kubectl_joined.stdout_lines | list | count == 0
  tags:
    - skip_ansible_lint  # Suppressing the linter
- name: Ensure the /etc/kubernetes/manifests directory exists
  file:
    path: /etc/kubernetes/manifests
    state: directory
    mode: "0755"
  when: inventory_hostname not in groups['k8s_master']

- name: Check joining
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes | grep {{ inventory_hostname }}"
  changed_when: false
  register: kubectl_joined
  delegate_to: "{{ groups['k8s_master'][0] }}"
  when: inventory_hostname not in groups['k8s_master']
  tags:
    - skip_ansible_lint  # Suppressing the linter

- name: Show kubectl_joined
  debug:
    var: kubectl_joined
  when: debug

# Retry until the node appears in "kubectl get nodes" on the master.
- name: Wait for nodes to join
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes | grep {{ inventory_hostname }}"
  register: calico_check
  until: calico_check is success
  retries: 10
  delay: 30
  delegate_to: "{{ groups['k8s_master'][0] }}"
  when: inventory_hostname not in groups['k8s_master']

# Retry until no kube-system pod is in a non-Running phase.
- name: Wait for calico to settle
  shell: "kubectl --kubeconfig=/etc/kubernetes/admin.conf get pods --field-selector 'status.phase!=Running' --namespace kube-system"
  register: calico_check
  until: calico_check.stdout_lines | reject('search', '^No resources found') | list | count == 0
  retries: 20
  delay: 30
  when: inventory_hostname in groups['k8s_master']
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment