K3S Cluster formation
#!/bin/bash | |
# FIRSTHOST="hiro" | |
# MYNET="10.2.85." | |
# CARPNET="10.174.1." | |
# N1="167" | |
# N2="168" | |
# N3="169" | |
# CARPPASS="rascaldev2020" | |
# WORMHOLE="false" | |
# ##---------------- Finish config | |
# systemctl restart networking | |
# # IP1="${MYNET}${N1}" | |
# # IP2="${MYNET}${N2}" | |
# # IP3="${MYNET}${N3}" | |
# CARP1="${CARPNET}${N1}" | |
# CARP2="${CARPNET}${N2}" | |
# CARP3="${CARPNET}${N3}" | |
# HOSTNAME="$(hostname -s)" | |
# EXTIP=$(ip route get 8.8.8.8 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
# MY=$(ip route get ${MYNET}1 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
# MYCARP=$(ip route get ${CARPNET}1 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
# IPID=$(echo $MY | cut -d"." -f4) | |
# CARPID=$(echo $MYCARP | cut -d"." -f4) | |
if [ "${K3S_DATASTORE_ENDPOINT}" != "" ] | |
then | |
apt-get install -y etcd | |
if [ "${KS3_NODES}" != "1" ] | |
then | |
systemctl stop etcd | |
rm -rf /var/lib/etcd/* | |
cat > /etc/default/etcd <<ETCDCONF | |
ETCD_INITIAL_CLUSTER="k3s-etcd${N1}=http://${CARP1}:2380,k3s-etcd${N2}=http://${CARP2}:2380,k3s-etcd${N3}=http://${CARP3}:2380" | |
ETCD_INITIAL_CLUSTER_STATE="new" | |
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01" | |
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MYCARP}:2380" | |
ETCD_LISTEN_PEER_URLS="http://${MYCARP}:2380" | |
ETCD_LISTEN_CLIENT_URLS="http://${MYCARP}:2379,http://127.0.0.1:2379" | |
ETCD_ADVERTISE_CLIENT_URLS="http://${MYCARP}:2379" | |
ETCD_NAME="k3s-etcd${CARPID}" | |
#ETCD_FORCE_NEW_CLUSTER="true" | |
ETCDCONF | |
systemctl start etcd | |
fi | |
fi | |
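# The block above joins this node to a 3-node etcd cluster over the CARP
# addresses so k3s can use an external datastore. A rough health check once
# all three members are up (assumes etcdctl from the same etcd package):
#   ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 endpoint health
#   ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 member list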
#---- server code--------------------------------------------------- | |
if [ "${HOSTNAME}" == "${FIRSTHOST}" ] | |
then | |
if [ "$IPFAILOVER" != "" ] | |
then | |
cat > /etc/network/interfaces.d/99-ipfailover.cfg <<NETWORK | |
auto eth0:1 | |
iface eth0:1 inet static | |
address $IPFAILOVER | |
netmask 255.255.255.255 | |
broadcast $IPFAILOVER | |
NETWORK | |
fi | |
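# The stanza above pins the optional failover IP (an OVH-style floating IP) to
# an eth0:1 alias on the first host only. To bring it up without a reboot
# (assumes ifupdown manages eth0):
#   ifup eth0:1 && ip addr show eth0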
mkdir -p /root/manifests /var/lib/rancher/k3s/server/manifests /var/lib/edgefs | |
# manifests numbered 50 or higher are not copied to the k3s auto-deploy directory
cat > /root/manifests/95-rook.yml <<'ROOK' | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: rook-edgefs-system | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: clusters.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: Cluster | |
listKind: ClusterList | |
plural: clusters | |
singular: cluster | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
edgefsImageName: | |
type: string | |
dataDirHostPath: | |
pattern: ^/(\S+) | |
type: string | |
devicesResurrectMode: | |
pattern: ^(restore|restoreZap|restoreZapWait)$ | |
type: string | |
dashboard: | |
properties: | |
localAddr: | |
type: string | |
network: | |
properties: | |
serverIfName: | |
type: string | |
brokerIfName: | |
type: string | |
skipHostPrepare: | |
type: boolean | |
storage: | |
properties: | |
nodes: | |
items: {} | |
type: array | |
useAllDevices: {} | |
useAllNodes: | |
type: boolean | |
required: | |
- edgefsImageName | |
- dataDirHostPath | |
additionalPrinterColumns: | |
- name: Image | |
type: string | |
description: Edgefs target image | |
JSONPath: .spec.edgefsImageName | |
- name: HostPath | |
type: string | |
description: Directory used on the Kubernetes nodes to store Edgefs data | |
JSONPath: .spec.dataDirHostPath | |
- name: Age | |
type: date | |
JSONPath: .metadata.creationTimestamp | |
- name: State | |
type: string | |
description: Current State | |
JSONPath: .status.state | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: smbs.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: SMB | |
listKind: SMBList | |
plural: smbs | |
singular: smb | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
instances: | |
type: integer | |
minimum: 1 | |
required: | |
- instances | |
additionalPrinterColumns: | |
- name: Instances | |
type: string | |
description: Edgefs's service instances count | |
JSONPath: .spec.instances | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: nfss.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: NFS | |
listKind: NFSList | |
plural: nfss | |
singular: nfs | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
instances: | |
type: integer | |
minimum: 1 | |
required: | |
- instances | |
additionalPrinterColumns: | |
- name: Instances | |
type: string | |
description: Edgefs's service instances count | |
JSONPath: .spec.instances | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: swifts.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: SWIFT | |
listKind: SWIFTList | |
plural: swifts | |
singular: swift | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
instances: | |
type: integer | |
minimum: 1 | |
required: | |
- instances | |
additionalPrinterColumns: | |
- name: Instances | |
type: string | |
description: Edgefs's service instances count | |
JSONPath: .spec.instances | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: s3s.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: S3 | |
listKind: S3List | |
plural: s3s | |
singular: s3 | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
instances: | |
type: integer | |
minimum: 1 | |
required: | |
- instances | |
additionalPrinterColumns: | |
- name: Instances | |
type: string | |
description: Edgefs's service instances count | |
JSONPath: .spec.instances | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: s3xs.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: S3X | |
listKind: S3XList | |
plural: s3xs | |
singular: s3x | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
instances: | |
type: integer | |
minimum: 1 | |
required: | |
- instances | |
additionalPrinterColumns: | |
- name: Instances | |
type: string | |
description: Edgefs's service instances count | |
JSONPath: .spec.instances | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: iscsis.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: ISCSI | |
listKind: ISCSIList | |
plural: iscsis | |
singular: iscsi | |
scope: Namespaced | |
version: v1 | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: isgws.edgefs.rook.io | |
spec: | |
group: edgefs.rook.io | |
names: | |
kind: ISGW | |
listKind: ISGWList | |
plural: isgws | |
singular: isgw | |
scope: Namespaced | |
version: v1 | |
validation: | |
openAPIV3Schema: | |
properties: | |
spec: | |
properties: | |
direction: | |
type: string | |
pattern: ^(send|receive|send\+receive)$ | |
remoteURL: | |
type: string | |
config: | |
type: object | |
properties: | |
server: | |
type: string | |
clients: | |
type: array | |
items: | |
type: string | |
required: | |
- direction | |
additionalPrinterColumns: | |
- name: Direction | |
type: string | |
description: ISGW service direction | |
JSONPath: .spec.direction | |
- name: RemoteEndpoint | |
type: string | |
description: Remote ISGW service endpoint | |
JSONPath: .spec.remoteURL | |
- name: Server | |
type: string | |
JSONPath: .spec.config.server | |
description: ISGW server' service name | |
- name: Clients | |
type: string | |
JSONPath: .spec.config.clients | |
description: ISGW client' service names | |
--- | |
# The cluster role for managing all the cluster-specific resources in a namespace | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
name: rook-edgefs-cluster-mgmt | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
rules: | |
- apiGroups: [""] | |
resources: ["secrets", "pods", "nodes", "services", "configmaps", "endpoints"] | |
verbs: ["get", "list", "watch", "patch", "create", "update", "delete"] | |
- apiGroups: ["apps"] | |
resources: ["statefulsets", "statefulsets/scale"] | |
verbs: ["create", "delete", "deletecollection", "patch", "update"] | |
- apiGroups: ["apps"] | |
resources: ["deployments", "daemonsets", "replicasets", "statefulsets"] | |
verbs: ["get", "list", "watch", "create", "update", "delete"] | |
--- | |
# The role for the operator to manage resources in the system namespace | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: Role | |
metadata: | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
rules: | |
- apiGroups: [""] | |
resources: ["pods", "nodes", "configmaps"] | |
verbs: ["get", "list", "watch", "patch", "create", "update", "delete"] | |
- apiGroups: ["apps"] | |
resources: ["daemonsets"] | |
verbs: ["get", "list", "watch", "create", "update", "delete"] | |
--- | |
# The cluster role for managing the Rook CRDs | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
kind: ClusterRole | |
metadata: | |
name: rook-edgefs-global | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
rules: | |
- apiGroups: [""] | |
# Pod access is needed for fencing | |
# Node access is needed for determining nodes where mons should run | |
resources: ["pods", "nodes", "nodes/proxy"] | |
verbs: ["get", "list", "watch", "update", "patch"] | |
- apiGroups: [""] | |
# PVs and PVCs are managed by the Rook provisioner | |
resources: ["events", "persistentvolumes", "persistentvolumeclaims"] | |
verbs: ["get", "list", "watch", "patch", "create", "update", "delete"] | |
- apiGroups: ["storage.k8s.io"] | |
resources: ["storageclasses"] | |
verbs: ["get", "list", "watch"] | |
- apiGroups: ["batch"] | |
resources: ["jobs"] | |
verbs: ["get", "list", "watch", "create", "update", "delete"] | |
- apiGroups: ["edgefs.rook.io"] | |
resources: ["*"] | |
verbs: ["*"] | |
- apiGroups: ["rook.io"] | |
resources: ["*"] | |
verbs: ["*"] | |
--- | |
# The rook system service account used by the operator, agent, and discovery pods | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
--- | |
# Grant the operator, agent, and discovery agents access to resources in its own namespace | |
kind: RoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: Role | |
name: rook-edgefs-system | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
--- | |
# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes | |
kind: ClusterRoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: rook-edgefs-global | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: rook-edgefs-global | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
--- | |
# The deployment for the rook operator | |
apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
name: rook-edgefs-operator | |
namespace: rook-edgefs-system | |
labels: | |
operator: rook | |
storage-backend: edgefs | |
spec: | |
selector: | |
matchLabels: | |
app: rook-edgefs-operator | |
replicas: 1 | |
template: | |
metadata: | |
labels: | |
app: rook-edgefs-operator | |
spec: | |
serviceAccountName: rook-edgefs-system | |
containers: | |
- name: rook-edgefs-operator | |
image: rook/edgefs:v1.3.1 | |
imagePullPolicy: "Always" | |
args: ["edgefs", "operator"] | |
env: | |
- name: ROOK_LOG_LEVEL | |
value: "INFO" | |
- name: POD_NAME | |
valueFrom: | |
fieldRef: | |
fieldPath: metadata.name | |
- name: POD_NAMESPACE | |
valueFrom: | |
fieldRef: | |
fieldPath: metadata.namespace | |
# Rook Discover toleration. Will tolerate all taints with all keys. | |
# Choose between NoSchedule, PreferNoSchedule and NoExecute: | |
# - name: DISCOVER_TOLERATION | |
# value: "NoSchedule" | |
# (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate | |
# - name: DISCOVER_TOLERATION_KEY | |
# value: "<KeyOfTheTaintToTolerate>" | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: rook-edgefs | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: rook-edgefs-cluster | |
namespace: rook-edgefs | |
--- | |
kind: Role | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: rook-edgefs-cluster | |
namespace: rook-edgefs | |
rules: | |
- apiGroups: [""] | |
resources: ["configmaps", "endpoints"] | |
verbs: [ "get", "list", "watch", "create", "update", "delete" ] | |
- apiGroups: ["edgefs.rook.io"] | |
resources: ["*"] | |
verbs: ["*"] | |
- apiGroups: [""] | |
resources: ["pods"] | |
verbs: [ "get", "list" ] | |
- apiGroups: ["extensions"] | |
resources: ["deployments/scale"] | |
verbs: [ "get", "update" ] | |
--- | |
# Allow the operator to create resources in this cluster's namespace | |
kind: RoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: rook-edgefs-cluster-mgmt | |
namespace: rook-edgefs | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: rook-edgefs-cluster-mgmt | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
--- | |
# Allow the pods in this namespace to work with configmaps | |
kind: RoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1beta1 | |
metadata: | |
name: rook-edgefs-cluster | |
namespace: rook-edgefs | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: Role | |
name: rook-edgefs-cluster | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-cluster | |
namespace: rook-edgefs | |
--- | |
apiVersion: policy/v1beta1 | |
kind: PodSecurityPolicy | |
metadata: | |
name: privileged | |
spec: | |
fsGroup: | |
rule: RunAsAny | |
privileged: true | |
runAsUser: | |
rule: RunAsAny | |
seLinux: | |
rule: RunAsAny | |
supplementalGroups: | |
rule: RunAsAny | |
volumes: | |
- '*' | |
allowedCapabilities: | |
- '*' | |
hostPID: true | |
hostIPC: true | |
hostNetwork: false | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
name: privileged-psp-user | |
rules: | |
- apiGroups: | |
- apps | |
resources: | |
- podsecuritypolicies | |
resourceNames: | |
- privileged | |
verbs: | |
- use | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: rook-edgefs-system-psp | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: privileged-psp-user | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-system | |
namespace: rook-edgefs-system | |
--- | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: rook-edgefs-cluster-psp | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: privileged-psp-user | |
subjects: | |
- kind: ServiceAccount | |
name: rook-edgefs-cluster | |
namespace: rook-edgefs | |
--- | |
apiVersion: edgefs.rook.io/v1 | |
kind: Cluster | |
metadata: | |
name: rook-edgefs | |
namespace: rook-edgefs | |
spec: | |
edgefsImageName: edgefs/edgefs:latest # specify version here, i.e. edgefs/edgefs:1.2.117 etc | |
serviceAccount: rook-edgefs-cluster | |
dataDirHostPath: /var/lib/edgefs | |
#dataVolumeSize: 10Gi | |
#devicesResurrectMode: "restoreZapWait" | |
#dashboard: | |
# localAddr: 10.3.30.75 | |
#network: # cluster level networking configuration | |
# provider: host | |
# selectors: | |
# server: "enp2s0f0" | |
# broker: "enp2s0f0" | |
#skipHostPrepare: true | |
#maxContainerCapacity: 132Ti | |
#sysRepCount: 1 # SystemReplicationCount [1..n](default is 3) | |
#failureDomain: "device" # Cluster's failureDomain ["device", "host", "zone"] (default is "host") | |
#trlogProcessingInterval: 2 # set transaction log processing interval to 2s to speed up ISGW Link delivery | |
#trlogKeepDays: 2 # keep up to 2 days of transaction log interval batches to reduce local storage overhead | |
#useHostLocalTime: true | |
storage: # cluster level storage configuration and selection | |
useAllNodes: true | |
# directories: | |
# - path: /mnt/disks/ssd0 | |
# - path: /mnt/disks/ssd1 | |
# - path: /mnt/disks/ssd2 | |
useAllDevices: true | |
# config: | |
# mdReserved: "30" # allocate only 30% of offloaded SSD/NVMe slice for Metadata, the rest keep for BCache | |
# hddReadAhead: "2048" # speed up reads of 2MB+ chunks of HDD (offload use case) | |
# rtVerifyChid: "0" # may improve CPU utilization | |
# lmdbPageSize: "32768" # larger value can improve stream operations | |
# lmdbMdPageSize: "4096" # smaller value can improve metadata offload device utilization | |
# useMetadataOffload: "true" # enable use of SSD device as metadata offload | |
# useBCache: "true" # enable SSD cache device and read-cache | |
# useBCacheWB: "true" # enable SSD write-cache | |
# useMetadataMask: "0x7d" # all metadata on SSD except second level manifests | |
# rtPLevelOverride: "4" # enable large device partitioning, only needed if automatic not working | |
# sync: "0" # highest performance, consistent on pod/software failures, not-consistent on power failures | |
# useAllSSD: "true" # use only SSDs during deployment | |
# zone: "1" # defines failure domain's zone number for all edgefs nodes | |
# nodes: | |
# - name: node3071ub16 | |
# - name: node3072ub16 | |
# - name: node3073ub16 | |
# - name: node3074ub16 # node level storage configuration | |
# devices: # specific devices to use for storage can be specified for each node | |
# - name: "sdb" | |
# - name: "sdc" | |
# config: # configuration can be specified at the node level which overrides the cluster level config | |
# rtPLevelOverride: 8 | |
# zone: "2" # defines failure domain's zone number for specific node(node3074ub16) | |
#resources: | |
# limits: | |
# cpu: "2" | |
# memory: "4096Mi" | |
# requests: | |
# cpu: "2" | |
# memory: "4096Mi" | |
# A key value list of annotations | |
#annotations: | |
# all: | |
# key: value | |
# mgr: | |
# prepare: | |
# target: | |
#placement: | |
# all: | |
# nodeAffinity: | |
# requiredDuringSchedulingIgnoredDuringExecution: | |
# nodeSelectorTerms: | |
# - matchExpressions: | |
# - key: nodekey | |
# operator: In | |
# values: | |
# - edgefs-target | |
# tolerations: | |
# - key: taintKey | |
# operator: Exists | |
ROOK | |
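# 95-rook.yml stays in /root/manifests for manual application once the cluster
# and its storage nodes are ready, for example:
#   kubectl apply -f /root/manifests/95-rook.yml
#   kubectl -n rook-edgefs-system get pods -w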
#https://github.com/helm/charts/tree/master/stable/nginx-ingress
#controller.extraArgs.v=2 | |
cat > /root/manifests/05-ingress.yml <<INGRESS | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: ingress-nginx | |
--- | |
apiVersion: helm.cattle.io/v1 | |
kind: HelmChart | |
metadata: | |
name: ingress | |
namespace: kube-system | |
spec: | |
chart: nginx-ingress | |
repo: https://kubernetes-charts.storage.googleapis.com/ | |
targetNamespace: ingress-nginx | |
set: | |
rbac.create: "true" | |
controller.kind: "DaemonSet" | |
controller.hostNetwork: "true" | |
controller.daemonset.useHostPort: "true" | |
controller.metrics.enabled: "true" | |
INGRESS | |
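# HelmChart resources dropped into the k3s auto-deploy directory are handled by
# the built-in Helm controller, which installs nginx-ingress as a host-network
# DaemonSet. A quick check once k3s is up (assumes the k3s kubeconfig is in use):
#   kubectl -n kube-system get helmcharts.helm.cattle.io
#   kubectl -n ingress-nginx get pods -o wide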
# --set ingressShim.defaultIssuerName=letsencrypt-prod \ | |
# --set ingressShim.defaultIssuerKind=ClusterIssuer \ | |
# --set ingressShim.defaultIssuerGroup=cert-manager.io | |
# - --default-issuer-name=letsencrypt-prod | |
# - --default-issuer-kind=ClusterIssuer | |
# - --default-issuer-group=cert-manager.io | |
cat > /root/manifests/06-cert-manager.yml <<CERTMANAGER | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: cert-manager | |
labels: | |
certmanager.k8s.io/disable-validation: "true" | |
--- | |
apiVersion: cert-manager.io/v1alpha2 | |
kind: ClusterIssuer | |
metadata: | |
name: letsencrypt-staging | |
spec: | |
acme: | |
email: [email protected] | |
server: https://acme-staging-v02.api.letsencrypt.org/directory | |
privateKeySecretRef: | |
name: letsencrypt-staging | |
solvers: | |
- http01: | |
ingress: | |
class: nginx | |
--- | |
apiVersion: cert-manager.io/v1alpha2 | |
kind: Issuer | |
metadata: | |
name: letsencrypt-prod | |
spec: | |
acme: | |
server: https://acme-v02.api.letsencrypt.org/directory | |
email: [email protected] | |
privateKeySecretRef: | |
name: letsencrypt-prod | |
solvers: | |
- http01: | |
ingress: | |
class: nginx | |
CERTMANAGER | |
curl -sfL "https://github.com/jetstack/cert-manager/releases/download/v0.14.2/cert-manager.yaml" > /root/manifests/05-cert-manager.yaml | |
cat > /root/manifests/01-ucarp.yml <<UCARP | |
--- | |
UCARP | |
cat > /root/manifests/06-rancher.yml <<RANCHER | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: cattle-system | |
--- | |
apiVersion: helm.cattle.io/v1 | |
kind: HelmChart | |
metadata: | |
name: rancher | |
namespace: kube-system | |
spec: | |
chart: rancher | |
repo: https://releases.rancher.com/server-charts/stable | |
targetNamespace: cattle-system | |
set: | |
hostname: manager.cloud.oondeo.es | |
ingress.tls.source: rancher | |
RANCHER | |
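# The Rancher chart lands in cattle-system with TLS from Rancher-generated
# certificates; "hostname" must resolve to the ingress nodes. To watch the
# rollout after the Helm controller has processed the chart (the deployment is
# normally named "rancher"):
#   kubectl -n cattle-system rollout status deploy/rancher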
if [ "${WORMHOLE}" == "true" ] | |
then | |
cat > /root/manifests/00-wormhole.yml <<'WORMHOLE' | |
--- | |
apiVersion: apiextensions.k8s.io/v1beta1 | |
kind: CustomResourceDefinition | |
metadata: | |
name: wgnodes.wormhole.gravitational.io | |
spec: | |
group: wormhole.gravitational.io | |
names: | |
kind: Wgnode | |
plural: wgnodes | |
scope: Namespaced | |
version: v1beta1 | |
status: | |
acceptedNames: | |
kind: "" | |
plural: "" | |
conditions: [] | |
storedVersions: [] | |
--- | |
apiVersion: v1 | |
kind: Namespace | |
metadata: | |
name: wormhole | |
--- | |
apiVersion: scheduling.k8s.io/v1beta1 | |
kind: PriorityClass | |
metadata: | |
name: wormhole-high-priority | |
value: 1000000 | |
globalDefault: false | |
description: "This priority class should be used for wormhole controller pods only." | |
--- | |
apiVersion: policy/v1beta1 | |
kind: PodSecurityPolicy | |
metadata: | |
annotations: | |
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' | |
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' | |
name: wormhole | |
namespace: wormhole | |
spec: | |
allowedCapabilities: | |
- NET_ADMIN | |
- NET_RAW | |
- CHOWN | |
fsGroup: | |
rule: RunAsAny | |
hostPorts: | |
- max: 65535 | |
min: 1024 | |
runAsUser: | |
rule: RunAsAny | |
seLinux: | |
rule: RunAsAny | |
supplementalGroups: | |
rule: RunAsAny | |
volumes: | |
- '*' | |
hostNetwork: true | |
--- | |
kind: ClusterRole | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
name: wormhole | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
verbs: | |
- list | |
- watch | |
- get | |
- apiGroups: | |
- wormhole.gravitational.io | |
resources: | |
- "*" | |
verbs: | |
- "*" | |
- apiGroups: | |
- policy | |
resources: | |
- podsecuritypolicies | |
verbs: | |
- use | |
resourceNames: | |
- wormhole | |
--- | |
kind: ClusterRoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
name: wormhole | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: wormhole | |
subjects: | |
- kind: ServiceAccount | |
name: wormhole | |
namespace: wormhole | |
--- | |
kind: Role | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
namespace: wormhole | |
name: wormhole | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- secrets | |
verbs: | |
- get | |
- watch | |
- list | |
- create | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
verbs: | |
- get | |
--- | |
kind: Role | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
namespace: kube-system | |
name: wormhole | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
- configmaps | |
verbs: | |
- get | |
resourceNames: | |
- kubeadm-config | |
--- | |
kind: RoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
namespace: wormhole | |
name: wormhole | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: Role | |
name: wormhole | |
subjects: | |
- kind: ServiceAccount | |
name: wormhole | |
namespace: wormhole | |
--- | |
kind: RoleBinding | |
apiVersion: rbac.authorization.k8s.io/v1 | |
metadata: | |
namespace: kube-system | |
name: wormhole | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: Role | |
name: wormhole | |
subjects: | |
- kind: ServiceAccount | |
name: wormhole | |
namespace: wormhole | |
--- | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: wormhole | |
namespace: wormhole | |
--- | |
apiVersion: apps/v1 | |
kind: DaemonSet | |
metadata: | |
name: wormhole | |
namespace: wormhole | |
labels: | |
app: wormhole | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: wormhole | |
updateStrategy: | |
type: RollingUpdate | |
rollingUpdate: | |
maxUnavailable: 1 | |
template: | |
metadata: | |
labels: | |
k8s-app: wormhole | |
annotations: | |
scheduler.alpha.kubernetes.io/critical-pod: '' | |
seccomp.security.alpha.kubernetes.io/pod: runtime/default | |
spec: | |
hostNetwork: true | |
serviceAccountName: wormhole | |
# Short duration for rolling restarts | |
terminationGracePeriodSeconds: 5 | |
nodeSelector:
  beta.kubernetes.io/arch: amd64
tolerations: | |
# Tolerate all taints | |
- effect: NoSchedule | |
operator: Exists | |
- effect: NoExecute | |
operator: Exists | |
priorityClassName: wormhole-high-priority | |
containers: | |
# Run a wormhole container on each node | |
# Configures wireguard / CNI on each node | |
- name: wormhole | |
image: oondeo/wormhole:0.2.1-cillium | |
command: | |
- /wormhole | |
args: | |
- controller | |
- --overlay-cidr=10.244.0.0/16 | |
env: | |
- name: POD_NAME | |
valueFrom: | |
fieldRef: | |
fieldPath: metadata.name | |
- name: POD_NAMESPACE | |
valueFrom: | |
fieldRef: | |
fieldPath: metadata.namespace | |
securityContext: | |
allowPrivilegeEscalation: false | |
readOnlyRootFilesystem: true | |
runAsNonRoot: false | |
runAsUser: 0 | |
capabilities: | |
drop: | |
- all | |
add: | |
- NET_ADMIN | |
- NET_RAW | |
- CHOWN | |
imagePullPolicy: Always | |
resources: | |
requests: | |
cpu: 100m | |
memory: 100M | |
limits: | |
cpu: 500m | |
memory: 200M | |
volumeMounts: | |
- mountPath: /host/opt/cni/bin | |
name: cni-bin-dir | |
- mountPath: /host/etc/cni/net.d | |
name: cni-net-dir | |
- mountPath: /tmp | |
name: tmpfs | |
volumes: | |
# Used to install CNI. | |
- name: cni-bin-dir | |
hostPath: | |
path: /opt/cni/bin | |
- name: cni-net-dir | |
hostPath: | |
path: /etc/cni/net.d | |
- name: tmpfs | |
emptyDir:
  medium: Memory
WORMHOLE | |
fi | |
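# Wormhole (optional) provides a WireGuard-encrypted CNI with an overlay CIDR
# of 10.244.0.0/16; if it is enabled, that CIDR should be kept in sync with the
# k3s --cluster-cidr option. To inspect the WireGuard mesh on a node:
#   wg show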
cat > /root/manifests/02-cillium.yml <<CILLIUM | |
--- | |
# Source: cilium/charts/agent/templates/serviceaccount.yaml | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: cilium | |
namespace: kube-system | |
--- | |
# Source: cilium/charts/operator/templates/serviceaccount.yaml | |
apiVersion: v1 | |
kind: ServiceAccount | |
metadata: | |
name: cilium-operator | |
namespace: kube-system | |
--- | |
# Source: cilium/charts/config/templates/configmap.yaml | |
apiVersion: v1 | |
kind: ConfigMap | |
metadata: | |
name: cilium-config | |
namespace: kube-system | |
data: | |
# Identity allocation mode selects how identities are shared between cilium | |
# nodes by setting how they are stored. The options are "crd" or "kvstore". | |
# - "crd" stores identities in kubernetes as CRDs (custom resource definition). | |
# These can be queried with: | |
# kubectl get ciliumid | |
# - "kvstore" stores identities in a kvstore, etcd or consul, that is | |
# configured below. Cilium versions before 1.6 supported only the kvstore | |
# backend. Upgrades from these older cilium versions should continue using | |
# the kvstore by commenting out the identity-allocation-mode below, or | |
# setting it to "kvstore". | |
identity-allocation-mode: crd | |
kvstore: '' | |
kvstore-opt: '' | |
etcd-config: '' | |
# kvstore: etcd | |
# kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' | |
# etcd-config: |- | |
# --- | |
# endpoints: | |
# - http://${CARP1}:2379 | |
# - http://${CARP2}:2379 | |
# - http://${CARP3}:2379 | |
# If you want to run cilium in debug mode change this value to true | |
debug: "false" | |
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 | |
# address. | |
enable-ipv4: "true" | |
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 | |
# address. | |
enable-ipv6: "false" | |
# If you want cilium monitor to aggregate tracing for packets, set this level | |
# to "low", "medium", or "maximum". The higher the level, the less packets | |
# that will be seen in monitor output. | |
monitor-aggregation: medium | |
# The monitor aggregation interval governs the typical time between monitor | |
# notification events for each allowed connection. | |
# | |
# Only effective when monitor aggregation is set to "medium" or higher. | |
monitor-aggregation-interval: 5s | |
# The monitor aggregation flags determine which TCP flags which, upon the | |
# first observation, cause monitor notifications to be generated. | |
# | |
# Only effective when monitor aggregation is set to "medium" or higher. | |
monitor-aggregation-flags: all | |
# ct-global-max-entries-* specifies the maximum number of connections | |
# supported across all endpoints, split by protocol: tcp or other. One pair | |
# of maps uses these values for IPv4 connections, and another pair of maps | |
# use these values for IPv6 connections. | |
# | |
# If these values are modified, then during the next Cilium startup the | |
# tracking of ongoing connections may be disrupted. This may lead to brief | |
# policy drops or a change in loadbalancing decisions for a connection. | |
# | |
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption | |
# during the upgrade process, comment out these options. | |
bpf-ct-global-tcp-max: "524288" | |
bpf-ct-global-any-max: "262144" | |
# Pre-allocation of map entries allows per-packet latency to be reduced, at | |
# the expense of up-front memory allocation for the entries in the maps. The | |
# default value below will minimize memory usage in the default installation; | |
# users who are sensitive to latency may consider setting this to "true". | |
# | |
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore | |
# this option and behave as though it is set to "true". | |
# | |
# If this value is modified, then during the next Cilium startup the restore | |
# of existing endpoints and tracking of ongoing connections may be disrupted. | |
# This may lead to policy drops or a change in loadbalancing decisions for a | |
# connection for some time. Endpoints may need to be recreated to restore | |
# connectivity. | |
# | |
# If this option is set to "false" during an upgrade from 1.3 or earlier to | |
# 1.4 or later, then it may cause one-time disruptions during the upgrade. | |
preallocate-bpf-maps: "false" | |
# Regular expression matching compatible Istio sidecar istio-proxy | |
# container image names | |
sidecar-istio-proxy-image: "cilium/istio_proxy" | |
# Encapsulation mode for communication between nodes | |
# Possible values: | |
# - disabled | |
# - vxlan (default) | |
# - geneve | |
tunnel: vxlan | |
# --set global.datapathMode=ipvlan | |
# --set global.ipvlan.masterDevice=bond0 | |
# --set global.tunnel=disabled | |
# --set global.masquerade=true | |
# --set global.installIptablesRules=false | |
# --set global.autoDirectNodeRoutes=true | |
# Name of the cluster. Only relevant when building a mesh of clusters. | |
cluster-name: default | |
# DNS Polling periodically issues a DNS lookup for each matchName from | |
# cilium-agent. The result is used to regenerate endpoint policy. | |
# DNS lookups are repeated with an interval of 5 seconds, and are made for | |
# A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP | |
# data is used instead. An IP change will trigger a regeneration of the Cilium | |
# policy for each endpoint and increment the per cilium-agent policy | |
# repository revision. | |
# | |
# This option is disabled by default starting from version 1.4.x in favor | |
# of a more powerful DNS proxy-based implementation, see [0] for details. | |
# Enable this option if you want to use FQDN policies but do not want to use | |
# the DNS proxy. | |
# | |
# To ease upgrade, users may opt to set this option to "true". | |
# Otherwise please refer to the Upgrade Guide [1] which explains how to | |
# prepare policy rules for upgrade. | |
# | |
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based | |
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action | |
tofqdns-enable-poller: "false" | |
# wait-bpf-mount makes init container wait until bpf filesystem is mounted | |
wait-bpf-mount: "false" | |
masquerade: "true" | |
enable-xt-socket-fallback: "true" | |
install-iptables-rules: "true" | |
auto-direct-node-routes: "false" | |
kube-proxy-replacement: "probe" | |
enable-host-reachable-services: "false" | |
enable-external-ips: "true" | |
enable-node-port: "true" | |
enable-auto-protect-node-port-range: "true" | |
# Chaining mode is set to portmap, enable health checking | |
enable-endpoint-health-checking: "true" | |
enable-well-known-identities: "false" | |
enable-remote-node-identity: "true" | |
--- | |
# Source: cilium/charts/agent/templates/clusterrole.yaml | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
name: cilium | |
rules: | |
- apiGroups: | |
- networking.k8s.io | |
resources: | |
- networkpolicies | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- discovery.k8s.io | |
resources: | |
- endpointslices | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- namespaces | |
- services | |
- nodes | |
- endpoints | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
- pods | |
- nodes | |
verbs: | |
- get | |
- list | |
- watch | |
- update | |
- apiGroups: | |
- "" | |
resources: | |
- nodes | |
- nodes/status | |
verbs: | |
- patch | |
- apiGroups: | |
- apiextensions.k8s.io | |
resources: | |
- customresourcedefinitions | |
verbs: | |
- create | |
- get | |
- list | |
- watch | |
- update | |
- apiGroups: | |
- cilium.io | |
resources: | |
- ciliumnetworkpolicies | |
- ciliumnetworkpolicies/status | |
- ciliumclusterwidenetworkpolicies | |
- ciliumclusterwidenetworkpolicies/status | |
- ciliumendpoints | |
- ciliumendpoints/status | |
- ciliumnodes | |
- ciliumnodes/status | |
- ciliumidentities | |
- ciliumidentities/status | |
verbs: | |
- '*' | |
--- | |
# Source: cilium/charts/operator/templates/clusterrole.yaml | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRole | |
metadata: | |
name: cilium-operator | |
rules: | |
- apiGroups: | |
- "" | |
resources: | |
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- pods | |
verbs: | |
- get | |
- list | |
- watch | |
- delete | |
- apiGroups: | |
- discovery.k8s.io | |
resources: | |
- endpointslices | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- "" | |
resources: | |
# to automatically read from k8s and import the node's pod CIDR to cilium's | |
# etcd so all nodes know how to reach another pod running in a different
# node. | |
- nodes | |
# to perform the translation of a CNP that contains ToGroup to its endpoints | |
- services | |
- endpoints | |
# to check apiserver connectivity | |
- namespaces | |
verbs: | |
- get | |
- list | |
- watch | |
- apiGroups: | |
- cilium.io | |
resources: | |
- ciliumnetworkpolicies | |
- ciliumnetworkpolicies/status | |
- ciliumclusterwidenetworkpolicies | |
- ciliumclusterwidenetworkpolicies/status | |
- ciliumendpoints | |
- ciliumendpoints/status | |
- ciliumnodes | |
- ciliumnodes/status | |
- ciliumidentities | |
- ciliumidentities/status | |
verbs: | |
- '*' | |
--- | |
# Source: cilium/charts/agent/templates/clusterrolebinding.yaml | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: cilium | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: cilium | |
subjects: | |
- kind: ServiceAccount | |
name: cilium | |
namespace: kube-system | |
--- | |
# Source: cilium/charts/operator/templates/clusterrolebinding.yaml | |
apiVersion: rbac.authorization.k8s.io/v1 | |
kind: ClusterRoleBinding | |
metadata: | |
name: cilium-operator | |
roleRef: | |
apiGroup: rbac.authorization.k8s.io | |
kind: ClusterRole | |
name: cilium-operator | |
subjects: | |
- kind: ServiceAccount | |
name: cilium-operator | |
namespace: kube-system | |
--- | |
# Source: cilium/charts/agent/templates/daemonset.yaml | |
apiVersion: apps/v1 | |
kind: DaemonSet | |
metadata: | |
labels: | |
k8s-app: cilium | |
name: cilium | |
namespace: kube-system | |
spec: | |
selector: | |
matchLabels: | |
k8s-app: cilium | |
template: | |
metadata: | |
annotations: | |
# This annotation plus the CriticalAddonsOnly toleration makes | |
# cilium to be a critical pod in the cluster, which ensures cilium | |
# gets priority scheduling. | |
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ | |
scheduler.alpha.kubernetes.io/critical-pod: "" | |
labels: | |
k8s-app: cilium | |
spec: | |
containers: | |
- args: | |
- --config-dir=/tmp/cilium/config-map | |
command: | |
- cilium-agent | |
livenessProbe: | |
exec: | |
command: | |
- cilium | |
- status | |
- --brief | |
failureThreshold: 10 | |
# The initial delay for the liveness probe is intentionally large to | |
# avoid an endless kill & restart cycle if in the event that the initial | |
# bootstrapping takes longer than expected. | |
initialDelaySeconds: 120 | |
periodSeconds: 30 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
readinessProbe: | |
exec: | |
command: | |
- cilium | |
- status | |
- --brief | |
failureThreshold: 3 | |
initialDelaySeconds: 5 | |
periodSeconds: 30 | |
successThreshold: 1 | |
timeoutSeconds: 5 | |
env: | |
- name: K8S_NODE_NAME | |
valueFrom: | |
fieldRef: | |
apiVersion: v1 | |
fieldPath: spec.nodeName | |
- name: CILIUM_K8S_NAMESPACE | |
valueFrom: | |
fieldRef: | |
apiVersion: v1 | |
fieldPath: metadata.namespace | |
- name: CILIUM_FLANNEL_MASTER_DEVICE | |
valueFrom: | |
configMapKeyRef: | |
key: flannel-master-device | |
name: cilium-config | |
optional: true | |
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT | |
valueFrom: | |
configMapKeyRef: | |
key: flannel-uninstall-on-exit | |
name: cilium-config | |
optional: true | |
- name: CILIUM_CLUSTERMESH_CONFIG | |
value: /var/lib/cilium/clustermesh/ | |
- name: CILIUM_CNI_CHAINING_MODE | |
valueFrom: | |
configMapKeyRef: | |
key: cni-chaining-mode | |
name: cilium-config | |
optional: true | |
- name: CILIUM_CUSTOM_CNI_CONF | |
valueFrom: | |
configMapKeyRef: | |
key: custom-cni-conf | |
name: cilium-config | |
optional: true | |
image: "docker.io/cilium/cilium:v1.7.2" | |
imagePullPolicy: IfNotPresent | |
lifecycle: | |
postStart: | |
exec: | |
command: | |
- "/cni-install.sh" | |
- "--enable-debug=false" | |
preStop: | |
exec: | |
command: | |
- /cni-uninstall.sh | |
name: cilium-agent | |
securityContext: | |
capabilities: | |
add: | |
- NET_ADMIN | |
- SYS_MODULE | |
privileged: true | |
volumeMounts: | |
- mountPath: /var/lib/etcd-config | |
name: etcd-config-path | |
readOnly: true | |
- mountPath: /sys/fs/bpf | |
name: bpf-maps | |
- mountPath: /var/run/cilium | |
name: cilium-run | |
- mountPath: /host/opt/cni/bin | |
name: cni-path | |
- mountPath: /host/etc/cni/net.d | |
name: etc-cni-netd | |
- mountPath: /var/lib/cilium/clustermesh | |
name: clustermesh-secrets | |
readOnly: true | |
- mountPath: /tmp/cilium/config-map | |
name: cilium-config-path | |
readOnly: true | |
# Needed to be able to load kernel modules | |
- mountPath: /lib/modules | |
name: lib-modules | |
readOnly: true | |
- mountPath: /run/xtables.lock | |
name: xtables-lock | |
hostNetwork: true | |
initContainers: | |
- command: | |
- /init-container.sh | |
env: | |
- name: CILIUM_ALL_STATE | |
valueFrom: | |
configMapKeyRef: | |
key: clean-cilium-state | |
name: cilium-config | |
optional: true | |
- name: CILIUM_BPF_STATE | |
valueFrom: | |
configMapKeyRef: | |
key: clean-cilium-bpf-state | |
name: cilium-config | |
optional: true | |
- name: CILIUM_WAIT_BPF_MOUNT | |
valueFrom: | |
configMapKeyRef: | |
key: wait-bpf-mount | |
name: cilium-config | |
optional: true | |
image: "docker.io/cilium/cilium:v1.7.2" | |
imagePullPolicy: IfNotPresent | |
name: clean-cilium-state | |
securityContext: | |
capabilities: | |
add: | |
- NET_ADMIN | |
privileged: true | |
volumeMounts: | |
- mountPath: /sys/fs/bpf | |
name: bpf-maps | |
mountPropagation: HostToContainer | |
- mountPath: /var/run/cilium | |
name: cilium-run | |
restartPolicy: Always | |
priorityClassName: system-node-critical | |
serviceAccount: cilium | |
serviceAccountName: cilium | |
terminationGracePeriodSeconds: 1 | |
tolerations: | |
- operator: Exists | |
volumes: | |
# To keep state between restarts / upgrades | |
- hostPath: | |
path: /var/run/cilium | |
type: DirectoryOrCreate | |
name: cilium-run | |
# To keep state between restarts / upgrades for bpf maps | |
- hostPath: | |
path: /sys/fs/bpf | |
type: DirectoryOrCreate | |
name: bpf-maps | |
# To install cilium cni plugin in the host | |
- hostPath: | |
path: /opt/cni/bin | |
type: DirectoryOrCreate | |
name: cni-path | |
# To install cilium cni configuration in the host | |
- hostPath: | |
path: /etc/cni/net.d | |
type: DirectoryOrCreate | |
name: etc-cni-netd | |
# To be able to load kernel modules | |
- hostPath: | |
path: /lib/modules | |
name: lib-modules | |
# To access iptables concurrently with other processes (e.g. kube-proxy) | |
- hostPath: | |
path: /run/xtables.lock | |
type: FileOrCreate | |
name: xtables-lock | |
# To read the clustermesh configuration | |
- name: clustermesh-secrets | |
secret: | |
defaultMode: 420 | |
optional: true | |
secretName: cilium-clustermesh | |
# To read the configuration from the config map | |
- configMap: | |
name: cilium-config | |
name: cilium-config-path | |
- configMap: | |
defaultMode: 420 | |
items: | |
- key: etcd-config | |
path: etcd.config | |
name: cilium-config | |
name: etcd-config-path | |
updateStrategy: | |
rollingUpdate: | |
maxUnavailable: 2 | |
type: RollingUpdate | |
--- | |
# Source: cilium/charts/operator/templates/deployment.yaml | |
apiVersion: apps/v1 | |
kind: Deployment | |
metadata: | |
labels: | |
io.cilium/app: operator | |
name: cilium-operator | |
name: cilium-operator | |
namespace: kube-system | |
spec: | |
replicas: 1 | |
selector: | |
matchLabels: | |
io.cilium/app: operator | |
name: cilium-operator | |
strategy: | |
rollingUpdate: | |
maxSurge: 1 | |
maxUnavailable: 1 | |
type: RollingUpdate | |
template: | |
metadata: | |
annotations: | |
labels: | |
io.cilium/app: operator | |
name: cilium-operator | |
spec: | |
containers: | |
- args: | |
- --debug=\$(CILIUM_DEBUG) | |
- --identity-allocation-mode=\$(CILIUM_IDENTITY_ALLOCATION_MODE) | |
- --synchronize-k8s-nodes=true | |
command: | |
- cilium-operator | |
env: | |
- name: CILIUM_K8S_NAMESPACE | |
valueFrom: | |
fieldRef: | |
apiVersion: v1 | |
fieldPath: metadata.namespace | |
- name: K8S_NODE_NAME | |
valueFrom: | |
fieldRef: | |
apiVersion: v1 | |
fieldPath: spec.nodeName | |
- name: CILIUM_DEBUG | |
valueFrom: | |
configMapKeyRef: | |
key: debug | |
name: cilium-config | |
optional: true | |
- name: CILIUM_CLUSTER_NAME | |
valueFrom: | |
configMapKeyRef: | |
key: cluster-name | |
name: cilium-config | |
optional: true | |
- name: CILIUM_CLUSTER_ID | |
valueFrom: | |
configMapKeyRef: | |
key: cluster-id | |
name: cilium-config | |
optional: true | |
- name: CILIUM_IPAM | |
valueFrom: | |
configMapKeyRef: | |
key: ipam | |
name: cilium-config | |
optional: true | |
- name: CILIUM_DISABLE_ENDPOINT_CRD | |
valueFrom: | |
configMapKeyRef: | |
key: disable-endpoint-crd | |
name: cilium-config | |
optional: true | |
- name: CILIUM_KVSTORE | |
valueFrom: | |
configMapKeyRef: | |
key: kvstore | |
name: cilium-config | |
optional: true | |
- name: CILIUM_KVSTORE_OPT | |
valueFrom: | |
configMapKeyRef: | |
key: kvstore-opt | |
name: cilium-config | |
optional: true | |
- name: AWS_ACCESS_KEY_ID | |
valueFrom: | |
secretKeyRef: | |
key: AWS_ACCESS_KEY_ID | |
name: cilium-aws | |
optional: true | |
- name: AWS_SECRET_ACCESS_KEY | |
valueFrom: | |
secretKeyRef: | |
key: AWS_SECRET_ACCESS_KEY | |
name: cilium-aws | |
optional: true | |
- name: AWS_DEFAULT_REGION | |
valueFrom: | |
secretKeyRef: | |
key: AWS_DEFAULT_REGION | |
name: cilium-aws | |
optional: true | |
- name: CILIUM_IDENTITY_ALLOCATION_MODE | |
valueFrom: | |
configMapKeyRef: | |
key: identity-allocation-mode | |
name: cilium-config | |
optional: true | |
image: "docker.io/cilium/operator:v1.7.2" | |
imagePullPolicy: IfNotPresent | |
name: cilium-operator | |
livenessProbe: | |
httpGet: | |
host: '127.0.0.1' | |
path: /healthz | |
port: 9234 | |
scheme: HTTP | |
initialDelaySeconds: 60 | |
periodSeconds: 10 | |
timeoutSeconds: 3 | |
volumeMounts: | |
- mountPath: /var/lib/etcd-config | |
name: etcd-config-path | |
readOnly: true | |
hostNetwork: true | |
restartPolicy: Always | |
serviceAccount: cilium-operator | |
serviceAccountName: cilium-operator | |
volumes: | |
- configMap: | |
defaultMode: 420 | |
items: | |
- key: etcd-config | |
path: etcd.config | |
name: cilium-config | |
name: etcd-config-path | |
CILLIUM | |
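# Cilium v1.7 runs in CRD identity mode with kube-proxy replacement set to
# "probe"; the commented kvstore settings above would point it at the CARP etcd
# endpoints instead. To check agent health once the DaemonSet is up
# (<cilium-pod> is a placeholder for one of the listed pods):
#   kubectl -n kube-system get pods -l k8s-app=cilium
#   kubectl -n kube-system exec <cilium-pod> -- cilium status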
fi | |
#---- /server code--------------------------------------------------- | |
# Last install k3s | |
# --disable-cloud-controller --kube-controller-arg cloud-provider=external --kube-apiserver-arg cloud-provider=external --kube-apiserver-arg allow-privileged=true --kube-apiserver-arg feature-gates=CSIPersistentVolume=true,MountPropagation=true,VolumeSnapshotDataSource=true,CSINodeInfo=true,CSIDriverRegistry=true --kubelet-arg CSIPersistentVolume=true,MountPropagation=true,VolumeSnapshotDataSource=true,KubeletPluginsWatcher=true,CSINodeInfo=true,CSIDriverRegistry=true | |
# --container-runtime-endpoint=/run/crio/crio.sock --flannel-backend=none | |
echo "curl -sfL https://get.k3s.io | sh -s - --datastore-endpoint=\"http://127.0.0.1:2379\" --flannel-backend=none $K3S_ENDPOINT --no-deploy local-storage --no-deploy traefik --no-deploy servicelb --node-ip ${MY} --node-external-ip ${EXTIP} --service-cidr 10.43.0.0/16 --cluster-cidr 10.42.0.0/16 --cluster-dns 10.43.0.10 --cluster-domain cluster.local" | |
curl -sfL https://get.k3s.io | sh -s - server ${K3S_DATASTORE_ENDPOINT} $K3S_ENDPOINT $K3S_OPTIONS
#!/bin/bash | |
IPFAILOVER="" | |
FIRSTHOST="hiro" | |
KUBE_VER="1.17" | |
MYNET="10.2.85." | |
CARPNET="10.174.1." | |
N1="167" | |
N2="168" | |
N3="169" | |
CARPPASS="rascaldev2020" | |
# WORMHOLE="false" | |
# CRIO="false" | |
K3S_NODES="1" | |
K3S_DATASTORE_ENDPOINT='--datastore-endpoint=http://127.0.0.1:2379'
# K3S_DATASTORE_ENDPOINT='' | |
K3S_ENDPOINT="" | |
# K3S_ENDPOINT="--container-runtime-endpoint=/run/crio/crio.sock" | |
##---------------- Finish config | |
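# With the defaults above, a single node installs its own etcd and points k3s
# at it via --datastore-endpoint. A plausible 3-node HA variant is K3S_NODES="3"
# on every host with matching N1/N2/N3 addresses; enabling CRIO="true" should be
# paired with the commented crio K3S_ENDPOINT line.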
cat > /root/.openrc.sh <<'OSCONFIG' | |
export OS_AUTH_URL=https://auth.cloud.ovh.net/v3 | |
# With the addition of Keystone we have standardized on the term **project** | |
# as the entity that owns the resources. | |
export OS_PROJECT_ID=dee7bcc22888437cb0ce308f81628eb1 | |
export OS_PROJECT_NAME="6038446989216404" | |
export OS_USER_DOMAIN_NAME="Default" | |
if [ -z "$OS_USER_DOMAIN_NAME" ]; then unset OS_USER_DOMAIN_NAME; fi | |
export OS_PROJECT_DOMAIN_ID="default" | |
if [ -z "$OS_PROJECT_DOMAIN_ID" ]; then unset OS_PROJECT_DOMAIN_ID; fi | |
# unset v2.0 items in case set | |
unset OS_TENANT_ID | |
unset OS_TENANT_NAME | |
# export OS_TENANT_ID=dee7bcc22888437cb0ce308f81628eb1 | |
# export OS_TENANT_NAME="6038446989216404" | |
# In addition to the owning entity (tenant), OpenStack stores the entity | |
# performing the action as the **user**. | |
export OS_USERNAME="sz3JpMWWFxnV" | |
# With Keystone you pass the keystone password. | |
export OS_PASSWORD="Zfcj5YHdJZrmt88GUBQXU7xgcKffz2fx" | |
if [ -z "$OS_PASSWORD" ]; then | |
echo "Please enter your OpenStack Password for project $OS_PROJECT_NAME as user $OS_USERNAME: " | |
read -sr OS_PASSWORD_INPUT | |
export OS_PASSWORD=$OS_PASSWORD_INPUT | |
fi | |
# If your configuration has multiple regions, we set that information here. | |
# OS_REGION_NAME is optional and only valid in certain environments. | |
export OS_REGION_NAME="GRA1" | |
# Don't leave a blank variable, unset it if it was empty | |
if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi | |
export OS_INTERFACE=public | |
export OS_IDENTITY_API_VERSION=3 | |
OSCONFIG | |
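# The credentials above belong to an OVH OpenStack project and are only needed
# for the "second network" fix below. A quick sanity check once
# python-openstackclient is installed:
#   . /root/.openrc.sh && openstack token issue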
# systemctl restart networking | |
# IP1="${MYNET}${N1}" | |
# IP2="${MYNET}${N2}" | |
# IP3="${MYNET}${N3}" | |
CARP1="${CARPNET}${N1}" | |
CARP2="${CARPNET}${N2}" | |
CARP3="${CARPNET}${N3}" | |
HOSTNAME="$(hostname -s)" | |
#fix only one network in rancher provision | |
if [ -e "/root/.openrc.sh" ] | |
then | |
. /root/.openrc.sh | |
apt-get update | |
apt-get install -y python-openstackclient | |
openstack server add network "$HOSTNAME" OONDEO
systemctl restart networking
fi | |
EXTIP=$(ip route get 8.8.8.8 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
MY=$(ip route get ${MYNET}1 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
MYCARP=$(ip route get ${CARPNET}1 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') | |
IPID=$(echo $MY | cut -d"." -f4) | |
CARPID=$(echo $MYCARP | cut -d"." -f4) | |
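# "ip route get" asks the kernel which source address it would use to reach the
# first host on each subnet, so MY and MYCARP end up being this node's own
# addresses on the private and CARP networks. For example, with MYNET="10.2.85."
# a node at 10.2.85.168 gets MY=10.2.85.168 and IPID=168.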
K3S_OPTIONS="--node-ip ${MY} --node-external-ip ${EXTIP} --service-cidr 10.43.0.0/16 --cluster-cidr 10.42.0.0/16 --cluster-dns 10.43.0.10 --cluster-domain cluster.local" | |
# K3S_OPTIONS="--flannel-backend=none --no-deploy local-storage --no-deploy traefik --no-deploy servicelb --node-ip ${MY} --node-external-ip ${EXTIP} --service-cidr 10.43.0.0/16 --cluster-cidr 10.42.0.0/16 --cluster-dns 10.43.0.10 --cluster-domain cluster.local" | |
# skip already configured hosts | |
if [ "$(grep -e '^bpffs' /etc/fstab)" == "" ] | |
then | |
#5.4 grub: mitigations=off | |
#grub: noibrs noibpb nopti nospectre_v2 nospectre_v1 l1tf=off nospec_store_bypass_disable no_stf_barrier mds=off mitigations=off | |
sed -i 's/^GRUB_CMDLINE_LINUX_DEFAULT\="/GRUB_CMDLINE_LINUX_DEFAULT\="apparmor=0 mitigations=off /' /etc/default/grub | |
echo "bpffs /sys/fs/bpf bpf defaults 0 0" >> /etc/fstab | |
mount /sys/fs/bpf | |
modprobe overlay | |
modprobe br_netfilter | |
cat > /etc/sysctl.d/99-kubernetes.conf <<EOF | |
net.bridge.bridge-nf-call-iptables = 1 | |
net.ipv4.ip_forward = 1 | |
net.bridge.bridge-nf-call-ip6tables = 1 | |
net.core.rmem_default = 80331648 | |
net.core.rmem_max = 80331648 | |
net.core.wmem_default = 33554432 | |
net.core.wmem_max = 50331648 | |
vm.dirty_ratio = 10 | |
vm.dirty_background_ratio = 5 | |
vm.swappiness = 15 | |
EOF | |
sysctl --system | |
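# br_netfilter and overlay were loaded above, so the bridge sysctls should now
# report 1; a quick verification:
#   sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward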
systemctl stop apparmor | |
systemctl disable apparmor | |
apt-get purge --autoremove -y apparmor | |
apt-get update | |
apt-get dist-upgrade -y | |
apt-get install -y curl gnupg2 | |
# echo "deb http://deb.debian.org/debian buster-backports main" | tee /etc/apt/sources.list.d/backports.list | |
echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list | |
curl -sfL https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key | apt-key add - | |
apt-get update | |
apt-get install -y -t buster-backports wireguard linux-headers-cloud-amd64 linux-image-cloud-amd64 | |
ls /var/lib/initramfs-tools | \ | |
sudo xargs -n1 /usr/lib/dkms/dkms_autoinstaller start | |
echo wireguard >> /etc/modules | |
#reboot | |
if [ "$CRIO" == "true" ] | |
then | |
apt-get purge --autoremove -y docker-ce | |
apt-get install -y cri-o-${KUBE_VER} | |
mkdir -p /etc/crio/crio.conf.d/ | |
cat > /etc/crio/crio.conf.d/01-k3s.conf <<CRIO | |
[crio.runtime] | |
cgroup_manager = "cgroupfs" | |
selinux = false | |
CRIO | |
systemctl enable crio | |
systemctl start crio | |
fi | |
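# When CRIO="true", k3s is pointed at the cri-o socket through the commented
# K3S_ENDPOINT value (--container-runtime-endpoint=/run/crio/crio.sock). If
# crictl happens to be installed, the runtime can be checked with:
#   crictl --runtime-endpoint unix:///run/crio/crio.sock info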
cat > /usr/local/bin/get-kubeconfig <<'GETKUBECONFIG' | |
mkdir -p /etc/kubernetes | |
cat > /etc/kubernetes/kubeconfig <<KUBECONFIG | |
--- | |
apiVersion: v1 | |
kind: Config | |
clusters: | |
- cluster: | |
api-version: v1 | |
server: "https://$KUBERNETES_SERVICE_PORT_HTTPS:$KUBERNETES_SERVICE_PORT" | |
certificate-authority: $(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt) | |
name: "Default" | |
contexts: | |
- context: | |
cluster: "Default" | |
user: "Default" | |
name: "Default" | |
current-context: "Default" | |
users: | |
- name: "Default" | |
user: | |
token: "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | |
KUBECONFIG | |
GETKUBECONFIG | |
cat > /usr/local/bin/kube-proxy <<'KUBEPROXY' | |
#!/bin/bash | |
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin | |
trap "exit 0" SIGTERM SIGINT SIGHUP SIGQUIT SIGKILL | |
if [[ ${KUBE_PROXY_NOOP} -eq 1 ]];then | |
clean-install netcat | |
# mimic healthz server | |
while true; do | |
DATE="$(date -u "+%F %T.%N %z %Z m=+")$(perl -w -MTime::HiRes=clock_gettime,CLOCK_MONOTONIC -E 'say clock_gettime(CLOCK_MONOTONIC)')" | |
CONTENT="{\"lastUpdated\": \"${DATE}\",\"currentTime\": \"${DATE}\"}" | |
cat << EOF | perl -pe 'chomp if eof' | nc -s 127.0.0.1 -lp 10256 -q 1 | |
HTTP/1.1 200 OK$(printf "\r") | |
Content-Type: application/json$(printf "\r") | |
X-Content-Type-Options: nosniff$(printf "\r") | |
Date: $(date -u)$(printf "\r") | |
Content-Length: $((${#CONTENT}+0))$(printf "\r") | |
$(printf "\r") | |
${CONTENT} | |
EOF | |
done | |
else | |
# /usr/local/bin/kube-proxy | |
exec kube-proxy "$@" | |
fi | |
KUBEPROXY | |
chmod +x /usr/local/bin/* | |
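# The kube-proxy wrapper above is a stand-in: with KUBE_PROXY_NOOP=1 it only
# answers kube-proxy's healthz port (10256) via netcat so Cilium's kube-proxy
# replacement can own the datapath; otherwise it execs the real kube-proxy.
# With a NOOP instance running, the health endpoint can be probed with:
#   curl -s http://127.0.0.1:10256/healthz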
if [ "K3S_NODES" != "" ] | |
then | |
curl -sfL "https://gist.githubusercontent.com/amon-ra/995f5c6bf05c8c8d1a88194754cfea11/raw/k3s-server.sh" > /tmp/k3s-server.sh | |
. /tmp/k3s-server.sh | |
cp /root/manifests/06-rancher.yml /var/lib/rancher/k3s/server/manifests || true | |
# cp /root/manifests/0* /var/lib/rancher/k3s/server/manifests || true | |
# cp /root/manifests/1* /var/lib/rancher/k3s/server/manifests || true | |
fi | |
fi | |
exit 0