Skip to content

Instantly share code, notes, and snippets.

@vrabbi
Last active April 14, 2024 17:55
Show Gist options
  • Save vrabbi/40df7e843712a3840b31f8176abf6838 to your computer and use it in GitHub Desktop.
Save vrabbi/40df7e843712a3840b31f8176abf6838 to your computer and use it in GitHub Desktop.
tkg multi md - cc manipulation - static ips and ext ip pool generation
---
# CRD used by the ccp-adapter to publish cluster information to NSX.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app: antrea-interworking
  name: antreaccpadapterinfos.clusterinformation.antrea-interworking.tanzu.vmware.com
spec:
  group: clusterinformation.antrea-interworking.tanzu.vmware.com
  names:
    kind: AntreaCCPAdapterInfo
    plural: antreaccpadapterinfos
    shortNames:
    - ccpainfo
    singular: antreaccpadapterinfo
  scope: Cluster
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        type: object
        # Schema is intentionally open: adapter writes arbitrary status fields.
        x-kubernetes-preserve-unknown-fields: true
    served: true
    storage: true
---
# CRD used by the mp-adapter to publish cluster information to NSX.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  labels:
    app: antrea-interworking
  name: antreampadapterinfos.clusterinformation.antrea-interworking.tanzu.vmware.com
spec:
  group: clusterinformation.antrea-interworking.tanzu.vmware.com
  names:
    kind: AntreaMPAdapterInfo
    plural: antreampadapterinfos
    shortNames:
    - mpainfo
    singular: antreampadapterinfo
  scope: Cluster
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        type: object
        # Schema is intentionally open: adapter writes arbitrary status fields.
        x-kubernetes-preserve-unknown-fields: true
    served: true
    storage: true
---
# Namespace for all antrea-interworking components; pod-security labels
# allow the privileged host-network pods deployed below.
apiVersion: v1
kind: Namespace
metadata:
  name: vmware-system-antrea
  labels:
    app: antrea-interworking
    openshift.io/run-level: '0'
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/audit-version: latest
    pod-security.kubernetes.io/warn: privileged
    pod-security.kubernetes.io/warn-version: latest
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-id
namespace: vmware-system-antrea
# NOTE: Register job will generate the ConfigMap data like below:
# data:
# cluster-id.conf:
# clusterID: A-UUID-String
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app: antrea-interworking
name: antrea-interworking-config
namespace: vmware-system-antrea
data:
mp-adapter.conf: |
# NSXRemoteAuth indicates whether to use NSX remote authentication (vIDM integration).
NSXRemoteAuth: false
# Path to the client authentication certificate file.
NSXClientAuthCertFile: /etc/antrea/nsx-cert/tls.crt
# Path to the client authentication key file.
NSXClientAuthKeyFile: /etc/antrea/nsx-cert/tls.key
# Path to the CA file which is used for validating NSX server certificate.
NSXCAFile: ""
# NSXInsecure indicates whether to validate NSX server certificate..
NSXInsecure: true
# Timeout in seconds for NSX client.
NSXClientTimeout: 120
# InventoryBatchSize is the max objects in one inventory update request.
InventoryBatchSize: 50
# InventoryBatchPeriod is the time in seconds to send out request even if the max batch size is not reached.
InventoryBatchPeriod: 5
# The time in seconds to run inventory garbage collection
InventoryGCPeriod: 60
# NsxRpcConnType is the NSX connection type: either mock or tnproxy.
NSXRPCConnType: tnproxy
# EnableDebugServer indicates whether to enable the debug server.
EnableDebugServer: false
# DebugServerPort is the port for the mp-adapter to call restful API for debugging. Defaults to 16666.
DebugServerPort: 16666
# NSXRPCDebug indicates whether to enable NSX RPC debug mode.
NSXRPCDebug: false
# Timeout (seconds) for monitored health conditions. If a health condition is not updated within this timeout, the condition will be treated as false.
ConditionTimeout: 150
# Port on which the mp-adapter API server listens. API server is for liveness probes.
APIServerPort: 16664
# ClusterType represents the type of the cluster.
#clusterType: kubernetes
ccp-adapter.conf: |
# Port on which the ccp-adapter API server listens. API server is for liveness probes.
APIServerPort: 16665
# EnableDebugServer indicates whether to enable the debug server.
EnableDebugServer: false
# DebugServerPort is the port for the ccp-adapter to call restful API for debugging. Defaults to 16667.
DebugServerPort: 16667
# NSXRPCDebug indicates whether to enable NSX RPC debug mode.
NSXRPCDebug: false
# Timeout in second to wait for acnp/cg/tier realization.
RealizeTimeoutSeconds: 60
# An interval for regularly report latest realization error in background.
RealizeErrorSyncIntervalSeconds: 600
# Number of workers for reconciler.
ReconcilerWorkerCount: 8
# Kubernetes client rate limit setting. Average QPS = ReconcilerWorkerCount * ReconcilerQPS
ReconcilerQPS: 5.0
# Kubernetes client rate limit setting. Peak QPS = ReconcilerWorkerCount * ReconcilerBurst
ReconcilerBurst: 10
# Period in seconds for reconciler to resync the applied K8s resources.
ReconcilerResyncSeconds: 86400
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: antrea-interworking
name: register
namespace: vmware-system-antrea
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: antrea-interworking
name: register
namespace: vmware-system-antrea
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- get
- list
- create
- update
- patch
- delete
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- delete
- apiGroups:
- "apps"
resources:
- deployments
verbs:
- get
- delete
- apiGroups:
- policy
resourceNames:
- vmware-system-privileged
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: antrea-interworking
name: register
namespace: vmware-system-antrea
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: register
subjects:
- kind: ServiceAccount
name: register
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app: antrea-interworking
name: vmware-system-antrea-register
namespace: default
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: antrea-interworking
name: vmware-system-antrea-register
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: vmware-system-antrea-register
subjects:
- kind: ServiceAccount
name: register
namespace: vmware-system-antrea
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: antrea-interworking
name: interworking
namespace: vmware-system-antrea
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: antrea-interworking
name: antrea-interworking
rules:
- apiGroups:
- ""
resources:
- nodes
- namespaces
- pods
- services
- endpoints
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
verbs:
- patch
- apiGroups:
- clusterinformation.antrea-interworking.tanzu.vmware.com
resources:
- antreaccpadapterinfos
- antreampadapterinfos
verbs:
- get
- watch
- list
- create
- update
- patch
- delete
- apiGroups:
- ""
resourceNames:
- extension-apiserver-authentication
- bootstrap-config
resources:
- configmaps
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- crd.antrea.io
resources:
- antreaagentinfos
- antreacontrollerinfos
- egresses
- ippools
verbs:
- get
- watch
- list
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
- ingresses
verbs:
- get
- watch
- list
- apiGroups:
- crd.antrea.io
resources:
- traceflows
- traceflows/status
verbs:
- get
- watch
- list
- update
- patch
- create
- delete
- apiGroups:
- crd.antrea.io
resources:
- clusternetworkpolicies
- networkpolicies
- tiers
- clustergroups
verbs:
- get
- watch
- list
- create
- update
- patch
- delete
- apiGroups:
- controlplane.antrea.tanzu.vmware.com
- controlplane.antrea.io
resources:
- clustergroupmembers
- groupassociations
verbs:
- get
- list
- apiGroups:
- crd.antrea.tanzu.vmware.com
resources:
- tierentitlementbindings
- tierentitlements
- nsxregistrations
verbs:
- get
- watch
- list
- create
- update
- patch
- delete
- apiGroups:
- stats.antrea.io
resources:
- antreaclusternetworkpolicystats
verbs:
- get
- list
- apiGroups:
- gateway.networking.k8s.io
resources:
- gateways
verbs:
- get
- watch
- list
- apiGroups:
- config.openshift.io
resources:
- networks
verbs:
- get
- watch
- list
- apiGroups:
- policy
resourceNames:
- vmware-system-privileged
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: antrea-interworking
name: antrea-interworking
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: antrea-interworking
subjects:
- kind: ServiceAccount
name: interworking
namespace: vmware-system-antrea
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: antrea-interworking
name: antrea-interworking-supportbundle
rules:
- apiGroups:
- ""
resources:
- pods
- pods/log
- nodes
- configmaps
verbs:
- get
- list
- apiGroups:
- "apps"
resources:
- deployments
- replicasets
- daemonsets
verbs:
- list
- apiGroups:
- system.antrea.io
resources:
- supportbundles
verbs:
- get
- create
- apiGroups:
- system.antrea.io
resources:
- controllerinfos
- supportbundles/download
- nodecpuinfos
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: antrea-interworking
name: antrea-interworking-supportbundle
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: antrea-interworking-supportbundle
subjects:
- kind: ServiceAccount
name: interworking
namespace: vmware-system-antrea
---
apiVersion: batch/v1
kind: Job
metadata:
name: register
labels:
app: antrea-interworking
component: register
namespace: vmware-system-antrea
spec:
template:
spec:
containers:
- name: register
image: projects.registry.vmware.com/antreainterworking/interworking-ubuntu:0.13.0_vmware.1
imagePullPolicy: IfNotPresent
command: [ "/usr/local/bin/cluster-registry" ]
args:
- register
- --logtostderr=false
- --log_dir=/var/log/interworking
- --alsologtostderr
- --log_file_max_size=5
- --log_file_max_num=4
volumeMounts:
- mountPath: /etc/antrea
name: projected-configs
readOnly: true
- mountPath: /var/log/interworking
name: host-var-log-interworking
restartPolicy: OnFailure
serviceAccountName: register
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
volumes:
- name: host-var-log-interworking
hostPath:
path: /var/log/interworking
type: DirectoryOrCreate
- name: projected-configs
projected:
sources:
- configMap:
name: bootstrap-config
items:
- key: bootstrap.conf
path: bootstrap.conf
- configMap:
name: cluster-id
items:
- key: cluster-id.conf
path: cluster-id.conf
optional: true
- secret:
name: nsx-cert
items:
- key: tls.crt
path: nsx-cert/tls.crt
- key: tls.key
path: nsx-cert/tls.key
optional: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
backoffLimit: 3
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: antrea-interworking
component: interworking
name: interworking
namespace: vmware-system-antrea
spec:
replicas: 1
selector:
matchLabels:
app: antrea-interworking
component: interworking
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
template:
metadata:
labels:
app: antrea-interworking
component: interworking
spec:
containers:
- name: election-runner
command:
- /usr/local/bin/election-runner
args:
- --id=$(POD_NAME)
- --namespace=vmware-system-antrea
- --ttl=60s
- --logtostderr=false
- --log_dir=/var/log/interworking/election-runner
- --alsologtostderr
- --log_file_max_size=5
- --log_file_max_num=2
- --v=4
image: projects.registry.vmware.com/antreainterworking/interworking-ubuntu:0.13.0_vmware.1
imagePullPolicy: IfNotPresent
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
volumeMounts:
- mountPath: /var/run/antrea-interworking
name: host-var-run-antrea-interworking
- mountPath: /var/log/interworking
name: host-var-log-interworking
- name: mp-adapter
command:
- /usr/local/bin/election-watcher
args:
- --cmd=mp-adapter
- --args=--bootstrap-config,/etc/antrea/bootstrap.conf,--config,/etc/antrea/mp-adapter.conf,--cluster-id-config,/etc/antrea/cluster-id.conf,--logtostderr=false,--log_dir=/var/log/interworking/mp-adapter,--alsologtostderr,--log_file_max_size=25,--log_file_max_num=4,--v=4
- --logtostderr=false
- --log_dir=/var/log/interworking/mp-adapter
- --alsologtostderr
- --log_file_max_size=5
- --log_file_max_num=2
- --v=4
image: projects.registry.vmware.com/antreainterworking/interworking-ubuntu:0.13.0_vmware.1
imagePullPolicy: IfNotPresent
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTAINER_NAME
value: mp-adapter
livenessProbe:
httpGet:
host: localhost
path: /livez
port: api
scheme: HTTPS
initialDelaySeconds: 90
timeoutSeconds: 15
periodSeconds: 60
failureThreshold: 3
ports:
- name: api
containerPort: 16664
protocol: TCP
volumeMounts:
- mountPath: /etc/antrea
name: projected-configs
readOnly: true
- mountPath: /var/run/vmware
name: var-run-vmware
readOnly: true
- mountPath: /var/run/antrea-interworking
name: host-var-run-antrea-interworking
- mountPath: /var/log/interworking
name: host-var-log-interworking
- mountPath: /etc/vmware/nsx
name: etc-vmware-nsx
resources:
limits:
memory: "4096Mi"
requests:
memory: "256Mi"
- name: tn-proxy
command:
- /usr/local/bin/election-watcher
args:
- --cmd=tn-proxy-init.sh
- --logtostderr=false
- --log_dir=/var/log/interworking/tn-proxy
- --alsologtostderr
- --log_file_max_size=25
- --log_file_max_num=4
- --logChild=true
image: projects.registry.vmware.com/antreainterworking/interworking-ubuntu:0.13.0_vmware.1
imagePullPolicy: IfNotPresent
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: CONTAINER_NAME
value: tn-proxy
volumeMounts:
- mountPath: /var/run/vmware
name: var-run-vmware
- mountPath: /etc/vmware/nsx
name: etc-vmware-nsx
- mountPath: /var/run/antrea-interworking
name: host-var-run-antrea-interworking
- mountPath: /etc/antrea
name: projected-configs
readOnly: true
- mountPath: /var/log/interworking
name: host-var-log-interworking
livenessProbe:
exec:
command:
- /bin/bash
- -c
- (nsx-appctl -t /var/run/vmware/nsx-proxy/nsx-proxy-cli get/aph-conn-status
| grep '"CONNECTED"') && /usr/local/bin/tnproxy-prober
failureThreshold: 3
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 15
- name: ccp-adapter
command:
- /usr/local/bin/election-watcher
args:
- --cmd=ccp-adapter
- --args=--config,/etc/antrea/ccp-adapter.conf,--cluster-id-config,/etc/antrea/cluster-id.conf,--logtostderr=false,--log_dir=/var/log/interworking/ccp-adapter,--alsologtostderr,--log_file_max_size=25,--log_file_max_num=4,--v=4
- --logtostderr=false
- --log_dir=/var/log/interworking/ccp-adapter
- --alsologtostderr
- --log_file_max_size=5
- --log_file_max_num=2
- --v=4
image: projects.registry.vmware.com/antreainterworking/interworking-ubuntu:0.13.0_vmware.1
imagePullPolicy: IfNotPresent
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: CONTAINER_NAME
value: ccp-adapter
volumeMounts:
- mountPath: /var/run/vmware
name: var-run-vmware
- mountPath: /var/lib/vmware
name: var-lib-vmware
- mountPath: /var/run/antrea-interworking
name: host-var-run-antrea-interworking
- mountPath: /etc/antrea
name: projected-configs
readOnly: true
- mountPath: /var/log/interworking
name: host-var-log-interworking
resources:
limits:
memory: "4096Mi"
requests:
memory: "256Mi"
livenessProbe:
httpGet:
host: localhost
path: /livez
port: api
scheme: HTTPS
timeoutSeconds: 15
periodSeconds: 60
failureThreshold: 3
ports:
- name: api
containerPort: 16665
protocol: TCP
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: interworking
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- hostPath:
path: /var/run/antrea-interworking
type: DirectoryOrCreate
name: host-var-run-antrea-interworking
- hostPath:
path: /var/log/interworking
type: DirectoryOrCreate
name: host-var-log-interworking
- name: projected-configs
projected:
sources:
- configMap:
name: antrea-interworking-config
items:
- key: mp-adapter.conf
path: mp-adapter.conf
- key: ccp-adapter.conf
path: ccp-adapter.conf
- configMap:
name: bootstrap-config
items:
- key: bootstrap.conf
path: bootstrap.conf
- configMap:
name: cluster-id
items:
- key: cluster-id.conf
path: cluster-id.conf
optional: true
- secret:
name: nsx-cert
items:
- key: tls.crt
path: nsx-cert/tls.crt
- key: tls.key
path: nsx-cert/tls.key
- name: etc-vmware-nsx
emptyDir: { }
- name: var-run-vmware
emptyDir: { }
- name: var-lib-vmware
emptyDir: { }
---
# vars.yaml — Ansible variables consumed by the playbook via vars_files.
# NOTE(review): this appears to be a separate gist file fused into the page;
# the leading '---' keeps the concatenation parseable as a distinct document.
nsx_vip: "172.16.20.103"
nsx_hosts: "172.16.20.100,172.16.20.101,172.16.20.102"
nsx_username: "USERNAME"
nsx_password: "PASSWORD"
tier1_gateway_name: "T1-Policy"
transport_zone_name: "OVERLAY-TZ"
tkg_cluster_name: "CLUSTER_NAME"
machine_deployments:
- tier: mgmt
- tier: web
- tier: app
- tier: db
subnet: 172.16.231.0/24
gateway_address: 172.16.231.1/24
---
# Ansible playbook: creates the NSX segment and VM groups for each machine
# deployment tier, then generates the antrea-interworking bootstrap manifest.
- hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
  - vars.yaml
  tasks:
  - name: NSX Segment
    vmware.ansible_for_nsxt.nsxt_policy_segment:
      hostname: "{{ nsx_vip }}"
      username: "{{ nsx_username }}"
      password: "{{ nsx_password }}"
      validate_certs: false
      state: present
      display_name: "{{ tkg_cluster_name }}-nodes"
      tier1_display_name: "{{ tier1_gateway_name }}"
      transport_zone_display_name: "{{ transport_zone_name }}"
      subnets:
      - gateway_address: "{{ gateway_address }}"
  # One NSX group per tier, matching VMs by name prefix.
  - name: Create Tier Groups
    vmware.ansible_for_nsxt.nsxt_policy_group:
      hostname: "{{ nsx_vip }}"
      username: "{{ nsx_username }}"
      password: "{{ nsx_password }}"
      validate_certs: false
      id: "{{ tkg_cluster_name }}-{{ machine_deployment.tier }}"
      display_name: "{{ tkg_cluster_name }}-{{ machine_deployment.tier }}"
      state: "present"
      domain_id: "default"
      expression:
      - member_type: "VirtualMachine"
        value: "{{ tkg_cluster_name }}-{{ machine_deployment.tier }}-"
        key: "Name"
        operator: "STARTSWITH"
        resource_type: "Condition"
    loop: "{{ machine_deployments }}"
    loop_control:
      loop_var: machine_deployment
  - name: Create Cluster Group
    vmware.ansible_for_nsxt.nsxt_policy_group:
      hostname: "{{ nsx_vip }}"
      username: "{{ nsx_username }}"
      password: "{{ nsx_password }}"
      validate_certs: false
      id: "{{ tkg_cluster_name }}-all-nodes"
      display_name: "{{ tkg_cluster_name }}-all-nodes"
      state: "present"
      domain_id: "default"
      expression:
      - member_type: "VirtualMachine"
        value: "{{ tkg_cluster_name }}-"
        key: "Name"
        operator: "STARTSWITH"
        resource_type: "Condition"
  - name: Create Cluster Control Plane Group
    vmware.ansible_for_nsxt.nsxt_policy_group:
      hostname: "{{ nsx_vip }}"
      username: "{{ nsx_username }}"
      password: "{{ nsx_password }}"
      validate_certs: false
      id: "{{ tkg_cluster_name }}-control-plane-nodes"
      display_name: "{{ tkg_cluster_name }}-control-plane-nodes"
      state: "present"
      domain_id: "default"
      expression:
      - member_type: "VirtualMachine"
        value: "{{ tkg_cluster_name }}-controlplane-"
        key: "Name"
        operator: "STARTSWITH"
        resource_type: "Condition"
  # 'creates' makes the shell task idempotent: skipped once the manifest exists.
  - name: Create NSX Antrea Manifest
    ansible.builtin.shell: "./antreansxctl bootstrap --user '{{ nsx_username }}' --password '{{ nsx_password }}' --nsx-managers '{{ nsx_hosts }}' --cluster-name {{ tkg_cluster_name }}"
    args:
      creates: "{{ tkg_cluster_name }}-bootstrap-config.yaml"
#! ytt overlay: generates the node IPAM pool, per-tier Antrea ExternalIPPools
#! (delivered via a ClusterResourceSet), and rewrites the Cluster topology's
#! machineDeployments from data values.
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")
#@ load("@ytt:template", "template")
#@ load("@ytt:yaml", "yaml")
---
apiVersion: ipam.cluster.x-k8s.io/v1alpha2
kind: InClusterIPPool
metadata:
  name: #@ "{}-ipam".format(data.values.clusterName)
  namespace: default
spec:
  gateway: #@ data.values.ipam.nodes.gateway
  addresses:
  - #@ data.values.ipam.nodes.ipRange
  prefix: #@ data.values.ipam.nodes.subnetPrefix
---
#! Template for an Antrea ExternalIPPool scoped to nodes carrying the
#! node-role.kubernetes.io/<role> label.
#@ def extIPPool(first, last, role):
apiVersion: crd.antrea.io/v1alpha2
kind: ExternalIPPool
metadata:
  name: #@ "{}-external-ip-pool".format(role)
spec:
  ipRanges:
  - start: #@ first
    end: #@ last
  nodeSelector:
    matchLabels:
      _: #@ template.replace({"node-role.kubernetes.io/" + role: ""})
#@ end
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: #@ "{}-external-ip-mgmt".format(data.values.clusterName)
  labels:
    cluster.x-k8s.io/cluster-name: #@ data.values.clusterName
spec:
  strategy: "Reconcile"
  clusterSelector:
    matchLabels:
      tkg.tanzu.vmware.com/cluster-name: #@ data.values.clusterName
  resources:
  #@ for md in data.values.machineDeployments:
  - name: #@ "{}-{}-ext-ip-pool".format(data.values.clusterName, md.name)
    kind: ConfigMap
  #@ end
#@ for md in data.values.machineDeployments:
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: #@ "{}-{}-ext-ip-pool".format(data.values.clusterName, md.name)
data:
  data: #@ yaml.encode(extIPPool(md.externalIPs.first, md.externalIPs.last, md.role))
#@ end
---
#! Builds the machineDeployments list for the Cluster topology.
#@ def mds():
#@ for md in data.values.machineDeployments:
- name: #@ md.name
  class: tkg-worker
  metadata:
    annotations:
      cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: #@ md.maxReplicas
      cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: #@ md.minReplicas
      run.tanzu.vmware.com/resolve-os-image: image-type=ova,os-name=ubuntu
    labels:
      _: #@ template.replace({"node-role.kubernetes.io/" + md.role: ""})
      node.cluster.x-k8s.io/tier: #@ md.role
  variables:
    overrides:
    - name: worker
      value:
        machine:
          diskGiB: #@ md.diskGB
          memoryMiB: #@ md.memoryMB
          numCPUs: #@ md.numCPUs
        network:
          nameservers: #@ data.values.nameservers
          searchDomains: #@ data.values.searchDomains
#@ end
#@ end
#@overlay/match by=overlay.subset({"kind": "Cluster"})
---
spec:
  topology:
    workers:
      #@overlay/replace
      machineDeployments: #@ mds()
#@data/values
---
clusterName: qa-cls-01
ipam:
  nodes:
    gateway: 172.16.229.1
    subnetPrefix: 24
    ipRange: 172.16.229.10-172.16.229.100
nameservers:
- 172.16.20.10
searchDomains:
- terasky.local
#! maxReplicas/minReplicas are emitted as Kubernetes annotation values, which
#! must be strings — keep them quoted on every entry.
#! NOTE(review): md-app and md-db share the 172.16.229.101-120 external range,
#! and md-web and md-mgmt share 172.16.229.121-140 — confirm overlap is intended.
machineDeployments:
- name: md-app
  role: app
  diskGB: 40
  memoryMB: 4096
  numCPUs: 2
  maxReplicas: "10"
  minReplicas: "3"
  externalIPs:
    first: 172.16.229.101
    last: 172.16.229.120
- name: md-web
  role: web
  diskGB: 50
  memoryMB: 8192
  numCPUs: 4
  maxReplicas: "5"
  minReplicas: "1"
  externalIPs:
    first: 172.16.229.121
    last: 172.16.229.140
- name: md-db
  role: db
  diskGB: 40
  memoryMB: 4096
  numCPUs: 2
  maxReplicas: "10"
  minReplicas: "3"
  externalIPs:
    first: 172.16.229.101
    last: 172.16.229.120
- name: md-mgmt
  role: mgmt
  diskGB: 50
  memoryMB: 8192
  numCPUs: 4
  maxReplicas: "5"
  minReplicas: "1"
  externalIPs:
    first: 172.16.229.121
    last: 172.16.229.140
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment