Convenience script to set up my local Kubernetes KinD cluster, with the following resources: a local image registry and a Devfile registry. Requires the kind, docker, kubectl, helm, curl, openssl, operator-sdk, and yq commands.
#!/bin/sh

set -o errexit
set -x

#alias docker=podman
#KIND_EXPERIMENTAL_PROVIDER=podman

cluster_name=${1:-local-k8s-cluster}
registry_port=${2:-5000}

# create registry container unless it already exists
registry_name='local-registry'
running="$(docker container inspect -f '{{.State.Running}}' "${registry_name}" 2>/dev/null || true)"
if [ "${running}" != 'true' ]; then
  registry_image_tag='2.8'
  docker container run \
    --pull=always \
    -d --restart=always \
    -p "${registry_port}:5000" \
    -e REGISTRY_STORAGE_DELETE_ENABLED="true" \
    --name "${registry_name}" \
    registry:${registry_image_tag}
fi
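
# Optional sanity check (not part of the original flow): the registry container
# implements the Docker Registry HTTP API v2, so its catalog endpoint should
# answer once the container is up. Uncomment to verify:
# curl -s "http://localhost:${registry_port}/v2/_catalog"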
if ! kind get clusters | grep -q "$cluster_name"; then
  # Images from https://hub.docker.com/r/kindest/node/tags
  kindest_node_image_tag='v1.27.1'
  if [ ! -f ~/.docker/config.json ]; then
    mkdir -p ~/.docker
    touch ~/.docker/config.json
  fi
  cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: "${cluster_name}"
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".containerd]
    disable_snapshot_annotations = true
  # The registry container always listens on port 5000 inside the kind network,
  # regardless of the host port it is published on.
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${registry_port}"]
    endpoint = ["http://${registry_name}:5000"]
nodes:
- role: control-plane
  image: kindest/node:${kindest_node_image_tag}
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
  extraMounts:
  - containerPath: /var/lib/kubelet/config.json
    hostPath: ${HOME}/.docker/config.json
- role: worker
  image: kindest/node:${kindest_node_image_tag}
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "kubernetes.io/os=linux"
  extraMounts:
  - containerPath: /var/lib/kubelet/config.json
    hostPath: ${HOME}/.docker/config.json
networking:
  # WARNING: It is _strongly_ recommended that you keep this the default
  # (127.0.0.1) for security reasons. However it is possible to change this.
  # Listening on all IP addresses so the cluster can be reached from my local network.
  apiServerAddress: "0.0.0.0"
  # By default the API server listens on a random open port, which makes it easier
  # to spin up multiple clusters; pinned to 6443 here so the address is predictable
  # from other machines on the network.
  apiServerPort: 6443
  # disable kindnet, which does not support Network Policies
  disableDefaultCNI: true
  # set to Calico's default subnet
  podSubnet: 192.168.0.0/16
EOF
fi
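
# Optional sanity check (extra, not in the original flow): confirm the cluster
# is listed and the API server answers before continuing.
# kind get clusters | grep "${cluster_name}"
# kubectl cluster-info --context "kind-${cluster_name}"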
# connect the registry to the cluster network
# (the network may already be connected)
docker network connect "kind" "${registry_name}" || true

kubectl_ctx="kind-${cluster_name}"

# Communicate the local registry to external local tools
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "localhost:${registry_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF

# Annotate the cluster nodes to use the registry
# https://docs.tilt.dev/choosing_clusters.html#discovering-the-registry
for node in $(kind get nodes --name "${cluster_name}"); do
  kubectl --context="${kubectl_ctx}" \
    annotate --overwrite node "${node}" "kind.x-k8s.io/registry=localhost:${registry_port}";
done
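
# Example usage of the local registry (illustration only, per
# https://kind.sigs.k8s.io/docs/user/local-registry/): push any image to
# localhost:${registry_port}, then reference it from the cluster under the same name.
# docker pull busybox:latest
# docker tag busybox:latest "localhost:${registry_port}/busybox:latest"
# docker push "localhost:${registry_port}/busybox:latest"
# kubectl --context="${kubectl_ctx}" run registry-test --image="localhost:${registry_port}/busybox:latest" --restart=Never -- sleep 3600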
# Update Helm repos in case they are already present
# (the '|| true' keeps errexit from aborting on a fresh machine with no repos configured yet)
helm repo update || true

## Install Calico
helm repo add projectcalico https://docs.tigera.io/calico/charts
kubectl --context="${kubectl_ctx}" create namespace tigera-operator --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -
helm --kube-context="${kubectl_ctx}" \
  upgrade --install calico \
  projectcalico/tigera-operator \
  --version v3.25.0 \
  --namespace tigera-operator
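
# Optional: wait for the tigera-operator rollout before workloads start needing
# a CNI. The deployment name is an assumption based on the chart default; verify
# with 'kubectl -n tigera-operator get deploy' if the chart version changes.
# kubectl --context="${kubectl_ctx}" -n tigera-operator rollout status deployment/tigera-operator --timeout=300s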
# cf. https://kind.sigs.k8s.io/docs/user/ingress/#ingress-nginx
ingress_nginx_controller_tag="main"
kubectl --context="${kubectl_ctx}" \
  apply -f \
  "https://raw.githubusercontent.com/kubernetes/ingress-nginx/${ingress_nginx_controller_tag}/deploy/static/provider/kind/deploy.yaml"

### Wait for the ingress-nginx controller pod to be created
echo -n "Waiting for pod app.kubernetes.io/component=controller to be created."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n ingress-nginx get pod --selector=app.kubernetes.io/component=controller 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for the NGINX Ingress Controller pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=600s
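
# Optional smoke test (assumption: nothing else listens on host port 80): the
# controller's default backend should answer on the mapped host port, typically
# with a 404 until an Ingress resource is defined.
# curl -s -o /dev/null -w '%{http_code}\n' http://localhost/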
#
# BEGIN user creation
#
# Create a limited user that has access to a single namespace. Useful for testing permissions.
#
USER_CERTS=~/.local/var/kind-certs/${kubectl_ctx}
mkdir -p "$USER_CERTS"
kubectl --context="${kubectl_ctx}" create namespace user-ns --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -
openssl genrsa -out "$USER_CERTS"/user.key 2048
openssl req -new -key "$USER_CERTS"/user.key -out "$USER_CERTS"/user.csr -subj "/CN=user/O=redhat"
docker container cp "${cluster_name}-control-plane":/etc/kubernetes/pki/ca.crt "$USER_CERTS"/k8s_ca.crt
docker container cp "${cluster_name}-control-plane":/etc/kubernetes/pki/ca.key "$USER_CERTS"/k8s_ca.key
openssl x509 -req -in "$USER_CERTS"/user.csr -CA "$USER_CERTS"/k8s_ca.crt -CAkey "$USER_CERTS"/k8s_ca.key -CAcreateserial -out "$USER_CERTS"/user.crt -days 500

# Remove previous entries from ~/.kube/config
# (the deleted context name must match the "kind-<cluster>-user-context" created below)
cp -vf ~/.kube/config ~/.kube/config.bak
yq eval 'del( .contexts[] | select(.name == "kind-'"${cluster_name}"'-user-context"))' ~/.kube/config > ~/.kube/config.tmp
mv -f ~/.kube/config.tmp ~/.kube/config
yq eval 'del( .users[] | select(.name == "user"))' ~/.kube/config > ~/.kube/config.tmp
mv -f ~/.kube/config.tmp ~/.kube/config
chmod 600 ~/.kube/config

kubectl config set-credentials user --client-certificate="$USER_CERTS"/user.crt --client-key="$USER_CERTS"/user.key
kubectl config set-context "kind-${cluster_name}-user-context" --cluster="kind-$cluster_name" --namespace=user-ns --user=user
# kubectl --context="kind-${cluster_name}-user-context" get pods || true
# Create the role for managing deployments
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: user-ns
  name: user-role
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["deployments", "replicasets", "pods", "services"]
  verbs: ["*"]
  # verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
EOF

# Bind the role to the user
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: user-role-binding
  namespace: user-ns
subjects:
- kind: User
  name: user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: user-role
  apiGroup: rbac.authorization.k8s.io
EOF

echo "Context 'kind-${cluster_name}-user-context' created, with a user that only has access to the 'user-ns' namespace"
# END user creation

#
# Services in a single namespace
#
SERVICES_NS="shared-services"
kubectl --context="${kubectl_ctx}" create namespace "$SERVICES_NS" --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -

## Devfile Registry
REGISTRY_SUPPORT_LOCAL_REPO_PARENT="$HOME/.local/var/devfile"
REGISTRY_SUPPORT_LOCAL_REPO="$REGISTRY_SUPPORT_LOCAL_REPO_PARENT/registry-support"
mkdir -p "$REGISTRY_SUPPORT_LOCAL_REPO_PARENT"
git clone --depth=1 https://github.com/devfile/registry-support "$REGISTRY_SUPPORT_LOCAL_REPO" 2>/dev/null || \
  git -C "$REGISTRY_SUPPORT_LOCAL_REPO" pull

# currentNs=$(kubectl --context="${kubectl_ctx}" config view --minify -o jsonpath='{..namespace}')
ingressDomain="${SERVICES_NS}.${kubectl_ctx}.$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}').sslip.io"
# The registry operator will validate that any registries listed in the {Cluster,}DevfileRegistriesList are reachable,
# so we need to wait a little bit until $ingressDomain is reachable.
# The 'my-devfile-registry' host prefix matches the hostnameOverride passed to the Helm install below.
inClusterRegistryUrl="http://my-devfile-registry.$ingressDomain"
helm --kube-context="${kubectl_ctx}" upgrade -n "$SERVICES_NS" --create-namespace --install my-devfile-registry \
  --wait --timeout 5m \
  "$REGISTRY_SUPPORT_LOCAL_REPO/deploy/chart/devfile-registry" \
  --set global.ingress.domain="$ingressDomain" \
  --set global.ingress.class="nginx" \
  --set hostnameOverride="my-devfile-registry"
curl -kL "$inClusterRegistryUrl" --retry 60 --retry-all-errors --retry-max-time 600 --retry-delay 10 --fail > /dev/null
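
# Optional check (endpoint path is an assumption based on the public registries,
# e.g. https://registry.devfile.io/index): querying the stack index confirms the
# ingress and the registry are wired up end to end.
# curl -ksL "$inClusterRegistryUrl/index" | head -c 300; echo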
# The kustomize apply of the registry-operator CRDs errors out on K8s 1.27:
#   + kubectl --context=kind-local-k8s-cluster -n shared-services apply -k https://github.com/devfile/registry-operator/config/crd
#   Warning: 'patchesStrategicMerge' is deprecated. Please use 'patches' instead. Run 'kustomize edit fix' to update your Kustomization automatically.
#   customresourcedefinition.apiextensions.k8s.io/devfileregistries.registry.devfile.io created
#   unable to decode "https://github.com/devfile/registry-operator/config/crd": parsing time "null" as "2006-01-02T15:04:05Z07:00": cannot parse "null" as "2006"
# so fall back to applying the CRD files individually if it fails.
kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -k https://github.com/devfile/registry-operator/config/crd || \
  for r in devfileregistries clusterdevfileregistrieslists devfileregistrieslists; do
    kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f "https://github.com/devfile/registry-operator/raw/main/config/crd/bases/registry.devfile.io_${r}.yaml"
  done
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: registry.devfile.io/v1alpha1
kind: DevfileRegistriesList
metadata:
  name: ns-devfile-registries
spec:
  devfileRegistries:
  - name: ns-devfile-registry
    url: $inClusterRegistryUrl
    skipTLSVerify: true
  - name: ns-devfile-staging
    url: 'https://registry.stage.devfile.io'
EOF

cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: registry.devfile.io/v1alpha1
kind: ClusterDevfileRegistriesList
metadata:
  name: cluster-devfile-registries
spec:
  devfileRegistries:
  - name: cluster-devfile-registry
    url: $inClusterRegistryUrl
    skipTLSVerify: true
  - name: cluster-devfile-staging
    url: 'https://registry.stage.devfile.io'
  - name: cluster-devfile-prod
    url: 'https://registry.devfile.io'
    skipTLSVerify: false
EOF
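
# Optional check: list the registry CRs just created. The resource names come
# from the CRD files applied above (devfileregistrieslists is namespaced,
# clusterdevfileregistrieslists is cluster-scoped).
# kubectl --context="${kubectl_ctx}" get devfileregistrieslists -A
# kubectl --context="${kubectl_ctx}" get clusterdevfileregistrieslists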
# if command -v operator-sdk > /dev/null; then
#   ## In-Cluster Devfile registry
#   if ! kubectl --context="${kubectl_ctx}" get DevfileRegistry > /dev/null 2>&1; then
#     echo "Installing Devfile registry in the cluster.."
#     operator-sdk run bundle quay.io/devfile/registry-operator-bundle:next
#   fi
#   currentNs=$(kubectl --context="${kubectl_ctx}" config view --minify -o jsonpath='{..namespace}')
#   ingressDomain="${kubectl_ctx}.${currentNs:-default}.$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}').nip.io"
#   cat <<EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
# apiVersion: registry.devfile.io/v1alpha1
# kind: DevfileRegistry
# metadata:
#   name: devfile-registry
# spec:
#   devfileIndexImage: quay.io/devfile/devfile-index:next
#   tls:
#     enabled: false
#   k8s:
#     ingressDomain: $ingressDomain
#   telemetry:
#     registryName: test
#     key: $TELEMETRY_KEY
#     # registryViewerWriteKey: $TELEMETRY_KEY
# EOF
#   # The registry operator will validate that any registries listed in the {Cluster,}DevfileRegistriesList are reachable,
#   # so we need to wait a little bit until $ingressDomain is reachable.
#   # The devfile-registry host prefix is added by the operator.
#   inClusterRegistryUrl="http://devfile-registry.$ingressDomain"
#   curl -kL "$inClusterRegistryUrl" --retry 60 --retry-all-errors --retry-max-time 600 --retry-delay 10 --fail > /dev/null
#   cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
# apiVersion: registry.devfile.io/v1alpha1
# kind: DevfileRegistriesList
# metadata:
#   name: ns-devfile-registries
# spec:
#   devfileRegistries:
#   - name: ns-devfile-registry
#     url: $inClusterRegistryUrl
#     skipTLSVerify: true
#   - name: ns-devfile-staging
#     url: 'https://registry.stage.devfile.io'
# EOF
#   cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
# apiVersion: registry.devfile.io/v1alpha1
# kind: ClusterDevfileRegistriesList
# metadata:
#   name: cluster-devfile-registries
# spec:
#   devfileRegistries:
#   - name: cluster-devfile-registry
#     url: $inClusterRegistryUrl
#     skipTLSVerify: true
#   - name: cluster-devfile-staging
#     url: 'https://registry.stage.devfile.io'
#   - name: cluster-devfile-prod
#     url: 'https://registry.devfile.io'
#     skipTLSVerify: false
# EOF
# else
#   echo "[warn] operator-sdk CLI not found"
# fi
# Operator Lifecycle Manager (OLM)
olm_version="0.24.0"
if ! kubectl --context="${kubectl_ctx}" get deployment olm-operator -n olm > /dev/null 2>&1; then
  curl -sL "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v$olm_version/install.sh" | bash -s "v$olm_version"
  echo -n "Waiting for pod app=olm-operator to be created."
  while : ; do
    [ -n "$(kubectl --context="${kubectl_ctx}" -n olm get pod --selector=app=olm-operator 2> /dev/null)" ] && echo && break
    sleep 2
    echo -n "."
  done
  echo -n "Waiting for the OLM Operator pod to be ready (timeout in 600s)..."
  kubectl --context="${kubectl_ctx}" wait --namespace olm \
    --for=condition=ready pod \
    --selector=app=olm-operator \
    --timeout=600s
fi
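
# Optional check: OLM is fully up once the packageserver CSV reports the
# Succeeded phase (the same signal the commented-out manual install below polls for).
# kubectl --context="${kubectl_ctx}" get csv -n olm packageserver -o jsonpath='{.status.phase}{"\n"}'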
# # Manual install from install.sh script, to support running the same operations twice
# olm_url="https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v$olm_version"
# olm_ns=olm
# kubectl --context="${kubectl_ctx}" replace -f "${olm_url}/crds.yaml" ||
#   ( kubectl --context="${kubectl_ctx}" delete -f "${olm_url}/crds.yaml" || true; kubectl --context="${kubectl_ctx}" create -f "${olm_url}/crds.yaml")
# kubectl --context="${kubectl_ctx}" wait --for=condition=Established -f "${olm_url}/crds.yaml"
# kubectl --context="${kubectl_ctx}" apply -f "${olm_url}/olm.yaml"
# kubectl --context="${kubectl_ctx}" rollout status -w deployment/olm-operator --namespace="${olm_ns}"
# kubectl --context="${kubectl_ctx}" rollout status -w deployment/catalog-operator --namespace="${olm_ns}"
# retries=30
# until [[ $retries == 0 ]]; do
#   new_csv_phase=$(kubectl --context="${kubectl_ctx}" get csv -n "${olm_ns}" packageserver -o jsonpath='{.status.phase}' 2>/dev/null || echo "Waiting for CSV to appear")
#   if [[ $new_csv_phase != "$csv_phase" ]]; then
#     csv_phase=$new_csv_phase
#     echo "Package server phase: $csv_phase"
#   fi
#   if [[ "$new_csv_phase" == "Succeeded" ]]; then
#     break
#   fi
#   sleep 10
#   retries=$((retries - 1))
# done
# if [ $retries == 0 ]; then
#   echo "CSV \"packageserver\" failed to reach phase succeeded"
#   exit 1
# fi
# kubectl --context="${kubectl_ctx}" rollout status -w deployment/packageserver --namespace="${olm_ns}"
# sleep 15

#
# Service Binding Operator (SBO), for odo
#
kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/service-binding-operator.yaml
echo -n "Waiting for the SBO pod to be created."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n operators get pod --selector=control-plane=service-binding-controller-manager 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for the SBO pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace operators \
  --for=condition=ready pod \
  --selector=control-plane=service-binding-controller-manager \
  --timeout=600s
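
# Optional check (CRD name is an assumption based on SBO's API group,
# binding.operators.coreos.com): its presence confirms the install registered
# the ServiceBinding type.
# kubectl --context="${kubectl_ctx}" get crd servicebindings.binding.operators.coreos.com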
# Helm install of SBO
# # # Need to install CRDs from OLM
# # olm_version="0.22.0"
# # kubectl --context="${kubectl_ctx}" apply -f https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v$olm_version/deploy/upstream/quickstart/crds.yaml
# # cert-manager is a requirement if OLM is not installed
# cert_manager_version="1.10.0"
# # kubectl --context="${kubectl_ctx}" apply -f https://github.com/cert-manager/cert-manager/releases/download/v$cert_manager_version/cert-manager.crds.yaml
# helm repo add jetstack https://charts.jetstack.io
# helm upgrade --install cert-manager jetstack/cert-manager \
#   --kube-context="${kubectl_ctx}" \
#   --namespace cert-manager \
#   --create-namespace \
#   --version v$cert_manager_version \
#   --set installCRDs=true
# helm repo add service-binding-operator-helm-chart https://redhat-developer.github.io/service-binding-operator-helm-chart/
# helm upgrade --install service-binding-operator-release \
#   --kube-context="${kubectl_ctx}" \
#   --namespace service-binding-operator \
#   --create-namespace \
#   service-binding-operator-helm-chart/service-binding-operator \
#   --version 1.3.0
## Operators
#for op in cloud-native-postgresql redis-operator; do
#  kubectl --context="${kubectl_ctx}" apply -f "https://operatorhub.io/install/${op}.yaml"
#done
kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/stable-v1.19/cloud-native-postgresql.yaml
kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/redis-operator.yaml

echo -n "Waiting for the Cloud Native PostgreSQL Controller Manager pod to be created."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n operators get pod --selector=app.kubernetes.io/name=cloud-native-postgresql 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for the Cloud Native PostgreSQL Controller Manager pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace operators \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/name=cloud-native-postgresql \
  --timeout=600s

echo -n "Waiting for the Redis Controller Manager pod to be created."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n operators get pod --selector=control-plane=redis-operator 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for the Redis Controller Manager pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace operators \
  --for=condition=ready pod \
  --selector=control-plane=redis-operator \
  --timeout=600s
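
# Optional overview (extra check): list the CSVs installed from the OperatorHub
# manifests above; each should eventually reach the Succeeded phase.
# kubectl --context="${kubectl_ctx}" get csv -n operators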
# PostgreSQL Cloud Native
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/cloud-native-postgresql.yaml
# sleep 90
cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: my-postgresql
spec:
  instances: 1
  logLevel: info
  primaryUpdateStrategy: unsupervised
  storage:
    size: 1Gi
EOF
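
# Optional check (the pod label is an assumption about the EDB operator's
# conventions; inspect 'kubectl get pods --show-labels' if it differs):
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" get cluster my-postgresql
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" get pods -l postgresql=my-postgresql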
# # Crunchy Postgresql
# if false; then
# cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
# apiVersion: postgres-operator.crunchydata.com/v1beta1
# kind: PostgresCluster
# metadata:
#   name: crunchy-postgresql
# spec:
#   image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.5-1
#   postgresVersion: 14
#   instances:
#   - name: instance1
#     replicas: 1
#     resources:
#       limits:
#         cpu: 2.0
#         memory: 4Gi
#     dataVolumeClaimSpec:
#       accessModes:
#       - "ReadWriteOnce"
#       resources:
#         requests:
#           storage: 1Gi
#   backups:
#     pgbackrest:
#       image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.40-1
#       repos:
#       - name: repo1
#         volume:
#           volumeClaimSpec:
#             accessModes:
#             - "ReadWriteOnce"
#             resources:
#               requests:
#                 storage: 1Gi
#   patroni:
#     dynamicConfiguration:
#       postgresql:
#         parameters:
#           max_parallel_workers: 2
#           max_worker_processes: 2
#           shared_buffers: 1GB
#           work_mem: 2MB
# EOF
# # Percona MongoDB Operator
# # kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/percona-server-mongodb-operator.yaml
# # sleep 90
# cat <<EOF | kubectl --context="${kubectl_ctx}" -n "my-percona-server-mongodb-operator" apply -f -
# apiVersion: v1
# kind: Secret
# metadata:
#   name: percona-mongodb-secrets
# type: Opaque
# data:
#   MONGODB_BACKUP_USER: YmFja3Vw
#   MONGODB_BACKUP_PASSWORD: YmFja3VwMTIzNDU2
#   MONGODB_CLUSTER_ADMIN_USER: Y2x1c3RlckFkbWlu
#   MONGODB_CLUSTER_ADMIN_PASSWORD: Y2x1c3RlckFkbWluMTIzNDU2
#   MONGODB_CLUSTER_MONITOR_USER: Y2x1c3Rlck1vbml0b3I=
#   MONGODB_CLUSTER_MONITOR_PASSWORD: Y2x1c3Rlck1vbml0b3IxMjM0NTY=
#   MONGODB_USER_ADMIN_USER: dXNlckFkbWlu
#   MONGODB_USER_ADMIN_PASSWORD: dXNlckFkbWluMTIzNDU2
#   PMM_SERVER_API_KEY: dXNlckFkbWluMTIzNDU2
# ---
# apiVersion: psmdb.percona.com/v1
# kind: PerconaServerMongoDB
# metadata:
#   name: percona-mongodb
#   finalizers:
#   - delete-psmdb-pods-in-order
# spec:
#   crVersion: 1.13.0
#   image: 'percona/percona-server-mongodb:5.0.11-10'
#   imagePullPolicy: Always
#   allowUnsafeConfigurations: false
#   updateStrategy: SmartUpdate
#   multiCluster:
#     enabled: false
#     DNSSuffix: svc.clusterset.local
#   upgradeOptions:
#     versionServiceEndpoint: 'https://check.percona.com'
#     apply: disabled
#     schedule: 0 2 * * *
#     setFCV: false
#   secrets:
#     users: percona-mongodb-secrets
#     encryptionKey: percona-mongodb-mongodb-encryption-key
#   pmm:
#     enabled: false
#     image: 'percona/pmm-client:2.30.0'
#     serverHost: monitoring-service
#   replsets:
#   - name: rs0
#     size: 1
#     affinity:
#       antiAffinityTopologyKey: kubernetes.io/hostname
#     podDisruptionBudget:
#       maxUnavailable: 1
#     expose:
#       enabled: false
#       exposeType: ClusterIP
#     resources:
#       limits:
#         cpu: 300m
#         memory: 0.5G
#       requests:
#         cpu: 300m
#         memory: 0.5G
#     volumeSpec:
#       persistentVolumeClaim:
#         resources:
#           requests:
#             storage: 3Gi
#     nonvoting:
#       enabled: false
#       size: 1
#       affinity:
#         antiAffinityTopologyKey: kubernetes.io/hostname
#       podDisruptionBudget:
#         maxUnavailable: 1
#       resources:
#         limits:
#           cpu: 300m
#           memory: 0.5G
#         requests:
#           cpu: 300m
#           memory: 0.5G
#       volumeSpec:
#         persistentVolumeClaim:
#           resources:
#             requests:
#               storage: 3Gi
#     arbiter:
#       enabled: false
#       size: 1
#       affinity:
#         antiAffinityTopologyKey: kubernetes.io/hostname
#   sharding:
#     enabled: false
#     configsvrReplSet:
#       size: 1
#       affinity:
#         antiAffinityTopologyKey: kubernetes.io/hostname
#       podDisruptionBudget:
#         maxUnavailable: 1
#       expose:
#         enabled: false
#         exposeType: ClusterIP
#       resources:
#         limits:
#           cpu: 300m
#           memory: 0.5G
#         requests:
#           cpu: 300m
#           memory: 0.5G
#       volumeSpec:
#         persistentVolumeClaim:
#           resources:
#             requests:
#               storage: 3Gi
#     mongos:
#       size: 1
#       affinity:
#         antiAffinityTopologyKey: kubernetes.io/hostname
#       podDisruptionBudget:
#         maxUnavailable: 1
#       resources:
#         limits:
#           cpu: 300m
#           memory: 0.5G
#         requests:
#           cpu: 300m
#           memory: 0.5G
#       expose:
#         exposeType: ClusterIP
#   backup:
#     enabled: true
#     image: 'percona/percona-backup-mongodb:1.8.1'
#     serviceAccountName: percona-server-mongodb-operator
#     pitr:
#       enabled: false
#       compressionType: gzip
#       compressionLevel: 6
# EOF
# #kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/percona-server-mongodb-operator.yaml
# #sleep 40
# #cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
# #EOF
# fi
# Redis Operator
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/redis-operator.yaml
# sleep 90
cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
apiVersion: redis.redis.opstreelabs.in/v1beta1
kind: Redis
metadata:
  name: my-redis
spec:
  kubernetesConfig:
    image: 'quay.io/opstree/redis:v7.0.5'
    imagePullPolicy: IfNotPresent
  storage:
    volumeClaimTemplate:
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
  redisExporter:
    enabled: true
    image: 'quay.io/opstree/redis-exporter:v1.44.0'
    imagePullPolicy: IfNotPresent
  securityContext:
    runAsUser: 1000
    fsGroup: 1000
EOF
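
# Optional check (pod naming and labels are operator implementation details;
# adjust after inspecting the 'kubectl get pods' output): the operator should
# create a StatefulSet and pod for the standalone Redis instance.
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" get redis my-redis
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" get statefulset,pods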
# MariaDB operator
# kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/mariadb-operator-app.yaml
# sleep 90
# cat << EOF | kubectl --context="${kubectl_ctx}" -n "my-mariadb-operator-app" apply -f -
# apiVersion: mariadb.persistentsys/v1alpha1
# kind: MariaDB
# metadata:
#   name: mariadb
# spec:
#   database: test-db
#   username: db-user
#   password: db-user
#   rootpwd: password
#   size: 1
#   image: 'mariadb/server:10.3'
#   dataStoragePath: /mnt/data
#   dataStorageSize: 1Gi
# EOF

# Knative Operator (might be useful in some cases)
# kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/knative-operator.yaml
# sleep 90
# # for ns in eventing serving; do
# #   kubectl --context="${kubectl_ctx}" create namespace "knative-$ns" --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -
# # done
# cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
# apiVersion: operator.knative.dev/v1beta1
# kind: KnativeEventing
# metadata:
#   name: knative
#   # namespace: knative-eventing
# ---
# apiVersion: operator.knative.dev/v1beta1
# kind: KnativeServing
# metadata:
#   name: knative-serving
#   # namespace: knative-serving
# EOF
#kubectl apply -f https://operatorhub.io/install/knative-operator.yaml
#kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.3.0/serving-crds.yaml
#kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.3.0/serving-core.yaml
#kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.3.0/kourier.yaml
#kubectl patch configmap/config-network \
#  --namespace knative-serving \
#  --type merge \
#  --patch '{"data":{"ingress.class":"kourier.ingress.networking.knative.dev"}}'
##kubectl --namespace kourier-system get service kourier
#kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.3.0/serving-default-domain.yaml