#!/bin/bash
set -o errexit
set -x

cluster_name=${1:-local-k8s-cluster}
registry_port=${2:-35000}

# Create the registry container unless it already exists
registry_name='local-registry.localhost'
if ! k3d registry list | grep -q "$registry_name"; then
  k3d registry create "${registry_name}" --port "$registry_port"
fi
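
# Optional example: pushing a locally built image to this registry from the host.
# "*.localhost" names resolve to 127.0.0.1 on most systems, so the registry is
# reachable as k3d-${registry_name}:${registry_port}. The image name "my-app:dev"
# below is purely illustrative:
#   docker tag my-app:dev "k3d-${registry_name}:${registry_port}/my-app:dev"
#   docker push "k3d-${registry_name}:${registry_port}/my-app:dev"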

export KUBECONFIG="$HOME/.kube/config"
if k3d cluster list | grep -q "$cluster_name"; then
  k3d cluster delete "${cluster_name}"
fi
k3s_image_arg="" | |
if command -v skopeo &> /dev/null; then | |
k3s_image=$(skopeo --override-os linux list-tags docker://rancher/k3s | jq --raw-output '.Tags[]' | grep -E '^v.*-k3s1-amd64$' | tail -n1) | |
k3s_image_arg="--image=docker.io/rancher/k3s:$k3s_image" | |
fi | |

k3d cluster create "${cluster_name}" \
  --registry-use "k3d-${registry_name}:$registry_port" \
  $k3s_image_arg \
  --k3s-arg '--disable=traefik@server:*' \
  --k3s-node-label 'ingress-ready=true@agent:*' \
  --agents 2 \
  --api-port 0.0.0.0:36443 \
  --servers 1 \
  -p "80:80" \
  -p "443:443"
  # Extra args to pass when installing Calico (see the commented section below)
  # instead of the default Flannel backend:
  # --k3s-arg '--flannel-backend=none@server:*' \
  # --k3s-arg '--disable-network-policy@server:*' \

kubectl_ctx="k3d-${cluster_name}"
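
# Optional sanity check: the cluster should come up with 1 server and 2 agent nodes.
# kubectl --context="${kubectl_ctx}" get nodes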

# Communicate the local registry to external local tools
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "k3d-${registry_name}:$registry_port"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
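
# Tools implementing KEP-1755 discover the registry by reading that ConfigMap;
# it can be inspected with:
#   kubectl --context="${kubectl_ctx}" -n kube-public get configmap local-registry-hosting -o yaml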

# Update Helm repos in case they are already present
helm repo update

# ## Install Calico
# helm repo add projectcalico https://docs.tigera.io/calico/charts
# kubectl --context="${kubectl_ctx}" create namespace tigera-operator --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -
# helm --kube-context="${kubectl_ctx}" \
#   upgrade --install calico \
#   projectcalico/tigera-operator \
#   --version v3.26.1 \
#   --namespace tigera-operator

# cf. https://kind.sigs.k8s.io/docs/user/ingress/#ingress-nginx
ingress_nginx_controller_tag="main"
kubectl --context="${kubectl_ctx}" \
  apply -f \
  "https://raw.githubusercontent.com/kubernetes/ingress-nginx/${ingress_nginx_controller_tag}/deploy/static/provider/kind/deploy.yaml"

### Wait for the ingress-nginx controller pod to be created
echo -n "Waiting for pod app.kubernetes.io/component=controller to be created..."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n ingress-nginx get pod --selector=app.kubernetes.io/component=controller 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for NGINX Ingress Controller pod to be ready (timeout in 600s)..." | |
kubectl --context="${kubectl_ctx}" wait --namespace ingress-nginx \ | |
--for=condition=ready pod \ | |
--selector=app.kubernetes.io/component=controller \ | |
--timeout=600s | |
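
# Optional smoke test: ports 80/443 are mapped to the host, so the controller's
# default backend should already answer (an HTTP 404 is the expected response here):
#   curl -s -o /dev/null -w '%{http_code}\n' http://localhost/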

#
# BEGIN user creation
#
# Create a limited user that has access to a single namespace. Useful for testing permissions.
#
USER_CERTS=~/.local/var/k3d-certs/${kubectl_ctx}
mkdir -p "$USER_CERTS"
kubectl --context="${kubectl_ctx}" create namespace user-ns --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f -
openssl genrsa -out "$USER_CERTS"/user.key 2048
openssl req -new -key "$USER_CERTS"/user.key -out "$USER_CERTS"/user.csr -subj "/CN=user/O=redhat"
docker container cp "k3d-${cluster_name}-server-0":/var/lib/rancher/k3s/server/tls/server-ca.crt "$USER_CERTS"/k8s_ca.crt
docker container cp "k3d-${cluster_name}-server-0":/var/lib/rancher/k3s/server/tls/server-ca.key "$USER_CERTS"/k8s_ca.key
openssl x509 -req -in "$USER_CERTS"/user.csr -CA "$USER_CERTS"/k8s_ca.crt -CAkey "$USER_CERTS"/k8s_ca.key -CAcreateserial -out "$USER_CERTS"/user.crt -days 500
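# Kubernetes derives the identity from the certificate subject: CN becomes the
# user name ("user") and O the group ("redhat").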

# Remove previous entries from ~/.kube/config
cp -vf ~/.kube/config ~/.kube/config.bak
yq eval 'del( .contexts[] | select(.name == "k3d-'"${cluster_name}"'-user-context"))' ~/.kube/config > ~/.kube/config.tmp
mv -f ~/.kube/config.tmp ~/.kube/config
yq eval 'del( .users[] | select(.name == "user"))' ~/.kube/config > ~/.kube/config.tmp
mv -f ~/.kube/config.tmp ~/.kube/config
chmod 600 ~/.kube/config
kubectl config set-credentials user --client-certificate="$USER_CERTS"/user.crt --client-key="$USER_CERTS"/user.key
kubectl config set-context "k3d-${cluster_name}-user-context" --cluster="k3d-$cluster_name" --namespace=user-ns --user=user
# kubectl --context="k3d-${cluster_name}-user-context" get pods || true

# Create the role for managing deployments
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: user-ns
  name: user-role
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["deployments", "replicasets", "pods", "services"]
  verbs: ["*"]
  # verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
EOF

# Bind the role to the user
cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: user-role-binding
  namespace: user-ns
subjects:
- kind: User
  name: user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: user-role
  apiGroup: rbac.authorization.k8s.io
EOF
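
# Optional check with kubectl's built-in authorization probe: the first command
# should print "yes", the second "no" (other namespaces are off-limits):
#   kubectl --context="k3d-${cluster_name}-user-context" auth can-i create deployments
#   kubectl --context="k3d-${cluster_name}-user-context" auth can-i create deployments -n default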
echo "Context 'k3d-${cluster_name}-user-context' created with user that has access to a limited namespace: user-ns" | |
# END User creation | |
# | |
# Services in a single namespace | |
# | |
SERVICES_NS="shared-services" | |
kubectl --context="${kubectl_ctx}" create namespace "$SERVICES_NS" --dry-run=client -o yaml | kubectl --context="${kubectl_ctx}" apply -f - | |

## Devfile Registry
REGISTRY_SUPPORT_LOCAL_REPO_PARENT="$HOME/.local/var/devfile"
REGISTRY_SUPPORT_LOCAL_REPO="$REGISTRY_SUPPORT_LOCAL_REPO_PARENT/registry-support"
mkdir -p "$REGISTRY_SUPPORT_LOCAL_REPO_PARENT"
git clone --depth=1 https://github.com/devfile/registry-support "$REGISTRY_SUPPORT_LOCAL_REPO" 2>/dev/null || \
  git -C "$REGISTRY_SUPPORT_LOCAL_REPO" pull
# currentNs=$(kubectl --context="${kubectl_ctx}" config view --minify -o jsonpath='{..namespace}')
ingressDomain="${SERVICES_NS}.${kubectl_ctx}.$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}').sslip.io"
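# sslip.io resolves any hostname that embeds an IP address (e.g. foo.172.17.0.1.sslip.io)
# back to that IP, so this yields a resolvable ingress hostname pointing at the
# Docker bridge gateway without touching /etc/hosts.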

# The registry operator will validate that any registries listed in the
# {Cluster,}DevfileRegistriesList are reachable, so we need to wait until
# $ingressDomain is reachable before creating those lists below.
# The devfile-registry prefix is added by the operator.
inClusterRegistryUrl="http://my-devfile-registry.$ingressDomain"
helm --kube-context="${kubectl_ctx}" upgrade -n "$SERVICES_NS" --create-namespace --install my-devfile-registry \
  --wait --timeout 5m \
  "$REGISTRY_SUPPORT_LOCAL_REPO/deploy/chart/devfile-registry" \
  --set global.ingress.domain="$ingressDomain" \
  --set global.ingress.class="nginx" \
  --set hostnameOverride="my-devfile-registry"
curl -kL "$inClusterRegistryUrl" --retry 60 --retry-all-errors --retry-max-time 600 --retry-delay 10 --fail > /dev/null
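
# Optional check: list the stacks served by the registry. The /index endpoint is
# part of the devfile registry REST API (assumed to be exposed by this chart):
#   curl -skL "$inClusterRegistryUrl/index" | jq '.[].name'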

# Applying the CRD kustomization errors out on K8s 1.27; hence the per-file fallback below.
# Logs:
#+ kubectl --context=k3d-local-k8s-cluster -n shared-services apply -k https://github.com/devfile/registry-operator/config/crd
## Warning: 'patchesStrategicMerge' is deprecated. Please use 'patches' instead. Run 'kustomize edit fix' to update your Kustomization automatically.
## customresourcedefinition.apiextensions.k8s.io/devfileregistries.registry.devfile.io created
## unable to decode "https://github.com/devfile/registry-operator/config/crd": parsing time "null" as "2006-01-02T15:04:05Z07:00": cannot parse "null" as "2006"
## unable to decode "https://github.com/devfile/registry-operator/config/crd": parsing time "null" as "2006-01-02T15:04:05Z07:00": cannot parse "null" as "2006"
kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -k https://github.com/devfile/registry-operator/config/crd || \
  for r in devfileregistries clusterdevfileregistrieslists devfileregistrieslists; do
    kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f "https://github.com/devfile/registry-operator/raw/main/config/crd/bases/registry.devfile.io_${r}.yaml"
  done

cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: registry.devfile.io/v1alpha1
kind: DevfileRegistriesList
metadata:
  name: ns-devfile-registries
spec:
  devfileRegistries:
    - name: ns-devfile-registry
      url: $inClusterRegistryUrl
      skipTLSVerify: true
    - name: ns-devfile-staging
      url: 'https://registry.stage.devfile.io'
EOF

cat <<EOF | kubectl --context="${kubectl_ctx}" apply -f -
apiVersion: registry.devfile.io/v1alpha1
kind: ClusterDevfileRegistriesList
metadata:
  name: cluster-devfile-registries
spec:
  devfileRegistries:
    - name: cluster-devfile-registry
      url: $inClusterRegistryUrl
      skipTLSVerify: true
    - name: cluster-devfile-staging
      url: 'https://registry.stage.devfile.io'
    - name: cluster-devfile-prod
      url: 'https://registry.devfile.io'
      skipTLSVerify: false
EOF

# Operator Lifecycle Manager (OLM)
olm_version="0.25.0"
if ! kubectl --context="${kubectl_ctx}" get deployment olm-operator -n olm > /dev/null 2>&1; then
  curl -sL "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v$olm_version/install.sh" | bash -s "v$olm_version"
  echo -n "Waiting for pod app=olm-operator to be created..."
  while : ; do
    [ -n "$(kubectl --context="${kubectl_ctx}" -n olm get pod --selector=app=olm-operator 2> /dev/null)" ] && echo && break
    sleep 2
    echo -n "."
  done
  echo -n "Waiting for OLM Operator pod to be ready (timeout in 600s)..."
  kubectl --context="${kubectl_ctx}" wait --namespace olm \
    --for=condition=ready pod \
    --selector=app=olm-operator \
    --timeout=600s
fi
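
# Optional check: once OLM is up, its catalogs expose the packagemanifests API:
#   kubectl --context="${kubectl_ctx}" get packagemanifests -n olm | head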

#
# Service Binding Operator (SBO), for odo
#
kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/service-binding-operator.yaml
echo -n "Waiting for SBO pod to be created..."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n operators get pod --selector=control-plane=service-binding-controller-manager 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for SBO pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace operators \
  --for=condition=ready pod \
  --selector=control-plane=service-binding-controller-manager \
  --timeout=600s
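
# Optional check: the operator's ClusterServiceVersion should reach phase "Succeeded":
#   kubectl --context="${kubectl_ctx}" get csv -n operators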

## Operators
# for op in postgresql cloud-native-postgresql percona-server-mongodb-operator redis-operator; do
kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/stable-v1.19/cloud-native-postgresql.yaml
# kubectl --context="${kubectl_ctx}" apply -f https://operatorhub.io/install/redis-operator.yaml
echo -n "Waiting for Cloud Native PostgreSQL Controller Manager to be created..."
while : ; do
  [ -n "$(kubectl --context="${kubectl_ctx}" -n operators get pod --selector=app.kubernetes.io/name=cloud-native-postgresql 2> /dev/null)" ] && echo && break
  sleep 2
  echo -n "."
done
echo -n "Waiting for Cloud Native PostgreSQL Controller Manager pod to be ready (timeout in 600s)..."
kubectl --context="${kubectl_ctx}" wait --namespace operators \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/name=cloud-native-postgresql \
  --timeout=600s
# echo -n "Wait for Redis Controller Manager to be created." | |
# while : ; do | |
# [ ! -z "`kubectl --context="${kubectl_ctx}" -n operators get pod --selector=control-plane=redis-operator 2> /dev/null`" ] && echo && break | |
# sleep 2 | |
# echo -n "." | |
# done | |
# echo -n "Waiting for Redis Controller Manager Pod pod to be ready (timeout in 600s)..." | |
# kubectl --context="${kubectl_ctx}" wait --namespace operators \ | |
# --for=condition=ready pod \ | |
# --selector=control-plane=redis-operator \ | |
# --timeout=600s | |

# PostgreSQL Cloud Native
cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
apiVersion: postgresql.k8s.enterprisedb.io/v1
kind: Cluster
metadata:
  name: my-postgresql
spec:
  imageName: quay.io/enterprisedb/postgresql:latest-multiarch
  imagePullPolicy: Always
  instances: 1
  logLevel: info
  primaryUpdateStrategy: unsupervised
  storage:
    size: 1Gi
EOF
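
# Optional check: the operator reports instance status on the Cluster resource;
# by cloud-native-postgresql convention, application credentials land in the
# my-postgresql-app Secret and the read-write Service is my-postgresql-rw:
#   kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" get cluster my-postgresql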

# # Redis Operator
# # kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f https://operatorhub.io/install/redis-operator.yaml
# # sleep 90
# cat << EOF | kubectl --context="${kubectl_ctx}" -n "$SERVICES_NS" apply -f -
# apiVersion: redis.redis.opstreelabs.in/v1beta1
# kind: Redis
# metadata:
#   name: my-redis
# spec:
#   kubernetesConfig:
#     image: 'quay.io/opstree/redis:v7.0.5'
#     imagePullPolicy: IfNotPresent
#   storage:
#     volumeClaimTemplate:
#       spec:
#         accessModes:
#           - ReadWriteOnce
#         resources:
#           requests:
#             storage: 1Gi
#   redisExporter:
#     enabled: true
#     image: 'quay.io/opstree/redis-exporter:v1.44.0'
#     imagePullPolicy: IfNotPresent
#   securityContext:
#     runAsUser: 1000
#     fsGroup: 1000
# EOF