Skip to content

Instantly share code, notes, and snippets.

@okikio
Created March 22, 2026 00:55
Show Gist options
  • Select an option

  • Save okikio/b23c435c0278acf5bf2dd15ce5ce5c91 to your computer and use it in GitHub Desktop.

Select an option

Save okikio/b23c435c0278acf5bf2dd15ce5ce5c91 to your computer and use it in GitHub Desktop.
kubeblocks-falkordb-redis-graph
#!/usr/bin/env bash
set -euo pipefail
# check-k8s-netfilter-arch.sh
#
# Checks common Kubernetes networking prerequisites on Arch-based systems:
# - br_netfilter module exists
# - br_netfilter module is loaded
# - net.bridge.bridge-nf-call-iptables = 1
# - net.bridge.bridge-nf-call-ip6tables = 1
# - net.ipv4.ip_forward = 1
#
# Modes:
# default Check only, print guidance if something is missing
# --fix Apply runtime fixes, ask before persisting
# --persist Apply runtime fixes and persist without prompting
# --help Show help
usage() {
  # Print the command-line help text to stdout.
  cat <<'HELP_TEXT'
Usage:
check-k8s-netfilter-arch.sh [--fix | --persist | --help]
Options:
--fix Apply runtime fixes. Ask before persisting.
--persist Apply runtime fixes and persist them without prompting.
--help Show this help text.
HELP_TEXT
}
have_cmd() {
  # Return success when the named command resolves in the current PATH.
  local cmd_name="$1"
  command -v "$cmd_name" > /dev/null 2>&1
}
get_sysctl_value() {
  # Echo the current value of a sysctl key; emit nothing (and still succeed)
  # when the key is unknown or sysctl is unavailable.
  local key_name="$1"
  if ! sysctl -n "$key_name" 2>/dev/null; then
    true
  fi
}
is_module_loaded() {
  # Return success when the given kernel module appears in lsmod output.
  #
  # $1 - module name to look for (defaults to br_netfilter, preserving the
  #      original behavior for existing zero-argument callers).
  local module="${1:-br_netfilter}"
  # Single awk pass replaces the former awk|grep pipeline: match the first
  # column exactly and use the exit status to report the result.
  lsmod | awk -v mod="$module" '$1 == mod { found = 1 } END { exit !found }'
}
print_status() {
  # Emit one status line of the form "LABEL: message words".
  local label="$1"
  shift
  local message="$*"
  printf '%s: %s\n' "$label" "$message"
}
ask_yes_no() {
  # Prompt the user with "$1 [y/N]: " and return success only for a single
  # literal 'y' or 'Y' answer; anything else (including EOF) means no.
  local question="$1"
  local reply
  read -r -p "$question [y/N]: " reply
  case "$reply" in
    [Yy]) return 0 ;;
    *) return 1 ;;
  esac
}
apply_runtime_fixes() {
  # Apply the runtime (non-persistent) fixes selected by the four boolean
  # flag arguments, each the string "true" or "false":
  #   $1 - load the br_netfilter module
  #   $2 - enable net.bridge.bridge-nf-call-iptables
  #   $3 - enable net.bridge.bridge-nf-call-ip6tables
  #   $4 - enable net.ipv4.ip_forward
  local want_module="$1"
  local -a wanted=("$2" "$3" "$4")
  local -a keys=(
    net.bridge.bridge-nf-call-iptables
    net.bridge.bridge-nf-call-ip6tables
    net.ipv4.ip_forward
  )
  local i

  if [[ "$want_module" == "true" ]]; then
    print_status "INFO" "Loading br_netfilter"
    sudo modprobe br_netfilter
  fi

  # The three sysctl fixes only differ by key, so drive them from a table.
  for i in 0 1 2; do
    if [[ "${wanted[$i]}" == "true" ]]; then
      print_status "INFO" "Setting ${keys[$i]}=1"
      sudo sysctl -w "${keys[$i]}=1" >/dev/null
    fi
  done
}
persist_fixes() {
  # Persist the module load and sysctl settings across reboots by writing
  # the standard systemd drop-in files, then reload sysctl configuration.
  print_status "INFO" "Writing /etc/modules-load.d/br_netfilter.conf"
  printf '%s\n' br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf >/dev/null

  print_status "INFO" "Writing /etc/sysctl.d/99-kubernetes.conf"
  printf '%s\n' \
    'net.bridge.bridge-nf-call-iptables = 1' \
    'net.bridge.bridge-nf-call-ip6tables = 1' \
    'net.ipv4.ip_forward = 1' \
    | sudo tee /etc/sysctl.d/99-kubernetes.conf >/dev/null

  print_status "INFO" "Reloading sysctl settings"
  sudo sysctl --system >/dev/null
}
# Parse the single optional mode flag; default is check-only.
mode="check"
case "${1:-}" in
  "") ;;
  --fix)     mode="fix" ;;
  --persist) mode="persist" ;;
  --help|-h) usage; exit 0 ;;
  *)         usage >&2; exit 2 ;;
esac
# All three inspection tools must be present before we can report anything;
# bail out on the first one that is missing.
for required_cmd in modinfo lsmod sysctl; do
  if ! have_cmd "$required_cmd"; then
    print_status "FAIL" "$required_cmd is not available"
    exit 1
  fi
done
# Track which prerequisites still need attention.
need_fix=false
need_module_load=false
need_bridge_iptables=false
need_bridge_ip6tables=false
need_ip_forward=false

# The module must at least exist in the running kernel; nothing else can be
# fixed at runtime without it, so this failure is terminal.
if ! modinfo br_netfilter >/dev/null 2>&1; then
  print_status "FAIL" "br_netfilter module was not found in the running kernel"
  cat <<'EOF'
This kernel does not appear to provide br_netfilter.
On Arch-based systems, use a kernel/modules package that includes it.
EOF
  exit 1
fi
print_status "OK" "br_netfilter module exists"

if ! is_module_loaded; then
  print_status "FAIL" "br_netfilter module is not loaded"
  need_fix=true
  need_module_load=true
else
  print_status "OK" "br_netfilter module is loaded"
fi

bridge_iptables="$(get_sysctl_value net.bridge.bridge-nf-call-iptables)"
if [[ "$bridge_iptables" != "1" ]]; then
  print_status "FAIL" "net.bridge.bridge-nf-call-iptables = ${bridge_iptables:-missing}"
  need_fix=true
  need_bridge_iptables=true
else
  print_status "OK" "net.bridge.bridge-nf-call-iptables = 1"
fi

bridge_ip6tables="$(get_sysctl_value net.bridge.bridge-nf-call-ip6tables)"
if [[ "$bridge_ip6tables" != "1" ]]; then
  print_status "FAIL" "net.bridge.bridge-nf-call-ip6tables = ${bridge_ip6tables:-missing}"
  need_fix=true
  need_bridge_ip6tables=true
else
  print_status "OK" "net.bridge.bridge-nf-call-ip6tables = 1"
fi

ip_forward="$(get_sysctl_value net.ipv4.ip_forward)"
if [[ "$ip_forward" != "1" ]]; then
  print_status "FAIL" "net.ipv4.ip_forward = ${ip_forward:-missing}"
  need_fix=true
  need_ip_forward=true
else
  print_status "OK" "net.ipv4.ip_forward = 1"
fi
# Nothing failed: report success and stop before any fix logic runs.
if [[ "$need_fix" == false ]]; then
printf '\n'
print_status "OK" "All Kubernetes networking prerequisites are satisfied"
exit 0
fi
printf '\n'
# At least one prerequisite failed; act according to the requested mode.
case "$mode" in
# check: print copy-pasteable remediation guidance and exit non-zero so
# callers (CI, other scripts) can detect the unhealthy state.
check)
print_status "INFO" "This host is missing one or more Kubernetes networking prerequisites"
cat <<'EOF'
To apply runtime fixes now:
sudo modprobe br_netfilter
sudo sysctl -w net.bridge.bridge-nf-call-iptables=1
sudo sysctl -w net.bridge.bridge-nf-call-ip6tables=1
sudo sysctl -w net.ipv4.ip_forward=1
To persist these settings:
echo br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf
sudo tee /etc/sysctl.d/99-kubernetes.conf >/dev/null <<'EOT'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOT
sudo sysctl --system
Or rerun this script with:
--fix
--persist
EOF
exit 1
;;
# fix: apply only the runtime fixes that are actually needed, then ask
# interactively whether to also persist them across reboots.
fix)
apply_runtime_fixes \
"$need_module_load" \
"$need_bridge_iptables" \
"$need_bridge_ip6tables" \
"$need_ip_forward"
printf '\n'
print_status "OK" "Runtime fixes applied"
if ask_yes_no "Persist these settings across reboot?"; then
persist_fixes
print_status "OK" "Persistent configuration written"
else
print_status "INFO" "Persistent configuration not written"
fi
;;
# persist: non-interactive variant — apply runtime fixes and always write
# the persistent configuration.
persist)
apply_runtime_fixes \
"$need_module_load" \
"$need_bridge_iptables" \
"$need_bridge_ip6tables" \
"$need_ip_forward"
persist_fixes
printf '\n'
print_status "OK" "Runtime and persistent fixes applied"
;;
esac
#!/usr/bin/env bash
#
# Install KubeBlocks plus the FalkorDB addon on a cluster that uses the
# local-path provisioner (e.g. Talos single-node / lab clusters).
#
# Steps:
#   1. Install KubeBlocks CRDs and the local-path storage provisioner.
#   2. Relax Pod Security on local-path-storage and tolerate control-plane taints.
#   3. Install the KubeBlocks controller via Helm.
#   4. Install and verify the FalkorDB addon via kbcli.
#
# Requires: kubectl, helm, kbcli, and a reachable cluster context.
set -euo pipefail

readonly VERSION="1.0.2"
readonly NAMESPACE="kb-system"

# Fail fast if any required CLI tool is missing.
for tool in kubectl helm kbcli; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    echo "ERROR: required command '$tool' is not installed" >&2
    exit 1
  fi
done

echo "==> Installing KubeBlocks version v${VERSION}"
echo "==> Installing CRDs..."
# NOTE: the original URLs pointed at the '521000.best' mirror domains (a
# scrape/proxy artifact); restored to the upstream GitHub hosts.
# 'kubectl create' fails if the CRDs already exist; '|| true' keeps reruns
# idempotent.
kubectl create -f "https://github.com/apecloud/kubeblocks/releases/download/v${VERSION}/kubeblocks_crds.yaml" || true
kubectl apply -f "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.35/deploy/local-path-storage.yaml" || true

# Label the local-path-storage namespace with privileged Pod Security
# Standards so its helper pods are allowed to run.
kubectl label namespace local-path-storage \
  pod-security.kubernetes.io/enforce=privileged \
  pod-security.kubernetes.io/audit=privileged \
  pod-security.kubernetes.io/warn=privileged \
  --overwrite

echo "==> Patching local-path-provisioner tolerations for Talos control-plane taints..."
kubectl -n local-path-storage patch deployment local-path-provisioner --type merge -p '{
  "spec": {
    "template": {
      "spec": {
        "tolerations": [
          {
            "key": "node-role.kubernetes.io/control-plane",
            "operator": "Exists",
            "effect": "NoSchedule"
          },
          {
            "key": "node-role.kubernetes.io/master",
            "operator": "Exists",
            "effect": "NoSchedule"
          }
        ]
      }
    }
  }
}'

echo "==> Adding Helm repo..."
# Both the controller chart and the addon charts live in the same Helm repo.
helm repo add kubeblocks https://apecloud.github.io/helm-charts || true
helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts || true
helm repo update

echo "==> Installing KubeBlocks controller..."
helm upgrade --install kubeblocks kubeblocks/kubeblocks \
  --create-namespace \
  --namespace "${NAMESPACE}" \
  --version "${VERSION}"

echo "==> Waiting for local-path-provisioner..."
kubectl rollout status deployment/local-path-provisioner -n local-path-storage --timeout=120s

echo "==> Waiting for KubeBlocks pods..."
kubectl rollout status deployment/kubeblocks -n "${NAMESPACE}" --timeout=120s

echo "==> Updating kbcli addon index..."
kbcli addon index update kubeblocks

echo "==> Checking FalkorDB addon versions..."
kbcli addon search falkordb

echo "==> Installing FalkorDB addon with kbcli..."
kbcli addon install falkordb \
  --version="1.0.1" \
  --namespace="${NAMESPACE}"

echo "==> Verifying addon..."
# grep's non-zero exit aborts the script if the addon is absent — intended.
kbcli addon list | grep falkordb

echo "==> Verifying installation..."
kubectl get pods -n local-path-storage
kubectl get pods -n "${NAMESPACE}"
kubectl get clusterdefinitions || true

echo "==> KubeBlocks installation complete!"
# Namespace that holds all datastore Clusters defined below.
apiVersion: v1
kind: Namespace
metadata:
  name: datastores
---
# Redis for ingestion and append-style stream workflows.
#
# Why replication instead of cluster mode?
# Redis Streams is a data model feature, not a separate deployment topology.
# For an append-heavy ingestion pipeline, a primary + replica topology keeps the
# mental model simple, gives you HA via Sentinel, and avoids the extra routing
# and slotting complexity of Redis Cluster mode.
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: redis-streams
  namespace: datastores
spec:
  terminationPolicy: Delete
  clusterDef: redis
  topology: replication
  componentSpecs:
    - name: redis
      disableExporter: false
      replicas: 2
      resources:
        requests:
          cpu: "250m"
          memory: "256Mi"
        limits:
          cpu: "500m"
          memory: "512Mi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: local-path
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 3Gi
    - name: redis-sentinel
      replicas: 3
      resources:
        requests:
          cpu: "100m"
          memory: "128Mi"
        limits:
          cpu: "250m"
          memory: "256Mi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: local-path
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 2Gi
---
# Redis Cluster mode for cache keys, distributed rate limits, sessions, and
# other horizontally partitioned ephemeral workloads.
#
# This uses the documented sharding shape with 3 shards and 2 replicas per
# shard. That gives you the classic 3 primaries + 3 replicas layout.
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: redis-cache
  namespace: datastores
spec:
  terminationPolicy: Delete
  shardings:
    - name: shard
      shards: 3
      template:
        name: redis
        componentDef: redis-cluster-8
        replicas: 2
        resources:
          requests:
            cpu: "250m"
            memory: "256Mi"
          limits:
            cpu: "500m"
            memory: "512Mi"
        volumeClaimTemplates:
          - name: data
            spec:
              storageClassName: local-path
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 3Gi
        # Per-pod NodePort service so each cluster member can advertise a
        # client-reachable address (required for Redis Cluster redirects).
        services:
          - name: redis-advertised
            podService: true
            serviceType: NodePort
---
# FalkorDB for graph workloads.
#
# Replication is the safer default here because graph reads can continue from
# secondaries after failover, while your application still gets one logical
# primary for writes. It is heavier than standalone, but much closer to a real
# service shape.
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
  name: falkordb-graph
  namespace: datastores
spec:
  terminationPolicy: Delete
  clusterDef: falkordb
  topology: replication
  componentSpecs:
    - name: falkordb
      serviceVersion: "4.12.5"
      disableExporter: false
      replicas: 2
      resources:
        requests:
          cpu: "250m"
          memory: "512Mi"
        limits:
          cpu: "750m"
          memory: "1Gi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: local-path
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 3Gi
    # Sentinel-style component for the falkordb replication topology.
    - name: falkordb-sent
      serviceVersion: "4.12.5"
      replicas: 3
      resources:
        requests:
          cpu: "100m"
          memory: "128Mi"
        limits:
          cpu: "250m"
          memory: "256Mi"
      volumeClaimTemplates:
        - name: data
          spec:
            storageClassName: local-path
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 2Gi
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment