Website
GitHub
Docs
Main Features
Release 1.26
Kubectl Plugin
Quick Start
Services
Backups
Backup Plugin
Failure
Failover
Reading Logs
Logs Plugin
Rolling Upgrades
PostgreSQL Upgrades
Recovery
Replica Cluster
Distributed Topology
GitHub Discussions
Blog
Slack
LinkedIn
kc(){
  ## Run kubectl against the kind cluster of a given region.
  ## Usage: kc <REGION> <command...>   e.g. `kc eu get pods`
  if [[ $# -lt 1 ]]
  then
    # Usage error goes to stderr so it never pollutes captured output.
    echo "kc <REGION> <command>" >&2
    return 1
  fi
  # `local` keeps the variable from leaking into the sourcing shell
  # (the original used a global REGION).
  local region=${1}
  shift
  # Quote "$@" so arguments containing spaces or globs reach kubectl intact.
  kubectl --context="kind-k8s-${region}" "$@"
}
kcnpg(){
  ## Run the `kubectl cnpg` plugin against the kind cluster of a given region.
  ## Usage: kcnpg <REGION> <command...>   e.g. `kcnpg eu status my-cluster`
  if [[ $# -lt 1 ]]
  then
    # Usage error goes to stderr so it never pollutes captured output.
    echo "kcnpg <REGION> <command>" >&2
    return 1
  fi
  # `local` keeps the variable from leaking into the sourcing shell
  # (the original used a global REGION).
  local region=${1}
  shift
  # Quote "$@" so arguments containing spaces or globs reach the plugin intact.
  kubectl cnpg --context="kind-k8s-${region}" "$@"
}
# Define per-region shortcuts: keu/kus -> kc, kcnpgeu/kcnpgus -> kcnpg.
# A single quoted word to `alias` is enough; the original `eval` round-trip
# was unnecessary and fragile.
for region in eu us
do
  alias "k${region}=kc ${region}"
  alias "kcnpg${region}=kcnpg ${region}"
done
git clone [email protected]:cloudnative-pg/cnpg-playground.git
sudo sysctl fs.inotify.max_user_watches=524288 fs.inotify.max_user_instances=512
cd cnpg-playground
./scripts/setup.sh
export KUBECONFIG=<path-to>/cnpg-playground/k8s/kube-config.yaml
kubectl config use-context kind-k8s-eu
. ./bash_aliases.sh
curl -sSfL \
https://github.com/cloudnative-pg/cloudnative-pg/raw/main/hack/install-cnpg-plugin.sh | \
sudo sh -s -- -b /usr/local/bin
kubectl cnpg install generate --control-plane --version 1.25.1 \
| kubectl apply -f - --server-side
kubectl get pods -w
# Create a minimal 3-instance Cluster manifest.
# NOTE: the YAML indentation below is significant — the source had every key
# flattened to column 0, which is not a valid Cluster manifest.
cat <<EOF > ./cluster-example.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
EOF
kubectl apply -f ./cluster-example.yaml
# Inspect everything the operator created for this cluster.
kubectl get clusters,pods,pvc,svc,ep,secrets
# Create a 3-instance Cluster with barman object-store backups to MinIO.
# NOTE: the YAML indentation below is significant — the source had every key
# flattened to column 0, which is not a valid Cluster manifest.
cat <<EOF > ./cluster-example-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example-backup
spec:
  instances: 3
  storage:
    size: 1Gi
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/
      endpointURL: http://minio-eu:9000
      s3Credentials:
        accessKeyId:
          name: minio-eu
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: minio-eu
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
EOF
kubectl apply -f ./cluster-example-backup.yaml
kubectl cnpg status cluster-example-backup
# Open an interactive psql session to the "app" database.
kubectl cnpg psql cluster-example-backup -- app
# The next three lines are typed at the psql prompt, not the shell:
# create a table, insert 1M rows, then quit with \q.
CREATE TABLE numbers(x int);
INSERT INTO numbers (SELECT generate_series(1,1000000));
\q
# Take an on-demand base backup and inspect the resulting Backup resource.
kubectl cnpg backup cluster-example-backup
kubectl get backup
kubectl get backup -o yaml
kubectl cnpg status cluster-example-backup
# --- Failure simulation: delete the primary pod and watch failover ---
# find primary
kubectl get cluster cluster-example-backup
kubectl get pods -w
# Replace the trailing "#" with the primary's instance number (e.g. -1).
kubectl delete pod cluster-example-backup-#
kubectl cnpg status cluster-example-backup
# find primary
kubectl get cluster cluster-example-backup
kubectl get pods -w
# Deleting the PVC as well forces the instance to be re-created from scratch.
kubectl delete pod,pvc cluster-example-backup-#
kubectl cnpg status cluster-example-backup
# Raw logs from one instance, then structured logs pretty-printed by the plugin.
kubectl logs cluster-example-backup-#
kubectl cnpg logs cluster cluster-example-backup \
| kubectl cnpg logs pretty
# --- Rolling upgrades: operator 1.26, then PostgreSQL minor + major bumps ---
# Monitor resources
kubectl get pod -n cnpg-system
# Upgrade the operator itself to 1.26.0 (server-side apply).
kubectl cnpg install generate --control-plane --version 1.26.0 \
| kubectl apply -f - --server-side
# Create cluster example YAML file
# NOTE: YAML indentation restored — the source heredoc was flattened to
# column 0, which is not a valid Cluster manifest.
cat <<EOF > ./cluster-example.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:16.3
  instances: 3
  storage:
    size: 1Gi
EOF
kubectl get pods -w
kubectl apply -f ./cluster-example.yaml
# Minor version bump 16.3 -> 16.9.
sed -i 's/16\.3/16\.9/' cluster-example.yaml
cat cluster-example.yaml
kubectl apply -f ./cluster-example.yaml \
&& kubectl cnpg status cluster-example
# Edit Cluster manifest file and verify
# Major version bump 16.9 -> 17.5.
sed -i 's/16\.9/17\.5/' cluster-example.yaml
cat cluster-example.yaml
kubectl apply -f ./cluster-example.yaml \
&& kubectl cnpg status cluster-example
# Create a cluster manifest with bootstrap method: recovery
# Bootstraps a new cluster from the object-store backup written by
# cluster-example-backup (external cluster "origin").
# NOTE: YAML indentation restored — the source heredoc was flattened to
# column 0, which is not a valid Cluster manifest.
cat <<EOF > ./cluster-recovery.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-recovery
spec:
  instances: 3
  storage:
    size: 1Gi
  bootstrap:
    recovery:
      source: origin
  externalClusters:
    - name: origin
      barmanObjectStore:
        serverName: cluster-example-backup
        destinationPath: s3://backups/
        endpointURL: http://minio-eu:9000
        s3Credentials:
          accessKeyId:
            name: minio-eu
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: minio-eu
            key: ACCESS_SECRET_KEY
        wal:
          compression: gzip
EOF
kubectl get pods -w
kubectl apply -f ./cluster-recovery.yaml
kubectl cnpg status cluster-recovery
# Interactive psql against the recovered cluster's "app" database.
kubectl cnpg psql cluster-recovery -- app
# Typed at the psql prompt: verify the restored rows are present.
# (The source read `SELECT COUNT(*) numbers;`, which only aliases COUNT(*)
# as "numbers" and never touches the table — FROM was missing.)
SELECT COUNT(*) FROM numbers;
# --- Distributed topology: primary cluster in EU, replica cluster in US ---
# Ensure to connect to the k8s-eu cluster
kubectl config use-context kind-k8s-eu
# NOTE: replace <path-to> with the actual checkout location.
kubectl apply -f <path-to>/cnpg-playground/demo/yaml/eu/pg-eu-legacy.yaml
kubectl cnpg status pg-eu
# Take a base backup so the US side can bootstrap from the object store.
kubectl cnpg backup pg-eu
# Get context and set it
./scripts/info.sh
kubectl config use-context kind-k8s-us
kubectl get pods -n cnpg-system
kubectl config current-context
# Install the operator in the US cluster as well.
kubectl cnpg install generate \
--control-plane | \
kubectl apply -f - --server-side
kubectl get deployment -n cnpg-system
kubectl get crd | grep cnpg
kubectl get pods -w
kubectl apply -f <path-to>/cnpg-playground/demo/yaml/us/pg-us-legacy.yaml
kubectl cnpg status pg-us
kubectl get cluster pg-us -o yaml
kubectl --context kind-k8s-eu get cluster pg-eu -o yaml
# Monitor pods in both clusters
kubectl --context kind-k8s-eu get pods -w
kubectl --context kind-k8s-us get pods -w
# Switchover: edit the EU cluster (presumably to demote it — confirm against
# the CNPG distributed-topology docs), read its demotion token, then edit the
# US cluster to promote it using that token.
kubectl --context kind-k8s-eu edit cluster pg-eu
kubectl --context kind-k8s-eu get cluster pg-eu -o jsonpath='{.status.demotionToken}'
kubectl --context kind-k8s-us edit cluster pg-us