# --- Helm cheatsheet ---
# (lines 7-12 were a byte-for-byte duplicate of lines 1-6; removed)
# Search charts in locally added repositories
helm search repo $REPO_NAME
# Search charts on Artifact Hub
helm search hub $CHART_NAME
# List releases across all namespaces
helm list -A
# List releases in a single namespace
helm list -n $NAMESPACE
# Show the default values of a chart at a given version
helm show values $REPO/$CHART --version $VERSION
# Show the user-supplied values of an installed release
helm get values -n $NAMESPACE $RELEASE
# Merge several kubeconfig files into one flattened config file
KUBECONFIG=<KUBECONFIG_1>:<KUBECONFIG_2> kubectl config view --flatten > <MERGED_KUBECONFIG>
# Print the image(s) used by all containers of a pod
kubectl get pod $POD_NAME -o jsonpath="{.spec.containers[*].image}"
# Open an interactive shell on a node via an ephemeral debug pod
kubectl debug node/$NODE_NAME -it --image=alpine -- sh
# List each node's internal IP, one per line
kubectl get nodes -o jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address}{"\n"}{end}'
# List each node's external (public) IP, one per line
kubectl get nodes -o jsonpath='{range .items[*]}{.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}'
# Same external IPs, comma-separated with a trailing /32 CIDR,
# ready to paste into an nginx-ingress whitelist annotation
kubectl get nodes -o jsonpath='{range .items[*]}{.status.addresses[?(@.type=="ExternalIP")].address}{"/32, "}{end}'
# Delete every job that has at least one failed pod.
# Uses >=1 instead of ==1 so jobs whose failure count grew past 1
# (backoff retries) are matched as well; the unquoted substitution is
# intentional so multiple job names expand into separate arguments.
kubectl delete job $(kubectl get job -o=jsonpath='{.items[?(@.status.failed>=1)].metadata.name}')
# Workarounds with cat and tee when kubectl cp is not possible
# Copy a single file from pod to host:
# stream the file over the exec channel and redirect it locally
kubectl exec -i $POD_NAME -c $CONTAINER_NAME -- cat /etc/passwd > ./container_passwd
# Copy a single file from host to pod:
# feed the file on stdin (input redirection instead of a useless `cat |` pipeline)
# and write it inside the container with tee, discarding tee's stdout echo
kubectl exec -i $POD_NAME -c $CONTAINER_NAME -- tee /tmp/host_passwd >/dev/null < /etc/passwd
# List velero backups stored in one specific backup storage location
velero get backups -l velero.io/storage-location=scaleway-keycloak
# Export a BackupStorageLocation stripped of server-managed fields and switch
# it to read-only, e.g. so a second cluster can consume the same bucket safely.
# Fixed: 'ReaOnly' typo -> 'ReadOnly', and '+=' (string append, which would
# produce e.g. 'ReadWriteReadOnly') -> '=' (plain assignment).
kubectl get backupstoragelocations.velero.io -n velero $LOCATION_NAME -o yaml | yq 'del(.status) | del(.metadata.annotations.kubectl*,.metadata.creationTimestamp,.metadata.generation,.metadata.resourceVersion,.metadata.uid) | .spec.accessMode = "ReadOnly"' -
# Excluding some resources which won't work on another cluster
# Restore a backup into one namespace, excluding cluster-specific resources.
# Fixed: the second --exclude-resources list was an orphaned line (the previous
# line had no trailing '\'), so it would have run as a broken standalone
# command; it is now a separate, commented-out variant.
# Variant 1: also exclude cert-manager Certificates/Challenges (re-issued on the target)
velero create restore --from-backup=$BACKUP \
--include-namespaces=$NS \
--exclude-resources=CustomResourceDefinition,Certificate,Challenge,CertificateRequest,Order,Ingress
# Variant 2: keep Certificates, exclude only the transient cert-manager resources
# velero create restore --from-backup=$BACKUP \
# --include-namespaces=$NS \
# --exclude-resources=CustomResourceDefinition,CertificateRequest,Order,Ingress
# Example: restore only the persistent volumes/claims from a keycloak backup
velero create restore --from-backup=keycloak-daily-20230224033035 \
--include-namespaces=keycloak \
--include-resources=pvc,pv
# Sometimes velero restores get stuck in "Phase: New" for a long time,
# so velero needs to be restarted — see https://github.com/vmware-tanzu/velero/issues/3216
# Restart the velero deployment (workaround for restores stuck in Phase: New)
kubectl rollout restart deployment/velero -n velero