export OADP_NS=velero
export DATA_NS=rfelix-w-1
export STORAGE_CN=ocs-storagecluster-cephfs
export CLOUD_S3_CREDENTIAL_NAME=cloud-credentials
export CLOUD_S3_CREDENTIAL_KEY=cloud
export CLOUD_S3_API_PROFILE=default
export CLOUD_S3_API_USER="minio"
export CLOUD_S3_API_PASS="minio123"
export CLOUD_S3_PROFILE=default
export CLOUD_S3_REGION=my-minio
export CLOUD_S3_BUCKET=my-demo
export CLOUD_S3_PREFIX=workshop
export MINIO_IMAGE=quay.io/minio/minio:RELEASE.2022-08-02T23-59-16Z
# derive the apps wildcard suffix by stripping the first label from the console hostname
export OCP_DNS_NAME=$(oc whoami --show-console | awk -F. '{sub($1 FS, ".")}1')
export MINIO_API_DNS_NAME="minio${OCP_DNS_NAME}"
export MINIO_CONSOLE="minio-console${OCP_DNS_NAME}"
export CLOUD_S3_URL=${MINIO_API_DNS_NAME}
# Option A: install MinIO with Helm
# create a project for segregation
$ oc new-project minio
$ oc version
Client Version: v4.2.0-alpha.0-1420-gf1f09a3
Server Version: 4.8.47
Kubernetes Version: v1.21.11+31d53a1
$ helm version
version.BuildInfo{Version:"v3.6.0", GitCommit:"7f2df6467771a75f5646b7f12afb408590ed1755", GitTreeState:"dirty", GoVersion:"go1.16.4"}
$ helm repo add minio https://charts.min.io/
$ helm search repo minio
NAME CHART VERSION APP VERSION DESCRIPTION
minio/minio 4.0.14 RELEASE.2022-09-01T23-53-36Z Multi-Cloud Object Storage
helm install --namespace minio --set rootUser=${CLOUD_S3_API_USER},rootPassword=${CLOUD_S3_API_PASS} --generate-name minio/minio
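# The release name is generated by --generate-name; a quick sanity check (sketch,
# assuming the chart defaults) is to list the release and watch the pods come up:
helm list -n minio
oc get pod -n minio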
# view values
cat -n << EOF
MINIO_IMAGE=$MINIO_IMAGE
OCP_DNS_NAME=$OCP_DNS_NAME
MINIO_API_DNS_NAME=$MINIO_API_DNS_NAME
MINIO_CONSOLE=$MINIO_CONSOLE
CLOUD_S3_API_USER=$CLOUD_S3_API_USER
CLOUD_S3_API_PASS=$CLOUD_S3_API_PASS
EOF
# Option B: deploy MinIO from plain manifests (same project)
$ oc new-project minio
cat <<EOF | oc apply -f -
---
# Source: secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: minio-creds
  labels:
    app: minio
stringData:
  MINIO_ROOT_USER: "minio"
  MINIO_ROOT_PASSWORD: "minio123"
---
# Source: pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  storageClassName: ${STORAGE_CN}
  resources:
    requests:
      storage: 10Gi
---
# Source: services.yaml
apiVersion: v1
kind: Service
metadata:
  name: minio
spec:
  ports:
    - protocol: TCP
      name: minio
      port: 9000
  selector:
    app: minio
---
# Source: services.yaml
apiVersion: v1
kind: Service
metadata:
  name: minio-console
spec:
  ports:
    - protocol: TCP
      name: minio-console
      port: 9001
  selector:
    app: minio
---
# Source: deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: minio
  labels:
    app: minio
spec:
  replicas: 1
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
        - name: minio
          image: "${MINIO_IMAGE}"
          args: ['server', '/data', '--console-address', ':9001']
          ports:
            - name: minio
              containerPort: 9000
          volumeMounts:
            - name: minio-volume
              mountPath: /data
          env:
            - name: MINIO_SERVER_URL
              value: "https://${MINIO_API_DNS_NAME}"
            - name: MINIO_BROWSER_REDIRECT_URL
              value: "https://${MINIO_CONSOLE}"
            - name: MINIO_ROOT_USER
              valueFrom:
                secretKeyRef:
                  name: minio-creds
                  key: MINIO_ROOT_USER
            - name: MINIO_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: minio-creds
                  key: MINIO_ROOT_PASSWORD
      volumes:
        - name: minio-volume
          persistentVolumeClaim:
            claimName: minio-pvc
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: minio
spec:
  host: ${MINIO_API_DNS_NAME}
  to:
    kind: Service
    name: minio
    weight: 100
  port:
    targetPort: minio
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Allow
  wildcardPolicy: None
---
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: minio-console
spec:
  host: ${MINIO_CONSOLE}
  to:
    kind: Service
    name: minio-console
    weight: 100
  port:
    targetPort: minio-console
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Allow
  wildcardPolicy: None
EOF
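# Once applied, wait for the rollout and probe MinIO's health endpoint through the
# edge route (-k because the route presents the ingress certificate; expect HTTP 200):
oc rollout status deploy/minio -n minio
curl -ks -o /dev/null -w '%{http_code}\n' https://${MINIO_API_DNS_NAME}/minio/health/live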
Create the OADP operator in the ${OADP_NS} namespace, either through the web console or with the manifests below.
Reference - https://github.com/redhat-cop/gitops-catalog/tree/main/openshift-api-for-data-protection-operator
cat <<EOF | oc apply -f -
---
apiVersion: v1
kind: Namespace
metadata:
  name: ${OADP_NS}
EOF
cat <<EOF | oc apply -f -
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: oadp-og
  namespace: ${OADP_NS}
spec:
  targetNamespaces:
    - ${OADP_NS}
EOF
cat <<EOF | oc apply -f -
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  labels:
    operators.coreos.com/redhat-oadp-operator.${OADP_NS}: ""
  name: redhat-oadp-operator
  namespace: ${OADP_NS}
spec:
  channel: stable-1.1
  installPlanApproval: Automatic
  name: redhat-oadp-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  startingCSV: oadp-operator.v1.1.0
EOF
oc get storageclass
oc get sub,csv,installplan,pod -n $OADP_NS
oc get volumesnapshot -n ${DATA_NS}
# restic runs as a DaemonSet, so expect one restic pod per (non-infra) worker node
$ oc get nodes --no-headers | egrep -v '(infra|master)'
ip-10-0-145-149.us-east-2.compute.internal Ready worker 87m v1.21.11+31d53a1
ip-10-0-186-132.us-east-2.compute.internal Ready worker 87m v1.21.11+31d53a1
ip-10-0-240-103.us-east-2.compute.internal Ready worker 87m v1.21.11+31d53a1
$ oc get nodes --no-headers | egrep -v '(infra|master)' | wc -l
cat <<EOF >01_cloud-credentials.txt
[${CLOUD_S3_API_PROFILE}]
aws_access_key_id=${CLOUD_S3_API_USER}
aws_secret_access_key=${CLOUD_S3_API_PASS}
EOF
oc create secret generic ${CLOUD_S3_CREDENTIAL_NAME} -n ${OADP_NS} --from-file ${CLOUD_S3_CREDENTIAL_KEY}=01_cloud-credentials.txt
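# The DataProtectionApplication below assumes the ${CLOUD_S3_BUCKET} bucket already
# exists in MinIO. A sketch of creating it with the AWS CLI through the route, reusing
# the credentials file written above (--no-verify-ssl because the route presents the
# ingress certificate):
export AWS_SHARED_CREDENTIALS_FILE=$PWD/01_cloud-credentials.txt
aws --profile ${CLOUD_S3_API_PROFILE} --region ${CLOUD_S3_REGION} --endpoint-url "https://${CLOUD_S3_URL}" --no-verify-ssl s3 mb "s3://${CLOUD_S3_BUCKET}"
aws --profile ${CLOUD_S3_API_PROFILE} --region ${CLOUD_S3_REGION} --endpoint-url "https://${CLOUD_S3_URL}" --no-verify-ssl s3 ls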
SVC_MINIO=$(oc get svc minio -o go-template --template='{{.metadata.name}}.{{.metadata.namespace}}.svc:9000' -n minio)
cat <<EOF | oc apply -f -
---
kind: DataProtectionApplication
apiVersion: oadp.openshift.io/v1alpha1
metadata:
  name: velero
  namespace: ${OADP_NS}
spec:
  backupImages: true
  backupLocations:
    - velero:
        config:
          profile: ${CLOUD_S3_PROFILE}
          region: ${CLOUD_S3_REGION}
          s3ForcePathStyle: 'true'
          publicUrl: "https://${CLOUD_S3_URL}"
          s3Url: "http://${SVC_MINIO}"
          insecureSkipTLSVerify: "true"
        credential:
          key: ${CLOUD_S3_CREDENTIAL_KEY}
          name: ${CLOUD_S3_CREDENTIAL_NAME}
        default: true
        objectStorage:
          bucket: ${CLOUD_S3_BUCKET}
          prefix: ${CLOUD_S3_PREFIX}
        provider: aws
  configuration:
    restic:
      enable: true
    velero:
      defaultPlugins:
        - openshift
        - aws
        - csi
      featureFlags:
        - EnableCSI
  snapshotLocations:
    - velero:
        config:
          profile: ${CLOUD_S3_PROFILE}
          region: ${CLOUD_S3_REGION}
        provider: aws
EOF
oc get pod -n ${OADP_NS}
NAME READY STATUS RESTARTS AGE
openshift-adp-controller-manager-845c4dc57-hzr8t 1/1 Running 0 45m
restic-gbs5j 1/1 Running 0 32s
restic-s7sjr 1/1 Running 0 32s
restic-tn4hj 1/1 Running 0 32s
velero-f55985d5f-q8hfm 1/1 Running 0 32s
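# Before creating any backup, confirm the BackupStorageLocation defined by the DPA
# reports Available (either command works):
oc get backupstoragelocation -n ${OADP_NS}
velero backup-location get -n ${OADP_NS}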
$ oc get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nas-csi csi.trident.netapp.io Delete Immediate false 140d
thin (default) kubernetes.io/vsphere-volume Delete Immediate false 146d
thin-csi csi.vsphere.vmware.com Delete WaitForFirstConsumer true 61d
thin2 kubernetes.io/vsphere-volume Delete Immediate false 120d
vsphere kubernetes.io/vsphere-volume Delete Immediate false 56d
vsphere-csi csi.vsphere.vmware.com Delete WaitForFirstConsumer true 34d
$ cat <<EOF | oc apply -f -
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: netapp-snapclass
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
  labels:
    velero.io/csi-volumesnapshot-class: "true"
driver: csi.trident.netapp.io
deletionPolicy: Retain
EOF
$ cat <<EOF | oc apply -f -
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: vsphere-snapclass
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
  labels:
    velero.io/csi-volumesnapshot-class: "true"
driver: csi.vsphere.vmware.com
deletionPolicy: Retain
EOF
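# Velero's CSI plugin selects the VolumeSnapshotClass that matches the PVC's driver
# and carries the velero.io/csi-volumesnapshot-class=true label; verify it is in place:
oc get volumesnapshotclass -l velero.io/csi-volumesnapshot-class=true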
oc new-project $DATA_NS
cat <<EOF | oc apply -f -
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
  namespace: ${DATA_NS}
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  storageClassName: ${STORAGE_CN}
  resources:
    requests:
      storage: 1Gi
EOF
$ oc get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
my-pvc Bound pvc-5bc3674b-b1cc-46cb-a417-e50368b61c48 1Gi RWO ocs-storagecluster-cephfs 14s
$ oc new-app image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest
$ oc set volume deploy/httpd --add --type pvc --mount-path /csi --name nas-volume --claim-name my-pvc
# Get info before backup
oc get pod
NAME READY STATUS RESTARTS AGE
httpd-759cbd686d-d4kng 1/1 Running 0 118s
httpd-769b6b5c5-dhqv8 0/1 ContainerCreating 0 8s
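# Wait for the new ReplicaSet to finish rolling out before writing test data:
oc rollout status deploy/httpd -n $DATA_NS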
# Create data
$ oc exec -it $(oc get pod -o name) -- bash -c 'ls -lah /csi/'
$ oc exec -it $(oc get pod -o name) -- bash -c 'date > /csi/e'
$ oc exec -it $(oc get pod -o name) -- bash -c 'ls -lah /csi/'
total 512
drwxrwxrwx. 2 root root 1 Sep 9 13:44 .
dr-xr-xr-x. 1 root root 72 Sep 9 13:39 ..
-rw-r--r--. 1 1000970000 root 29 Sep 9 13:44 e
$ oc exec -it $(oc get pod -o name) -- bash -c 'find /csi/ -type f -print0 | sort -z | xargs -0 sha1sum | sha1sum'
fd10ae358867a72a0eaac67818c7ba37a778e999 -
$ oc exec -it $(oc get pod -o name) -- bash -c 'du -sh /csi/'
512 /csi/
# label it
$ oc label deploy,svc,pvc --all -n $DATA_NS backup=yes
persistentvolumeclaim/my-pvc labeled
service/httpd labeled
deployment.apps/httpd labeled
$ oc get deploy,svc,pvc -l backup=yes -n $DATA_NS
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/httpd 1/1 1 1 4m2s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/httpd ClusterIP 172.30.156.211 <none> 8080/TCP,8443/TCP 4m2s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/my-pvc Bound pvc-5bc3674b-b1cc-46cb-a417-e50368b61c48 1Gi RWO ocs-storagecluster-cephfs 4m51s
# Check the environment before creating the backup
$ velero backup get -n $OADP_NS
$ oc get backup -n ${OADP_NS}
No resources found in velero namespace.
# create the backup
$ velero backup create --snapshot-volumes=true --exclude-resources=imagestream.image.openshift.io,image.openshift.io --include-namespaces=${DATA_NS} --ttl='3h0m0s' -n $OADP_NS d-001
velero backup describe d-001
Name: d-001
Namespace: velero
Labels: velero.io/storage-location=velero-1
Annotations: velero.io/source-cluster-k8s-gitversion=v1.21.11+31d53a1
velero.io/source-cluster-k8s-major-version=1
velero.io/source-cluster-k8s-minor-version=21
Phase: Failed (run `velero backup logs d-001` for more information)
Errors: 0
Warnings: 0
Namespaces:
Included: rfelix-w-1
Excluded: <none>
Resources:
Included: *
Excluded: imagestream.image.openshift.io, image.openshift.io
Cluster-scoped: auto
Label selector: <none>
Storage Location: velero-1
Velero-Native Snapshot PVs: true
TTL: 3h0m0s
Hooks: <none>
Backup Format Version: 1.1.0
Started: 2022-09-09 14:37:14 -0300 -03
Completed: 2022-09-09 14:37:15 -0300 -03
Expiration: 2022-09-09 17:37:14 -0300 -03
Velero-Native Snapshots: <none included>
$ velero backup logs d-001
An error occurred: Get "https://minio.apps.cluster-fngqh.fngqh.sandbox651.opentlc.com/my-demo/workshop/backups/d-001/d-001-logs.gz?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=minio%2F20220909%2Fmy-minio%2Fs3%2Faws4_request&X-Amz-Date=20220909T173827Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=3d875e275c6bc129e79adab9327543a4421e51a8a69c7c72ae69d8fe7f1544b3": x509: “ingress-operator@1662738308” certificate is not trusted
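# The velero client downloads logs through the publicUrl route, whose certificate is
# signed by the cluster ingress CA and is not trusted locally. Two hedged workarounds:
# skip verification, or hand the client the CA bundle (the ./ingress-ca.crt path is
# hypothetical - export the ingress CA to a local file first):
velero backup logs d-001 -n $OADP_NS --insecure-skip-tls-verify
velero backup logs d-001 -n $OADP_NS --cacert ./ingress-ca.crt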
$ cat <<EOF | oc apply -f -
---
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: t-101
  namespace: ${OADP_NS}
spec:
  defaultVolumesToRestic: true
  snapshotVolumes: false
  includedNamespaces:
    - ${DATA_NS}
  labelSelector:
    matchLabels:
      "backup": "yes"
  ttl: 3h0m1s
EOF
$ date; oc get backup -n ${OADP_NS} -o jsonpath='{.items..status}' | jq
Mon Sep 5 16:59:53 -03 2022
{
"completionTimestamp": "2022-09-05T19:58:52Z",
"expiration": "2022-09-05T22:58:20Z",
"formatVersion": "1.1.0",
"phase": "Completed",
"progress": {
"itemsBackedUp": 6,
"totalItems": 6
},
"startTimestamp": "2022-09-05T19:58:19Z",
"version": 1
}
# note: the output below was captured from a later run (backup t-102) in a different
# environment (namespace rfelix-oadp, NetApp storage), hence the different names
$ velero backup describe t-102 -n $OADP_NS
Name: t-102
Namespace: rfelix-oadp
Labels: velero.io/storage-location=velero-netapp-1
Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"velero.io/v1","kind":"Backup","metadata":{"annotations":{},"name":"t-102","namespace":"rfelix-oadp"},"spec":{"defaultVolumesToRestic":true,"includedNamespaces":["rfelix-storage-1"],"labelSelector":{"matchLabels":{"backup":"yes"}},"snapshotVolumes":false,"ttl":"3h0m1s"}}
velero.io/source-cluster-k8s-gitversion=v1.23.5+3afdacb
velero.io/source-cluster-k8s-major-version=1
velero.io/source-cluster-k8s-minor-version=23
Phase: Completed
Errors: 0
Warnings: 0
Namespaces:
Included: rfelix-storage-1
Excluded: <none>
Resources:
Included: *
Excluded: <none>
Cluster-scoped: auto
Label selector: backup=yes
Storage Location: velero-netapp-1
Velero-Native Snapshot PVs: false
TTL: 3h0m1s
Hooks: <none>
Backup Format Version: 1.1.0
Started: 2022-09-05 16:58:19 -0300 -03
Completed: 2022-09-05 16:58:52 -0300 -03
Expiration: 2022-09-05 19:58:20 -0300 -03
Total items to be backed up: 6
Items backed up: 6
Velero-Native Snapshots: <none included>
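# To confirm that Restic actually captured the volume data (not just the Kubernetes
# objects), the --details flag lists the pod volume backups:
velero backup describe t-102 --details -n $OADP_NS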
$ cat <<EOF | oc apply -f -
---
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: t-102
  namespace: ${OADP_NS}
spec:
  backupName: t-102
  namespaceMapping:
    ${DATA_NS}: rfelix-restore-1
EOF
date; oc get restore -n ${OADP_NS} -o jsonpath='{.items..status}' | jq
Mon Sep 5 16:31:07 -03 2022
{
"completionTimestamp": "2022-09-05T19:30:04Z",
"phase": "Completed",
"progress": {
"itemsRestored": 35,
"totalItems": 35
},
"startTimestamp": "2022-09-05T19:28:52Z",
"warnings": 7
}
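# The 7 warnings are worth inspecting before trusting the restore (they are often
# pre-existing cluster-scoped resources):
velero restore describe t-102 -n ${OADP_NS}
velero restore logs t-102 -n ${OADP_NS}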
$ oc project rfelix-restore-1
# test at 16:00
$ oc exec -it $(oc get pod -o name) -- bash -c 'find /nas-csi/ -type f -print0 | sort -z | xargs -0 sha1sum | sha1sum'
ca50724009215273b7e8c7d1ea2ac888f20e7750 -
$ oc exec -it $(oc get pod -o name) -- bash -c 'du -sh /nas-csi/'
4.0K /nas-csi/
$ oc exec -it $(oc get pod -o name) -- bash -c 'du -sh /nas-csi/'
Defaulted container "httpd" out of: httpd, restic-wait (init)
8.0K /nas-csi/
$ oc exec -it $(oc get pod -o name) -- bash -c 'ls -lah /nas-csi/'
Defaulted container "httpd" out of: httpd, restic-wait (init)
total 8.0K
drwxrwxrwx. 3 99 99 4.0K Sep 5 19:29 .
dr-xr-xr-x. 1 root root 76 Sep 5 19:29 ..
drwxr-xr-x. 2 99 99 4.0K Sep 5 19:29 .velero
-rw-r--r--. 1 1001300000 99 29 Sep 5 19:01 d
$ oc exec -it $(oc get pod -o name) -- bash -c 'cat /nas-csi/d'
Defaulted container "httpd" out of: httpd, restic-wait (init)
Mon Sep 5 19:01:09 UTC 2022
- https://docs.openshift.com/container-platform/4.10/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.html#oadp-about-backup-snapshot-locations_installing-oadp-mcg
- https://velero.io/docs/v1.7/locations/
- https://velero.io/docs/v1.9/restic/#how-backup-and-restore-work-with-restic
- https://github.com/openshift/oadp-operator/
- Plugins https://docs.openshift.com/container-platform/4.11/backup_and_restore/application_backup_and_restore/oadp-features-plugins.html#oadp-plugins_oadp-features-plugins
- Install - https://github.com/aws/aws-cli
- Generic Examples - https://library.netapp.com/ecmdocs/ECMP12031314/html/GUID-45466061-7CD6-42FB-A06F-EFD372498F09.html
- Output Format - https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-output-format.html
- s3api reference - https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/index.html
- s3 rm reference - https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/rm.html
- s3 ls reference - https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/ls.html
- https://access.redhat.com/documentation/en-us/openshift_container_platform/4.11/html-single/backup_and_restore/index#application-backup-and-restore
- https://examples.openshift.pub/misc/velero/
- https://www.digitalocean.com/community/tutorials/how-to-back-up-and-restore-a-kubernetes-cluster-on-digitalocean-using-velero
- https://docs.pivotal.io/tkgi/1-13/velero-stateless-label.html
- Change hostnames in a Route: see the openshift.io/host.generated annotation
- Known issue - https://velero.io/docs/v1.9/troubleshooting/#known-issue-with-restoring-loadbalancer-service
Explore DR without volume snapshots: Restic restores the data directly from S3.
StorageClass requirements
oc get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer true 102m
gp2-csi ebs.csi.aws.com Delete WaitForFirstConsumer true 102m
ocs-storagecluster-ceph-rbd openshift-storage.rbd.csi.ceph.com Delete Immediate true 73m
ocs-storagecluster-cephfs openshift-storage.cephfs.csi.ceph.com Delete Immediate true 73m
openshift-storage.noobaa.io openshift-storage.noobaa.io/obc Delete Immediate false 67m