How to remove CEPH. OS: CentOS 8 Stream, CEPH Version: Pacific (v16.2.9)
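This gist contains two scripts. The first runs on the admin node and orchestrates the teardown; the second, remove_CEPH2_rm_cluster_and_files.sh, is downloaded and executed on every node (including the admin) to remove the local cluster state and wipe the data disks.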
#!/bin/bash
################# README #################
# Please run this script on the admin node!      #
# Set ssh_remove=true to remove all SSH keys     #
# from all hosts.                                #
ssh_remove=false
##########################################
# https://www.flamingbytes.com/posts/uninstall-ceph/
#################################################
# The variable targetDisk (in the second script #
# below) must be configured. Run:               #
# > lsblk                                       #
# and find out which disks should be wiped.     #
#################################################
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to print a highlighted message
    printf "${YELLOW}$1${NC}\n"
}
alert_noNewline() { # Same, but without a trailing newline
    printf "${YELLOW}$1${NC}"
}
complete() {
    printf "${YELLOW}Complete!\n\n${NC}"
}
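# Tear down the local container registry used during the install. (Assumption:
# /tmp/install-registry and the htpasswd 'admin' user were created by the
# matching install script.)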
cd /tmp/install-registry &&\
make clean
htpasswd -D /etc/containers/registries.d/.htpasswd admin
cd ~
dnf install -y jq
slave_name=( $(ceph orch host ls --format json-pretty | jq -r '.[] | select(.labels | index("_admin") | not) | .hostname') ) # Every host except the admin node, which is running this script.
slave_IP=( $(ceph orch host ls --format json-pretty | jq -r '.[] | select(.labels | index("_admin") | not) | .addr') )       # Every host except the admin node, which is running this script.
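# Illustrative only (hypothetical values): given host-ls output like
#   [ { "hostname": "admin1", "addr": "172.22.4.101", "labels": ["_admin"] },
#     { "hostname": "node1",  "addr": "172.22.4.102", "labels": [] } ]
# the filters above would yield slave_name=(node1) and slave_IP=(172.22.4.102).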
# Mark every OSD down and destroy it. `ceph osd ls` prints the id of every
# existing OSD, so this also copes with non-contiguous ids.
for i in $(ceph osd ls)
do
    ceph osd down $i && ceph osd destroy $i --force
done
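# Note: `ceph osd purge <id> --yes-i-really-mean-it` would additionally remove
# each OSD from the CRUSH map and delete its auth key, but since the whole
# cluster is torn down below, `destroy` is sufficient here.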
# Remaining daemons should be checked here.
# Remove every host from the orchestrator.
hosts=( $(ceph orch host ls --format json-pretty | jq -r '.[].hostname') )
for host in "${hosts[@]}"
do
    ceph orch host drain ${host}
    ceph orch host rm ${host} --force
done
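# `ceph orch host drain` only schedules daemon removal. A possible hardening
# (a sketch, not part of the original flow) would be to wait between drain and
# rm until the host reports no daemons, e.g.:
#   until [[ $(ceph orch ps ${host} --format json | jq 'length') -eq 0 ]]; do sleep 5; done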
# Remove the CEPH configuration and packages from every slave node.
for slaveIP in "${slave_IP[@]}"
do
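    # -f backgrounds each ssh session, so all slave nodes are cleaned up in parallel.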
    ssh -f root@${slaveIP} "\
        wget https://gist.github.com/TyeolRik/861e95e7e47e05f0272c40ae28113dc7/raw/remove_CEPH2_rm_cluster_and_files.sh &>/dev/null &&\
        sh remove_CEPH2_rm_cluster_and_files.sh &>/dev/null &&\
        rm -f remove_CEPH2_rm_cluster_and_files.sh
    "
done
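# Run the same cleanup script on this admin node, in the foreground.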
wget https://gist.github.com/TyeolRik/861e95e7e47e05f0272c40ae28113dc7/raw/remove_CEPH2_rm_cluster_and_files.sh &&\
sh remove_CEPH2_rm_cluster_and_files.sh &&\
rm -f remove_CEPH2_rm_cluster_and_files.sh
# Wait until every slave node reports that it has finished.
alert "Checking the removal status of the slave nodes"
for i in ${!slave_IP[@]}
do
    alert_noNewline "${slave_name[$i]}(${slave_IP[$i]}) "
    until [[ $(ssh root@${slave_IP[$i]} 'cat /tmp/ceph_remove_status.txt 2>/dev/null') = true ]]; do sleep 1; done
    alert "is READY"
done
if [[ "${ssh_remove}" = true ]]
then
    for slaveIP in "${slave_IP[@]}"
    do
        ssh -f root@${slaveIP} "rm -f /etc/yum.repos.d/ceph.repo ~/.ssh/*"
    done
fi
for i in ${!slave_IP[@]}
do
    ssh root@${slave_IP[$i]} 'reboot'
done
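# Clean up the installer artifacts on the admin node. Note that ~/.ssh/* is
# removed here regardless of the ssh_remove flag above.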
rm -rf CEPH_Install.sh cluster_spec.yaml password.txt ~/.ssh/* /etc/yum.repos.d/ceph.repo /tmp/ceph_install /tmp/install-registry
reboot
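
##############################################################################
# remove_CEPH2_rm_cluster_and_files.sh                                       #
# Fetched and executed on every node (including the admin) by the script     #
# above. It removes the local cluster state and wipes the data disks.        #
##############################################################################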
#!/bin/bash
NC='\033[0m' # No Color
YELLOW='\033[1;33m'
alert() { # Helper to print a highlighted message
    printf "${YELLOW}$1${NC}\n"
}
alert_noNewline() { # Same, but without a trailing newline
    printf "${YELLOW}$1${NC}"
}
complete() {
    printf "${YELLOW}Complete!\n\n${NC}"
}
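# Status-file protocol: write 'false' now and 'true' as the very last step, so
# the admin node's wait loop can poll /tmp/ceph_remove_status.txt to see when
# this node has finished.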
echo 'false' > /tmp/ceph_remove_status.txt
# Remove the local registry entries that the install added to registries.conf.
sed -i '/location = \"172.22.4.101:5000\"/d' /etc/containers/registries.conf &&\
sed -i '/insecure = true/d' /etc/containers/registries.conf &&\
sed -i '$ d' /etc/containers/registries.conf # Delete the last line, which should now be the orphaned [[registry]] header (assumes the block was appended at the end of the file).
# Remove Cluster
cephadm rm-cluster --fsid $(cephadm ls | jq -r '.[0].fsid') --force
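# If `cephadm ls` reports daemons from more than one cluster, loop over the
# unique fsids instead (sketch):
#   for fsid in $(cephadm ls | jq -r '.[].fsid' | sort -u); do
#       cephadm rm-cluster --fsid ${fsid} --force
#   done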
cephadm ls # Verify that no daemons are left.
# targetDisk is the array of disks that need to be wiped.
targetDisk=($(lsblk -d -n -o NAME))
# Drop the OS disk (sda) and the optical drive (sr0) from the list.
for i in "${!targetDisk[@]}"; do if [[ ${targetDisk[i]} = 'sda' || ${targetDisk[i]} = 'sr0' ]]; then unset 'targetDisk[i]'; fi; done
targetDisk=("${targetDisk[@]}") # Re-index the array after unset.
# targetDisk=(sdb sdc sdd nvme0n1 nvme0n2 nvme0n3) # It should end up looking like this.
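# Three-step wipe per disk: sgdisk zaps the GPT/MBR structures, dd zeroes the
# first 100 MiB to clear any leftover LVM/bluestore labels, and blkdiscard
# TRIMs the device (it fails harmlessly on disks without discard support).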
for disk in "${targetDisk[@]}"
do
    sgdisk --zap-all /dev/${disk}
    dd if=/dev/zero of=/dev/${disk} bs=1M count=100 oflag=direct,dsync
    blkdiscard /dev/${disk}
done
# If CEPH was ever installed on this node before, the leftover device-mapper
# entries and LVM devices also need to be removed.
ls /dev/mapper/ceph-* 2>/dev/null | xargs -I% -- dmsetup remove %
rm -rf /dev/ceph-*
# Cleanup the ceph configuration files
rm -rf /etc/ceph
rm -rf /var/lib/ceph*
rm -rf /etc/systemd/system/ceph* # Service files
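# This removes ALL local container images, not just the CEPH ones, so the next
# bootstrap pulls fresh copies.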
podman rmi $(podman images -a -q) --force
# dnf remove centos-release-ceph-* ceph-common cephadm ceph-mgr-dashboard docker podman -y
dnf remove -y podman cephadm ceph-common
# Clean up the temporary install folders.
rm -rf /tmp/ceph_install /tmp/install-registry
# After the next bootstrap, the cleared disks should show up as available:
# ceph orch device ls --refresh
echo 'true' > /tmp/ceph_remove_status.txt