#!/bin/sh
# USE ON MASTER NODE as UBUNTU/root:
export MASTER1="r2-710-01"
export MASTER2="r2-710-03"
export MASTER3="r2-710-05"
export CLIENT_NODES="r2-710-07 r2-710-09 r2-710-11 r2-710-13"
export ALL_NODES="r2-710-01 r2-710-03 r2-710-05 r2-710-07 r2-710-09 r2-710-11 r2-710-13"
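# Optional sanity check (my addition; assumes passwordless SSH from this node
# to every host already works): each host should print its own hostname.
# for i in $ALL_NODES; do ssh -o BatchMode=yes $i hostname; done;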
# Basic setup
#wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
# (lsb_release is escaped so the codename is evaluated on each node, not locally)
for i in $ALL_NODES; do ssh $i "wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -; echo deb https://download.ceph.com/debian-luminous/ \$(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list; sudo apt update"; done;
#sudo apt update
# sudo apt -y upgrade # DON'T DO! docker-engine would be upgraded.
sudo apt -y install ceph-deploy ntp
# CREATE USER AND COPY SSH KEYS OVER
ssh-keygen -N "" -t rsa -b 4096 -C "ceph_deploy" -f /home/ubuntu/id_rsa
for i in $ALL_NODES; do ssh $i "sudo useradd -u 1050 -m -d /home/ceph-deploy ceph-deploy"; done;
for i in $ALL_NODES; do ssh $i "sudo mkdir /home/ceph-deploy/.ssh; sudo chmod 700 /home/ceph-deploy/.ssh"; done; | |
for i in $ALL_NODES; do scp /home/ubuntu/id_rsa $i:/home/ubuntu/id_rsa; done; | |
for i in $ALL_NODES; do scp /home/ubuntu/id_rsa.pub $i:/home/ubuntu/id_rsa.pub; done; | |
for i in $ALL_NODES; do ssh $i "sudo mv /home/ubuntu/id_rsa* /home/ceph-deploy/.ssh/"; done; | |
#for i in $ALL_NODES; do ssh $i "sudo mv /home/ubuntu/id_rsa.pub /home/ceph-deploy/.ssh/id_rsa.pub"; done; | |
for i in $ALL_NODES; do ssh $i "sudo chown ceph-deploy:ceph-deploy /home/ceph-deploy/.ssh"; done; | |
for i in $ALL_NODES; do ssh $i "sudo chown ceph-deploy:ceph-deploy /home/ceph-deploy/.ssh/id_rsa"; done; | |
for i in $ALL_NODES; do ssh $i "sudo chown ceph-deploy:ceph-deploy /home/ceph-deploy/.ssh/id_rsa.pub"; done; | |
for i in $ALL_NODES; do ssh $i "sudo touch /home/ceph-deploy/.ssh/authorized_keys"; done; | |
for i in $ALL_NODES; do ssh $i "sudo cp /home/ceph-deploy/.ssh/id_rsa.pub /home/ceph-deploy/.ssh/authorized_keys"; done; | |
#for i in $ALL_NODES; do ssh $i "sudo cat /home/ceph-deploy/.ssh/id_rsa.pub >> /home/ceph-deploy/.ssh/authorized_keys"; done; | |
for i in $ALL_NODES; do ssh $i "echo 'ceph-deploy ALL = (root) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/ceph-deploy; sudo chmod 0440 /etc/sudoers.d/ceph-deploy;"; done; | |
for i in $ALL_NODES; do ssh $i "sudo apt -y install python2.7"; done; | |
# For monitors:
sudo firewall-cmd --zone=public --add-service=ceph-mon --permanent
sudo firewall-cmd --reload
# sudo iptables -A INPUT -i {iface} -p tcp -s {ip-address}/{netmask} --dport 6789 -j ACCEPT
# /sbin/service iptables save
# For OSDs and MDSs:
for i in $ALL_NODES; do ssh $i "sudo firewall-cmd --zone=public --add-service=ceph --permanent; sudo firewall-cmd --reload"; done;
# For CentOS:
# for i in $ALL_NODES; do ssh $i "sudo setenforce 0"; done;
# RUN THE FOLLOWING AS THE ceph-deploy USER:
# Create admin node (creates ceph.mon.keyring, log, and ceph.conf)
ceph-deploy new $MASTER1
echo "public network = 10.245.122.0/24
cluster network = 192.168.2.0/24
mon_host = 10.245.122.1 # MODIFY THIS
# Per-pool PGs: (OSDs * 100) / (osd pool default size, typically 3),
# rounded up to the nearest power of 2:
# (4 * 100) / 3 = 133.3 -> 2^8 = 256
# Spread across pools: total PGs = ((OSDs * 100) / replication count) / pool count
# ((4 * 100) / 3) / 8 pools = 16.6 -> 16 per pool
#
osd pool default pg num = 128
osd pool default pgp num = 128
" >> ~/ceph.conf
# Install ceph packages on all nodes.
# for i in $ALL_NODES; do ssh $i "echo deb https://download.ceph.com/debian-luminous/ \$(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list; sudo apt update;"; done;
ceph-deploy install --release luminous $ALL_NODES
# Deploy the initial monitor and gather/deploy the keys:
ceph-deploy mon create-initial
ceph-deploy admin $ALL_NODES
ceph-deploy mgr create $MASTER1
#export DRIVE="/dev/sdb /dev/sdc /dev/sdd /dev/sde"
export DRIVE="/dev/sdb"
export SERVERS=$(for i in $CLIENT_NODES; do for j in $DRIVE; do echo "$i:$j"; done; done)
for i in $SERVERS; do ceph-deploy osd create $i; done
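# NOTE (assumption about your tool version): ceph-deploy >= 2.0.0 dropped the
# host:disk argument form; with it, the loop above becomes:
# for i in $CLIENT_NODES; do for j in $DRIVE; do ceph-deploy osd create --data $j $i; done; done;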
# ceph osd new
# Check health:
sudo ceph health
# or:
sudo ceph -s
# expect: HEALTH_OK
# CREATE METADATA SERVERS (2 max)
ceph-deploy mds create $MASTER1 $MASTER2
# ADD MONITORS
ceph-deploy mon add $MASTER2
ceph-deploy mon add $MASTER3
# CHECK STATUS
sudo ceph quorum_status --format json-pretty
# ADD ADDITIONAL (STANDBY) MANAGER DAEMONS
ceph-deploy mgr create $MASTER2 $MASTER3
ssh $MASTER1 sudo ceph -s
# ADD RGW
ceph-deploy rgw create $MASTER1
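# Smoke test (my addition): the luminous RGW listens on civetweb port 7480 by
# default, so an anonymous request should return an S3-style XML listing.
# curl http://$MASTER1:7480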
# Enable the dashboard
sudo ceph mgr module enable dashboard
# Add OpenStack pools
# ceph osd pool create <pool-name> <pg-number> <pgp-number> replicated <crush-rule>
sudo ceph osd crush rule create-simple disks default host firstn
sudo ceph osd pool create volumes 128 128 replicated disks
sudo ceph osd pool create backups 128 128 replicated disks
sudo ceph osd pool create images 128 128 replicated disks
sudo ceph osd pool create vms 128 128 replicated disks
sudo ceph osd pool create gnocchi 128 128 replicated disks
# To resize later, raise pg_num first, then pgp_num (pgp_num cannot exceed pg_num):
# sudo ceph osd pool set {pool-name} pg_num {pg_num}
# sudo ceph osd pool set {pool-name} pgp_num {pgp_num}
# Add OpenStack users:
sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=images-cache'
sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=volumes-cache, allow rwx pool=vms, allow rwx pool=vms-cache, allow rx pool=images, allow rx pool=images-cache'
sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups, allow rwx pool=backups-cache'
sudo ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=volumes-cache, allow rwx pool=vms, allow rwx pool=vms-cache, allow rwx pool=images, allow rwx pool=images-cache'
sudo ceph auth get-or-create client.gnocchi mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=gnocchi, allow rwx pool=gnocchi-cache'
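# Example usage (my addition; target path is illustrative): export a client
# keyring so it can be copied to the matching OpenStack service host.
# sudo ceph auth get client.glance | sudo tee /etc/ceph/ceph.client.glance.keyring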
#### NOTES:
# ceph mgr module enable dashboard
# Dashboard is on port 7000 by default:
# http://$IP:$PORT/$PREFIX
# ceph config-key set mgr/dashboard/server_addr $IP
# ceph config-key set mgr/dashboard/server_port $PORT
# If the address is not configured, the web app binds to ::, i.e. all available IPv4 and IPv6 addresses.
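# Worked example (my addition, reusing this cluster's mon address from ceph.conf):
# sudo ceph config-key set mgr/dashboard/server_addr 10.245.122.1
# sudo ceph config-key set mgr/dashboard/server_port 7000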
# ================
# DELETING OSDs
# ================
# ceph osd crush reweight osd.<ID> 0.0
# ceph osd out <ID>
# service ceph stop osd.<ID>
# ceph osd crush remove osd.<ID>
# ceph auth del osd.<ID>
# ceph osd rm <ID>
# If the systemd config persists, first run "systemctl disable <unit>", then remove it.
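# On systemd hosts (the default for luminous) the stop step above is typically:
# sudo systemctl stop ceph-osd@<ID>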
# ===========
# FINISHED
# ===========