# A sample configuration file to set up a converged oVirt/Gluster install
# using gdeploy: https://github.com/gluster/gdeploy
# edit hosts to match your environment
[hosts]
ovirt01.nlcsvr.net
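# Install the oVirt 4.0 release RPM, which enables the oVirt package repositories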
[yum0]
action=install
packages=http://resources.ovirt.org/pub/yum-repo/ovirt-release40.rpm
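# Install the hypervisor packages (VDSM with its Gluster plugin, the
# hosted-engine setup tool, screen and firewalld) and update the host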
[yum1]
action=install
gpgcheck=yes
packages=vdsm,vdsm-gluster,ovirt-hosted-engine-setup,screen,firewalld
update=yes
# Set up NTP on the servers before any other operations are done
# Disable the existing public servers
[shell1]
action=execute
command=sed -i 's/^\(server .*iburst\)/#\1/' /etc/ntp.conf
# Add custom NTP server
[update-file1]
action=add
dest=/etc/ntp.conf
line=server clock.redhat.com iburst
[service1]
action=enable
service=ntpd
[service2]
action=restart
service=ntpd
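# Reconfigure libvirt and related services for use by VDSM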
[shell2]
action=execute
command=vdsm-tool configure --force
[service3]
action=start
service=vdsmd
# Disable multipath
[script1]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-multipath.sh
# Edit to match your environment; the commented example below assumes a
# single partition for Gluster at /dev/sdb2
#[pv]
#action=create
#devices=sdb2
#[vg1]
#action=create
#vgname=gluster
#pvname=sdb2
#[lv1]
#action=create
#vgname=gluster
#lvname=engine
#lvtype=thick
#size=25GB
#mount=/gluster/brick1
#[lv2]
#action=create
#vgname=gluster
#poolname=lvthinpool
#lvtype=thinpool
#poolmetadatasize=20MB
#chunksize=1024k
#size=70GB
#[lv3]
#action=create
#lvname=data
#poolname=lvthinpool
#vgname=gluster
#lvtype=thinlv
#mount=/gluster/brick2
#virtualsize=50GB
#[lv4]
#action=create
#lvname=export
#poolname=lvthinpool
#vgname=gluster
#lvtype=thinlv
#mount=/gluster/brick3
#virtualsize=50GB
#[lv5]
#action=create
#lvname=iso
#poolname=lvthinpool
#vgname=gluster
#lvtype=thinlv
#mount=/gluster/brick4
#virtualsize=50GB
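# Keep SELinux enabled on the hosts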
[selinux]
yes
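# VDSM manages host networking itself, so stop and disable NetworkManager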
[service4]
action=stop
service=NetworkManager
[service5]
action=disable
service=NetworkManager
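# Cap glusterd CPU usage: a systemd drop-in places glusterd in a custom
# glusterfs.slice, and the slice limits it to CPUQuota=400% (four cores)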
[shell3]
action=execute
command=mkdir /etc/systemd/system/glusterd.service.d
[shell4]
action=execute
command=echo -e "[Service]\nCPUAccounting=yes\nSlice=glusterfs.slice" >> /etc/systemd/system/glusterd.service.d/99-cpu.conf
[shell5]
action=execute
command=echo -e "[Slice]\nCPUQuota=400%" >> /etc/systemd/system/glusterfs.slice,systemctl daemon-reload
[service6]
action=restart
service=glusterd
[service7]
action=start
service=firewalld
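# Open the ports oVirt needs: 111 (rpcbind), 2049 (NFS), 54321 (VDSM),
# 5900-6923 (VM consoles), 5666 (NRPE monitoring), 16514 (libvirt TLS);
# the glusterfs firewalld service covers the Gluster daemon and brick ports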
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900-6923/tcp,5666/tcp,16514/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-gluster-hooks.sh
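# The key/value pairs below apply the usual virt-store tuning to each volume
# (virt group profile, vdsm:kvm ownership 36:36, 512MB shards, self-heal and
# ping-timeout settings). Uncomment the replica lines for a 3-node replica or
# arbiter setup, and make sure the brick_dirs paths (/vms/brickN) exist or
# match the mount points configured above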
[volume1]
action=create
volname=engine
transport=tcp
#replica=yes
#replica_count=3
#arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/vms/brick1/engine
[volume2]
action=create
volname=data
transport=tcp
#replica=yes
#replica_count=3
#arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/vms/brick2/data
[volume3]
action=create
volname=export
transport=tcp
#replica=yes
#replica_count=3
#arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/vms/brick3/export
[volume4]
action=create
volname=iso
transport=tcp
#replica=yes
#replica_count=3
#arbiter_count=1
key=group,storage.owner-uid,storage.owner-gid,features.shard,features.shard-block-size,performance.low-prio-threads,cluster.data-self-heal-algorithm,cluster.locking-scheme,cluster.shd-wait-qlength,cluster.shd-max-threads,network.ping-timeout,user.cifs,nfs.disable,performance.strict-o-direct,network.remote-dio
value=virt,36,36,on,512MB,32,full,granular,10000,6,30,off,on,on,off
brick_dirs=/vms/brick4/iso
# Edit to match your first host; the engine appliance only needs to be installed there
[yum2:ovirt01.nlcsvr.net]
action=install
gpgcheck=no
packages=ovirt-engine-appliance
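# Example invocation (substitute your own config file name):
#   gdeploy -c ovirt-gluster.conf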