all.yml | |
--- | |
# Variables here are applicable to all host groups NOT roles | |
# This sample file generated by generate_group_vars_sample.sh | |
# Dummy variable to avoid an error, because Ansible does not recognize the | |
# file as a valid configuration file when it contains no variables. | |
dummy: | |
# You can override vars by using host or group vars | |
########### | |
# GENERAL # | |
########### | |
###################################### | |
# Releases name to number dictionary # | |
###################################### | |
#ceph_release_num: | |
# dumpling: 0.67 | |
# emperor: 0.72 | |
# firefly: 0.80 | |
# giant: 0.87 | |
# hammer: 0.94 | |
# infernalis: 9 | |
# jewel: 10 | |
# kraken: 11 | |
# luminous: 12 | |
# mimic: 13 | |
# nautilus: 14 | |
# Directory to fetch cluster fsid, keys etc... | |
#fetch_directory: fetch/ | |
# The 'cluster' variable determines the name of the cluster. | |
# Changing the default value to something else means that you will | |
# need to change all the command line calls as well, for example if | |
# your cluster name is 'foo': | |
# "ceph health" will become "ceph --cluster foo health" | |
# | |
# An easier way to handle this is to use the CEPH_ARGS environment variable. | |
# For example, run: export CEPH_ARGS="--cluster foo" | |
# With that you will be able to run "ceph health" as usual. | |
#cluster: ceph | |
# Inventory host group variables | |
#mon_group_name: mons | |
#osd_group_name: osds | |
#rgw_group_name: rgws | |
#mds_group_name: mdss | |
#nfs_group_name: nfss | |
#restapi_group_name: restapis | |
#rbdmirror_group_name: rbdmirrors | |
#client_group_name: clients | |
#iscsi_gw_group_name: iscsigws | |
#mgr_group_name: mgrs | |
# If check_firewall is true, then ansible will try to determine if the | |
# Ceph ports are blocked by a firewall. If the machine running ansible | |
# cannot reach the Ceph ports for some other reason, you may need or | |
# want to set this to False to skip those checks. | |
#check_firewall: False | |
# If configure_firewall is true, then ansible will try to configure the | |
# appropriate firewall rules so that Ceph daemons can communicate | |
# with each other. | |
#configure_firewall: False | |
# Open ports on the corresponding nodes if a firewall is installed on them | |
#ceph_mon_firewall_zone: public | |
#ceph_osd_firewall_zone: public | |
#ceph_rgw_firewall_zone: public | |
#ceph_mds_firewall_zone: public | |
#ceph_nfs_firewall_zone: public | |
#ceph_restapi_firewall_zone: public | |
#ceph_rbdmirror_firewall_zone: public | |
#ceph_iscsi_firewall_zone: public | |
############ | |
# PACKAGES # | |
############ | |
#debian_package_dependencies: | |
# - python-pycurl | |
# - hdparm | |
#centos_package_dependencies: | |
# - python-pycurl | |
# - hdparm | |
# - epel-release | |
# - python-setuptools | |
# - libselinux-python | |
#redhat_package_dependencies: | |
# - python-pycurl | |
# - hdparm | |
# - python-setuptools | |
#suse_package_dependencies: | |
# - python-pycurl | |
# - python-xml | |
# - hdparm | |
# - python-setuptools | |
# Whether or not to install the ceph-test package. | |
#ceph_test: false | |
# Enable the ntp service by default to avoid clock skew on | |
# ceph nodes | |
#ntp_service_enabled: true | |
# Set uid/gid to default '64045' for bootstrap directories. | |
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. | |
# These values have to be set according to the base OS used by the container image, NOT the host. | |
#bootstrap_dirs_owner: "64045" | |
#bootstrap_dirs_group: "64045" | |
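# For example, a minimal sketch assuming a RHEL/CentOS based container image | |
# (per the note above); adjust to the image you actually use: | |
#bootstrap_dirs_owner: "167" | |
#bootstrap_dirs_group: "167" | |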
# This variable determines if ceph packages can be updated. If False, the | |
# package resources will use "state=present". If True, they will use | |
# "state=latest". | |
#upgrade_ceph_packages: False | |
#ceph_use_distro_backports: false # DEBIAN ONLY | |
########### | |
# INSTALL # | |
########### | |
#ceph_repository_type: repository | |
# ORIGIN SOURCE | |
# | |
# Choose between: | |
# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev' or 'obs' | |
# - 'distro' means that no separate repo file will be added | |
# you will get whatever version of Ceph is included in your Linux distro. | |
# - 'local' means that the ceph binaries will be copied over from the local machine | |
ceph_origin: repository | |
#valid_ceph_origins: | |
# - repository | |
# - distro | |
# - local | |
ceph_repository: community | |
#valid_ceph_repository: | |
# - community | |
# - rhcs | |
# - dev | |
# - uca | |
# - custom | |
# - obs | |
# REPOSITORY: COMMUNITY VERSION | |
# | |
# Enabled when ceph_repository == 'community' | |
# | |
ceph_mirror: http://download.ceph.com | |
ceph_stable_key: https://download.ceph.com/keys/release.asc | |
ceph_stable_release: mimic | |
#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" | |
#nfs_ganesha_stable: true # use stable repos for nfs-ganesha | |
#nfs_ganesha_stable_branch: V2.6-stable | |
#nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}" | |
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions | |
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ | |
# for more info read: https://github.com/ceph/ceph-ansible/issues/305 | |
#ceph_stable_distro_source: "{{ ansible_lsb.codename }}" | |
# This option is needed for _both_ stable and dev versions, so please always fill in the right version | |
# # for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/ | |
#ceph_stable_redhat_distro: el7 | |
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 1.3) | |
# | |
# Enabled when ceph_repository == 'rhcs' | |
# | |
# This version is only supported on RHEL >= 7.1 | |
# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel | |
# packages natively. The RHEL 7.1 kernel packages are more stable and secure than | |
# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL | |
# 7.1 or later if you want to use the kernel RBD client. | |
# | |
# The CephFS kernel client is undergoing rapid development upstream, and we do | |
# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this | |
# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS | |
# on RHEL 7. | |
# | |
# | |
#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}" | |
#valid_ceph_repository_type: | |
# - cdn | |
# - iso | |
#ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}" | |
#ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}" | |
#ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content | |
# RHCS installation in Debian systems | |
#ceph_rhcs_cdn_debian_repo: https://customername:[email protected] | |
#ceph_rhcs_cdn_debian_repo_version: "/3-release/" # for GA, later for updates use /3-updates/ | |
# REPOSITORY: UBUNTU CLOUD ARCHIVE | |
# | |
# Enabled when ceph_repository == 'uca' | |
# | |
# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive | |
# usually has newer Ceph releases than the normal distro repository. | |
# | |
# | |
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" | |
#ceph_stable_openstack_release_uca: liberty | |
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" | |
# REPOSITORY: openSUSE OBS | |
# | |
# Enabled when ceph_repository == 'obs' | |
# | |
# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository | |
# usually has newer Ceph releases than the normal distro repository. | |
# | |
# | |
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/luminous/openSUSE_Leap_{{ ansible_distribution_version }}/" | |
# REPOSITORY: DEV | |
# | |
# Enabled when ceph_repository == 'dev' | |
# | |
#ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack | |
#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) | |
#nfs_ganesha_dev: false # use development repos for nfs-ganesha | |
# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman | |
# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous | |
#nfs_ganesha_flavor: "ceph_master" | |
#ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways | |
# REPOSITORY: CUSTOM | |
# | |
# Enabled when ceph_repository == 'custom' | |
# | |
# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be | |
# a URL to the .repo file to be installed on the targets. For deb, | |
# ceph_custom_repo should be the URL to the repo base. | |
# | |
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo | |
# ORIGIN: LOCAL CEPH INSTALLATION | |
# | |
# Enabled when ceph_repository == 'local' | |
# | |
# Path to DESTDIR of the ceph install | |
#ceph_installation_dir: "/path/to/ceph_installation/" | |
# Whether or not to use installer script rundep_installer.sh | |
# This script takes in rundep and installs the packages line by line onto the machine | |
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have | |
# all runtime dependencies installed | |
#use_installer: false | |
# Root directory for ceph-ansible | |
#ansible_dir: "/path/to/ceph-ansible" | |
###################### | |
# CEPH CONFIGURATION # | |
###################### | |
## Ceph options | |
# | |
# Each cluster requires a unique, consistent filesystem ID. By | |
# default, the playbook generates one for you and stores it in a file | |
# in `fetch_directory`. If you want to customize how the fsid is | |
# generated, you may find it useful to disable fsid generation to | |
# avoid cluttering up your ansible repo. If you set `generate_fsid` to | |
# false, you *must* generate `fsid` in another way. | |
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT | |
#fsid: "{{ cluster_uuid.stdout }}" | |
#generate_fsid: true | |
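# A minimal sketch if you disable generation (the UUID below is only a | |
# placeholder; generate your own, e.g. with 'uuidgen'): | |
#generate_fsid: false | |
#fsid: 00000000-0000-0000-0000-000000000000 | |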
#ceph_conf_key_directory: /etc/ceph | |
#cephx: true | |
## Client options | |
# | |
#rbd_cache: "true" | |
#rbd_cache_writethrough_until_flush: "true" | |
#rbd_concurrent_management_ops: 20 | |
#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions | |
# Permissions for the rbd_client_log_path and | |
# rbd_client_admin_socket_path. Depending on your use case for Ceph | |
# you may want to change these values. The default, which is used if | |
# any of the variables are unset or set to a false value (like `null` | |
# or `false`) is to automatically determine what is appropriate for | |
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 | |
# for infernalis releases, and root:root and 1777 for pre-infernalis | |
# releases. | |
# | |
# For other use cases, including running Ceph with OpenStack, you'll | |
# want to set these differently: | |
# | |
# For OpenStack on RHEL, you'll want: | |
# rbd_client_directory_owner: "qemu" | |
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) | |
# rbd_client_directory_mode: "0755" | |
# | |
# For OpenStack on Ubuntu or Debian, set: | |
# rbd_client_directory_owner: "libvirt-qemu" | |
# rbd_client_directory_group: "kvm" | |
# rbd_client_directory_mode: "0755" | |
# | |
# If you set rbd_client_directory_mode, you must use a string (e.g. | |
# 'rbd_client_directory_mode: "0755"', *not* | |
# 'rbd_client_directory_mode: 0755'), or Ansible will complain: mode | |
# must be in octal or symbolic form. | |
#rbd_client_directory_owner: null | |
#rbd_client_directory_group: null | |
#rbd_client_directory_mode: null | |
#rbd_client_log_path: /var/log/ceph | |
#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor | |
#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor | |
## Monitor options | |
# | |
# You must define either monitor_interface, monitor_address or monitor_address_block. | |
# These variables must be defined at least in all.yml and overridden if needed (in the inventory host file or group_vars/*.yml). | |
# E.g. if you want to specify for each monitor which address it will bind to, you can set 'monitor_address' in your **inventory host file**, as shown in the sketch below. | |
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined. | |
#monitor_interface: interface | |
monitor_address: 192.168.1.205 | |
#monitor_address_block: subnet | |
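# A minimal inventory host file sketch for per-monitor addresses (the | |
# hostnames and the second address are placeholders): | |
# [mons] | |
# ceph-mon-01 monitor_address=192.168.1.205 | |
# ceph-mon-02 monitor_address=192.168.1.206 | |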
# set to either ipv4 or ipv6, whichever your network is using | |
ip_version: ipv4 | |
#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf | |
########## | |
# CEPHFS # | |
########## | |
#cephfs: cephfs # name of the ceph filesystem | |
#cephfs_data: cephfs_data # name of the data pool for a given filesystem | |
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem | |
#cephfs_pools: | |
# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } | |
# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } | |
## OSD options | |
# | |
#journal_size: 5120 # OSD journal size in MB | |
#public_network: 0.0.0.0/0 | |
#cluster_network: "{{ public_network | regex_replace(' ', '') }}" | |
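# A sketch with placeholder subnets (adjust to your environment); splitting | |
# public and cluster traffic onto separate subnets is optional but common: | |
#public_network: 192.168.1.0/24 | |
#cluster_network: 192.168.2.0/24 | |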
#osd_mkfs_type: xfs | |
#osd_mkfs_options_xfs: -f -i size=2048 | |
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc | |
#osd_objectstore: filestore | |
# xattrs. By default, 'filestore xattr use omap' is set to 'true' if | |
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can | |
# be set to 'true' or 'false' to explicitly override those | |
# defaults. Leave it 'null' to use the default for your chosen mkfs | |
# type. | |
#filestore_xattr_use_omap: null | |
## MDS options | |
# | |
#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf | |
#mds_max_mds: 1 | |
## Rados Gateway options | |
# | |
#radosgw_frontend_type: civetweb # For additional frontends see: http://docs.ceph.com/docs/mimic/radosgw/frontends/ | |
#radosgw_civetweb_port: 8080 | |
#radosgw_civetweb_num_threads: 100 | |
#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}" | |
# For additional civetweb configuration options available such as SSL, logging, | |
# keepalive, and timeout settings, please see the civetweb docs at | |
# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md | |
#radosgw_frontend_port: "{{ radosgw_civetweb_port if radosgw_frontend_type == 'civetweb' else '8080' }}" | |
#radosgw_frontend_options: "{{ radosgw_civetweb_options if radosgw_frontend_type == 'civetweb' }}" | |
# You must define either radosgw_interface, radosgw_address or radosgw_address_block. | |
# These variables must be defined at least in all.yml and overridden if needed (in the inventory host file or group_vars/*.yml). | |
# E.g. if you want to specify for each radosgw node which address it will bind to, you can set 'radosgw_address' in your **inventory host file**, as shown in the sketch below. | |
# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined. | |
#radosgw_interface: interface | |
#radosgw_address: address | |
#radosgw_address_block: subnet | |
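# A minimal inventory host file sketch for per-node radosgw addresses (the | |
# hostname and address are placeholders): | |
# [rgws] | |
# ceph-rgw-01 radosgw_address=192.168.1.210 | |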
#radosgw_keystone_ssl: false # activate this when using keystone PKI keys | |
# Rados Gateway options | |
#email_address: [email protected] | |
## REST API options | |
# | |
#restapi_interface: "{{ monitor_interface }}" | |
restapi_address: "{{ monitor_address }}" | |
restapi_port: 5000 | |
## Testing mode | |
# enable this mode _only_ when you have a single node | |
# if you don't want it, keep the option commented | |
#common_single_host_mode: true | |
## Handlers - restarting daemons after a config change | |
# if for whatever reason the content of your ceph configuration changes, | |
# ceph daemons will be restarted as well. At the moment, we cannot detect | |
# which config option changed, so all the daemons will be restarted. Although | |
# this restart will be serialized for each node, in between a health check | |
# will be performed so we make sure we don't move to the next node until | |
# ceph is healthy again. | |
# Obviously between the checks (for monitors to be in quorum and for osds' pgs | |
# to be clean) we have to wait. These retries and delays are configurable | |
# for both monitors and osds. | |
# | |
# Monitor handler checks | |
#handler_health_mon_check_retries: 5 | |
#handler_health_mon_check_delay: 10 | |
# | |
# OSD handler checks | |
#handler_health_osd_check_retries: 40 | |
#handler_health_osd_check_delay: 30 | |
#handler_health_osd_check: true | |
# | |
# MDS handler checks | |
#handler_health_mds_check_retries: 5 | |
#handler_health_mds_check_delay: 10 | |
# | |
# RGW handler checks | |
#handler_health_rgw_check_retries: 5 | |
#handler_health_rgw_check_delay: 10 | |
# NFS handler checks | |
#handler_health_nfs_check_retries: 5 | |
#handler_health_nfs_check_delay: 10 | |
# RBD MIRROR handler checks | |
#handler_health_rbd_mirror_check_retries: 5 | |
#handler_health_rbd_mirror_check_delay: 10 | |
# MGR handler checks | |
#handler_health_mgr_check_retries: 5 | |
#handler_health_mgr_check_delay: 10 | |
############### | |
# NFS-GANESHA # | |
############### | |
# Configure the type of NFS gateway access. At least one must be enabled for an | |
# NFS role to be useful. | |
# | |
# Set this to true to enable File access via NFS. Requires an MDS role. | |
nfs_file_gw: true | |
# Set this to true to enable Object access via NFS. Requires an RGW role. | |
nfs_obj_gw: true | |
################### | |
# CONFIG OVERRIDE # | |
################### | |
# Ceph configuration file override. | |
# This allows you to specify more configuration options | |
# using an INI style format. | |
# The following sections are supported: [global], [mon], [osd], [mds], [rgw] | |
# | |
# Example: | |
# ceph_conf_overrides: | |
# global: | |
# foo: 1234 | |
# bar: 5678 | |
# | |
ceph_conf_overrides: | |
global: | |
osd_pool_default_pg_num: 128 | |
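# The supported sections can be combined; a sketch with illustrative values | |
# (both options are standard Ceph settings, the values here are arbitrary): | |
# ceph_conf_overrides: | |
#   global: | |
#     osd_pool_default_size: 3 | |
#   osd: | |
#     osd_scrub_begin_hour: 23 | |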
############# | |
# OS TUNING # | |
############# | |
#disable_transparent_hugepage: true | |
os_tuning_params: | |
- { name: fs.file-max, value: 26234859 } | |
- { name: vm.zone_reclaim_mode, value: 0 } | |
- { name: vm.swappiness, value: 10 } | |
- { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } | |
# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES | |
# Set this to a byte value (e.g. 134217728) | |
# A value of 0 will leave the package default. | |
#ceph_tcmalloc_max_total_thread_cache: 0 | |
########## | |
# DOCKER # | |
########## | |
#docker_exec_cmd: | |
#docker: false | |
#ceph_docker_image: "ceph/daemon" | |
#ceph_docker_image_tag: latest | |
#ceph_docker_registry: docker.io | |
#ceph_docker_enable_centos_extra_repo: false | |
#ceph_docker_on_openstack: false | |
#containerized_deployment: False | |
############ | |
# KV store # | |
############ | |
#containerized_deployment_with_kv: false | |
#mon_containerized_default_ceph_conf_with_kv: false | |
#kv_type: etcd | |
#kv_endpoint: 127.0.0.1 | |
#kv_port: 2379 | |
# this is only here for usage with the rolling_update.yml playbook | |
# do not ever change this here | |
#rolling_update: false | |
##################### | |
# Docker pull retry # | |
##################### | |
#docker_pull_retry: 3 | |
#docker_pull_timeout: "300s" | |
############# | |
# OPENSTACK # | |
############# | |
#openstack_config: false | |
#openstack_glance_pool: | |
# name: "images" | |
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# rule_name: "replicated_rule" | |
# type: 1 | |
# erasure_profile: "" | |
# expected_num_objects: "" | |
# application: "rbd" | |
#openstack_cinder_pool: | |
# name: "volumes" | |
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# rule_name: "replicated_rule" | |
# type: 1 | |
# erasure_profile: "" | |
# expected_num_objects: "" | |
# application: "rbd" | |
#openstack_nova_pool: | |
# name: "vms" | |
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# rule_name: "replicated_rule" | |
# type: 1 | |
# erasure_profile: "" | |
# expected_num_objects: "" | |
# application: "rbd" | |
#openstack_cinder_backup_pool: | |
# name: "backups" | |
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# rule_name: "replicated_rule" | |
# type: 1 | |
# erasure_profile: "" | |
# expected_num_objects: "" | |
# application: "rbd" | |
#openstack_gnocchi_pool: | |
# name: "metrics" | |
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" | |
# rule_name: "replicated_rule" | |
# type: 1 | |
# erasure_profile: "" | |
# expected_num_objects: "" | |
# application: "rbd" | |
#openstack_pools: | |
# - "{{ openstack_glance_pool }}" | |
# - "{{ openstack_cinder_pool }}" | |
# - "{{ openstack_nova_pool }}" | |
# - "{{ openstack_cinder_backup_pool }}" | |
# - "{{ openstack_gnocchi_pool }}" | |
# The value for 'key' can be a pre-generated key, | |
# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==" | |
# By default, keys will be auto-generated. | |
# | |
#openstack_keys: | |
# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } | |
# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } | |
# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } | |
# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } | |
# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } | |
############### | |
# DEPRECATION # | |
############### | |
#use_fqdn_yes_i_am_sure: false | |
mdss.yml | |
--- | |
# Variables here are applicable to all host groups NOT roles | |
# This sample file generated by generate_group_vars_sample.sh | |
# Dummy variable to avoid an error, because Ansible does not recognize the | |
# file as a valid configuration file when it contains no variables. | |
dummy: | |
# You can override vars by using host or group vars | |
########### | |
# GENERAL # | |
########### | |
# Even though MDS nodes should not have the admin key | |
# at their disposal, some people might want to have it | |
# distributed on MDS nodes. Setting 'copy_admin_key' to 'true' | |
# will copy the admin key to the /etc/ceph/ directory | |
#copy_admin_key: false | |
########## | |
# DOCKER # | |
########## | |
# Resource limitation | |
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints | |
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations | |
# These options can be passed using the 'ceph_mds_docker_extra_env' variable. | |
#ceph_mds_docker_memory_limit: 4g | |
#ceph_mds_docker_cpu_limit: 1 | |
# we currently force MDS_NAME to the hostname because of a bug in ceph-docker | |
# fix here: https://github.com/ceph/ceph-docker/pull/770 | |
# this will go away soon. | |
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }} | |
#ceph_config_keys: [] # DON'T TOUCH ME | |
########### | |
# SYSTEMD # | |
########### | |
# ceph_mds_systemd_overrides will override the systemd settings | |
# for the ceph-mds services. | |
# For example, to set "PrivateDevices=false" you can specify: | |
#ceph_mds_systemd_overrides: | |
# Service: | |
# PrivateDevices: False | |
mons.yml | |
--- | |
# Variables here are applicable to all host groups NOT roles | |
# This sample file generated by generate_group_vars_sample.sh | |
# Dummy variable to avoid an error, because Ansible does not recognize the | |
# file as a valid configuration file when it contains no variables. | |
dummy: | |
# You can override vars by using host or group vars | |
########### | |
# GENERAL # | |
########### | |
#mon_group_name: mons | |
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT | |
#monitor_secret: "{{ monitor_keyring.stdout }}" | |
#admin_secret: 'admin_secret' | |
#mgr_secret: 'mgr_secret' | |
# Secure your cluster | |
# This will set the following flags on all the pools: | |
# * nosizechange | |
# * nopgchange | |
# * nodelete | |
#secure_cluster: false | |
#secure_cluster_flags: | |
# - nopgchange | |
# - nodelete | |
# - nosizechange | |
# Enable the Calamari-backed REST API on a Monitor | |
#calamari: false | |
# Enable debugging for Calamari | |
#calamari_debug: false | |
############### | |
# CRUSH RULES # | |
############### | |
#crush_rule_config: false | |
#crush_rule_hdd: | |
# name: HDD | |
# root: HDD | |
# type: host | |
# default: false | |
#crush_rule_ssd: | |
# name: SSD | |
# root: SSD | |
# type: host | |
# default: false | |
#crush_rules: | |
# - "{{ crush_rule_hdd }}" | |
# - "{{ crush_rule_ssd }}" | |
# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} | |
# and will move hosts into them which might lead to significant data movement in the cluster! | |
# | |
# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so: | |
# | |
# [osds] | |
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" | |
# | |
# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host) | |
#create_crush_tree: false | |
########## | |
# DOCKER # | |
########## | |
# Resource limitation | |
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints | |
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations | |
# These options can be passed using the 'ceph_mon_docker_extra_env' variable. | |
#ceph_mon_docker_memory_limit: 3g | |
#ceph_mon_docker_cpu_limit: 1 | |
# Use this variable to add extra env configuration to run your mon container. | |
# If you want to set a custom admin keyring you can set this variable like the following: | |
# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }} | |
#ceph_mon_docker_extra_env: | |
#mon_docker_privileged: false | |
#mon_docker_net_host: true | |
#ceph_config_keys: [] # DON'T TOUCH ME | |
########### | |
# SYSTEMD # | |
########### | |
# ceph_mon_systemd_overrides will override the systemd settings | |
# for the ceph-mon services. | |
# For example, to set "PrivateDevices=false" you can specify: | |
#ceph_mon_systemd_overrides: | |
# Service: | |
# PrivateDevices: False | |
nfss.yml | |
--- | |
# Variables here are applicable to all host groups NOT roles | |
# This sample file generated by generate_group_vars_sample.sh | |
# Dummy variable to avoid an error, because Ansible does not recognize the | |
# file as a valid configuration file when it contains no variables. | |
dummy: | |
# You can override vars by using host or group vars | |
########### | |
# GENERAL # | |
########### | |
# Even though NFS nodes should not have the admin key | |
# at their disposal, some people might want to have it | |
# distributed on NFS nodes. Setting 'copy_admin_key' to 'true' | |
# will copy the admin key to the /etc/ceph/ directory | |
#copy_admin_key: false | |
# Whether the docker container or systemd service should be enabled | |
# and started; it's useful to set this to false if the nfs-ganesha | |
# service is managed by pacemaker | |
#ceph_nfs_enable_service: true | |
# The ceph-nfs systemd service uses ansible's hostname as an instance id, | |
# so the service name is ceph-nfs@{{ ansible_hostname }}. This is not | |
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in | |
# such a case it's better to have a constant instance id instead, which | |
# can be set via 'ceph_nfs_service_suffix' | |
# ceph_nfs_service_suffix: ansible_hostname | |
####################### | |
# Access type options # | |
####################### | |
# These are currently in ceph-common defaults because nfs_obj_gw is shared with ceph-rgw | |
# Enable NFS File access | |
# If set to true, then ganesha is set up to export the root of the | |
# Ceph filesystem, and ganesha's attribute and directory caching is disabled | |
# as much as possible since libcephfs clients also cache the same | |
# information. | |
#nfs_file_gw: false | |
# Enable NFS Object access | |
#nfs_obj_gw: true | |
###################### | |
# NFS Ganesha Config # | |
###################### | |
#ceph_nfs_log_file: "/var/log/ganesha/ganesha.log" | |
#ceph_nfs_dynamic_exports: false | |
# If set to true then rados is used to store ganesha exports | |
# and client session information. This is useful if you | |
# run multiple nfs-ganesha servers in active/passive mode and | |
# want to do failover | |
#ceph_nfs_rados_backend: false | |
# Name of the rados object used to store a list of the export rados | |
# object URLs | |
#ceph_nfs_rados_export_index: "ganesha-export-index" | |
# Address the ganesha service should listen on; by default ganesha listens on all | |
# addresses. (Note: ganesha ignores this parameter in the current version due to | |
# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217) | |
# ceph_nfs_bind_addr: 0.0.0.0 | |
# If set to true, then ganesha's attribute and directory caching is disabled | |
# as much as possible. Currently, ganesha caches by default. | |
# When using ganesha as CephFS's gateway, it is recommended to turn off | |
# ganesha's caching as the libcephfs clients also cache the same information. | |
# Note: Irrespective of this option's setting, ganesha's caching is disabled | |
# when the 'nfs_file_gw' option is set to true. | |
#ceph_nfs_disable_caching: false | |
#################### | |
# FSAL Ceph Config # | |
#################### | |
#ceph_nfs_ceph_export_id: 20133 | |
#ceph_nfs_ceph_pseudo_path: "/cephfile" | |
#ceph_nfs_ceph_protocols: "3,4" | |
#ceph_nfs_ceph_access_type: "RW" | |
#ceph_nfs_ceph_user: "admin" | |
################### | |
# FSAL RGW Config # | |
################### | |
#ceph_nfs_rgw_export_id: 20134 | |
#ceph_nfs_rgw_pseudo_path: "/cephobject" | |
#ceph_nfs_rgw_protocols: "3,4" | |
#ceph_nfs_rgw_access_type: "RW" | |
#ceph_nfs_rgw_user: "cephnfs" | |
# Note: keys are optional and can be generated, but not on containerized | |
# deployments, where they must be configured. | |
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" | |
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" | |
#rgw_client_name: client.rgw.{{ ansible_hostname }} | |
################### | |
# CONFIG OVERRIDE # | |
################### | |
# Ganesha configuration file override. | |
# These multiline strings will be appended to the contents of the blocks in ganesha.conf and | |
# must be in the correct ganesha.conf format seen here: | |
# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example | |
# | |
# Example: | |
#CACHEINODE { | |
# #Entries_HWMark = 100000; | |
#} | |
# | |
#ganesha_ceph_export_overrides: | |
#ganesha_rgw_export_overrides: | |
#ganesha_rgw_section_overrides: | |
#ganesha_log_overrides: | |
#ganesha_conf_overrides: | | |
# CACHEINODE { | |
# #Entries_HWMark = 100000; | |
# } | |
########## | |
# DOCKER # | |
########## | |
#ceph_docker_image: "ceph/daemon" | |
#ceph_docker_image_tag: latest | |
#ceph_nfs_docker_extra_env: | |
#ceph_config_keys: [] # DON'T TOUCH ME | |
osds.yml | |
--- | |
# Variables here are applicable to all host groups NOT roles | |
# This sample file generated by generate_group_vars_sample.sh | |
# Dummy variable to avoid an error, because Ansible does not recognize the | |
# file as a valid configuration file when it contains no variables. | |
dummy: | |
# You can override default vars defined in defaults/main.yml here, | |
# but I would advise using host or group vars instead | |
########### | |
# GENERAL # | |
########### | |
# Even though OSD nodes should not have the admin key | |
# at their disposal, some people might want to have it | |
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true' | |
# will copy the admin key to the /etc/ceph/ directory | |
#copy_admin_key: false | |
############## | |
# CEPH OPTIONS | |
############## | |
# Devices to be used as OSDs | |
# You can pre-provision disks that are not present yet. | |
# Ansible will just skip them. Newly added disks will be | |
# automatically configured during the next run. | |
# | |
# Declare devices to be used as OSDs | |
# All scenarios (except the 3rd) inherit from the following device declaration | |
# Note: This scenario uses the ceph-disk tool to provision OSDs | |
devices: | |
- /dev/sda | |
# - /dev/sdc | |
# - /dev/sdd | |
# - /dev/sde | |
#devices: [] | |
#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. | |
# You can use this option with the first, fourth and fifth OSD scenarios. | |
# Device discovery is based on the Ansible fact 'ansible_devices', | |
# which reports all the devices on a system. If chosen, all the disks | |
# found will be passed to ceph-disk. You should not be worried about using | |
# this option since ceph-disk has a built-in check which looks for empty devices; | |
# thus devices with existing partition tables will not be used. | |
# | |
#osd_auto_discovery: false | |
# Encrypt your OSD device using dmcrypt | |
# If set to True, no matter which osd_objectstore and osd_scenario you use, the data will be encrypted | |
#dmcrypt: False | |
# I. First scenario: collocated | |
# | |
# To enable this scenario do: osd_scenario: collocated | |
# | |
# | |
# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions | |
# will be stored on the same device. | |
# | |
# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored | |
# on the same device. The device will get 2 partitions: | |
# - One for 'data', called 'ceph data' | |
# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block' | |
# | |
# Example of what you will get: | |
# [root@ceph-osd0 ~]# blkid /dev/sda* | |
# /dev/sda: PTTYPE="gpt" | |
# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7" | |
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d" | |
# | |
# Note: This scenario uses the ceph-disk tool to provision OSDs | |
osd_scenario: collocated | |
#valid_osd_scenarios: | |
# - collocated | |
# - non-collocated | |
# - lvm | |
# II. Second scenario: non-collocated | |
# | |
# To enable this scenario do: osd_scenario: non-collocated | |
# | |
# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions | |
# will be stored on different devices: | |
# - 'ceph data' will be stored on the device listed in 'devices' | |
# - 'ceph journal' will be stored on the device listed in 'dedicated_devices' | |
# | |
# Let's take an example, imagine 'devices' was declared like this: | |
# | |
# devices: | |
# - /dev/sda | |
# - /dev/sdb | |
# - /dev/sdc | |
# - /dev/sdd | |
# | |
# And 'dedicated_devices' was declared like this: | |
# | |
# dedicated_devices: | |
# - /dev/sdf | |
# - /dev/sdf | |
# - /dev/sdg | |
# - /dev/sdg | |
# | |
# This will result in the following mapping: | |
# - /dev/sda will have /dev/sdf1 as a journal | |
# - /dev/sdb will have /dev/sdf2 as a journal | |
# - /dev/sdc will have /dev/sdg1 as a journal | |
# - /dev/sdd will have /dev/sdg2 as a journal | |
# | |
# | |
# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored | |
# on a dedicated device. | |
# | |
# So the following will happen: | |
# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'. | |
# 'data' is only 100MB and does not store any of your data; it's just a bunch of Ceph metadata. | |
# 'block' will store all your actual data. | |
# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db' | |
# and one for RocksDB WAL, called 'block.wal' | |
# | |
# By default dedicated_devices will represent block.db | |
# | |
# Example of what you will get: | |
# [root@ceph-osd0 ~]# blkid /dev/sd* | |
# /dev/sda: PTTYPE="gpt" | |
# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0" | |
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c" | |
# /dev/sdb: PTTYPE="gpt" | |
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217" | |
# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a" | |
# | |
# Note: This scenario uses the ceph-disk tool to provision OSDs | |
#dedicated_devices: [] | |
# More device granularity for Bluestore | |
# | |
# ONLY if osd_objectstore: bluestore is enabled. | |
# | |
# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'. | |
# If set, then you will have a dedicated partition on a specific device for block.wal. | |
# | |
# Example of what you will get: | |
# [root@ceph-osd0 ~]# blkid /dev/sd* | |
# /dev/sda: PTTYPE="gpt" | |
# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669" | |
# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699" | |
# /dev/sdb: PTTYPE="gpt" | |
# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3" | |
# /dev/sdc: PTTYPE="gpt" | |
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3" | |
# Note: This option uses the ceph-disk tool | |
#bluestore_wal_devices: "{{ dedicated_devices }}" | |
# III. Use ceph-volume to create OSDs from logical volumes. | |
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals | |
# when using lvm, not collocated journals. | |
# lvm_volumes is a list of dictionaries. | |
# | |
# Filestore: Each dictionary must contain a data, journal and vg_name key. Any | |
# logical volume or logical group used must be a name and not a path. data | |
# can be a logical volume, device or partition. journal can be either a lv or partition. | |
# You can not use the same journal for many data lvs. | |
# data_vg must be the volume group name of the data lv, only applicable when data is an lv. | |
# journal_vg is optional and must be the volume group name of the journal lv, if applicable. | |
# For example: | |
# lvm_volumes: | |
# - data: data-lv1 | |
# data_vg: vg1 | |
# journal: journal-lv1 | |
# journal_vg: vg2 | |
# crush_device_class: foo | |
# - data: data-lv2 | |
# journal: /dev/sda1 | |
# data_vg: vg1 | |
# - data: data-lv3 | |
# journal: /dev/sdb1 | |
# data_vg: vg2 | |
# - data: /dev/sda | |
# journal: /dev/sdb1 | |
# - data: /dev/sda1 | |
# journal: /dev/sdb1 | |
# | |
# Bluestore: Each dictionary must contain at least data. When defining wal or | |
# db, it must have both the lv name and vg group (db and wal are not required). | |
# This allows for four combinations: just data, data and wal, data and wal and | |
# db, data and db. | |
# For example: | |
# lvm_volumes: | |
# - data: data-lv1 | |
# data_vg: vg1 | |
# wal: wal-lv1 | |
# wal_vg: vg1 | |
# crush_device_class: foo | |
# - data: data-lv2 | |
# db: db-lv2 | |
# db_vg: vg2 | |
# - data: data-lv3 | |
# wal: wal-lv1 | |
# wal_vg: vg3 | |
# db: db-lv3 | |
# db_vg: vg3 | |
# - data: data-lv4 | |
# data_vg: vg4 | |
# - data: /dev/sda | |
# - data: /dev/sdb1 | |
#lvm_volumes: [] | |
########## | |
# DOCKER # | |
########## | |
#ceph_config_keys: [] # DON'T TOUCH ME | |
# Resource limitation | |
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints | |
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations | |
# These options can be passed using the 'ceph_osd_docker_extra_env' variable. | |
#ceph_osd_docker_memory_limit: 5g | |
#ceph_osd_docker_cpu_limit: 1 | |
# The next two variables are undefined, and thus, unused by default. | |
# If `lscpu | grep NUMA` returned the following: | |
# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16 | |
# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17 | |
# then, the following would run the OSD on the first NUMA node only. | |
#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" | |
#ceph_osd_docker_cpuset_mems: "0" | |
# PREPARE DEVICE | |
# | |
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above | |
# | |
#ceph_osd_docker_devices: "{{ devices }}" | |
#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} | |
# ACTIVATE DEVICE | |
# | |
#ceph_osd_docker_extra_env: | |
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command | |
########### | |
# SYSTEMD # | |
########### | |
# ceph_osd_systemd_overrides will override the systemd settings | |
# for the ceph-osd services. | |
# For example, to set "PrivateDevices=false" you can specify: | |
#ceph_osd_systemd_overrides: | |
# Service: | |
# PrivateDevices: False | |