site.pp for dev-exp2
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult the latest Fuel User Guide before making edits.
#
### GENERAL CONFIG ### | |
# This section sets main parameters such as hostnames and IP addresses of different nodes | |
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints. | |
$public_interface = "eth1" | |
$public_br = 'br-ex' | |
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen. | |
$internal_interface = "eth0" | |
$internal_br = 'br-mgmt' | |
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface. | |
$private_interface = "eth2" | |
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived. | |
$internal_virtual_ip = "10.0.0.127" | |
# Change this to an IP routable from your 'public' network
# (e.g. the Internet or your office LAN) in which your public
# interface resides
$public_virtual_ip = "10.8.8.66" | |
$nodes_harr = [ | |
{ | |
'name' => 'master', | |
'role' => 'master', | |
'internal_address' => '10.0.0.101', | |
'public_address' => '10.0.204.101', | |
'mountpoints'=> "1 1\n2 1", | |
'storage_local_net_ip' => '10.0.0.101', | |
}, | |
{ | |
'name' => 'fuel-cobbler', | |
'role' => 'cobbler', | |
'internal_address' => '10.0.0.102', | |
'public_address' => '10.0.204.102', | |
'mountpoints'=> "1 1\n2 1", | |
'storage_local_net_ip' => '10.0.0.102', | |
}, | |
{ | |
'name' => 'fuel-controller-01', | |
'role' => 'primary-controller', | |
'internal_address' => '10.0.0.103', | |
'public_address' => '10.0.204.103', | |
'swift_zone' => 1, | |
'mountpoints'=> "1 1\n2 1", | |
'storage_local_net_ip' => '10.0.0.103', | |
}, | |
{ | |
'name' => 'fuel-controller-02', | |
'role' => 'controller', | |
'internal_address' => '10.0.0.104', | |
'public_address' => '10.0.204.104', | |
'swift_zone' => 2, | |
'mountpoints'=> "1 2\n 2 1", | |
'storage_local_net_ip' => '10.0.0.110', | |
}, | |
{ | |
'name' => 'fuel-controller-03', | |
'role' => 'controller', | |
'internal_address' => '10.0.0.105', | |
'public_address' => '10.0.204.105', | |
'swift_zone' => 3, | |
'mountpoints'=> "1 2\n 2 1", | |
'storage_local_net_ip' => '10.0.0.110', | |
}, | |
{ | |
'name' => 'fuel-compute-01', | |
'role' => 'compute', | |
'internal_address' => '10.0.0.106', | |
'public_address' => '10.0.204.106', | |
}, | |
{ | |
'name' => 'fuel-compute-02', | |
'role' => 'compute', | |
'internal_address' => '10.0.0.107', | |
'public_address' => '10.0.204.107', | |
}, | |
] | |
$nodes = [{"name" => "fuel-cobbler","public_address" => "10.8.8.100","internal_address" => "10.0.0.100","role" => "cobbler"},{"name" => "fuel-controller-01","storage_local_net_ip" => "10.0.0.101","public_address" => "10.8.8.101","mountpoints" => "1 2\n 2 1","swift_zone" => 1,"internal_address" => "10.0.0.101","role" => "primary-controller"},{"name" => "fuel-controller-02","storage_local_net_ip" => "10.0.0.102","public_address" => "10.8.8.102","mountpoints" => "1 2\n 2 1","swift_zone" => 2,"internal_address" => "10.0.0.102","role" => "controller"},{"name" => "fuel-controller-03","storage_local_net_ip" => "10.0.0.103","public_address" => "10.8.8.103","mountpoints" => "1 2\n 2 1","swift_zone" => 3,"internal_address" => "10.0.0.103","role" => "controller"},{"name" => "fuel-compute-01","public_address" => "10.8.8.105","internal_address" => "10.0.0.105","role" => "compute"},{"name" => "fuel-baremetal-01","public_address" => "10.8.8.122","internal_address" => "10.0.0.122","role" => "baremetal"},{"name" => "fuel-vcenter-01","public_address" => "10.8.8.121","internal_address" => "10.0.0.121","role" => "vcenter"},{"name" => "fuel-swift-01","storage_local_net_ip" => "10.0.0.108","public_address" => "10.8.8.108","mountpoints" => "1 2\n 2 1","swift_zone" => 4,"internal_address" => "10.0.0.108","role" => "storage"},{"name" => "fuel-swift-02","storage_local_net_ip" => "10.0.0.109","public_address" => "10.8.8.109","mountpoints" => "1 2\n 2 1","swift_zone" => 5,"internal_address" => "10.0.0.109","role" => "storage"},{"name" => "fuel-swift-03","storage_local_net_ip" => "10.0.0.110","public_address" => "10.8.8.110","mountpoints" => "1 2\n 2 1","swift_zone" => 6,"internal_address" => "10.0.0.110","role" => "storage"},{"name" => "fuel-vcenter-01","public_address" => "10.8.8.121","internal_address" => "10.0.0.121","role" => "vcenter"},{"name" => "fuel-baremetal-01","public_address" => "10.8.8.122","internal_address" => "10.0.0.122","role" => "baremetal"}] | |
#$nodes = [{"internal_address" => "10.0.0.100","public_address" => "10.8.8.100","name" => "fuel-cobbler","role" => "cobbler"},{"internal_address" => "10.0.0.101","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.101","name" => "fuel-controller-01","swift_zone" => 1,"storage_local_net_ip" => "10.0.0.101","role" => "primary-controller"},{"internal_address" => "10.0.0.102","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.102","name" => "fuel-controller-02","swift_zone" => 2,"storage_local_net_ip" => "10.0.0.102","role" => "controller"},{"internal_address" => "10.0.0.103","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.103","name" => "fuel-controller-03","swift_zone" => 3,"storage_local_net_ip" => "10.0.0.103","role" => "controller"},{"internal_address" => "10.0.0.105","public_address" => "10.8.8.105","name" => "fuel-compute-01","role" => "compute"},{"internal_address" => "10.0.0.108","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.108","name" => "fuel-swift-01","swift_zone" => 4,"storage_local_net_ip" => "10.0.0.108","role" => "storage"},{"internal_address" => "10.0.0.109","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.109","name" => "fuel-swift-02","swift_zone" => 5,"storage_local_net_ip" => "10.0.0.109","role" => "storage"},{"internal_address" => "10.0.0.110","mountpoints" => "1 2\n 2 1","public_address" => "10.8.8.110","name" => "fuel-swift-03","swift_zone" => 6,"storage_local_net_ip" => "10.0.0.110","role" => "storage"},{"internal_address" => "10.0.0.111","public_address" => "10.8.8.111","name" => "fuel-swiftproxy-01","role" => "primary-swift-proxy"},{"internal_address" => "10.0.0.112","public_address" => "10.8.8.112","name" => "fuel-swiftproxy-02","role" => "swift-proxy"},{"internal_address" => "10.0.0.121","public_address" => "10.8.8.121","name" => "fuel-vcenter-01","role" => "vcenter"},{"internal_address" => "10.0.0.122","public_address" => "10.8.8.122","name" => "fuel-baremetal-01","role" => "baremetal"}] | |
$default_gateway = "10.8.8.8" | |
# Specify nameservers here. | |
# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ["10.0.0.100","10.8.8.8","8.8.8.8"] | |
# Specify netmasks for internal and external networks. | |
$internal_netmask = "255.255.0.0" | |
$public_netmask = "255.255.255.0" | |
$node = filter_nodes($nodes,'name',$::hostname) | |
if empty($node) { | |
fail("Node $::hostname is not defined in the hash structure") | |
} | |
$internal_address = $node[0]['internal_address'] | |
$public_address = $node[0]['public_address'] | |
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller')) | |
$controller_internal_addresses = nodes_to_hash($controllers,'name','internal_address') | |
$controller_public_addresses = nodes_to_hash($controllers,'name','public_address') | |
$controller_hostnames = keys($controller_internal_addresses) | |
#Set this to anything other than pacemaker if you do not want Quantum HA | |
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node | |
#on the ONLY controller | |
$ha_provider = 'pacemaker' | |
$use_unicast_corosync = true | |
$nagios = false | |
# Set nagios master fqdn | |
$nagios_master = "fuel-controller-01.c7200.d" | |
## proj_name is the environment name used in the nagios configuration
$proj_name = 'test' | |
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario. | |
$multi_host = true | |
# Specify different DB credentials for various services | |
$mysql_root_password = 'nova' | |
$admin_email = '[email protected]' | |
$admin_password = 'nova' | |
$keystone_db_password = 'nova' | |
$keystone_admin_token = 'nova' | |
$glance_db_password = 'nova' | |
$glance_user_password = 'nova' | |
$nova_db_password = 'nova' | |
$nova_user_password = 'nova' | |
$rabbit_password = 'nova' | |
$rabbit_user = 'nova' | |
$swift_user_password = 'swift_pass' | |
$swift_shared_secret = 'changeme' | |
$quantum_user_password = 'quantum_pass' | |
$quantum_db_password = 'quantum_pass' | |
$quantum_db_user = 'quantum' | |
$quantum_db_dbname = 'quantum' | |
# End DB credentials section | |
### GENERAL CONFIG END ### | |
### NETWORK/QUANTUM ### | |
# Specify network/quantum specific settings | |
# Should we use quantum or nova-network (deprecated)?
# Consult OpenStack documentation for differences between them. | |
$quantum = true | |
$quantum_netnode_on_cnt = true | |
$quantum_use_namespaces = true | |
# a string "password" value that should be configured to authenticate requests for metadata | |
# from quantum-metadata-proxy to nova-api | |
$quantum_metadata_proxy_shared_secret = "connecting_nova-api_and_quantum-metadata-agent" | |
# Specify network creation criteria: | |
# Should puppet automatically create networks? | |
$create_networks = true | |
# Fixed IP addresses are typically used for communication between VM instances. | |
$fixed_range = "192.168.0.0/16" | |
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet). | |
$floating_range = "10.8.8.0/24" | |
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum. | |
# Consult openstack docs for corresponding network manager. | |
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup | |
$num_networks = 1 | |
$network_size = 31 | |
$vlan_start = 300 | |
# Quantum | |
# Segmentation type for isolating traffic between tenants | |
# Consult Openstack Quantum docs | |
$tenant_network_type = "gre" | |
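# For example, to use VLAN segmentation instead of GRE (see $segment_range below),
# a setting like the following could be used:
# $tenant_network_type = "vlan"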
# Which IP address will be used for creating GRE tunnels. | |
$quantum_gre_bind_addr = $internal_address | |
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range: | |
# the first address will be defined as an external default router, | |
# the second address will be attached to an uplink bridge interface, | |
# the remaining addresses will be utilized for the floating IP address pool. | |
$external_ipinfo = {"pool_end" => "10.8.8.239","ext_bridge" => "10.8.8.201","public_net_router" => "10.8.8.8","pool_start" => "10.8.8.225"} | |
## $external_ipinfo = { | |
## 'public_net_router' => '10.0.74.129', | |
## 'ext_bridge' => '10.0.74.130', | |
## 'pool_start' => '10.0.74.131', | |
## 'pool_end' => '10.0.74.142', | |
## } | |
# Quantum segmentation range. | |
# For VLAN networks: valid VLAN VIDs can be 1 through 4094. | |
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer. | |
$segment_range = "900:999" | |
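# For example, a VLAN deployment might use a VID range such as (illustrative values):
# $segment_range = "1000:1030"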
# Set up OpenStack network manager. It is used ONLY in nova-network. | |
# Consult Openstack nova-network docs for possible values. | |
$network_manager = "nova.network.manager.FlatDHCPManager" | |
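# For example, a VLAN-based nova-network setup (only relevant when $quantum = false)
# might use:
# $network_manager = "nova.network.manager.VlanManager"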
# Assign floating IPs to VMs on startup automatically? | |
$auto_assign_floating_ip = true | |
# Database connection for Quantum configuration (quantum.conf) | |
#todo: check passing following line to quantum::* | |
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${$internal_virtual_ip}/${quantum_db_dbname}" | |
if $quantum { | |
$public_int = $public_br | |
$internal_int = $internal_br | |
} else { | |
$public_int = $public_interface | |
$internal_int = $internal_interface | |
} | |
$vips = { # Do not convert to an ARRAY; it doesn't work in Puppet 2.7
public_old => { | |
nic => $public_int, | |
ip => $public_virtual_ip, | |
}, | |
management_old => { | |
nic => $internal_int, | |
ip => $internal_virtual_ip, | |
}, | |
} | |
#Stages configuration | |
stage {'first': } -> | |
stage {'openstack-custom-repo': } -> | |
stage {'netconfig': } -> | |
stage {'corosync_setup': } -> | |
stage {'cluster_head': } -> | |
stage {'openstack-firewall': } -> Stage['main'] | |
#Network configuration | |
class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'} | |
class node_netconfig ( | |
$mgmt_ipaddr, | |
$mgmt_netmask = '255.255.255.0', | |
$public_ipaddr = undef, | |
$public_netmask= '255.255.255.0', | |
$save_default_gateway=false, | |
$quantum = $quantum, | |
) { | |
if $quantum { | |
l23network::l3::create_br_iface {'mgmt': | |
interface => $internal_interface, # !!! NO $internal_int /sv !!! | |
bridge => $internal_br, | |
ipaddr => $mgmt_ipaddr, | |
netmask => $mgmt_netmask, | |
dns_nameservers => $dns_nameservers, | |
save_default_gateway => $save_default_gateway, | |
} -> | |
l23network::l3::create_br_iface {'ex': | |
interface => $public_interface, # !! NO $public_int /sv !!! | |
bridge => $public_br, | |
ipaddr => $public_ipaddr, | |
netmask => $public_netmask, | |
gateway => $default_gateway, | |
} | |
} else { | |
# nova-network mode | |
l23network::l3::ifconfig {$public_int: | |
ipaddr => $public_ipaddr, | |
netmask => $public_netmask, | |
gateway => $default_gateway, | |
} | |
l23network::l3::ifconfig {$internal_int: | |
ipaddr => $mgmt_ipaddr, | |
netmask => $mgmt_netmask, | |
dns_nameservers => $dns_nameservers, | |
} | |
} | |
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' } | |
} | |
### NETWORK/QUANTUM END ### | |
# This parameter specifies the identifier of the current cluster. It is needed in case of
# multiple environment installations. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254.
$deployment_id = "53" | |
# Below you can enable or disable various services based on the chosen deployment topology: | |
### CINDER/VOLUME ### | |
# Should we use cinder or nova-volume(obsolete) | |
# Consult openstack docs for differences between them | |
$cinder = true | |
# Choose which nodes to install cinder onto | |
# 'compute' -> compute nodes will run cinder | |
# 'controller' -> controller nodes will run cinder | |
# 'storage' -> storage nodes will run cinder | |
# 'fuel-controller-XX' -> specify particular host(s) by hostname | |
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address | |
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes) | |
$cinder_nodes = ["compute"] | |
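# For example (illustrative selections; adjust hostnames/IPs to your environment):
# $cinder_nodes = ['controller']
# $cinder_nodes = ['fuel-controller-01', '10.0.0.105']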
#Set to true if you want cinder-volume installed on this host.
#Otherwise only the api and scheduler services will be installed.
$manage_volumes = true | |
# Set up the network address which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address | |
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices. | |
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG | |
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! | |
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED, | |
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP | |
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST! | |
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! | |
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself | |
$nv_physical_volume = ["/dev/vdb"] | |
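# For example, to aggregate several devices into the VG (illustrative device names):
# $nv_physical_volume = ["/dev/vdb", "/dev/vdc"]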
#Evaluate cinder node selection | |
if ($cinder) { | |
if (member($cinder_nodes,'all')) { | |
$is_cinder_node = true | |
} elsif (member($cinder_nodes,$::hostname)) { | |
$is_cinder_node = true | |
} elsif (member($cinder_nodes,$internal_address)) { | |
$is_cinder_node = true | |
} elsif ($node[0]['role'] =~ /controller/ ) { | |
$is_cinder_node = member($cinder_nodes,'controller') | |
} else { | |
$is_cinder_node = member($cinder_nodes,$node[0]['role']) | |
} | |
} else { | |
$is_cinder_node = false | |
} | |
### CINDER/VOLUME END ### | |
### GLANCE and SWIFT ### | |
# Which backend to use for glance | |
# Supported backends are "swift" and "file" | |
$glance_backend = 'swift' | |
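# To store images on the local filesystem instead of swift, the alternative would be:
# $glance_backend = 'file'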
# Use loopback device for swift: | |
# set 'loopback' or false | |
# This parameter controls where swift partitions are located: | |
# on physical partitions or inside loopback devices. | |
$swift_loopback = "loopback" | |
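# To place swift partitions on physical partitions instead of loopback devices:
# $swift_loopback = false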
# Which IP address to bind swift components to: e.g., which IP swift-proxy should listen on | |
$swift_local_net_ip = $internal_address | |
# IP of the controller node used during the swift installation
# and written into the swift configs
$controller_node_public = $internal_virtual_ip | |
# Hash of proxies hostname|fqdn => ip mappings. | |
# This is used by controller_ha.pp manifests for haproxy setup | |
# of swift_proxy backends | |
$swift_proxies = $controller_internal_addresses | |
# Set the hostname of the swift master.
# It determines on which swift proxy node the
# *.ring.gz files are built. Other swift proxies/storage nodes
# will rsync them.
if $node[0]['role'] == 'primary-controller' { | |
$primary_proxy = true | |
} else { | |
$primary_proxy = false | |
} | |
if $node[0]['role'] == 'primary-controller' { | |
$primary_controller = true | |
} else { | |
$primary_controller = false | |
} | |
$master_swift_proxy_nodes = filter_nodes($nodes,'role','primary-controller') | |
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address'] | |
### Glance and swift END ### | |
### Syslog ### | |
# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case. | |
$use_syslog = true | |
# Default log level, used when neither verbose nor debug is set
$syslog_log_level = 'ERROR' | |
# Syslog facilities for main openstack services, choose any, may overlap if needed | |
# local0 is reserved for HA provisioning and orchestration services, | |
# local1 is reserved for openstack-dashboard | |
$syslog_log_facility_glance = 'LOCAL2' | |
$syslog_log_facility_cinder = 'LOCAL3' | |
$syslog_log_facility_quantum = 'LOCAL4' | |
$syslog_log_facility_nova = 'LOCAL6' | |
$syslog_log_facility_keystone = 'LOCAL7' | |
if $use_syslog { | |
class { "::openstack::logging": | |
stage => 'first', | |
role => 'client', | |
show_timezone => true, | |
# log both locally (including auth) and remotely
log_remote => true, | |
log_local => true, | |
log_auth_local => true, | |
# keep four weekly log rotations; force rotation if the 300M size is exceeded
rotation => 'weekly', | |
keep => '4', | |
# should be > 30M | |
limitsize => '300M', | |
# remote servers to send logs to | |
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},], | |
# should be true if the client is running on a virtual node
virtual => true, | |
# facilities | |
syslog_log_facility_glance => $syslog_log_facility_glance, | |
syslog_log_facility_cinder => $syslog_log_facility_cinder, | |
syslog_log_facility_quantum => $syslog_log_facility_quantum, | |
syslog_log_facility_nova => $syslog_log_facility_nova, | |
syslog_log_facility_keystone => $syslog_log_facility_keystone, | |
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will reach syslog
rabbit_log_level => $syslog_log_level, | |
} | |
} | |
# Example for server role class definition for remote logging node: | |
# class {::openstack::logging: | |
# role => 'server', | |
# log_remote => false, | |
# log_local => true, | |
# log_auth_local => true, | |
# rotation => 'daily', | |
# keep => '7', | |
# limitsize => '100M', | |
# port => '514', | |
# proto => 'udp', | |
# # high-precision timestamps
# show_timezone => true,
# # should be true if the server is running on a virtual node
# #virtual => false, | |
# } | |
### Syslog END ### | |
case $::osfamily { | |
"Debian": { | |
$rabbitmq_version_string = '2.8.7-1' | |
} | |
"RedHat": { | |
$rabbitmq_version_string = '2.8.7-2.el6' | |
} | |
} | |
# | |
# OpenStack packages and customized component versions to be installed. | |
# Use 'latest' to get the most recent ones or specify exact version if you need to install custom version. | |
$openstack_version = { | |
'keystone' => 'latest', | |
'glance' => 'latest', | |
'horizon' => 'latest', | |
'nova' => 'latest', | |
'novncproxy' => 'latest', | |
'cinder' => 'latest', | |
'rabbitmq_version' => $rabbitmq_version_string, | |
} | |
# Which package repo mirror to use. Currently "default". | |
# "custom" is used by Mirantis for testing purposes. | |
# Local puppet-managed repo option planned for future releases. | |
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp, | |
# though it is NOT recommended. | |
$mirror_type = "default" | |
$enable_test_repo = false | |
$repo_proxy = "http://10.0.0.100:3128" | |
# This parameter specifies the verbosity level of log messages
# in the openstack components' config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets INFO level messages.
# If neither debug nor verbose is set, the default level is WARNING.
# Note: if syslog is on, this default level may be overridden (for syslog) with the syslog_log_level option.
$verbose = true | |
$debug = true | |
#Rate Limits for cinder and Nova | |
#Cinder and Nova can rate-limit your requests to API services. | |
#These limits can be reduced for your installation or usage scenario. | |
#Change the following variables if you want. They are measured in requests per minute. | |
$nova_rate_limits = { | |
'POST' => 1000, | |
'POST_SERVERS' => 1000, | |
'PUT' => 1000, 'GET' => 1000, | |
'DELETE' => 1000 | |
} | |
$cinder_rate_limits = { | |
'POST' => 1000, | |
'POST_SERVERS' => 1000, | |
'PUT' => 1000, 'GET' => 1000, | |
'DELETE' => 1000 | |
} | |
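# For example, a more restrictive profile could look like (illustrative values):
# $nova_rate_limits = { 'POST' => 10, 'POST_SERVERS' => 50, 'PUT' => 10, 'GET' => 3, 'DELETE' => 100 }
# $cinder_rate_limits = { 'POST' => 10, 'POST_SERVERS' => 50, 'PUT' => 10, 'GET' => 3, 'DELETE' => 100 }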
Exec { logoutput => true } | |
#Specify desired NTP servers here. | |
#If you leave it undef, pool.ntp.org
#will be used | |
$ntp_servers = ['pool.ntp.org'] | |
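# For example, several NTP servers, including the cobbler node (illustrative addresses):
# $ntp_servers = ['0.pool.ntp.org', '1.pool.ntp.org', '10.0.0.100']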
class {'openstack::clocksync': ntp_servers=>$ntp_servers} | |
#Exec clocksync from openstack::clocksync before services | |
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |> | |
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |> | |
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |> | |
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |> | |
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |> | |
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |> | |
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |> | |
### END OF PUBLIC CONFIGURATION PART ### | |
# Normally, you do not need to change anything after this line
# Globally apply an environment-based tag to all resources on each node. | |
tag("${::deployment_id}::${::environment}") | |
class { 'openstack::mirantis_repos': | |
stage => 'openstack-custom-repo', | |
type=>$mirror_type, | |
enable_test_repo=>$enable_test_repo, | |
repo_proxy=>$repo_proxy, | |
} | |
class { '::openstack::firewall': | |
stage => 'openstack-firewall' | |
} | |
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') { | |
class { 'selinux': | |
mode=>"disabled", | |
stage=>"openstack-custom-repo" | |
} | |
} | |
if $::operatingsystem == 'Ubuntu' { | |
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' } | |
} | |
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' } | |
# Dashboard (horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain-name-based certificate) are provisioned in advance
# 'custom': requires a fileserver static mount point [ssl_certs] and a hostname-based certificate to exist
$horizon_use_ssl = false | |
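# For example, to use the keys supplied with the ssl module package:
# $horizon_use_ssl = 'default'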
# Class for calling corosync::virtual_ip in the specific stage
$vip_keys = keys($vips) | |
class virtual_ips () { | |
cluster::virtual_ips { $vip_keys: | |
vips => $vips, | |
} | |
} | |
class compact_controller ( | |
$quantum_network_node = $quantum_netnode_on_cnt | |
) { | |
class { 'openstack::controller_ha': | |
controller_public_addresses => $controller_public_addresses, | |
controller_internal_addresses => $controller_internal_addresses, | |
internal_address => $internal_address, | |
public_interface => $public_int, | |
internal_interface => $internal_int, | |
private_interface => $private_interface, | |
internal_virtual_ip => $internal_virtual_ip, | |
public_virtual_ip => $public_virtual_ip, | |
primary_controller => $primary_controller, | |
floating_range => $floating_range, | |
fixed_range => $fixed_range, | |
multi_host => $multi_host, | |
network_manager => $network_manager, | |
num_networks => $num_networks, | |
network_size => $network_size, | |
network_config => { 'vlan_start' => $vlan_start }, | |
verbose => $verbose, | |
debug => $debug, | |
auto_assign_floating_ip => $auto_assign_floating_ip, | |
mysql_root_password => $mysql_root_password, | |
admin_email => $admin_email, | |
admin_password => $admin_password, | |
keystone_db_password => $keystone_db_password, | |
keystone_admin_token => $keystone_admin_token, | |
glance_db_password => $glance_db_password, | |
glance_user_password => $glance_user_password, | |
nova_db_password => $nova_db_password, | |
nova_user_password => $nova_user_password, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_nodes => $controller_hostnames, | |
memcached_servers => $controller_hostnames, | |
export_resources => false, | |
glance_backend => $glance_backend, | |
swift_proxies => $swift_proxies, | |
quantum => $quantum, | |
quantum_user_password => $quantum_user_password, | |
quantum_db_password => $quantum_db_password, | |
quantum_db_user => $quantum_db_user, | |
quantum_db_dbname => $quantum_db_dbname, | |
quantum_network_node => $quantum_network_node, | |
quantum_netnode_on_cnt => $quantum_netnode_on_cnt, | |
quantum_gre_bind_addr => $quantum_gre_bind_addr, | |
quantum_external_ipinfo => $external_ipinfo, | |
tenant_network_type => $tenant_network_type, | |
segment_range => $segment_range, | |
cinder => $cinder, | |
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr, | |
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node }, | |
galera_nodes => $controller_hostnames, | |
nv_physical_volume => $nv_physical_volume, | |
use_syslog => $use_syslog, | |
syslog_log_level => $syslog_log_level, | |
syslog_log_facility_glance => $syslog_log_facility_glance, | |
syslog_log_facility_cinder => $syslog_log_facility_cinder, | |
syslog_log_facility_quantum => $syslog_log_facility_quantum, | |
syslog_log_facility_nova => $syslog_log_facility_nova, | |
syslog_log_facility_keystone => $syslog_log_facility_keystone, | |
nova_rate_limits => $nova_rate_limits, | |
cinder_rate_limits => $cinder_rate_limits, | |
horizon_use_ssl => $horizon_use_ssl, | |
use_unicast_corosync => $use_unicast_corosync, | |
ha_provider => $ha_provider | |
} | |
class { 'swift::keystone::auth': | |
password => $swift_user_password, | |
public_address => $public_virtual_ip, | |
internal_address => $internal_virtual_ip, | |
admin_address => $internal_virtual_ip, | |
} | |
} | |
# Definition of OpenStack controller nodes. | |
node /fuel-controller-[\d+]/ { | |
include stdlib | |
class { 'operatingsystem::checksupported': | |
stage => 'first' | |
} | |
class {'::node_netconfig': | |
mgmt_ipaddr => $::internal_address, | |
mgmt_netmask => $::internal_netmask, | |
public_ipaddr => $::public_address, | |
public_netmask => $::public_netmask, | |
stage => 'netconfig', | |
} | |
if $nagios { | |
class {'nagios': | |
proj_name => $proj_name, | |
services => [ | |
'host-alive','nova-novncproxy','keystone', 'nova-scheduler', | |
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api', | |
'glance-registry','horizon', 'rabbitmq', 'mysql', 'swift-proxy', | |
'swift-account', 'swift-container', 'swift-object', | |
], | |
whitelist => ['127.0.0.1', $nagios_master], | |
hostgroup => 'controller', | |
} | |
} | |
### | |
# cluster init | |
class { '::cluster': stage => 'corosync_setup' } -> | |
class { 'virtual_ips': | |
stage => 'corosync_setup' | |
} | |
include ::haproxy::params | |
class { 'cluster::haproxy': | |
global_options => merge($::haproxy::params::global_options, {'log' => "/dev/log local0"}), | |
defaults_options => merge($::haproxy::params::defaults_options, {'mode' => 'http'}), | |
stage => 'cluster_head', | |
} | |
# | |
### | |
class { compact_controller: } | |
$swift_zone = $node[0]['swift_zone'] | |
class { 'openstack::swift::storage_node': | |
storage_type => $swift_loopback, | |
swift_zone => $swift_zone, | |
swift_local_net_ip => $swift_local_net_ip, | |
master_swift_proxy_ip => $master_swift_proxy_ip, | |
sync_rings => ! $primary_proxy, | |
#disable cinder in storage-node in order to avoid | |
#duplicate classes call with different parameters | |
cinder => false, | |
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr, | |
manage_volumes => false, | |
nv_physical_volume => $nv_physical_volume, | |
db_host => $internal_virtual_ip, | |
service_endpoint => $internal_virtual_ip, | |
cinder_rate_limits => $cinder_rate_limits, | |
rabbit_nodes => $controller_hostnames, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_ha_virtual_ip => $internal_virtual_ip, | |
syslog_log_level => $syslog_log_level, | |
syslog_log_facility_cinder => $syslog_log_facility_cinder, | |
} | |
if $primary_proxy { | |
ring_devices {'all': | |
storages => $controllers | |
} | |
} | |
class { 'openstack::swift::proxy': | |
swift_user_password => $swift_user_password, | |
swift_proxies => $swift_proxies, | |
primary_proxy => $primary_proxy, | |
controller_node_address => $internal_virtual_ip, | |
swift_local_net_ip => $swift_local_net_ip, | |
master_swift_proxy_ip => $master_swift_proxy_ip, | |
} | |
Class ['openstack::swift::proxy'] -> Class['openstack::swift::storage_node'] | |
} | |
# Definition of OpenStack compute nodes. | |
node /fuel-compute-[\d+]/ { | |
## Uncomment the lines below if you want
## to configure the network of these nodes
## by puppet.
class {'::node_netconfig': | |
mgmt_ipaddr => $::internal_address, | |
mgmt_netmask => $::internal_netmask, | |
public_ipaddr => $::public_address, | |
public_netmask => $::public_netmask, | |
stage => 'netconfig', | |
} | |
include stdlib | |
class { 'operatingsystem::checksupported': | |
stage => 'first' | |
} | |
if $nagios { | |
class {'nagios': | |
proj_name => $proj_name, | |
services => [ | |
'host-alive', 'nova-compute','nova-network','libvirt' | |
], | |
whitelist => ['127.0.0.1', $nagios_master], | |
hostgroup => 'compute', | |
} | |
} | |
class { 'openstack::compute': | |
public_interface => $public_int, | |
private_interface => $private_interface, | |
internal_address => $internal_address, | |
libvirt_type => 'kvm', | |
fixed_range => $fixed_range, | |
network_manager => $network_manager, | |
network_config => { 'vlan_start' => $vlan_start }, | |
multi_host => $multi_host, | |
auto_assign_floating_ip => $auto_assign_floating_ip, | |
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova", | |
rabbit_nodes => $controller_hostnames, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_ha_virtual_ip => $internal_virtual_ip, | |
glance_api_servers => "${internal_virtual_ip}:9292", | |
vncproxy_host => $public_virtual_ip, | |
verbose => $verbose, | |
debug => $debug, | |
vnc_enabled => true, | |
nova_user_password => $nova_user_password, | |
cache_server_ip => $controller_hostnames, | |
service_endpoint => $internal_virtual_ip, | |
quantum => $quantum, | |
quantum_sql_connection => $quantum_sql_connection, | |
quantum_user_password => $quantum_user_password, | |
quantum_host => $internal_virtual_ip, | |
tenant_network_type => $tenant_network_type, | |
segment_range => $segment_range, | |
cinder => $cinder, | |
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr, | |
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node }, | |
nv_physical_volume => $nv_physical_volume, | |
db_host => $internal_virtual_ip, | |
cinder_rate_limits => $cinder_rate_limits, | |
ssh_private_key => 'puppet:///ssh_keys/openstack', | |
ssh_public_key => 'puppet:///ssh_keys/openstack.pub', | |
use_syslog => $use_syslog, | |
syslog_log_level => $syslog_log_level, | |
syslog_log_facility_quantum => $syslog_log_facility_quantum, | |
syslog_log_facility_cinder => $syslog_log_facility_cinder, | |
nova_rate_limits => $nova_rate_limits, | |
} | |
} | |
node /fuel-vcenter-[\d+]/ { | |
## Uncomment the lines below if you want
## to configure the network of these nodes
## by puppet.
class {'::node_netconfig': | |
mgmt_ipaddr => $::internal_address, | |
mgmt_netmask => $::internal_netmask, | |
public_ipaddr => $::public_address, | |
public_netmask => $::public_netmask, | |
stage => 'netconfig', | |
} | |
include stdlib | |
class { 'operatingsystem::checksupported': | |
stage => 'first' | |
} | |
if $nagios { | |
class {'nagios': | |
proj_name => $proj_name, | |
services => [ | |
'host-alive', 'nova-compute','nova-network','libvirt' | |
], | |
whitelist => ['127.0.0.1', $nagios_master], | |
hostgroup => 'compute', | |
} | |
} | |
class { 'openstack::compute_vcenter': | |
public_interface => $public_int, | |
private_interface => $private_interface, | |
internal_address => $internal_address, | |
libvirt_type => 'kvm', | |
fixed_range => $fixed_range, | |
network_manager => $network_manager, | |
network_config => { 'vlan_start' => $vlan_start }, | |
multi_host => $multi_host, | |
auto_assign_floating_ip => $auto_assign_floating_ip, | |
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova", | |
rabbit_nodes => $controller_hostnames, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_ha_virtual_ip => $internal_virtual_ip, | |
glance_api_servers => "${internal_virtual_ip}:9292", | |
vncproxy_host => $public_virtual_ip, | |
verbose => $verbose, | |
debug => $debug, | |
vnc_enabled => true, | |
nova_user_password => $nova_user_password, | |
cache_server_ip => $controller_hostnames, | |
service_endpoint => $internal_virtual_ip, | |
quantum => $quantum, | |
quantum_sql_connection => $quantum_sql_connection, | |
quantum_user_password => $quantum_user_password, | |
quantum_host => $internal_virtual_ip, | |
tenant_network_type => $tenant_network_type, | |
segment_range => $segment_range, | |
cinder => $cinder, | |
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr, | |
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node }, | |
nv_physical_volume => $nv_physical_volume, | |
db_host => $internal_virtual_ip, | |
cinder_rate_limits => $cinder_rate_limits, | |
ssh_private_key => 'puppet:///ssh_keys/openstack', | |
ssh_public_key => 'puppet:///ssh_keys/openstack.pub', | |
use_syslog => $use_syslog, | |
syslog_log_level => $syslog_log_level, | |
syslog_log_facility_quantum => $syslog_log_facility_quantum, | |
syslog_log_facility_cinder => $syslog_log_facility_cinder, | |
nova_rate_limits => $nova_rate_limits, | |
# compute_driver => 'vmwareapi.VMwareVCDriver', | |
vmwareapi_server_ip => '127.127.127.127', | |
vmwareapi_username => 'admin', | |
vmwareapi_password => 'pwd', | |
vmwareapi_clustername => 'MyLittleCloud', | |
integration_bridge => 'br-int', | |
# use_linked_clone => '', | |
# vmwareapi_api_retry_count => '3', | |
# vmwareapi_task_poll_interval => '2', | |
# vmwareapi_vlan_interface => 'vmnic0', | |
} | |
} | |
node /fuel-baremetal-[\d+]/ { | |
## Uncomment the lines below if you want
## to configure the network of these nodes
## by puppet.
class {'::node_netconfig': | |
mgmt_ipaddr => $::internal_address, | |
mgmt_netmask => $::internal_netmask, | |
public_ipaddr => $::public_address, | |
public_netmask => $::public_netmask, | |
stage => 'netconfig', | |
} | |
include stdlib | |
class { 'operatingsystem::checksupported': | |
stage => 'first' | |
} | |
if $nagios { | |
class {'nagios': | |
proj_name => $proj_name, | |
services => [ | |
'host-alive', 'nova-compute','nova-network' | |
], | |
whitelist => ['127.0.0.1', $nagios_master], | |
hostgroup => 'compute', | |
} | |
} | |
class { 'openstack::compute_bm': | |
public_interface => $public_int, | |
private_interface => $private_interface, | |
internal_address => $internal_address, | |
libvirt_type => 'kvm', | |
fixed_range => $fixed_range, | |
network_manager => $network_manager, | |
network_config => { 'vlan_start' => $vlan_start }, | |
multi_host => $multi_host, | |
auto_assign_floating_ip => $auto_assign_floating_ip, | |
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova", | |
rabbit_nodes => $controller_hostnames, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_ha_virtual_ip => $internal_virtual_ip, | |
glance_api_servers => "${internal_virtual_ip}:9292", | |
vncproxy_host => $public_virtual_ip, | |
verbose => $verbose, | |
vnc_enabled => true, | |
nova_user_password => $nova_user_password, | |
cache_server_ip => $controller_hostnames, | |
service_endpoint => $internal_virtual_ip, | |
quantum => $quantum, | |
quantum_sql_connection => $quantum_sql_connection, | |
quantum_user_password => $quantum_user_password, | |
quantum_host => $internal_virtual_ip, | |
tenant_network_type => $tenant_network_type, | |
segment_range => $segment_range, | |
cinder => $cinder, | |
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr, | |
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node }, | |
nv_physical_volume => $nv_physical_volume, | |
db_host => $internal_virtual_ip, | |
cinder_rate_limits => $cinder_rate_limits, | |
ssh_private_key => 'puppet:///ssh_keys/openstack', | |
ssh_public_key => 'puppet:///ssh_keys/openstack.pub', | |
use_syslog => $use_syslog, | |
nova_rate_limits => $nova_rate_limits, | |
# baremetal optional parameters | |
baremetal_dnsmasq_bind_iface => eth2, | |
} | |
} | |
# Definition of OpenStack Quantum node. | |
node /fuel-quantum/ { | |
include stdlib | |
class { 'operatingsystem::checksupported': | |
stage => 'first' | |
} | |
class {'::node_netconfig': | |
mgmt_ipaddr => $::internal_address, | |
mgmt_netmask => $::internal_netmask, | |
public_ipaddr => 'none', | |
save_default_gateway => true, | |
stage => 'netconfig', | |
} | |
if ! $quantum_netnode_on_cnt { | |
class { 'openstack::quantum_router': | |
db_host => $internal_virtual_ip, | |
service_endpoint => $internal_virtual_ip, | |
auth_host => $internal_virtual_ip, | |
nova_api_vip => $internal_virtual_ip, | |
internal_address => $internal_address, | |
public_interface => $public_int, | |
private_interface => $private_interface, | |
floating_range => $floating_range, | |
fixed_range => $fixed_range, | |
create_networks => $create_networks, | |
verbose => $verbose, | |
debug => $debug, | |
rabbit_password => $rabbit_password, | |
rabbit_user => $rabbit_user, | |
rabbit_nodes => $controller_hostnames, | |
rabbit_ha_virtual_ip => $internal_virtual_ip, | |
quantum => $quantum, | |
quantum_user_password => $quantum_user_password, | |
quantum_db_password => $quantum_db_password, | |
quantum_db_user => $quantum_db_user, | |
quantum_db_dbname => $quantum_db_dbname, | |
quantum_netnode_on_cnt=> false, | |
quantum_network_node => true, | |
tenant_network_type => $tenant_network_type, | |
segment_range => $segment_range, | |
external_ipinfo => $external_ipinfo, | |
api_bind_address => $internal_address, | |
use_syslog => $use_syslog, | |
syslog_log_level => $syslog_log_level, | |
syslog_log_facility_quantum => $syslog_log_facility_quantum, | |
} | |
class { 'openstack::auth_file': | |
admin_password => $admin_password, | |
keystone_admin_token => $keystone_admin_token, | |
controller_node => $internal_virtual_ip, | |
before => Class['openstack::quantum_router'], | |
} | |
} | |
} |