Created
February 17, 2014 20:13
-
-
Save johann8384/9058179 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Hadoop cluster node role.
#
# Derives the node's cluster membership from its hostname, registers this
# node (name and IP, globally and per-cluster) so that other nodes can
# build iptables rules against the full topology, collects the registered
# topology (namenodes, zookeepers, jobtrackers, ...), selects per-cluster
# database and storage settings, and finally configures zookeeper as
# either a quorum member or a plain client depending on whether this host
# appears in the cluster's quorum list.
class role::hadoop::node {
  include ::eluna::misc
  include ::java::jdk
  include ::java::mysql

  ::nagios::client::add_to_hostgroup { 'opentsdb_client': }

  ::ssh::allow_group { ['ops', 'dwh-admins']: }

  ::sudo::add_access { 'dwh-admins sudo access':
    user     => '%dwh-admins',
    commands => 'ALL',
    descr    => '',
  }

  # Map hostname patterns to a cluster id. Hosts that match no pattern
  # become their own stand-alone cluster named after the host itself.
  case $hostname {
    # NOTE(review): this branch also sends tsdb-*1xxx/3xxx hosts to the
    # 'dwh' cluster; only tsdb-*4xxx hosts go to 'tsdb' below — confirm
    # this asymmetry is intentional.
    /^(tsdb|dwh)-(data|name|edge)(1|3)\d\d\d$/: {
      $cluster = 'dwh'
    }
    /^tsdb-(data|name|edge)4\d\d\d$/: {
      $cluster = 'tsdb'
    }
    # Both dwh-*4xxx hosts and legacy two-digit dwh-* hosts belong to 'dwh'.
    /^dwh-(data|name|edge)4\d\d\d$/, /^dwh-(data|name|edge)\d\d$/: {
      $cluster = 'dwh'
    }
    default: {
      # Stand-alone cluster
      $cluster = $hostname
    }
  }

  $full_cluster_id = "${cluster}_${datacenter}"
  $cluster_name    = "hadoop_${full_cluster_id}"

  # Collect all the nodes in all datacenters for building
  # puppet'd iptables configuration to secure the cluster.
  ::magic::register { "hadoop_node_${full_cluster_id}":
    value => $fqdn,
  }
  ::magic::register { "hadoop_node_ip_${full_cluster_id}":
    value => $ipaddress,
  }
  ::magic::register { 'hadoop_node':
    value => $fqdn,
  }
  ::magic::register { 'hadoop_node_ip':
    value => $ipaddress,
  }

  # Pull back every registration relevant to this cluster. These lists are
  # consumed by the hadoop config templates and firewall rules.
  $all_nodes              = collect_registrations('hadoop_node')
  $all_node_ips           = collect_registrations('hadoop_node_ip')
  $all_edgenodes          = collect_registrations('hadoop_edgenode')
  $all_edgenode_ips       = collect_registrations('hadoop_edgenode_ip')
  $masters                = collect_registrations("hadoop_master_${full_cluster_id}")
  $slaves                 = collect_registrations("hadoop_slave_${full_cluster_id}")
  $nodes                  = collect_registrations("hadoop_node_${full_cluster_id}")
  $node_ips               = collect_registrations("hadoop_node_ip_${full_cluster_id}")
  $datanodes              = collect_registrations("hadoop_datanode_${full_cluster_id}")
  $tasktrackers           = collect_registrations("hadoop_mapreduce_${full_cluster_id}")
  $regionservers          = collect_registrations("hadoop_regionserver_${full_cluster_id}")
  $zookeepers             = collect_registrations("hadoop_zookeeper_${full_cluster_id}")
  $jobtrackers            = collect_registrations("hadoop_jobtracker_${full_cluster_id}")
  $hues                   = collect_registrations("hadoop_hue_${full_cluster_id}")
  $hmasters               = collect_registrations("hadoop_hmaster_${full_cluster_id}")
  $hive_metastores        = collect_registrations("hadoop_hive_${full_cluster_id}")
  $namenodes              = collect_registrations("hadoop_namenode_${full_cluster_id}")
  $namenode_ips           = collect_registrations("hadoop_namenode_ip_${full_cluster_id}")
  $journalnodes           = collect_registrations("hadoop_journalnode_${full_cluster_id}")
  $journalnode_ips        = collect_registrations("hadoop_journalnode_ip_${full_cluster_id}")
  $secondary_namenodes    = collect_registrations("hadoop_secondary_namenode_${full_cluster_id}")
  $secondary_namenode_ips = collect_registrations("hadoop_secondary_namenode_ip_${full_cluster_id}")
  $elasticsearch_heads    = collect_registrations("elasticsearch_head_${full_cluster_id}")
  $elasticsearch_indexers = collect_registrations("elasticsearch_indexer_${full_cluster_id}")
  $elasticsearch_datas    = collect_registrations("elasticsearch_data_${full_cluster_id}")
  $storm_supervisors      = collect_registrations("storm_supervisor_${full_cluster_id}")
  $storm_nimbuses         = collect_registrations("storm_nimbus_${full_cluster_id}")

  # Pick the first registered host of each singleton service, falling back
  # to localhost/127.0.0.1 until a registration exists. ERB must reference
  # Puppet variables as instance variables (@var); bare names are a
  # removed Puppet 2.x behavior.
  $hmaster               = inline_template('<%= @hmasters.length == 0 ? "localhost" : @hmasters[0] %>')
  $hive_metastore        = inline_template('<%= @hive_metastores.length == 0 ? "localhost" : @hive_metastores[0] %>')
  $hue                   = inline_template('<%= @hues.length == 0 ? "localhost" : @hues[0] %>')
  $jobtracker            = inline_template('<%= @jobtrackers.length == 0 ? "localhost" : @jobtrackers[0] %>')
  $namenode              = inline_template('<%= @namenodes.length == 0 ? "localhost" : @namenodes[0] %>')
  $namenode_ip           = inline_template('<%= @namenode_ips.length == 0 ? "127.0.0.1" : @namenode_ips[0] %>')
  $secondary_namenode    = inline_template('<%= @secondary_namenodes.length == 0 ? "localhost" : @secondary_namenodes[0] %>')
  $secondary_namenode_ip = inline_template('<%= @secondary_namenode_ips.length == 0 ? "127.0.0.1" : @secondary_namenode_ips[0] %>')
  $storm_nimbus          = inline_template('<%= @storm_nimbuses.length == 0 ? "localhost" : @storm_nimbuses[0] %>')
  $zookeeper             = inline_template('<%= @zookeepers.length == 0 ? @fqdn : @zookeepers[0] %>')

  ::ganglia::node { $cluster_name:
  }

  $dwh_db_name = 'foo_dwh'
  $dwh_db_user = 'dwh'
  $dwh_db_pass = decrypt('7dOUeSsOCasdfadsfasdfasdfasd==')

  # HDFS/MapReduce storage layout and replication factor per hardware class.
  case $hostname {
    /^dwh-data1\d\d\d$/, /^dwh-data4\d\d\d$/: {
      # 10-spindle data nodes.
      $dfs_data_dirs    = ['/data/1/dfs/data', '/data/2/dfs/data',
                          '/data/3/dfs/data', '/data/4/dfs/data',
                          '/data/5/dfs/data', '/data/6/dfs/data',
                          '/data/7/dfs/data', '/data/8/dfs/data',
                          '/data/9/dfs/data', '/data/10/dfs/data']
      $dfs_dirs         = ['/data/1/dfs', '/data/2/dfs',
                          '/data/3/dfs', '/data/4/dfs',
                          '/data/5/dfs', '/data/6/dfs',
                          '/data/7/dfs', '/data/8/dfs',
                          '/data/9/dfs', '/data/10/dfs']
      $mapred_data_dirs = ['/data/1/mapred', '/data/2/mapred',
                          '/data/3/mapred', '/data/4/mapred',
                          '/data/5/mapred', '/data/6/mapred',
                          '/data/7/mapred', '/data/8/mapred',
                          '/data/9/mapred', '/data/10/mapred']
      $dfs_name_dirs    = ['/data/1/dfs/nn']
      $dfs_replication  = 3
    }
    /^tsdb-data4\d\d\d$/: {
      # 2-spindle tsdb data nodes.
      $dfs_data_dirs    = ['/data/1/dfs/data', '/data/2/dfs/data']
      $dfs_dirs         = ['/data/1/dfs', '/data/2/dfs']
      $mapred_data_dirs = ['/data/1/mapred', '/data/2/mapred']
      $dfs_name_dirs    = ['/data/1/dfs/nn']
      $dfs_replication  = 3
    }
    default: {
      case $domain {
        'dc1.example.org', 'dc3.example.org', 'dc2.example.org': {
          $dfs_data_dirs    = ['/data/1/dfs/data', '/data/2/dfs/data']
          $dfs_dirs         = ['/data/1/dfs', '/data/2/dfs']
          $mapred_data_dirs = ['/data/1/mapred', '/data/2/mapred']
          $dfs_name_dirs    = ['/data/1/dfs/nn']
          $dfs_replication  = 6
        }
        default: {
          # Single-disk dev/lab hosts: no replication.
          $dfs_data_dirs    = ['/data/1/dfs/data']
          $dfs_dirs         = ['/data/1/dfs']
          $mapred_data_dirs = ['/data/1/mapred']
          $dfs_name_dirs    = ['/data/1/dfs/nn']
          $dfs_replication  = 1
        }
      }
    }
  }

  # Per-cluster Oozie/Hive database credentials and zookeeper quorum.
  case $cluster_name {
    'hadoop_tsdb_ecr': {
      $oozie_db_name   = 'oozie'
      $oozie_db_user   = 'oozie'
      $oozie_db_pass   = decrypt('asdfasdfasdf')
      $hive_db_name    = 'hive'
      $hive_db_user    = 'hive'
      $hive_db_pass    = decrypt('7asdfasdfasdfasdfa')
      $zk_quorum       = ['tsdb-data01.lab.example.org', 'tsdb-data02.lab.example.org', 'tsdb-name02.lab.example.org']
      $hadoop_heapsize = '10240'
      $stats_db_host   = 'stats-db01.lab.example.org'
      $oozie_db_driver = 'com.mysql.jdbc.Driver'
      $oozie_db_url    = "jdbc:mysql://${stats_db_host}:3306/${oozie_db_name}"
      $hive_db_driver  = 'com.mysql.jdbc.Driver'
      $hive_db_url     = "jdbc:mysql://${stats_db_host}:3306/${hive_db_name}"
    }
    'hadoop_dwh_ecr': {
      $oozie_db_name   = 'oozie'
      $oozie_db_user   = 'oozie'
      $oozie_db_pass   = decrypt('asdfasdfasdfasdfasdfasdf==')
      $hive_db_name    = 'hive'
      $hive_db_user    = 'hive'
      $hive_db_pass    = decrypt('asdfasdfasdfasdfasdfasdf==')
      $zk_quorum       = ['dwh-name01.lab.example.org', 'dwh-name02.lab.example.org', 'dwh-name03.lab.example.org']
      $hadoop_heapsize = '10240'
      $stats_db_host   = 'stats-db01.lab.example.org'
      $oozie_db_driver = 'com.mysql.jdbc.Driver'
      $oozie_db_url    = "jdbc:mysql://${stats_db_host}:3306/${oozie_db_name}"
      $hive_db_driver  = 'com.mysql.jdbc.Driver'
      $hive_db_url     = "jdbc:mysql://${stats_db_host}:3306/${hive_db_name}"
    }
    'hadoop_dwh_ve': {
      $oozie_db_name   = 'oozie'
      $oozie_db_user   = 'zebra_oozie'
      $oozie_db_pass   = decrypt('asdfaghdshdfgh=')
      $hive_db_name    = 'hive'
      $hive_db_user    = 'zebra_hive'
      $hive_db_pass    = decrypt('asdfasdfagh')
      $zk_quorum       = ['dwh-name1001.dc2.example.org', 'dwh-name1002.dc2.example.org', 'dwh-name1003.dc2.example.org']
      $hadoop_heapsize = '10240'
      $stats_db_host   = 'stats-db1002.dc2.example.org'
      $oozie_db_driver = 'com.mysql.jdbc.Driver'
      $oozie_db_url    = "jdbc:mysql://${stats_db_host}:3306/${oozie_db_name}"
      $hive_db_driver  = 'com.mysql.jdbc.Driver'
      $hive_db_url     = "jdbc:mysql://${stats_db_host}:3306/${hive_db_name}"
    }
    default: {
      # Stand-alone clusters use embedded Derby instead of MySQL.
      $oozie_db_name   = 'oozie'
      $oozie_db_user   = 'oozie'
      $oozie_db_pass   = decrypt('asdfasdfasdfasdfasdfasdf==')
      $hive_db_name    = 'hive'
      $hive_db_user    = 'hive'
      $hive_db_pass    = decrypt('asdfasdfasdfasdfasdfasdf==')
      $zk_quorum       = $namenodes
      $stats_db_host   = 'localhost'
      $hadoop_heapsize = '2048'
      $oozie_db_driver = 'org.apache.derby.jdbc.EmbeddedDriver'
      # Deliberately single-quoted: ${oozie.data.dir}/${oozie.db.schema.name}
      # are Oozie-side Java properties expanded by Oozie itself, and must
      # reach the config file literally — not as Puppet interpolation.
      $oozie_db_url    = 'jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true'
      $hive_db_driver  = 'org.apache.derby.jdbc.EmbeddedDriver'
      $hive_db_url     = 'jdbc:derby:;databaseName=metastore_db;create=true'
    }
  }

  # Quorum members run the full zookeeper server; everyone else only gets
  # the client configuration pointing at the quorum.
  if $fqdn in $zk_quorum {
    class { 'zookeeperv3':
      quorum_servers => $::role::hadoop::node::zk_quorum,
    }
  } else {
    class { 'zookeeperv3::client':
      quorum_servers => $::role::hadoop::node::zk_quorum,
    }
  }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment