#!/opt/puppetlabs/puppet/bin/ruby
require 'puppetclassify'
require 'getoptlong'
require 'puppet'
require 'hiera'
require 'facter'
require 'r10k/environment'
require 'net/ssh'
require 'net/scp'
require 'resolv'
require 'io/console'
require 'pty'
require 'yaml'
require 'fileutils'
require 'digest/md5'
ACTIVE_GROUP_NAME = 'PE HA Active Master'
PASSIVE_GROUP_NAME = 'PE HA Passive Master'
ACTIVE_GROUP_UUID = '937f05eb-8185-4517-a609-3e64d05191c5'
PASSIVE_GROUP_UUID = '76926f43-be06-4ee9-ad69-08681d224c10'
DEFAULT_NC_GROUP = '00000000-0000-4000-8000-000000000000'
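# Fixed id of the node classifier's root/default group (appears unused below)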
MODULEPATH = "modules:site:/opt/puppetlabs/puppet/modules"
PE_SERVICES = [
'pe-puppetserver',
'puppet',
'mcollective',
'pe-console-services',
'pe-activemq',
'pe-puppetdb',
'pe-postgresql'
]
opts = GetoptLong.new(
[ '--loadbalancer', '-l', GetoptLong::REQUIRED_ARGUMENT],
[ '--passive', '-p', GetoptLong::REQUIRED_ARGUMENT],
[ '--active', '-a', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--pg_user', '-u', GetoptLong::REQUIRED_ARGUMENT],
[ '--pg_pass', '-g', GetoptLong::REQUIRED_ARGUMENT],
[ '--ssh_pass', '-s', GetoptLong::OPTIONAL_ARGUMENT ],
[ '--help', '-h', GetoptLong::NO_ARGUMENT ],
[ '--modules_only', '-m', GetoptLong::NO_ARGUMENT ],
)
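# Example invocation (hypothetical hostnames and credentials):
#   ./bootstrap.rb --active aio-master-2.vm --passive aio-master-1.vm \
#     --loadbalancer aio-puppet.vm --pg_user replication --pg_pass 'secret'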
def show_usage()
puts <<-EOF
bootstrap.rb [OPTIONS]
The passive, active, and loadbalancer servers must be defined from
the command line or in Hiera.
-h, --help
show help
-a, --active [cn]
The CN of the certificate and reachable address of the active all-in-one master
defaults to local hiera lookup of 'pe_ha::active_master'
example: aio-master-2.vm
-p, --passive [cn]
The CN of the certificate and reachable address of the passive all-in-one master
defaults to local hiera lookup of 'pe_ha::passive_master'
example: aio-master-1.vm
-l, --loadbalancer [alt name]
The address of the load balancer (as listed in the dns alt names of the certificates)
defaults to local hiera lookup of 'pe_ha::load_balancer'
example: aio-puppet.vm
-m, --modules_only
Just install the required modules (with r10k) and then exit
-u, --pg_user [username]
PostgreSQL replication user name
-g, --pg_pass [password]
PostgreSQL replication password
-s, --ssh_pass [password]
The password to use for SSH connections
EOF
exit 1
end
# Have puppet parse its config so we can call its settings
Puppet.initialize_settings
# Hiera instance for the fallback lookups below, using puppet's own hiera_config setting
hiera = Hiera.new(:config => Puppet.settings[:hiera_config])
# Default to key/agent-based ssh auth unless --ssh_pass is supplied
@ssh_options = {}
opts.each do |opt,arg|
# GetoptLong yields '' (not nil) when an optional argument is omitted
arg = nil if arg && arg.empty?
case opt
when '--help'
show_usage
when '--active'
@active_cn = arg || hiera.lookup('pe_ha::active_master', nil, Facter.to_hash, nil, :priority)
@active_ip = Resolv.getaddress(@active_cn)
when '--passive'
@passive_cn = arg || hiera.lookup('pe_ha::passive_master', nil, Facter.to_hash, nil, :priority)
@passive_ip = Resolv.getaddress(@passive_cn)
when '--loadbalancer'
@proxy = arg || hiera.lookup('pe_ha::load_balancer', nil, Facter.to_hash, nil, :priority)
when '--modules_only'
@modules_only = true
when '--pg_user'
@pg_username = arg || hiera.lookup('pe_ha::replication_user', nil, Facter.to_hash, nil, :priority)
when '--pg_pass'
@pg_pass = arg
when '--ssh_pass'
@ssh_options = {:password => arg}
end
end
if (@active_cn.nil? or
@passive_cn.nil? or
@proxy.nil? or
@pg_username.nil? or
@pg_pass.nil?) and @modules_only.nil?
show_usage
end
# Use puppet to manage git installation as rugged does not support
# ssh agents and configuration options
def install_git()
cputs "Ensuring git is installed"
git = Puppet::Resource.new('package','git', :parameters => {
:ensure =>'present',
})
result, report = Puppet::Resource.indirection.save(git)
puts "Resource: #{report.status}"
puts report.logs
end
def ssh_keygen(user, key_file, comment)
system(wrap_command(
user,
"ssh-keygen -C '#{comment}' -f #{key_file} -q -N ''")
)
end
def wrap_command(user, command)
if user == ENV['USER']
wrapped = command
else
wrapped = "su #{user} -l -s /bin/sh -c \"#{command}\""
end
end
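# e.g. (hypothetical key path) wrap_command('pe-postgres', "ssh-keygen -f /tmp/key")
#   #=> su pe-postgres -l -s /bin/sh -c "ssh-keygen -f /tmp/key"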
def ssh_copy_id(ssh_user, remote_user, key_file, host, ssh_options)
public_key_file = "#{key_file}.pub"
key_data = File.read(public_key_file)
Net::SSH.start(host, ssh_user, ssh_options) do |ssh|
command = "su -s /bin/sh #{remote_user} -c \"umask 077; test -d ~/.ssh || mkdir ~/.ssh ; echo '#{key_data}' >> ~/.ssh/authorized_keys && (test -x /sbin/restorecon && /sbin/restorecon ~/.ssh ~/.ssh/authorized_keys >/dev/null 2>&1 || true)\"\n"
ssh.exec!(command)
end
end
def copy_authorized_keys(passive_cn = @passive_cn)
@ca_key_file = "/root/.ssh/puppet_ca_id_rsa"
@pg_key_file = "/opt/puppetlabs/server/data/postgresql/.ssh/pgha_id_rsa"
cputs "Generating ssh keys"
ssh_keygen("root", @ca_key_file, "pe_ha")
ssh_keygen("pe-postgres", @pg_key_file, "pe_ha_pg")
cputs "Adding known_host entry for #{passive_cn}"
add_known_host("root", "/root", passive_cn)
add_known_host("pe-postgres", "/opt/puppetlabs/server/data/postgresql", passive_cn)
cputs "Copying local key to #{passive_cn}'s authorized_keys"
ssh_copy_id(
"root",
"root",
@ca_key_file,
passive_cn,
@ssh_options
)
ssh_copy_id(
"root",
"pe-postgres",
@pg_key_file,
passive_cn,
@ssh_options
)
end
def wait_ask_pass(cout)
buffer = ""
matched = false
begin
# consume the input stream; break once we match an ssh password prompt ('assword' covers both cases)
loop do
buffer << cout.getc.chr
if buffer =~ /ass/
matched = true
break
end
end
rescue # getc returns nil at end of stream; the resulting error just ends the loop
end
return buffer, matched
end
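# Hypothetical usage with PTY, e.g. to answer an ssh password prompt:
#   PTY.spawn("ssh root@host") do |cout, cin, pid|
#     _buffer, saw_prompt = wait_ask_pass(cout)
#     cin.puts(password) if saw_prompt
#   end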
# Use puppet to manage host keys
def add_known_host(user, homedir, hostname)
ssh_dir = "#{homedir}/.ssh/"
known_hosts = "#{ssh_dir}/known_hosts"
keyscan = `ssh-keyscan #{hostname} 2>/dev/null`.split
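# ssh-keyscan may print one line per key type ("host ssh-rsa AAAA...");
# after the whitespace split only the first key is consumed:
#   [0] hostname, [1] key type, [2] base64 key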
status = $?.exitstatus
if status == 0
FileUtils.mkdir_p(ssh_dir)
FileUtils.touch(known_hosts)
if user != "root"
FileUtils.chown(user, nil, [ssh_dir, known_hosts])
end
raise "keyscan returned empty result for (#{hostname})" if keyscan.empty?
sshkey = Puppet::Resource.new('sshkey', keyscan[0], :parameters => {
:ensure =>'present',
:target => known_hosts,
:type => keyscan[1],
:key => keyscan[2],
})
result, report = Puppet::Resource.indirection.save(sshkey)
else
puts "ssh-keyscan failed for host #{hostname}, check your network conection"
exit 1
end
end
# r10k puppetfile install
def r10k_puppetfile_install
if File.exist?('Puppetfile')
add_known_host(ENV['USER'], ENV['HOME'], 'github.com')
# Sync the current environment from this directory as
# it's a checkout of the control repo
# TODO: Make sure that r10k actually rewrites origin
environment = R10K::Environment::Git.new(
Puppet[:environment],
Puppet.settings[:environmentpath],
Puppet[:environment],
{
:remote => Dir.pwd,
:ref => Puppet[:environment],
}
)
environment_dir = "#{Puppet.settings[:environmentpath]}/#{Puppet[:environment]}"
environment_module_dir = "#{environment_dir}/modules"
cputs "Cloning this repo as #{Puppet[:environment]} environment via r10k"
environment.sync
# Sync the Puppetfile with SSH_AUTH_SOCK in vagrant or master creds IRL
# TODO: visitor pattern or error handling
puppetfile = environment.puppetfile
puppetfile.load!
# Puppetfile installation of the modules
cputs "Installing Modules in Puppetfile"
puppetfile.modules.each do |mod|
source_mod_dir = Dir.pwd + "/modules/#{mod.name}"
target_mod_dir = "#{environment_module_dir}/#{mod.name}"
if File.exist?(source_mod_dir) and not File.exist?(target_mod_dir)
if not File.exist?(environment_module_dir)
Dir.mkdir(environment_module_dir)
end
cputs "Creating symlink for #{mod.name}"
FileUtils.ln_s(source_mod_dir, target_mod_dir)
# Attempted to use R10K API to create a new Module instance with a
# different URI but this cannot work because modules are originally
# checked out on the laptop so the R10K update will never be able to
# read the .git files in ~/.r10k
# Example of how to use the R10K git API
# mod = R10K::Module.new(
# mod.name,
# mod.basedir,
# :git => mod_dir ,
# )
elsif not File.symlink?(target_mod_dir)
begin
puts "Syncing module: #{mod.name}"
mod.sync
rescue => msg
puts "Failed to download #{mod.name} (#{msg})"
puts "Ensure ssh-agent is running on your main system and that the key you use to access github is loaded. See https://help.github.com/articles/generating-ssh-keys/ for instructions"
exit 1
end
else
cputs "#{mod.name} already symlinked to #{source_mod_dir}"
end
end
load_classifier
# Tell the classifier to refresh given we just updated the environmentpath
begin
@classifier.update_classes.update
rescue => msg
puts "Error telling classifier to refresh classes via #{@classifier_url}: \n #{msg}"
exit 1
end
else
raise "No Puppetfile exists, please run from control repo checkout"
end
end
# Apply the stahnma/epel class to the masters
def setup_epel
cputs "Applying EPEL class locally..."
system("puppet apply -e 'include epel'")
cputs "Applying EPEL class to Passive Master..."
ssh_run_command(@passive_cn, "puppet apply -e 'include epel'")
end
# Read classifier.yaml for split installation compatibility
def load_classifier_config
configfile = File.join Puppet.settings[:confdir], 'classifier.yaml'
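# classifier.yaml is expected to provide at least (values are illustrative):
#   server: master.example.vm
#   port: 4433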
if File.exist?(configfile)
classifier_yaml = YAML.load_file(configfile)
@classifier_url = "https://#{classifier_yaml['server']}:#{classifier_yaml['port']}/classifier-api"
else
Puppet.debug "Config file #{configfile} not found"
puts "no config file! - wanted #{configfile}"
exit 2
end
end
# Create classifier instance var
# Uses the local host certificate for auth (assumes we are running on a
# master that is in the classifier's certificate whitelist).
def load_classifier()
auth_info = {
'ca_certificate_path' => Puppet[:localcacert],
'certificate_path' => Puppet[:hostcert],
'private_key_path' => Puppet[:hostprivkey],
}
unless @classifier
load_classifier_config
@classifier = PuppetClassify.new(@classifier_url, auth_info)
end
end
# Use 'PE Infrastructure' as the parent group so we inherit the params
# that the default install lays down
def create_group(group_name,group_uuid,classes = {},node)
load_classifier
groups = @classifier.groups
current_group = groups.get_groups.select { |group| group['name'] == group_name}
if current_group.empty?
cputs "Creating #{group_name} group in classifier"
groups.create_group({
'name' => group_name,
'id' => group_uuid,
'classes' => classes,
'parent' => groups.get_group_id('PE Infrastructure'),
'rule' => ["or", ["=", "name", node]]
})
end
end
# Add the passive master to the PE groups as required
# group as this will allow classification to fix
# configurations.
def add_passive_to_pe_node_group(node_group, passive_cn = @passive_cn)
cputs "Adding #{passive_cn} to #{node_group} group"
load_classifier
groups = @classifier.groups
# The current NC API does not allow you to edit rule sets directly;
# basically you have to merge with what's there. The rule is an array,
# so we drop the leading 'or', prepend the pinned node, and re-add 'or'.
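# e.g. ["or", ["=", "name", "master-1.vm"]] with passive master-2.vm becomes
#   ["or", ["=", "name", "master-2.vm"], ["=", "name", "master-1.vm"]]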
current_group = groups.get_groups.select { |group| group['name'] == node_group}
raise "#{node_group} group missing!" if current_group.empty?
# TODO: make this idempotent
if current_group[0]['rule'][0] == 'or'
rule = ["or", ["=", "name", passive_cn]] + current_group[0]['rule'].drop(1)
puts "Merging ruleset #{rule} into #{node_group} group"
else
raise "Unable to parse PE Master ruleset, changed post installation?"
end
group_hash = {
'name' => node_group,
'id' => groups.get_group_id(node_group),
'classes' => {},
'parent' => groups.get_group_id('PE Infrastructure'),
'rule' => rule
}
groups.update_group(group_hash)
end
def update_console_group(proxy = @proxy)
cputs "Correcting VIP name for Console"
load_classifier
groups = @classifier.groups
pe_console = groups.get_groups.select { |group| group['name'] == 'PE Console'}
classes = pe_console.first['classes']
puppet_console = classes['puppet_enterprise::profile::console']
puppet_console.update(puppet_console.merge('certname' => proxy))
group_hash = pe_console.first.merge({ "classes" => {"puppet_enterprise::profile::console" => puppet_console}})
groups.update_group(group_hash)
end
def rewrite_pe_infrastructure_group(active_cn = @active_cn, proxy = @proxy)
cputs "Rewriting the puppet_enterprise group with load balancer settings"
get_passwords
load_classifier
groups = @classifier.groups
pe_infrastructure = groups.get_groups.select { |group| group['name'] == 'PE Infrastructure'}
classes = pe_infrastructure.first['classes']
puppet_enterprise = classes['puppet_enterprise']
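# Merging the hash with itself makes the block visit every key; the block
# rewrites any value matching the active master's name to the proxy name in place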
puppet_enterprise.update(puppet_enterprise) do |key,value|
case value
when String
value.replace(proxy) if value == active_cn
value
when Array
# TODO: Make this work with more than strings
value.map!{|v| (v.replace(proxy) if v == active_cn) || v }
else
value
end
end
# update the console-services locations too
puppet_enterprise.update(puppet_enterprise.merge('activity_database_password' => @activity_password,
'dashboard_database_password' => @console_password,
'classifier_database_password' => @classifier_password,
'puppetdb_database_password' => @puppetdb_password,
'rbac_database_password' => @rbac_password))
group_hash = pe_infrastructure.first.merge({ "classes" => {"puppet_enterprise" => puppet_enterprise}} )
groups.update_group(group_hash)
end
def get_passwords()
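# The installer answer file holds key=value lines like (illustrative):
#   q_rbac_database_password=s3cr3t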
File.open('/etc/puppetlabs/installer/database_info.install') do |f|
f.each_line do |line|
case line
when /q_activity_database_password/
@activity_password = line.split("=")[1].strip
when /q_puppet_enterpriseconsole_database_password/
@console_password = line.split("=")[1].strip
when /q_classifier_database_password/
@classifier_password = line.split("=")[1].strip
when /q_puppetdb_database_password/
@puppetdb_password = line.split("=")[1].strip
when /q_rbac_database_password/
@rbac_password = line.split("=")[1].strip
end
end
end
end
# Create the "PE HA *" groups
def create_ha_classification_groups( passive_cn = @passive_cn, active_cn = @active_cn )
active_classes = {
'pe_ha::active' => {
'active_ip_address' => @active_ip,
'common_certname' => @proxy,
'passive_master' => @passive_cn,
'standby_ip_address' => @passive_ip,
'replication_user_hash' => @replication_passwd,
}
}
passive_classes = {
'pe_ha::passive' => {
'active_hostname' => @active_cn,
'active_ip_address' => @active_ip,
'standby_ip_address' => @passive_ip,
'common_certname' => @proxy,
'replication_passwd' => @pg_pass,
}
}
# Create the active group
create_group(
ACTIVE_GROUP_NAME,
ACTIVE_GROUP_UUID,
active_classes,
active_cn
)
# Create the passive group
create_group(
PASSIVE_GROUP_NAME,
PASSIVE_GROUP_UUID,
passive_classes,
passive_cn
)
end
def create_password_hash(username = @pg_username, password = @pg_pass)
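# PostgreSQL's md5 auth scheme hashes password + username; note that pg_authid
# stores the digest with a literal 'md5' prefix, which the consuming class is
# assumed to add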
@replication_passwd = Digest::MD5.hexdigest("#{password}#{username}")
end
def ssh_run_command(host,command)
Net::SSH.start(host, 'root', @ssh_options ) do|ssh|
result = ssh.exec!(command)
puts result
end
end
def scp_file(host, local_file, remote_file)
Net::SCP.start(host, 'root', @ssh_options) do |scp|
result = scp.upload! local_file, remote_file
puts result
end
end
# Stop the PE services, in order!
def stop_pe_services(passive_cn = @passive_cn)
cputs "Stopping PE services on #{passive_cn}"
PE_SERVICES.each do |pe_svc_name|
ssh_run_command(passive_cn,"service #{pe_svc_name} stop")
end
end
# Sync the SSL directories. The passive_cn can be pulled from Hiera possibly?
def rsync_ssldir(passive_cn = @passive_cn, ssldir = Puppet[:ssldir])
cputs "Rsync'ing ssl directory to #{passive_cn}"
ssh_run_command(passive_cn,"rm -rf '#{ssldir}'")
command = "rsync -e 'ssh -i #{@ca_key_file}' -auv #{ssldir}/ root@#{passive_cn}:#{ssldir}/"
cputs command
system(command)
end
# Set the dns_alt_names on the passive before running Puppet
def set_passive_dns_alt_names(passive_cn = @passive_cn, proxy = @proxy)
cputs "Setting dns_alt_names on #{passive_cn} with 'puppet config set'"
ssh_run_command(passive_cn,"/opt/puppetlabs/bin/puppet config set dns_alt_names #{proxy} --section main")
end
# Run puppet on the passive to pull classification and start PE
def run_puppet(node, puppet_master)
cputs "*** Triggering a puppet run on #{node} using #{puppet_master}"
ssh_run_command(node,"/opt/puppetlabs/bin/puppet agent -t --server #{puppet_master}")
end
def sign_certificate(passive_cn = @passive_cn)
cputs "Signing #{passive_cn} certificate"
system("/opt/puppetlabs/bin/puppet cert --allow-dns-alt-names sign #{passive_cn}")
end
# Start PE services in reverse order.
# This may not be needed because the puppet run may start everything.
def start_pe_services(passive_cn = @passive_cn)
cputs "Starting PE services on #{passive_cn}"
PE_SERVICES.reverse.each do |pe_svc_name|
ssh_run_command(passive_cn,"service #{pe_svc_name} start")
end
end
def cputs(string)
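# Wrap the message in ANSI bold escape codes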
puts "\033[1m#{string}\033[0m"
end
def install_modules()
install_git
r10k_puppetfile_install
end
def proxy_cert(proxy = @proxy, ssldir = Puppet[:ssldir], passive_cn = @passive_cn, active_cn = @active_cn)
cputs "Sign and generate #{proxy} certificate"
# delete any existing certificate (request?), then generate a new one. This
# seems to be the only way to generate a new cert, public and private key on
# this machine(!) and gets us round the chicken-and-egg problem of bringing
# up aio-master.vm (load balancer) before the primary puppet server
system("find #{ssldir} -name #{proxy}.pem -delete")
system("puppet cert generate #{proxy}")
system("puppet cert clean #{proxy}")
system("puppet cert generate #{proxy}")
# WARNING do not run puppet or ssh commands on the proxy from here - we
# changed the host record to point back to the active master, remember(!)
private_key = "#{ssldir}/private_keys/#{proxy}.pem"
public_key = "#{ssldir}/public_keys/#{proxy}.pem"
cert = "#{ssldir}/certs/#{proxy}.pem"
proxy_host = "#{proxy}.real"
scp_file(proxy_host, private_key, private_key)
scp_file(proxy_host, public_key, public_key)
scp_file(proxy_host, cert, cert)
cputs "finished #{proxy} certificate generation"
end
def copy_certs(proxy = @proxy, ssldir = Puppet[:ssldir])
cputs "Copying #{proxy} certificates to PostgreSQL"
certs_dir = "/opt/puppetlabs/server/data/postgresql/9.4/data/certs"
FileUtils.cp("#{ssldir}/certs/#{proxy}.pem", "#{certs_dir}/#{proxy}.cert.pem")
FileUtils.cp("#{ssldir}/private_keys/#{proxy}.pem", "#{certs_dir}/#{proxy}.private_key.pem")
FileUtils.cp("#{ssldir}/public_keys/#{proxy}.pem", "#{certs_dir}/#{proxy}.public_key.pem")
FileUtils.chmod(0600, "#{certs_dir}/#{proxy}.cert.pem")
FileUtils.chmod(0600, "#{certs_dir}/#{proxy}.private_key.pem")
FileUtils.chmod(0600, "#{certs_dir}/#{proxy}.public_key.pem")
FileUtils.chown_R("pe-postgres", "pe-postgres", certs_dir)
cputs "...done copying certificates!"
end
def fix_postgres_auth(active_ip = @active_ip)
cputs "Fixing PostgreSQL Authentication"
pg_hba = '/opt/puppetlabs/server/data/postgresql/9.4/data/pg_hba.conf'
contents = File.read(pg_hba)
contents.gsub!(/^host\s+all\s+pe-postgres\s+127\.0\.0\.1\/32\s+md5/,"host all pe-postgres 127.0.0.1/32 md5\nhost all pe-postgres #{active_ip}/32 md5")
# Rewrite the file in place; opening with 'r+' and writing after a full read
# would append a second copy instead of replacing the contents
File.write(pg_hba, contents)
end
def mod_postgres_config(proxy = @proxy)
cputs "Modifying PostgreSQL config"
pg_conf = '/opt/puppetlabs/server/data/postgresql/9.4/data/postgresql.conf'
contents = File.read(pg_conf)
contents.gsub!(/^ssl_cert_file.*/,"ssl_cert_file = 'certs/#{proxy}.cert.pem'")
contents.gsub!(/^ssl_key_file.*/,"ssl_key_file = 'certs/#{proxy}.private_key.pem'")
# Rewrite the file in place; opening with 'r+' and writing after a full read
# would append a second copy instead of replacing the contents
File.write(pg_conf, contents)
end
install_modules
unless @modules_only
create_password_hash
fix_postgres_auth
mod_postgres_config
create_ha_classification_groups
add_passive_to_pe_node_group('PE Master')
add_passive_to_pe_node_group('PE Console')
add_passive_to_pe_node_group('PE PuppetDB')
add_passive_to_pe_node_group('PE ActiveMQ Broker')
update_console_group
rewrite_pe_infrastructure_group
setup_epel
copy_authorized_keys
stop_pe_services
rsync_ssldir
set_passive_dns_alt_names
proxy_cert
copy_certs
run_puppet(@active_cn, @active_cn)
run_puppet(@passive_cn, @active_cn)
sign_certificate
run_puppet(@passive_cn, @active_cn)
start_pe_services
end