Created
March 17, 2014 18:42
-
-
Save nitecoder/9605618 to your computer and use it in GitHub Desktop.
This is a Chef plugin for StarCluster. It's my first StarCluster plugin, and I'm also quite new to Chef, so please take it with a grain of salt. Any feedback is welcome! In particular, a few things are not quite to my liking: Chef's authentication system seems to presume that the validation.pem file is widely distributed. I'm attempting to…
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from starcluster.clustersetup import ClusterSetup | |
from starcluster.logger import log | |
import subprocess | |
from starcluster import threadpool | |
class ChefPlugin(ClusterSetup):
    """StarCluster plugin that bootstraps the cluster master as a Chef client.

    knife commands are not run locally: they are executed on
    ``provision_host`` (a machine that already has a configured Chef
    client/knife and to which the current user has ssh access) via
    ``ssh -A``, so the local workstation needs no Chef credentials.
    """

    def __init__(self,
                 run_list,
                 provision_host,
                 chef_server_url,
                 master_chef_node_template,
                 chef_env,
                 dry_run=False,
                 disable_threads=False,
                 num_threads=20):
        """Parameters:
        run_list - Chef run list passed to ``knife bootstrap --run-list``
        provision_host - any host that has a configured chef client and
            for which the current user has ssh access.
            TODO: support default of None when localhost itself has chef client.
        chef_server_url - Chef server URL
        master_chef_node_template - format template; the node's alias is
            substituted for ``{alias}`` to produce the Chef node name
        chef_env - Chef environment to use
        dry_run - when true, knife commands are logged but not executed
        disable_threads - disable StarCluster's thread pool (run jobs inline)
        num_threads - size of the thread pool used for bootstrap jobs
        """
        self.run_list = run_list
        self.provision_host = provision_host
        self.chef_server_url = chef_server_url
        self.chef_env = chef_env
        self.master_chef_node_template = master_chef_node_template
        self.dry_run = dry_run
        self._disable_threads = disable_threads
        self._num_threads = num_threads
        # Created lazily by the ``pool`` property on first use.
        self._pool = None
        log.info("chef_plugin: *********** Chef.init: DRY_RUN=%s", dry_run)

    @property
    def pool(self):
        """Lazily-created StarCluster thread pool for bootstrap jobs."""
        if not self._pool:
            self._pool = threadpool.get_thread_pool(self._num_threads,
                                                    self._disable_threads)
        return self._pool

    def run(self, nodes, master, user, user_shell, volumes):
        """Bootstrap the master node as a Chef client.

        Skips bootstrapping entirely if a chef-client process is already
        running on the master (e.g. after ``start -x`` on a stopped
        cluster).
        """
        if self._isChefClientRunning(master):
            log.info("chef_plugin: skipping because chef_client is already running on %s", master)
            return
        log.info("chef_plugin: *********** Chef.run: DRY_RUN=%s, master %s",
                 self.dry_run, master)
        nodeName = self.makeChefNodeName(master)
        self.pool.simple_job(self._bootstrapNode, (master, nodeName),
                             jobid=master.alias)
        self.pool.wait(1)
        self._configureKnife(master)

    def on_add_node(self, node, nodes, master, user, user_shell, volumes):
        """Hook called when a node is added; currently logs only."""
        log.info("chef_plugin: *********** Adding node %s", node)

    def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
        """Hook called when a node is removed; currently logs only."""
        log.info("chef_plugin: ************ Removing node %s", node)

    def on_shutdown(self, nodes, master, user, user_shell, volumes):
        """Deregister the master's Chef client on cluster shutdown."""
        log.info("chef_plugin: ************ Shutdown %s", nodes)
        # Shutdown seems to run when doing stop as well as terminate.
        # If we delete the client and node on stop, we can't then reconnect
        # when doing start -x. Client seems to hang then.
        nodeName = self.makeChefNodeName(master)
        self._removeNode(master, master, nodeName)

    def makeChefNodeName(self, node):
        """Derive the Chef node name from the node's alias via the template."""
        return self.master_chef_node_template.format(alias=node.alias)

    def createFullCommandLine(self, knifeCmd):
        """Wrap a knife command so it runs on provision_host over ssh.

        ``-A`` forwards the ssh agent so knife on the provision host can in
        turn reach the target node.
        """
        provisionSSH = "ssh -A {provision_host}".format(
            provision_host=self.provision_host
        )
        return "{ssh} {cmd}".format(
            ssh=provisionSSH,
            cmd=knifeCmd)

    # =========================================================================
    # Implementation of remote calls
    # =========================================================================
    def _isChefClientRunning(self, node):
        """Return a truthy value (pgrep output) if chef-client runs on node."""
        log.debug("chef_plugin: *** checking if chef client already runs on %s", node)
        # pgrep exits non-zero when no process matches, so failures are not
        # raised; an empty result means "not running".
        res = node.ssh.execute("pgrep chef-client", ignore_exit_status=True, raise_on_failure=False)
        log.info("chef_plugin: *** running? '%s'", res)
        return res

    def _removeNode(self, knifeNode, node, chefNodeName):
        """Delete the Chef *client* for chefNodeName via knife on knifeNode.

        The Chef *node* object is intentionally left in place — deleting it
        broke reconnection on ``start -x`` (see on_shutdown).
        """
        log.info("chef_plugin: *** removing chef client %s", chefNodeName)
        cmd2 = "knife client --yes delete {node_name}".format(node_name=chefNodeName)
        # Best-effort: the client may already be gone.
        r = knifeNode.ssh.execute(cmd2, ignore_exit_status=True, raise_on_failure=False)
        log.info(r)

    def _bootstrapNode(self, node, chefNodeName):
        """Run ``knife bootstrap`` against node, registering it as chefNodeName."""
        log.info("chef_plugin: *** bootstrapping chef node %s as %s", node, chefNodeName)
        knifeCmd = (
            "knife bootstrap --yes {node_ip} -x root --sudo "
            "--server-url {server_url} "
            "--node-name {node_name} "
            "--run-list {run_list} "
            "--environment {env}"
        ).format(
            node_ip=node.addr,
            server_url=self.chef_server_url,
            node_name=chefNodeName,
            run_list=self.run_list,
            env=self.chef_env
        )
        return self._executeKnifeCommandViaGateway(knifeCmd)

    def _configureKnife(self, node):
        """Seed ~/.chef/knife.rb on the node from the client config, once."""
        return node.ssh.execute("mkdir -p .chef; [ -e .chef/knife.rb ] || cp /etc/chef/client.rb .chef/knife.rb")

    def _executeKnifeCommandViaGateway(self, knifeCmd):
        """Execute a knife command on provision_host, honoring dry_run.

        Raises subprocess.CalledProcessError if the remote command fails.
        NOTE(review): the command is passed through a shell, so plugin
        configuration values are interpolated unescaped — acceptable for
        trusted config, but not for untrusted input.
        """
        fullCmd = self.createFullCommandLine(knifeCmd)
        log.info("chef_plugin: executing %s", fullCmd)
        if self.dry_run:
            log.warn("chef_plugin: DRY-RUN ONLY - exit without executing")
            return
        r = subprocess.check_call([
            "/bin/sh", "-c", fullCmd],
            stderr=subprocess.STDOUT)
        return r
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment