Composer
#!/usr/bin/python -tt
from Composer import Composer
import os
from sys import exit

if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--infile", type="string",
                      help="Name of the model file for this build",
                      dest="infile")
    parser.add_option("--version", type="string",
                      help="Tag version for this build")
    parser.add_option("-a", "--arch", action="append", default=None,
                      help=('Package architectures to include (default = '
                            'noarch + current machine arch)'))
    parser.add_option("--type", type="string",
                      help='Type of artifact to compose: "service" or "role"')
    parser.add_option("--outdir", type="string", default=os.getcwd(),
                      help='Write output to an automatically named file '
                           'inside of a dir')
    parser.add_option("--handler-path", type="string", default="./handlers",
                      help='Directory path to scan for handler code '
                           '(default: ./handlers)')
    parser.add_option("--download-packages", default=None,
                      help=('Download all packages while composing a service '
                            'template. The resulting packages can either be '
                            'left as loose RPMs or packaged into an image '
                            'file. Pass "image" for an image file, "loose" '
                            'for loose RPMs, or "all".'), dest='download')
    parser.add_option("--appliance-configs", action="store_true",
                      default=False,
                      help='Generate boxgrinder configuration files.')
    parser.add_option("--build-images", action="store_true", default=False,
                      help='Build virtual machine images while composing a '
                           'service.')
    parser.add_option("--debug", action="store_true", dest="debug",
                      help="Print debugging messages")
    (options, args) = parser.parse_args()
    if not options.infile:
        parser.print_help()
        exit(1)
    if not options.version:
        parser.print_help()
        exit(1)
    if not options.type:
        parser.print_help()
        exit(1)
    # Set up arch_list
    arch_list = []
    if options.arch:
        for arch in options.arch:
            arch_list.append(arch)
    else:
        arch_list.append("noarch")
        arch_list.append("x86_64")
    # Instantiate the Composer and perform resolution
    c = Composer(options.type, options.infile, options.version,
                 options.handler_path, arch_list, options.download,
                 options.appliance_configs,
                 boxgrinder_build=options.build_images,
                 outdir=options.outdir)
    # Enable debugging, if requested
    if options.debug:
        c.set_debug(True)
    c.build()
    exit(0)
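For reference, the same build can be driven without the CLI wrapper by constructing the class directly. A minimal sketch, mirroring the defaults the option parser supplies above; the model file name and version string here are hypothetical:

#!/usr/bin/python -tt
# Hypothetical example (not part of the original gist): drive Composer
# directly from Python instead of through the option parser.
from Composer import Composer
import os

c = Composer('role', 'web.yaml', '1.0.0', './handlers',
             ['noarch', 'x86_64'], None, False,
             boxgrinder_build=False, outdir=os.getcwd())
c.set_debug(True)   # same effect as passing --debug
c.build()           # writes <name>-1.0.0.yaml into outdir, where <name>
                    # comes from the model file's 'name' key

The Composer class itself (Composer.py, given the import above):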
#!/usr/bin/python -tt
import os
import sys
import shutil
import yaml
import yum
import rpmUtils
import httplib2
import tempfile
import tarfile
import createrepo
import subprocess
import multiprocessing
import signal
import json
from ordereddict import OrderedDict
class Composer:
    """ Composer - Build roles and service templates

    The purpose of this class is to handle the resolution and composition of
    an individual service role, or a full system template. Simplified input
    files can be used to describe a role or service, and the logic contained
    within this class definition will handle resolving dependent packages
    and services using the YUM libraries and our own service repository
    format.

    The result should be a usable role artifact which can again be consumed
    by this class to create a service template, or a service template that
    an administrator could then upload to a controller node and begin
    deploying the service it describes.

    Composer can also dump build artifacts such as YUM repositories,
    appliance configuration files for use with Boxgrinder, and pre-built
    machine images. All build artifacts other than the YAML template file
    are optional.
    """
    schema_version = 0
    debug_enabled = False
    cachedir = None
    def __init__(self, type, file, version, handler_path, archlist=['noarch'],
                 download=None, boxgrinder_configs=True,
                 boxgrinder_build=False, outdir=None):
        """ Set up the composer object in preparation for building.

        All pre-build logic, including directory creation, searching for
        commands, creating objects, etc. should be handled within this
        method.
        """
        self.set_type(type)
        self.set_archlist(archlist)
        self.set_version(version)
        self.set_handler_path(handler_path)
        self.builddir = tempfile.mkdtemp()
        self.download = download
        self.boxgrinder_configs = boxgrinder_configs
        self.boxgrinder_build = boxgrinder_build
        self.file = file
        self.roles = []
        self.model = self.load_model(self.file)
        self.outdir = outdir
        self.boxgrinder_path = self.find_boxgrinder_cmd()
        # Define the order that elements should appear in generated artifacts
        self.order_service = ['name', 'version', 'schema_version',
                              'profiles', 'roles', 'host_types',
                              'topologies', 'relations', 'handlers']
        self.order_role = ['name', 'version', 'parameters', 'packages']
        self.order_handler = ['name', 'async', 'code']
        self.order_host_type = ['name', 'profile', 'min', 'max', 'roles']
        self.order_profile = ['name', 'openstack', 'vmware', 'virtualbox',
                              'aws']
        self.order_provider = ['name', 'cpus', 'memory', 'partitions']
        self.order_parameter = ['name', 'allowed_pattern', 'min', 'max',
                                'min_len', 'max_len', 'applied_scopes',
                                'default_value']
        self.order_bg_config = ['name', 'version', 'os', 'default_packages',
                                'default_repos', 'summary', 'repos',
                                'packages']
        # Add custom YAML handlers for compatible YAML.
        yaml.add_representer(self.LiteralUnicode, self.literal_unicode_rep)
        yaml.add_representer(OrderedDict, self.ordered_dict_rep)
    def __del__(self):
        """ During class destruction, clean house a bit. """
        self.teardown_yum()

    def set_debug(self, debug=False):
        """ Enable / disable debugging mode """
        self.debug_enabled = bool(debug)

    def raise_error(self, message):
        """ Error handler method. Print a message and exit. """
        print 'Error: %s' % message
        sys.exit(1)

    def debug(self, message):
        """ Log a debugging message to the console. """
        if self.debug_enabled:
            print 'Debug: %s' % message
    def order(self, order_set, data):
        """ Arrange elements in a structure.

        Return an OrderedDict object with keys sorted according to the order
        defined by the order_set list.
        """
        result = OrderedDict()
        if type(data) is OrderedDict:
            data = dict(data)  # coerce back to normal dict for re-order
        if type(data) is dict:
            for key in order_set:
                if data.has_key(key):
                    result[key] = data[key]
            for key in data.keys():
                if not result.has_key(key):
                    result[key] = data[key]
        return result
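
    # For illustration (not part of the original gist): given
    #   self.order(['name', 'version'],
    #              {'version': 1, 'extra': True, 'name': 'web'})
    # the result is an OrderedDict ordered as
    #   [('name', 'web'), ('version', 1), ('extra', True)]
    # i.e. keys named in order_set come first, in order_set order, and any
    # remaining keys are appended afterwards.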
    def set_archlist(self, archs):
        """ Validate machine architectures list

        Validate the architectures passed in, then set the instance
        variable.
        """
        valid_archs = rpmUtils.arch.getArchList()
        for arch in archs:
            self.debug('Found arch %s' % arch)
            if arch not in valid_archs:
                self.raise_error('Valid architectures include: %s'
                                 % ' '.join(valid_archs))
        self.archlist = archs

    def set_version(self, version):
        """ Sets the version number for the artifact """
        self.version = version

    def set_handler_path(self, path):
        """ Sets the directory path containing handler artifacts """
        self.handler_path = path

    def set_type(self, type):
        """ Validate the type passed in, then set the instance variable. """
        valid_types = ['service', 'role']
        if type not in valid_types:
            self.raise_error('Valid compose types include: %s'
                             % ' '.join(valid_types))
        self.debug('Composing for type %s' % type)
        self.type = type
    def is_embedded_role(self, role):
        """ Determine if a piece of data is a complete role definition. """
        for key in ['name']:
            if not key in role:
                return False
        for key in role.keys():
            if not key in ['name', 'version', 'packages', 'parameters',
                           'repositories', 'dependencies']:
                return False
        return True

    def is_stub_role(self, role):
        """ Determine if a piece of data is a role stub """
        for key in ['name', 'href']:
            if not key in role:
                return False
        for key in role.keys():
            if not key in ['name', 'href']:
                return False
        return True

    def is_handler(self, handler):
        """ Determine if a piece of data is a valid handler """
        for key in ['name', 'async', 'code']:
            if not key in handler:
                return False
        for key in handler.keys():
            if not key in ['name', 'async', 'code']:
                return False
        return True
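
    # For illustration (not part of the original gist; names and URL are
    # hypothetical), the three shapes the checks above accept.
    # A stub role carries exactly a name and an href:
    #   {'name': 'web', 'href': 'http://example.com/roles/web.yaml'}
    # An embedded role carries a name plus any subset of the full role keys:
    #   {'name': 'web', 'version': '1.0.0', 'packages': ['httpd']}
    # A handler carries exactly name, async, and code:
    #   {'name': 'restart', 'async': False, 'code': '...python source...'}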
    def validate_stub_role(self, role):
        """ Ensure a piece of data is a role stub. """
        if not self.is_stub_role(role):
            self.raise_error('Invalid stub role definition found\n\n%s'
                             % self.dump_1_0_yaml(role))

    def validate_embedded_role(self, role):
        """ Ensure that a piece of data resembles a role definition. """
        if not self.is_embedded_role(role):
            self.raise_error('Invalid embedded role definition found\n\n%s'
                             % self.dump_1_0_yaml(role))

    def validate_handler(self, handler):
        """ Ensure that a piece of data is a valid handler """
        if not self.is_handler(handler):
            self.raise_error('Invalid handler found\n\n%s'
                             % self.dump_1_0_yaml(handler))
    def load_model(self, file):
        """ Load a model file in from the filesystem.

        This method loads a model file's contents, and parses them into a
        dictionary. Validation is largely based on the composition type.
        """
        self.debug('Trying to load data from file %s' % file)
        try:
            infile = open(file, 'r')
        except IOError:
            self.raise_error('Could not open model file: %s' % file)
        self.debug('Trying to parse YAML from file %s' % file)
        try:
            model = yaml.load(infile)
        except yaml.scanner.ScannerError, e:
            self.raise_error('Failed parsing YAML in file %s\n---\n%s'
                             % (file, e))
        if self.type == 'role':
            if 'name' not in model:
                self.raise_error('Input file is missing name')
            if 'packages' not in model:
                model['packages'] = []
            if type(model['packages']) is not list:
                model['packages'] = []
            if 'repositories' not in model:
                model['repositories'] = []
            if type(model['repositories']) is not list:
                model['repositories'] = []
        elif self.type == 'service':
            for i in 'name', 'host_types', 'profiles':
                if not i in model:
                    self.raise_error('Input file is missing %s' % i)
        return model
    def find_boxgrinder_cmd(self):
        """ Locate the boxgrinder-build command on the local machine.

        If the boxgrinder-build command is not found, raise an exception.
        Even if not composing a service, the boxgrinder-build command should
        still be present, as it is required by the Composer.
        """
        sp = subprocess.Popen('which boxgrinder-build', shell=True,
                              stdout=subprocess.PIPE)
        sp.wait()
        if sp.returncode != 0:
            self.raise_error('boxgrinder-build could not be found in PATH')
        return sp.stdout.read().strip()
    def setup_yum(self, in_repos):
        """ Initialize YUM repositories.

        Using some passed-in repository configurations, create a new YumBase
        object containing each of them. The resulting object can then be
        used to resolve package sets, download packages, etc.
        """
        self.debug('Setting up YUM instance with %d repos' % len(in_repos))
        yb = yum.YumBase()
        yb.doConfigSetup(debuglevel=0, errorlevel=0)
        self.debug('Trying to create temp directory for YUM cache')
        self.cachedir = yum.misc.getCacheDir()
        if not self.cachedir:
            self.raise_error('Error: failed to create a tmp cache directory')
        else:
            self.debug('Successfully created cache directory %s'
                       % self.cachedir)
        yb.setCacheDir(force=True, reuse=False, tmpdir=self.cachedir)
        self.debug('Disabling all YUM repositories')
        yb.repos.repos = {}  # Clean YUM object (no system repos)
        repos = []
        for repo in in_repos:
            repo_str = '"%s" (%s)' % (repo['name'], repo['url'])
            if repo['url'] in repos:
                self.debug('Repository %s is a duplicate, skipping'
                           % repo_str)
                continue
            self.debug('Using repository %s' % repo_str)
            repos.append(repo['url'])
            yb.add_enable_repo(repo['name'], baseurls=[repo['url']])
        return yb
    def teardown_yum(self):
        """ Clean house after a YUM transaction """
        if self.cachedir and os.path.exists(self.cachedir):
            self.debug('Removing temporary yum cache in %s' % self.cachedir)
            shutil.rmtree(self.cachedir)

    def rpm_filename(self, po):
        """ Return the file name of an RPM package from a package object """
        return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch)
    def resolve_pkgs(self, in_repos, in_pkgs, arch_list, exclude=[]):
        """ Resolve a full package list from a list of package names

        This method accepts a few primitives as arguments, and based on the
        repository configuration and desired packages, resolves the full set
        of all packages required for a running system. For example, if you
        were to pass in just "httpd", the entire CentOS base plus any of
        httpd's dependencies would be returned.
        """
        pkgs_dict = {}
        deps_dict = {}
        yb = self.setup_yum(in_repos)
        exclude_pkgs = []
        for pkg in exclude:
            pkgs = yb.pkgSack.returnPackages(patterns=[pkg])
            for pkg in pkgs:
                exclude_pkgs.append(pkg.name)
        self.debug('Beginning package resolution')
        for in_pkg in set(in_pkgs):
            pkgs = yb.pkgSack.returnPackages(patterns=[in_pkg])
            best_pkg = yb._bestPackageFromList(pkgs)
            if not best_pkg:
                self.raise_error('Unable to resolve for package %s' % in_pkg)
            # Compute the name and file name first, so the arch check below
            # can report them.
            best_pkg_name = best_pkg.name
            best_pkg_full = self.rpm_filename(best_pkg)
            if best_pkg.arch not in arch_list:
                self.raise_error('Best matched package %s is not in arch '
                                 'list' % best_pkg_full)
            if not best_pkg_name in exclude_pkgs:
                self.debug('Adding %s to package set' % best_pkg_full)
                if best_pkg_name in pkgs_dict:
                    self.raise_error('Found duplicate packages with name: %s'
                                     % best_pkg_name)
                deps_dict[best_pkg] = False
                pkgs_dict[best_pkg_name] = best_pkg_full
        complete = False
        while not complete:
            complete = True
            for pkg in [n for n, v in deps_dict.iteritems() if v == False]:
                deps_dict[pkg] = []
                complete = False
                for req in pkg.requires:
                    reqs = yb.whatProvides(req[0], req[1], req[2])
                    best_dep_pkg = yb._bestPackageFromList(
                        reqs.returnPackages()
                    )
                    if not best_dep_pkg:
                        self.raise_error('No suitable packages found for: %s'
                                         % req[0])
                    if best_dep_pkg.arch in arch_list:
                        best_dep_pkg_name = best_dep_pkg.name
                        best_dep_pkg_full = self.rpm_filename(best_dep_pkg)
                    else:
                        self.raise_error('Best matched package %s is not in '
                                         'arch list'
                                         % self.rpm_filename(best_dep_pkg))
                    if best_dep_pkg_name in pkgs_dict:
                        continue  # Already have this dep
                    if not best_dep_pkg_name in exclude_pkgs:
                        self.debug('Adding %s to package set'
                                   % best_dep_pkg_full)
                        pkgs_dict[best_dep_pkg_name] = best_dep_pkg_full
                        deps_dict[pkg].append(best_dep_pkg)
                        if not deps_dict.has_key(best_dep_pkg):
                            deps_dict[best_dep_pkg] = False
        result = [v for n, v in pkgs_dict.iteritems()
                  if n not in exclude_pkgs]
        result.sort()  # pretty it up a bit
        self.teardown_yum()  # clean up
        return result
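
    # For illustration (not part of the original gist; the URL and the
    # version strings are hypothetical): repository dicts need 'name' and
    # 'url' keys, and the returned list holds full RPM file names.
    #   repos = [{'name': 'base',
    #             'url': 'http://mirror.example.com/centos/6/os/x86_64'}]
    #   c.resolve_pkgs(repos, ['httpd'], ['noarch', 'x86_64'])
    #   # => ['apr-1.3.9-5.el6.x86_64', ...,
    #   #     'httpd-2.2.15-29.el6.centos.x86_64']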
    def resolve_external_deps(self, deps):
        """ Recursively pull in dependencies for a role

        This method reaches out to a remote source to pull role definitions
        for dependencies. Each dependency pulled in is also checked for any
        dependencies it might have itself, and recursion takes place until
        all roles are fully satisfied.
        """
        result = []
        for dep in deps:
            if self.is_stub_role(dep):
                h = httplib2.Http()
                resp, content = h.request(dep['href'], 'GET')
                if resp['status'] != '200':
                    self.raise_error('Got %s while talking to %s, expected '
                                     '200' % (resp['status'], dep['href']))
                try:
                    data = yaml.load(content)
                except:
                    self.raise_error('Failed to load YAML data\n%s'
                                     % content)
                if not data:
                    self.raise_error('Invalid YAML received from %s'
                                     % dep['href'])
                role_name = data['name']
                role_version = data['version']
                # Rewrite unversioned URLs to versioned ones
                dep['href'] = "%s/%s-%s.yaml" % (os.path.dirname(dep['href']),
                                                 role_name, role_version)
            else:
                data = dep
                role_name = dep['name']
                role_version = dep['version']
            self.debug('Resolving for role "%s" (%s)'
                       % (role_name, role_version))
            # Handle multiple definitions found for the same role. If a
            # duplicate is found with the same version, skip it.
            all_roles = [(r['name'], r['version']) for r in self.roles]
            if role_name in [r[0] for r in all_roles]:
                for v in [r[1] for r in all_roles if r[0] == role_name]:
                    if v != role_version:
                        self.raise_error('Conflicting definitions for role '
                                         '"%s"' % role_name)
            else:
                self.roles.append({'name': role_name,
                                   'version': role_version})
            if data.has_key('dependencies'):
                role_deps = data['dependencies']
                for role_dep in role_deps:
                    self.validate_stub_role(role_dep)
                dep_count = len(role_deps)
                self.debug('Found %d dependencies for role "%s"'
                           % (dep_count, role_name))
                if dep_count > 0:
                    self.debug('Begin resolving dependencies for role "%s"'
                               % role_name)
                    for sub in self.resolve_external_deps(role_deps):
                        result.append(sub)
            result.append(data)
        return result
    def parse_handler_code(self):
        """ Scans a directory path for handler code.

        This function is lengthy because it does a few little tricks to get
        information about the handler. At the top of each handler file,
        there should be a python block quote (which begins and ends with 3
        single quotes), and inside of it, there should be an async flag and
        the handler name. The async flag must read "true" or "false" and is
        optional. Handlers will execute synchronously by default if not
        specified. Flags should be separated by a colon and any amount of
        white space (or no white space).

        This must be parsed manually because python's built-in inspect class
        does not handle inspecting unloaded python code. In this case, we
        are examining a large string of text that represents python code.
        """
        self.debug('Looking for handler code in %s' % self.handler_path)
        handlers = []
        if not os.path.isdir(self.handler_path):
            self.debug('Handler code path "%s" does not appear to be a '
                       'directory. Skipping.' % self.handler_path)
            return handlers
        for path, dirs, files in os.walk(self.handler_path):
            for handler in files:
                # Join against the directory currently being walked, so
                # handlers in subdirectories resolve correctly.
                file_path = os.path.join(path, handler)
                if not os.path.exists(file_path):
                    continue
                self.debug('Examining file "%s" for handler code' % handler)
                async = False
                name = handler.replace('.py', '')
                code = open(file_path).read()
                in_block = False
                block_count = 0
                line_count = 0
                meta_lines = []
                code_lines = []
                last_delim = None
                for line in code.split("\n"):
                    line_count += 1
                    if len(line) > 0 and line.strip() == '':
                        self.debug('!!WARNING!! Line %s of handler "%s" is '
                                   'a string of spaces only. The output may '
                                   'look strange.' % (line_count, name))
                    if line.strip() == "'''" or line.strip() == '"""':
                        if not last_delim or last_delim == line.strip():
                            last_delim = line.strip()
                            if in_block:
                                block_count += 1
                            if block_count == 1:
                                continue
                            in_block = not in_block
                            last_delim = None
                    elif in_block and block_count == 0:
                        meta_lines.append(line)
                    if not in_block and block_count < 1 \
                            and line.strip() != '':
                        block_count += 1
                    if block_count > 0:
                        code_lines.append(line)
                code = "\n".join(code_lines)
                # Data in blockquote should be YAML format to avoid writing
                # our own parser.
                try:
                    meta = yaml.load("\n".join(meta_lines))
                except:
                    self.raise_error('Invalid YAML found in leading block '
                                     'quote. The contents of the first '
                                     'block quote are used for handler '
                                     'metadata.')
                if type(meta) is dict:
                    if meta.has_key('async'):
                        if type(meta['async']) is bool:
                            async = meta['async']
                        else:
                            self.raise_error('Value of "async" in metadata '
                                             'block quote must be a bool')
                self.debug('Handler "%s" has async flag: %s' % (name, async))
                handlers.append({'name': name, 'async': async,
                                 'code': self.LiteralUnicode(code)})
        return handlers
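
    # For illustration (not part of the original gist), a handler file this
    # scanner accepts. The leading block quote is parsed as YAML metadata,
    # and everything after it becomes the handler's code:
    #
    #   '''
    #   async: true
    #   '''
    #   import subprocess
    #   subprocess.call(['service', 'httpd', 'restart'])
    #
    # Saved as handlers/restart.py, this would yield
    #   {'name': 'restart', 'async': True, 'code': <the two code lines>}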
    def download_packages(self, in_repos, packages, repo_dir, type):
        """ Download a set of RPM packages from passed in repositories.

        The packages will be placed into a temporary directory. Once all
        packages are downloaded, the createrepo library is invoked to create
        repository metadata. Once that completes, the resulting directory is
        tar'ed up and placed in the output directory, or the current working
        directory if none is specified.
        """
        if type not in ['image', 'loose', 'all']:
            self.raise_error('Invalid download option: %s' % type)
        if os.path.exists(repo_dir):
            self.debug('Packages have already been downloaded - skipping')
            return repo_dir
        else:
            os.makedirs(repo_dir)
        # Make sure all dependencies are in the list. If the list does not
        # resolve, there is not much sense in pulling in the packages.
        packages = self.resolve_pkgs(in_repos, packages, self.archlist)
        yb = self.setup_yum(in_repos)
        for repo_id in yb._repos.repos:
            yb._repos.repos[repo_id].pkgdir = repo_dir
        pkgs = yb.pkgSack.returnPackages(patterns=packages)
        self.debug('Downloading all packages for service %s'
                   % self.model['name'])
        yb.downloadPkgs(pkgs)
        self.debug('Creating repository metadata for service %s'
                   % self.model['name'])
        pwd = os.getcwd()
        os.chdir(repo_dir)
        conf = createrepo.MetaDataConfig()
        conf.directory = '.'
        conf.outputdir = '.'
        conf.quiet = True
        generator = createrepo.SplitMetaDataGenerator(conf)
        generator.doPkgMetadata()
        generator.doRepoMetadata()
        generator.doFinalMove()
        os.chdir(pwd)
        if type == 'loose':
            return repo_dir
        if type == 'image' or type == 'all':
            self.debug('Archiving complete repository for service %s'
                       % self.model['name'])
            archive_name = '%s-%s' % (self.model['name'], self.version)
            # Place the tarball alongside the repo directory, inside the
            # artifact directory being assembled.
            tf = tarfile.open('%s/%s-repo.tar.gz'
                              % (os.path.dirname(repo_dir), archive_name),
                              'w:gz')
            tf.add(repo_dir, arcname=archive_name)
            tf.close()
        if type == 'image':
            self.debug('Image-only requested, not keeping loose packages')
            shutil.rmtree(repo_dir)
            return True
        if type == 'all':
            return repo_dir
    def build_role_artifact(self, roledata=None):
        """ Compile a role artifact from passed-in YAML.

        A role artifact involves doing a full YUM resolution for all
        packages required by the role, as well as resolving for all
        dependent roles so that there is no overlap in packages, parameters,
        or incompatible or unexpected versions being mixed together.
        """
        repos = []
        exclude_pkgs = []
        roles = []
        result = {}
        dep_roles = []
        write_out = False
        if not roledata:
            write_out = True
            roledata = self.model
        if roledata.has_key('repositories'):
            repos += roledata['repositories']
        if roledata.has_key('dependencies'):
            dep_roles = self.resolve_external_deps(roledata['dependencies'])
        # Evaluate dependent role repositories
        for dep_role in dep_roles:
            if not dep_role.has_key('repositories'):
                continue
            for repo in dep_role['repositories']:
                if repo['name'] in [r['name'] for r in repos]:
                    if repo in repos:
                        continue
                    else:
                        self.raise_error('Conflicting definitions for repo '
                                         '%s' % repo['name'])
                repos.append(repo)
        # Evaluate packages from dependent roles
        for dep_role in dep_roles:
            if not dep_role.has_key('packages'):
                continue
            for pkg in dep_role['packages']:
                if not pkg in exclude_pkgs:
                    exclude_pkgs.append(pkg)
        result['name'] = roledata['name']
        result['version'] = self.version
        if not roledata.has_key('packages'):
            roledata['packages'] = []
        result['packages'] = self.resolve_pkgs(repos, roledata['packages'],
                                               self.archlist, exclude_pkgs)
        if roledata.has_key('parameters'):
            result['parameters'] = []
            for parameter in roledata['parameters']:
                result['parameters'].append(self.order(self.order_parameter,
                                                       parameter))
        if roledata.has_key('dependencies'):
            result['dependencies'] = roledata['dependencies']
        if roledata.has_key('repositories'):
            result['repositories'] = roledata['repositories']
        if not write_out:
            return result
        role = self.dump_1_0_yaml(self.order(self.order_role, result))
        file = '%s-%s.yaml' % (self.model['name'], self.version)
        with open(os.path.join(self.outdir, file), 'w') as f:
            f.write(role)
    def build_service_artifact(self):
        """ Build a service template from a model file.

        A service build can contain many steps, if requested. By default,
        only the service template YAML document will be built. If the
        class-level toggles for other artifacts are enabled, other artifacts
        such as loose packages, repository images, or machine images can
        also be built and dumped to the same output directory.
        """
        result = {
            'name': self.model['name'],
            'version': self.version,
            'schema_version': self.schema_version,
            'profiles': [],
            'host_types': [],
            'roles': [],
            'handlers': []
        }
        repos = []
        packages = []
        self.debug('Composing service template at schema version %s'
                   % self.schema_version)
        for i in ['profiles', 'host_types']:
            if not self.model.has_key(i) or len(self.model[i]) == 0:
                self.raise_error('Model file did not contain any %s' % i)
        for profile in self.model['profiles']:
            for provider in [p for p in profile.keys() if p != 'name']:
                data = self.order(self.order_provider, profile[provider])
                profile[provider] = data
            result['profiles'].append(self.order(self.order_profile,
                                                 profile))
        for host_type in self.model['host_types']:
            result['host_types'].append(self.order(self.order_host_type,
                                                   host_type))
        if self.model.has_key('handlers'):
            for handler in self.model['handlers']:
                self.validate_handler(handler)
                self.debug('Found embedded handler "%s" in service model'
                           % handler['name'])
                handler['code'] = self.LiteralUnicode(handler['code'])
                result['handlers'].append(self.order(self.order_handler,
                                                     handler))
        for handler in self.parse_handler_code():
            if handler['name'] in [h['name'] for h in result['handlers']]:
                self.raise_error('Duplicate handler "%s" found'
                                 % handler['name'])
            result['handlers'].append(self.order(self.order_handler,
                                                 handler))
        roles = []
        all_role_names = []
        self.debug('Attempting to resolve roles for service "%s"'
                   % self.model['name'])
        if self.model.has_key('roles'):
            for role in self.model['roles']:
                if self.is_stub_role(role):
                    self.debug('Found stub role "%s" with href "%s"'
                               % (role['name'], role['href']))
                    continue  # stub resolution comes later
                self.validate_embedded_role(role)
                self.debug('Found embedded role "%s" in service definition'
                           % role['name'])
                # For embedded roles, use service version if none specified
                if not role.has_key('version'):
                    role['version'] = self.version
                # Resolve embedded role using the same function as role files
                self.debug('Building embedded role "%s"' % role['name'])
                roles.append(self.build_role_artifact(role))
        for host_type in self.model['host_types']:
            role_names = []
            roles_to_resolve = []
            ht_roles = []
            for role in host_type['roles']:
                if role in [r['name'] for r in roles]:
                    for roledata in roles:
                        if roledata['name'] == role:
                            if roledata.has_key('dependencies'):
                                roles_to_resolve += roledata['dependencies']
                            ht_roles.append(roledata)
                else:
                    for roledata in self.model['roles']:
                        if roledata['name'] == role:
                            roles_to_resolve.append(roledata)
            ht_roles += self.resolve_external_deps(roles_to_resolve)
            for role in ht_roles:
                self.debug('Adding role "%s" to host_type "%s"'
                           % (role['name'], host_type['name']))
                role_names.append(role['name'])
                if role.has_key('dependencies'):
                    role.pop('dependencies')
                if role.has_key('repositories'):
                    repos += role['repositories']
                    role.pop('repositories')
                if role.has_key('packages'):
                    packages += role['packages']
                if role.has_key('parameters'):
                    parameters_ordered = []
                    for parameter in role['parameters']:
                        parameters_ordered.append(
                            self.order(self.order_parameter, parameter))
                    role['parameters'] = parameters_ordered
                if role['name'] not in all_role_names:
                    self.debug('Adding role "%s" to template'
                               % role['name'])
                    result['roles'].append(self.order(self.order_role,
                                                      role))
                    all_role_names.append(role['name'])
            # Add resolved roles to host type, if they aren't already
            # specified
            for role in role_names:
                if not role in host_type['roles']:
                    host_type['roles'].append(role)
        if self.model.has_key('relations'):
            result['relations'] = self.model['relations']
| artifact_name = '%s-%s' % (result['name'], result['version']) | |
| artifact_dir = os.path.join(self.builddir, artifact_name) | |
| final_dir = os.path.join(self.outdir, artifact_name) | |
| image_dir = os.path.join(artifact_dir, 'images') | |
| pkg_dir = os.path.join(artifact_dir, 'packages') | |
| appl_dir = os.path.join(artifact_dir, 'appliance_configs') | |
| if os.path.exists(final_dir): | |
| self.raise_error('Directory "%s" already exists' % final_dir) | |
| if not os.path.exists(artifact_dir): | |
| os.makedirs(artifact_dir) | |
| if self.download: | |
| self.download_packages(repos, packages, pkg_dir, self.download) | |
| if self.boxgrinder_configs: | |
| if not os.path.exists(appl_dir): | |
| os.makedirs(appl_dir) | |
| for host_type in result['host_types']: | |
| appl_file = '%s-%s-%s.appl' % (result['name'], | |
| host_type['name'], result['version']) | |
| appl_config = self.get_appliance_config(host_type['name'], | |
| result) | |
| with open(os.path.join(appl_dir, appl_file)) as f: | |
| f.write(appl_config) | |
| if self.boxgrinder_build: | |
| if not os.path.exists(image_dir): | |
| os.makedirs(image_dir) | |
| self.debug('Building machine images') | |
| self.build_images(repos, packages, result, image_dir, pkg_dir) | |
| self.debug('Finished building machine images') | |
| template_file = '%s.yaml' % artifact_name | |
| with open(os.path.join(artifact_dir, template_file), 'w') as f: | |
| f.write(self.dump_1_0_yaml(result)) | |
| # Move artifacts into result directory | |
| shutil.move(artifact_dir, final_dir) | |
    def build(self):
        """ Dump all artifacts for a given build type.

        Currently two main types are supported: role and service. Roles are
        comprised mostly of packages and parameters, whereas services are
        collections of roles, with some added resources like handlers and
        profiles.

        While building roles, a single YAML file is the build artifact.
        While building services, a YAML file alone is dumped by default, but
        may be accompanied by RPM packages and/or machine images, if
        specified.
        """
        if self.type == 'role':
            self.build_role_artifact()
        elif self.type == 'service':
            self.build_service_artifact()
        self.debug('Composer has completed.')
    def get_appliance_config(self, host_type_name, template, repodir=None):
        """ Return a YAML string of appliance config from a host_type.

        Using a service template and a host_type, this method will parse out
        an appliance definition file and return it as a string. This can
        then be written to a file and built into a machine image.
        """
        conf = {
            'name': '%s-%s' % (template['name'], host_type_name),
            'version': template['version'],
            'summary': host_type_name,
            'os': {
                'name': 'centos',
                'version': 6,
                'password': 'P@55W0RD'
            },
            'hardware': {
                'partitions': {
                    '/': {
                        'size': 5
                    }
                }
            },
            'default_packages': False,
            'default_repos': False,
            'repos': [],
            'packages': self.host_type_packages(template, host_type_name),
            'post': {
                'base': [self.get_appliance_post_command()]
            }
        }
        if repodir:
            conf['repos'].append({'name': 'composer', 'ephemeral': True,
                                  'baseurl': 'file://%s' % repodir})
        conf = self.order(self.order_bg_config, conf)
        # PyYAML is not good at dumping highly compatible YAML. Since JSON
        # can be parsed as YAML, let's use that instead.
        return json.dumps(dict(conf), indent=2)
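
    # For illustration (not part of the original gist; service name,
    # host_type, version, packages, and field order are all illustrative),
    # the shape of the emitted config for a service "web" with host_type
    # "frontend":
    #   {
    #     "name": "web-frontend",
    #     "version": "1.0.0",
    #     "os": {"name": "centos", "version": 6, "password": "P@55W0RD"},
    #     "default_packages": false,
    #     "default_repos": false,
    #     "summary": "frontend",
    #     "repos": [],
    #     "packages": ["httpd-2.2.15-29.el6.centos.x86_64", "..."],
    #     "hardware": {"partitions": {"/": {"size": 5}}},
    #     "post": {"base": ["/bin/sh -c '...'"]}
    #   }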
    def get_appliance_post_command(self):
        """ Return a command to be run from the post-install method.

        During appliance builds, after everything has been installed, you
        can run a set of commands as a way of pre-configuring a machine
        image. We can use this to pre-apply puppet modules, if the runner is
        installed.
        """
        return ('/bin/sh -c \'export RUNLEVEL=3;'
                '/bin/rpm -qa > /etc/packages.list;'
                '/bin/hostname localhost.localdomain;'
                'command -v /usr/sbin/puppet-module-runner && '
                '/usr/sbin/puppet-module-runner -a\'')
    def host_type_packages(self, template, host_type_name):
        """ Retrieve a full set of packages from a host_type

        This method will take a service template and the name of a
        host_type, and return all packages required for a complete system by
        combining packages of all other dependent roles.
        """
        packages = []
        for host_type in template['host_types']:
            if host_type['name'] != host_type_name:
                continue
            for role_name in host_type['roles']:
                for role in template['roles']:
                    if role['name'] != role_name:
                        continue
                    if role.has_key('packages'):
                        packages += role['packages']
        return sorted(packages)
    def boxgrinder_build_cmd(self, dest_dir, config_file):
        """ Return a command string for invoking boxgrinder. """
        return ('%s -p vmware --platform-config type:personal,thin_disk:true '
                '-d local --delivery-config package:false,path:%s '
                '--debug %s') % (self.boxgrinder_path, dest_dir, config_file)
    def build_images(self, repos, packages, template, image_dir, pkg_dir):
        """ Build machine images from a service template.

        Pre-building machine images with the Composer is possible by using
        BoxGrinder to build machine images using guestfs. By passing in a
        set of repositories, a complete list of packages to use for
        building, and a service template, we can generate one machine image
        for each host_type within the template.
        """
        try:
            orig_pwd = os.getcwd()
            repodir = None
            config_file = None
            build_dir = None
            repodir = self.download_packages(repos, packages, pkg_dir,
                                             'loose')
            processes = []

            def build_image(image_dir, config_file):
                self.debug('Building machine image %s' % image_name)
                if not os.path.exists(image_dir):
                    os.makedirs(image_dir)
                sp = subprocess.Popen(self.boxgrinder_build_cmd(
                                      image_dir, config_file).split(),
                                      shell=False,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                stdout, stderr = sp.communicate()
                os.remove(config_file)
                if sp.returncode != 0:
                    print stdout
                    print stderr
                    self.debug('Failed to build image %s' % image_name)
                    sys.exit(1)
                self.debug('Successfully built machine image %s'
                           % image_name)
                fpath = os.path.join(image_dir, '%s-%s.vmdk' % (
                    template['name'], host_type['name']))
                self.debug('Moving image artifact to %s' % image_name)
                os.rename(fpath, os.path.join(image_dir, image_name))

            def stop(signum=None, frame=None):
                # Accept the (signum, frame) arguments that signal handlers
                # receive; default them so stop() can also be called
                # directly from the wait loop below.
                while len(processes) > 0:
                    for p in processes:
                        os.killpg(p.pid, signal.SIGKILL)
                        if not p.is_alive():
                            processes.remove(p)

            # Catch user-cancelled or killed signals to terminate threads.
            signal.signal(signal.SIGINT, stop)
            signal.signal(signal.SIGTERM, stop)
            for host_type in template['host_types']:
                build_dir = tempfile.mkdtemp()
                os.chdir(build_dir)
                conf = self.get_appliance_config(host_type['name'],
                                                 template, repodir)
                _, config_file = tempfile.mkstemp(suffix='.appl')
                with open(config_file, 'w') as f:
                    f.write(conf)
                image_name = '%s-%s-%s.vmdk' % (template['name'],
                                                host_type['name'],
                                                template['version'])
                # Start parallel builds
                p = multiprocessing.Process(target=build_image, args=(
                    image_dir, config_file))
                p.start()
                processes.append(p)
            # Wait for all processes to finish
            while len(processes) > 0:
                for p in processes:
                    if not p.is_alive():
                        if p.exitcode != 0:
                            stop()
                            self.raise_error('Failed to build machine '
                                             'images.')
                        processes.remove(p)
        finally:
            for file in os.listdir(image_dir):
                if not file.endswith('.vmdk'):
                    self.debug('Removing unneeded file %s' % file)
                    os.remove(os.path.join(image_dir, file))
            self.debug('Cleaning up image build area')
            os.chdir(orig_pwd)
            if not self.download and repodir and os.path.exists(repodir):
                shutil.rmtree(repodir)
            if config_file and os.path.exists(config_file):
                os.remove(config_file)
            if build_dir and os.path.exists(build_dir):
                shutil.rmtree(build_dir)
    def dump_1_0_yaml(self, data):
        """ Dump a YAML string the best we can for YAML 1.x """
        return yaml.dump(data, Dumper=self.Yaml_1_0_Dumper,
                         default_flow_style=False)

    def literal_unicode_rep(self, dumper, data):
        """ A PyYAML representer extension for literal unicode blocks.

        This is only used for handler code, where it should be dumped in
        block style rather than a long string with escaped newlines.
        """
        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data,
                                       style='|')
    class Yaml_1_0_Dumper(yaml.Dumper):
        """ Yaml_1_0_Dumper

        This class exists for maximum compatibility among various YAML
        parsers. Since the compact inline notation (introduced in YAML 1.1)
        is the least commonly supported notation, this dumper simply
        disables the indentless flow. This will make the YAML emitted by
        this script readable in almost any YAML parser.

        For some reason, in Python's YAML dumper, even if you ask for 1.0
        output, you are still presented with YAML 1.1+ output. Until this is
        fixed, this custom dumper needs to remain here. Don't count on it
        being fixed, though, because pythonists are probably not very
        concerned about older YAML formats.
        """
        def increase_indent(self, flow=False, indentless=False):
            """Logic to increase the indentation in YAML flows.

            This is the part that overrides the default implementation to
            give us good old YAML 1.0 output.
            """
            return super(Composer.Yaml_1_0_Dumper,
                         self).increase_indent(flow, False)
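
        # For illustration (not part of the original gist): the default
        # dumper emits block sequences indentless,
        #   packages:
        #   - httpd
        #   - mod_ssl
        # whereas forcing indentless=False above yields the more widely
        # supported indented form,
        #   packages:
        #     - httpd
        #     - mod_ssl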
    class LiteralUnicode(unicode):
        """ LiteralUnicode

        Force PyYAML's dumper to use literal unicode blocks for handler
        code. Without this, the code will be dumped into a long, flat
        string, which is hardly readable.
        """
        pass
    def ordered_dict_rep(self, dumper, data):
        """ Dump OrderedDict objects in YAML.

        A PyYAML representer to pass OrderedDict objects through without
        gaining a bunch of object code or other artifacts in the output.
        """
        return dumper.represent_mapping(u'tag:yaml.org,2002:map',
                                        data.items(), flow_style=False)